Create app.py
app.py
ADDED
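"""Gradio app: fetch the papers listed today on https://huggingface.co/papers,
summarize each one with the OpenAI Chat API, and display the results as Markdown."""
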
import requests
from bs4 import BeautifulSoup
import fitz  # PyMuPDF
import os
import openai
import re
import gradio as gr

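# Dependencies assumed from the imports above: requests, beautifulsoup4,
# PyMuPDF (imported as fitz), openai>=1.0 (for openai.chat.completions), and gradio.
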
def download_paper(paper_url):
    """Download the paper PDF from the given URL and save it locally as a temporary file."""
    response = requests.get(paper_url, timeout=30)  # avoid hanging indefinitely on a stalled request
    response.raise_for_status()  # fail fast instead of saving an HTML error page as a PDF
    temp_pdf_path = "temp_paper.pdf"
    with open(temp_pdf_path, 'wb') as f:
        f.write(response.content)
    return temp_pdf_path

def extract_text_from_pdf(pdf_path):
    """Extract text from a PDF file."""
    text = ""
    with fitz.open(pdf_path) as doc:  # context manager closes the document when done
        for page in doc:
            text += page.get_text()
    return text

def summarize_paper(paper_id):
    """Summarize the paper identified by paper_id; returns (summary, total tokens used)."""
    paper_url = f"https://arxiv.org/pdf/{paper_id}.pdf"
    pdf_path = download_paper(paper_url)
    try:
        text = extract_text_from_pdf(pdf_path)
        summary, total_tokens = summarize_text_with_chat(text)
    finally:
        os.remove(pdf_path)  # delete the temporary file even if summarization fails
    return summary, total_tokens

def fetch_paper_links(url):
    """Fetch links from the given URL that exactly match the expected pattern, de-duplicated with order preserved."""
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    # Anchor the pattern with ^ and $ so only exact matches are accepted
    pattern = re.compile(r'^/papers/\d+\.\d+$')
    links = []
    for a in soup.find_all('a', href=True):
        href = a['href']
        if pattern.match(href) and href not in links:
            links.append(href)
    return links

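# For illustration: on https://huggingface.co/papers, an anchor whose href is
# exactly "/papers/2401.00001" (a hypothetical ID) matches the pattern above
# and is collected once, in page order.
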
def summarize_text_with_chat(text, max_length=10000):
    """Summarize text with the OpenAI Chat API; returns (summary, total tokens used)."""
    openai.api_key = os.getenv("OPENAI_API_KEY")  # read the key from the environment rather than hard-coding it

    # Limit the text to the specified maximum length
    trimmed_text = text[:max_length]

    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "Summarize the following document. Always include the sections '## Title', '## Summary', and '## Glossary'."},
            {"role": "user", "content": trimmed_text}
        ],
        temperature=0.7,
        max_tokens=1000
    )

    summary_text = response.choices[0].message.content
    total_token = response.usage.total_tokens
    return summary_text, total_token

def gradio_interface():
    papers_url = 'https://huggingface.co/papers'  # default URL
    paper_links = fetch_paper_links(papers_url)
    # fetch_paper_links already de-duplicates, so a list keeps the page order
    paper_ids = [link.split('/')[-1] for link in paper_links]

    total_tokens_used = 0
    summaries = []

    for paper_id in paper_ids:
        summary_info = f"Processing paper ID: {paper_id}\n"
        try:
            summary, tokens_used = summarize_paper(paper_id)
            total_tokens_used += tokens_used
            summary_info += f'Summary of paper ID {paper_id}:\n{summary}\n'
        except Exception as e:
            summary_info += f"Error processing paper ID {paper_id}: {e}\n"

        summaries.append(summary_info)

    summaries_markdown = "\n---\n".join(summaries)  # separate summaries with horizontal rules
    return summaries_markdown + f"\nTotal tokens used across all summaries: {total_tokens_used}"

# Gradio interface configuration
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[],  # no inputs; summaries are generated on demand
    outputs=gr.Markdown(),
    title="Paper Summarization Tool",
    description="Fetches today's papers from [Daily Papers](https://huggingface.co/papers), summarizes them, and displays the results in Markdown."
)

if __name__ == "__main__":
    iface.launch()
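
A minimal sketch of driving the same pipeline without the Gradio UI, assuming the OPENAI_API_KEY environment variable is set and app.py is importable from the working directory:

    from app import fetch_paper_links, summarize_paper

    links = fetch_paper_links("https://huggingface.co/papers")
    if links:
        first_id = links[0].split("/")[-1]  # ID of the first paper listed
        summary, tokens = summarize_paper(first_id)
        print(summary)
        print(f"Total tokens used: {tokens}")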