Sirus1 committed
Commit
46823f8
1 Parent(s): 9b435c0

Upload 3 files

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ The[[:space:]]Elements[[:space:]]of[[:space:]]Statisitcal[[:space:]]Learning.pdf filter=lfs diff=lfs merge=lfs -text
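(The [[:space:]] sequences are gitattributes pattern syntax for the spaces in the filename; a rule like this is typically generated by a command of the form: git lfs track "The Elements of Statisitcal Learning.pdf".)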
The Elements of Statisitcal Learning.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1da55e44889d3c45c0e0068b8b953ba71a95a85784485b0d14f52f34bb7f9eca
+ size 13303613
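(These three lines are the Git LFS pointer stored in the repository in place of the PDF itself: the pointer spec version, the file's sha256 object id, and its size in bytes, roughly 13 MB.)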
app.py ADDED
@@ -0,0 +1,213 @@
+ import re
+ import numpy as np
+ import tensorflow_hub as hub
+ import openai
+ import os
+ import tensorflow_text  # registers the TF ops the multilingual USE model needs
+ from sklearn.neighbors import NearestNeighbors
+ import gradio as gr
+ import requests
+ import json
+ import fitz  # PyMuPDF
+
+ # Credentials for calling the OpenAI service (an Azure deployment)
+ openai.api_key = '9481961416fa4c8e883047c5679cf971'
+ openai.api_base = 'https://demopro-oai-we2.openai.azure.com/'
+ openai.api_type = 'azure'
+ openai.api_version = '2022-12-01'
+
+ # Flatten a nested list one level deep
+ def flatten(_2d_list):
+     flat_list = []
+     for element in _2d_list:
+         if isinstance(element, list):
+             for item in element:
+                 flat_list.append(item)
+         else:
+             flat_list.append(element)
+     return flat_list
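+ # e.g. flatten([[1, 2], 3, [4]]) -> [1, 2, 3, 4]  (hypothetical values)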
+
+
+ def preprocess(text):
+     # Collapse newlines and runs of whitespace into single spaces
+     text = text.replace('\n', ' ')
+     text = re.sub(r'\s+', ' ', text)
+     return text
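+ # e.g. preprocess('line one\nline  two') -> 'line one line two'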
+
+ # Split the PDF into per-page text (earlier pdfplumber version, kept commented out)
+ # def pdf_to_text(path):
+ #     doc = pdfplumber.open(path)
+ #     pages = doc.pages
+ #     text_list = []
+ #     for page, d in enumerate(pages):
+ #         d = d.extract_text()
+ #         d = preprocess(d)
+ #         text_list.append(d)
+ #     doc.close()
+ #     return text_list
+
+
+ def pdf_to_text(path, start_page=1, end_page=None):
+     # Extract and clean the text of each page in [start_page, end_page] with PyMuPDF
+     doc = fitz.open(path)
+     total_pages = doc.page_count
+
+     if end_page is None:
+         end_page = total_pages
+
+     text_list = []
+
+     for i in range(start_page - 1, end_page):
+         text = doc.load_page(i).get_text("text")
+         text = preprocess(text)
+         text_list.append(text)
+
+     doc.close()
+     return text_list
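+ # e.g. pdf_to_text('sample.pdf', start_page=2, end_page=3) -> one cleaned
+ # string per page, here for pages 2 and 3 ('sample.pdf' is hypothetical)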
+
+
+ def text_to_chunks(texts, word_length=150, start_page=1):
+     # Split page texts into chunks of about word_length words, each tagged
+     # with the page number it came from
+     text_toks = [t.split(' ') for t in texts]
+     chunks = []
+
+     for idx, words in enumerate(text_toks):
+         for i in range(0, len(words), word_length):
+             chunk = words[i : i + word_length]
+             # A short chunk at the end of a page is pushed into the next
+             # page's words instead of being emitted on its own
+             if (
+                 (i + word_length) > len(words)
+                 and (len(chunk) < word_length)
+                 and (len(text_toks) != (idx + 1))
+             ):
+                 text_toks[idx + 1] = chunk + text_toks[idx + 1]
+                 continue
+             chunk = ' '.join(chunk).strip()
+             chunk = f'[Page no. {idx+start_page}]' + ' ' + '"' + chunk + '"'
+             chunks.append(chunk)
+     return chunks
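+ # Each chunk looks like: [Page no. 4] "about 150 words of text ..."; the page
+ # tag is what the answering prompt later cites.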
+
+ # Build the retrieval corpus: extraction starts at PDF page 20, but chunk page
+ # labels restart at 1, so cited page numbers are relative to start_page
+ history = pdf_to_text('The Elements of Statisitcal Learning.pdf', start_page=20)
+ history = text_to_chunks(history, start_page=1)
+
+
+ # Embed text with OpenAI's text-embedding-ada-002 model (defined but unused;
+ # the search below relies on the Universal Sentence Encoder instead)
+ def encoder(text):
+     embed = openai.Embedding.create(input=text, engine="text-embedding-ada-002")
+     return embed.get('data')[0].get('embedding')
+
+
+ # Semantic search over the text chunks
+ class SemanticSearch:
+
+     def __init__(self):
+         # Load Google's multilingual Universal Sentence Encoder; the first run
+         # downloads the model, which can take ten-odd minutes
+         self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder-multilingual/3')
+         self.fitted = False
+
+     def get_text_embedding(self, texts, batch=1000):
+         # Embed the chunks in batches and stack them into one matrix
+         embeddings = []
+         for i in range(0, len(texts), batch):
+             text_batch = texts[i : (i + batch)]
+             emb_batch = self.use(text_batch)
+             embeddings.append(emb_batch)
+         embeddings = np.vstack(embeddings)
+         return embeddings
+
+     # Fit k-nearest neighbours over the chunk embeddings; k is n_neighbors
+     # (5 by default, and fit() is called below without overriding it)
+     def fit(self, data, batch=1000, n_neighbors=5):
+         self.data = data
+         self.embeddings = self.get_text_embedding(data, batch=batch)
+         n_neighbors = min(n_neighbors, len(self.embeddings))
+         self.nn = NearestNeighbors(n_neighbors=n_neighbors)
+         self.nn.fit(self.embeddings)
+         self.fitted = True
+
+     # Makes an instance callable: text is the user's question, inp_emb its embedding
+     def __call__(self, text, return_data=True):
+         inp_emb = self.use([text])
+         neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
+
+         if return_data:
+             return [self.data[i] for i in neighbors]
+         else:
+             return neighbors
+
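+ # Usage sketch (hypothetical chunks and question):
+ #   ss = SemanticSearch()
+ #   ss.fit(['[Page no. 1] "first chunk"', '[Page no. 2] "second chunk"'])
+ #   ss('some question')  # -> list of the most similar chunks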
+
+ # Wrapper around the OpenAI completion API; engine selects the language model,
+ # prompt is the prompt text
+ def generate_text(prompt, engine="text-davinci-003"):
+     completions = openai.Completion.create(
+         engine=engine,
+         prompt=prompt,
+         max_tokens=512,
+         n=1,
+         stop=None,
+         temperature=0.7,
+     )
+     message = completions.choices[0].text
+     return message
+
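+ # Note (an assumption based on the Azure settings above): with
+ # openai.api_type = 'azure', the engine argument names an Azure deployment
+ # rather than a raw model id; max_tokens=512 caps the reply length, and
+ # temperature=0.7 trades determinism for some variety.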
+
+ def generate_answer(question):
+     # Retrieve the chunks most similar to the question (n_neighbors=5, set in fit above)
+     topn_chunks = recommender(question)
+     prompt = ""
+     prompt += 'search results:\n\n'
+
+     # Add the matched chunks to the prompt
+     for c in topn_chunks:
+         prompt += c + '\n\n'
+
+     # Answering instructions for the model
+     prompt += '''
+     Instructions: If no relevant information is found in the search results, reply only with
+     'No relevant information was found in this document.'
+     If relevant information is found, answer in Chinese, keeping the answer as precise and
+     concise as possible, and cite each passage used at the end of the sentence with its
+     [Page no. X] marker (each search result begins with this marker).
+     If you are not sure the answer is correct, give only the sources of the similar passages
+     instead of replying with a wrong answer.
+     '''
+
+     prompt += f"Query: {question}\nAnswer:"
+     answer = generate_text(prompt, "text-davinci-003")
+     return answer
+
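+ # The assembled prompt therefore has this shape (illustrative only):
+ #   search results:
+ #   [Page no. 12] "..."   (one block per retrieved chunk)
+ #   Instructions: ...
+ #   Query: <the user's question>
+ #   Answer: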
+
+ recommender = SemanticSearch()
+ recommender.fit(history)
+
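+ # First startup is slow: the encoder model is downloaded by hub.load and every
+ # chunk of the book is embedded before the web UI below comes up.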
+
+ # Web client; running this produces the interface
+ def ask_api(question):
+
+     if question.strip() == '':
+         return '[ERROR]: no question was entered'
+
+     return generate_answer(question)
+
+ title = 'Chat With Statistical Learning'
+ description = """ This bot answers questions about The Elements of Statistical Learning: Data Mining, Inference, and Prediction
+ by Trevor Hastie et al. (the textbook used in our course). If a question is unrelated to the book's content, it returns
+ 'No relevant information was found in this document.'
+ """
+
+ with gr.Blocks() as demo:
+     gr.Markdown(f'<center><h1>{title}</h1></center>')
+     gr.Markdown(description)
+
+     with gr.Row():
+         with gr.Group():
+             question = gr.Textbox(label='Enter your question')
+             btn = gr.Button(value='Submit')
+             btn.style(full_width=True)
+
+         with gr.Group():
+             answer = gr.Textbox(label='Answer:')
+
+     btn.click(
+         ask_api,
+         inputs=[question],
+         outputs=[answer]
+     )
+
+ # Passing share=True to launch() would create a public URL through which others
+ # can use the model, as long as this script keeps running (your machine acts as
+ # the server)
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ PyMuPDF  # provides the 'fitz' module imported in app.py
+ gradio
+ tensorflow
+ tensorflow_hub
+ tensorflow_text
+ pdfplumber
+ openai
+ numpy
+ requests
+ scikit-learn