sushantgo commited on
Commit
120efdc
1 Parent(s): 886d623

Upload folder using huggingface_hub

Browse files
.env CHANGED
@@ -1 +1,3 @@
1
- OPENAI_API_KEY = '<REDACTED — leaked sk- key; rotate it immediately and purge from git history>'
 
 
 
1
+ OPENAI_API_KEY='<REDACTED — leaked sk- key; rotate it immediately and load from a secrets manager, never commit .env>'
2
+ PINECONE_ENV='gcp-starter'
3
+ PINECONE_API_KEY='<REDACTED — leaked Pinecone key; rotate it immediately and load from a secrets manager, never commit .env>'
.ipynb_checkpoints/chains-checkpoint.ipynb ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 6,
6
+ "id": "1a305246-59d9-453e-9903-ca462611f1eb",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "data": {
11
+ "text/html": [
12
+ "<div>\n",
13
+ "<style scoped>\n",
14
+ " .dataframe tbody tr th:only-of-type {\n",
15
+ " vertical-align: middle;\n",
16
+ " }\n",
17
+ "\n",
18
+ " .dataframe tbody tr th {\n",
19
+ " vertical-align: top;\n",
20
+ " }\n",
21
+ "\n",
22
+ " .dataframe thead th {\n",
23
+ " text-align: right;\n",
24
+ " }\n",
25
+ "</style>\n",
26
+ "<table border=\"1\" class=\"dataframe\">\n",
27
+ " <thead>\n",
28
+ " <tr style=\"text-align: right;\">\n",
29
+ " <th></th>\n",
30
+ " <th>Product</th>\n",
31
+ " <th>Review</th>\n",
32
+ " </tr>\n",
33
+ " </thead>\n",
34
+ " <tbody>\n",
35
+ " <tr>\n",
36
+ " <th>0</th>\n",
37
+ " <td>Queen Size sheet Set</td>\n",
38
+ " <td>I ordered a king size set.</td>\n",
39
+ " </tr>\n",
40
+ " <tr>\n",
41
+ " <th>1</th>\n",
42
+ " <td>Waterproof Phone Pcuch</td>\n",
43
+ " <td>I loved the waterproof sac</td>\n",
44
+ " </tr>\n",
45
+ " <tr>\n",
46
+ " <th>2</th>\n",
47
+ " <td>Luxury Air Mattress</td>\n",
48
+ " <td>This mattress had a small hole in the top</td>\n",
49
+ " </tr>\n",
50
+ " <tr>\n",
51
+ " <th>3</th>\n",
52
+ " <td>Pillows Insert</td>\n",
53
+ " <td>this is the best pillow filters on amazon</td>\n",
54
+ " </tr>\n",
55
+ " <tr>\n",
56
+ " <th>4</th>\n",
57
+ " <td>Milk Frother Handheld\\nm</td>\n",
58
+ " <td>I loved this product But they only seem to I</td>\n",
59
+ " </tr>\n",
60
+ " </tbody>\n",
61
+ "</table>\n",
62
+ "</div>"
63
+ ],
64
+ "text/plain": [
65
+ " Product Review\n",
66
+ "0 Queen Size sheet Set I ordered a king size set.\n",
67
+ "1 Waterproof Phone Pcuch I loved the waterproof sac\n",
68
+ "2 Luxury Air Mattress This mattress had a small hole in the top\n",
69
+ "3 Pillows Insert this is the best pillow filters on amazon\n",
70
+ "4 Milk Frother Handheld\\nm I loved this product But they only seem to I"
71
+ ]
72
+ },
73
+ "execution_count": 6,
74
+ "metadata": {},
75
+ "output_type": "execute_result"
76
+ }
77
+ ],
78
+ "source": [
79
+ "import openai\n",
80
+ "import os\n",
81
+ "\n",
82
+ "from dotenv import load_dotenv, find_dotenv\n",
83
+ "_ = load_dotenv(find_dotenv())\n",
84
+ "import pandas as pd\n",
85
+ "\n",
86
+ "df = pd.read_csv('Data.csv')\n",
87
+ "df.head()\n"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": 7,
93
+ "id": "8f75bedd-2b00-466a-a00f-bc7c47f2f7a8",
94
+ "metadata": {},
95
+ "outputs": [],
96
+ "source": [
97
+ "from langchain.chat_models import ChatOpenAI\n",
98
+ "from langchain.prompts import ChatPromptTemplate\n",
99
+ "from langchain.chains import LLMChain"
100
+ ]
101
+ },
102
+ {
103
+ "cell_type": "code",
104
+ "execution_count": 8,
105
+ "id": "edc76aa3-1d1f-41d2-93a9-8297268817ea",
106
+ "metadata": {},
107
+ "outputs": [],
108
+ "source": [
109
+ "llm = ChatOpenAI(temperature=0.9)"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "code",
114
+ "execution_count": 9,
115
+ "id": "27f2d3c9-9601-4b31-8d35-bae9538ee2a0",
116
+ "metadata": {},
117
+ "outputs": [],
118
+ "source": [
119
+ "prompt = ChatPromptTemplate.from_template(\"what is the best name to describe a company that make {product}\")"
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "code",
124
+ "execution_count": 10,
125
+ "id": "03ef9080-77ba-4406-a402-e4d5b889be47",
126
+ "metadata": {},
127
+ "outputs": [],
128
+ "source": [
129
+ "chain = LLMChain(llm=llm, prompt=prompt)"
130
+ ]
131
+ },
132
+ {
133
+ "cell_type": "code",
134
+ "execution_count": 11,
135
+ "id": "0a172ea1-18ce-407b-bcd9-b4146c5f5610",
136
+ "metadata": {},
137
+ "outputs": [
138
+ {
139
+ "data": {
140
+ "text/plain": [
141
+ "\"1. Royal Rest\\n2. Majestic Beddings\\n3. Luxurio Linens\\n4. Queen's Haven\\n5. Dreamy Slumber\\n6. Palace Sheets\\n7. Regal Comfort\\n8. Crowned Beddings\\n9. Serene Sleep\\n10. Imperial Linens\""
142
+ ]
143
+ },
144
+ "execution_count": 11,
145
+ "metadata": {},
146
+ "output_type": "execute_result"
147
+ }
148
+ ],
149
+ "source": [
150
+ "product = \"Queen Size Sheet Set\"\n",
151
+ "chain.run(product)"
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": null,
157
+ "id": "b0ff950c-1c09-4ba5-8661-626fb69b24ca",
158
+ "metadata": {},
159
+ "outputs": [],
160
+ "source": []
161
+ }
162
+ ],
163
+ "metadata": {
164
+ "kernelspec": {
165
+ "display_name": "Python 3 (ipykernel)",
166
+ "language": "python",
167
+ "name": "python3"
168
+ },
169
+ "language_info": {
170
+ "codemirror_mode": {
171
+ "name": "ipython",
172
+ "version": 3
173
+ },
174
+ "file_extension": ".py",
175
+ "mimetype": "text/x-python",
176
+ "name": "python",
177
+ "nbconvert_exporter": "python",
178
+ "pygments_lexer": "ipython3",
179
+ "version": "3.11.4"
180
+ }
181
+ },
182
+ "nbformat": 4,
183
+ "nbformat_minor": 5
184
+ }
.ipynb_checkpoints/llm-app-checkpoint.ipynb ADDED
@@ -0,0 +1,755 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 9,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import openai\n",
10
+ "import os\n",
11
+ "\n",
12
+ "from dotenv import load_dotenv, find_dotenv\n",
13
+ "_ = load_dotenv(find_dotenv())\n",
14
+ "openai.api_key = os.environ['OPENAI_API_KEY']\n",
15
+ "\n",
16
+ "def get_completion(prompt, model = 'gpt-3.5-turbo'):\n",
17
+ " messages = [{\n",
18
+ " \"role\": \"user\",\n",
19
+ " \"content\": prompt\n",
20
+ " }]\n",
21
+ " response = openai.ChatCompletion.create(\n",
22
+ " model=model,\n",
23
+ " messages=messages,\n",
24
+ " temperature=0\n",
25
+ " )\n",
26
+ " return response.choices[0].message['content']"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": 11,
32
+ "metadata": {},
33
+ "outputs": [
34
+ {
35
+ "data": {
36
+ "text/plain": [
37
+ "'4 + 4 equals 8.'"
38
+ ]
39
+ },
40
+ "execution_count": 11,
41
+ "metadata": {},
42
+ "output_type": "execute_result"
43
+ }
44
+ ],
45
+ "source": [
46
+ "get_completion('what is 4+4?')"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": 12,
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": [
55
+ "email_text = \"\"\"\n",
56
+ "Arrr, I be fuming that me blender lid \\\n",
57
+ "flew off and splattered the kitchen walls \\\n",
58
+ "with smoothie! And to make matters worse, \\\n",
59
+ "the warranty don't cover the cost of cleaning up me kitchen. I need yer help \\\n",
60
+ "right now, matey!\n",
61
+ "\"\"\""
62
+ ]
63
+ },
64
+ {
65
+ "cell_type": "code",
66
+ "execution_count": 13,
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "style = \"\"\"American English \\\n",
71
+ "in a calm and respectful tone\"\"\""
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "code",
76
+ "execution_count": 14,
77
+ "metadata": {},
78
+ "outputs": [
79
+ {
80
+ "name": "stdout",
81
+ "output_type": "stream",
82
+ "text": [
83
+ "Translate the text that into delimated by triple backticks \n",
84
+ "into a style that is American English in a calm and respectful tone.\n",
85
+ "text: ```\n",
86
+ "Arrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\n",
87
+ "```\n",
88
+ "\n"
89
+ ]
90
+ }
91
+ ],
92
+ "source": [
93
+ "prompt = f\"\"\"Translate the text \\\n",
94
+ "that into delimated by triple backticks \n",
95
+ "into a style that is {style}.\n",
96
+ "text: ```{email_text}```\n",
97
+ "\"\"\"\n",
98
+ "print(prompt)"
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "code",
103
+ "execution_count": 15,
104
+ "metadata": {},
105
+ "outputs": [],
106
+ "source": [
107
+ "response = get_completion(prompt)"
108
+ ]
109
+ },
110
+ {
111
+ "cell_type": "code",
112
+ "execution_count": 8,
113
+ "metadata": {},
114
+ "outputs": [
115
+ {
116
+ "data": {
117
+ "text/plain": [
118
+ "\"Arrgh, I'm quite frustrated that my blender lid flew off and made a mess of the kitchen walls with smoothie! And to add insult to injury, the warranty doesn't cover the expenses of cleaning up my kitchen. I kindly request your assistance at this moment, my friend!\""
119
+ ]
120
+ },
121
+ "execution_count": 8,
122
+ "metadata": {},
123
+ "output_type": "execute_result"
124
+ }
125
+ ],
126
+ "source": [
127
+ "response"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 16,
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "from langchain.chat_models import ChatOpenAI"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "code",
141
+ "execution_count": 17,
142
+ "metadata": {},
143
+ "outputs": [],
144
+ "source": [
145
+ "chat = ChatOpenAI(temperature=0.0)"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "code",
150
+ "execution_count": 7,
151
+ "metadata": {},
152
+ "outputs": [
153
+ {
154
+ "data": {
155
+ "text/plain": [
156
+ "ChatOpenAI(cache=None, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-3.5-turbo', temperature=0.0, model_kwargs={}, openai_api_key='<REDACTED — this repr leaked the live sk- key; rotate it and avoid displaying client objects in notebook outputs>', openai_api_base='', openai_organization='', openai_proxy='', request_timeout=None, max_retries=6, streaming=False, n=1, max_tokens=None, tiktoken_model_name=None)"
157
+ ]
158
+ },
159
+ "execution_count": 7,
160
+ "metadata": {},
161
+ "output_type": "execute_result"
162
+ }
163
+ ],
164
+ "source": [
165
+ "chat"
166
+ ]
167
+ },
168
+ {
169
+ "cell_type": "code",
170
+ "execution_count": 18,
171
+ "metadata": {},
172
+ "outputs": [],
173
+ "source": [
174
+ "template_string = \"\"\"Translate the text \\\n",
175
+ "that into delimited by triple backticks \\\n",
176
+ "into a style that is {style}. \\\n",
177
+ "text: ```{text}```\n",
178
+ "\"\"\""
179
+ ]
180
+ },
181
+ {
182
+ "cell_type": "code",
183
+ "execution_count": 19,
184
+ "metadata": {},
185
+ "outputs": [],
186
+ "source": [
187
+ "from langchain.prompts import ChatPromptTemplate\n",
188
+ "from langchain.chains import LLMChain\n",
189
+ "from langchain.chat_models import ChatOpenAI\n",
190
+ "prompt_template = ChatPromptTemplate.from_template(template_string)\n",
191
+ "\n"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": 18,
197
+ "metadata": {},
198
+ "outputs": [
199
+ {
200
+ "data": {
201
+ "text/plain": [
202
+ "PromptTemplate(input_variables=['style', 'text'], output_parser=None, partial_variables={}, template='Translate the text that into delimited by triple backticks into a style that is {style}. text: ```{text}```\\n', template_format='f-string', validate_template=True)"
203
+ ]
204
+ },
205
+ "execution_count": 18,
206
+ "metadata": {},
207
+ "output_type": "execute_result"
208
+ }
209
+ ],
210
+ "source": [
211
+ "prompt_template.messages[0].prompt"
212
+ ]
213
+ },
214
+ {
215
+ "cell_type": "code",
216
+ "execution_count": 19,
217
+ "metadata": {},
218
+ "outputs": [
219
+ {
220
+ "data": {
221
+ "text/plain": [
222
+ "['style', 'text']"
223
+ ]
224
+ },
225
+ "execution_count": 19,
226
+ "metadata": {},
227
+ "output_type": "execute_result"
228
+ }
229
+ ],
230
+ "source": [
231
+ "prompt_template.messages[0].prompt.input_variables"
232
+ ]
233
+ },
234
+ {
235
+ "cell_type": "code",
236
+ "execution_count": 20,
237
+ "metadata": {},
238
+ "outputs": [],
239
+ "source": [
240
+ "customer_style = \"\"\"American English \\\n",
241
+ "in a calm and respectful tone\"\"\""
242
+ ]
243
+ },
244
+ {
245
+ "cell_type": "code",
246
+ "execution_count": null,
247
+ "metadata": {},
248
+ "outputs": [],
249
+ "source": []
250
+ },
251
+ {
252
+ "cell_type": "code",
253
+ "execution_count": 23,
254
+ "metadata": {},
255
+ "outputs": [
256
+ {
257
+ "data": {
258
+ "text/plain": [
259
+ "\"\\nArrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\\n\""
260
+ ]
261
+ },
262
+ "execution_count": 23,
263
+ "metadata": {},
264
+ "output_type": "execute_result"
265
+ }
266
+ ],
267
+ "source": [
268
+ "email_text"
269
+ ]
270
+ },
271
+ {
272
+ "cell_type": "code",
273
+ "execution_count": 21,
274
+ "metadata": {},
275
+ "outputs": [],
276
+ "source": [
277
+ "customer_messages = prompt_template.format_messages(style=customer_style, text=email_text)\n"
278
+ ]
279
+ },
280
+ {
281
+ "cell_type": "code",
282
+ "execution_count": 26,
283
+ "metadata": {},
284
+ "outputs": [
285
+ {
286
+ "data": {
287
+ "text/plain": [
288
+ "HumanMessage(content=\"Translate the text that into delimited by triple backticks into a style that is American English in a calm and respectful tone. text: ```\\nArrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\\n```\\n\", additional_kwargs={}, example=False)"
289
+ ]
290
+ },
291
+ "execution_count": 26,
292
+ "metadata": {},
293
+ "output_type": "execute_result"
294
+ }
295
+ ],
296
+ "source": [
297
+ "customer_messages[0]"
298
+ ]
299
+ },
300
+ {
301
+ "cell_type": "code",
302
+ "execution_count": 22,
303
+ "metadata": {},
304
+ "outputs": [
305
+ {
306
+ "name": "stdout",
307
+ "output_type": "stream",
308
+ "text": [
309
+ "<class 'langchain.schema.messages.HumanMessage'>\n",
310
+ "<class 'list'>\n"
311
+ ]
312
+ }
313
+ ],
314
+ "source": [
315
+ "print(type(customer_messages[0]))\n",
316
+ "print(type(customer_messages))"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "code",
321
+ "execution_count": 33,
322
+ "metadata": {},
323
+ "outputs": [
324
+ {
325
+ "name": "stdout",
326
+ "output_type": "stream",
327
+ "text": [
328
+ "content=\"Translate the text that into delimited by triple backticks into a style that is American English in a calm and respectful tone. text: ```\\nArrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\\n```\\n\" additional_kwargs={} example=False\n"
329
+ ]
330
+ }
331
+ ],
332
+ "source": [
333
+ "print(customer_messages[0])"
334
+ ]
335
+ },
336
+ {
337
+ "cell_type": "code",
338
+ "execution_count": 34,
339
+ "metadata": {},
340
+ "outputs": [],
341
+ "source": [
342
+ "customer_response = chat(customer_messages)"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "code",
347
+ "execution_count": 37,
348
+ "metadata": {},
349
+ "outputs": [
350
+ {
351
+ "name": "stdout",
352
+ "output_type": "stream",
353
+ "text": [
354
+ "I'm really frustrated that my blender lid flew off and made a mess of the kitchen walls with smoothie! And to add to my frustration, the warranty doesn't cover the cost of cleaning up my kitchen. I could really use your help at this moment, my friend!\n"
355
+ ]
356
+ }
357
+ ],
358
+ "source": [
359
+ "print(customer_response.content)"
360
+ ]
361
+ },
362
+ {
363
+ "cell_type": "code",
364
+ "execution_count": 23,
365
+ "metadata": {},
366
+ "outputs": [],
367
+ "source": [
368
+ "service_reply = \"\"\"Hey There Customer, \\\n",
369
+ "the warranty does not cover \\\n",
370
+ "cleaning expenses for your kitchen \\\n",
371
+ "because it's your fault that \\\n",
372
+ "you misused your blender \\\n",
373
+ "by forgotting to put the lid on before \\\n",
374
+ "starting the blender. \\\n",
375
+ "Tough luck! see ya!\n",
376
+ "\"\"\"\n"
377
+ ]
378
+ },
379
+ {
380
+ "cell_type": "code",
381
+ "execution_count": 24,
382
+ "metadata": {},
383
+ "outputs": [],
384
+ "source": [
385
+ "service_reply_pirate = \"\"\"\\\n",
386
+ "a polite tone \\\n",
387
+ "that speaks in English Pirate\\\n",
388
+ "\"\"\""
389
+ ]
390
+ },
391
+ {
392
+ "cell_type": "code",
393
+ "execution_count": null,
394
+ "metadata": {},
395
+ "outputs": [],
396
+ "source": []
397
+ },
398
+ {
399
+ "cell_type": "code",
400
+ "execution_count": 25,
401
+ "metadata": {},
402
+ "outputs": [],
403
+ "source": [
404
+ "service_messages = prompt_template.format_messages(style=service_reply, text=service_reply_pirate)"
405
+ ]
406
+ },
407
+ {
408
+ "cell_type": "code",
409
+ "execution_count": 44,
410
+ "metadata": {},
411
+ "outputs": [
412
+ {
413
+ "name": "stdout",
414
+ "output_type": "stream",
415
+ "text": [
416
+ "Translate the text that into delimited by triple backticks into a style that is Hey There Customer, the warranty does not cover cleaning expenses for your kitchen because it's your fault that you misused your blender by forgotting to put the lid on before starting the blender. Tough luck! see ya!\n",
417
+ ". text: ```a polite tone that speaks in English Pirate```\n",
418
+ "\n"
419
+ ]
420
+ }
421
+ ],
422
+ "source": [
423
+ "print(service_messages[0].content)"
424
+ ]
425
+ },
426
+ {
427
+ "cell_type": "code",
428
+ "execution_count": 26,
429
+ "metadata": {},
430
+ "outputs": [],
431
+ "source": [
432
+ "service_response = chat(service_messages)"
433
+ ]
434
+ },
435
+ {
436
+ "cell_type": "code",
437
+ "execution_count": 47,
438
+ "metadata": {},
439
+ "outputs": [
440
+ {
441
+ "name": "stdout",
442
+ "output_type": "stream",
443
+ "text": [
444
+ "Ahoy there, matey! The warranty be not coverin' the costs o' cleanin' yer galley due to yer own folly. Ye be misusin' yer blender by forgettin' to secure the lid afore startin' it. Tough luck, me heartie! Fare thee well!\n"
445
+ ]
446
+ }
447
+ ],
448
+ "source": [
449
+ "print(service_response.content)"
450
+ ]
451
+ },
452
+ {
453
+ "cell_type": "code",
454
+ "execution_count": 48,
455
+ "metadata": {},
456
+ "outputs": [
457
+ {
458
+ "data": {
459
+ "text/plain": [
460
+ "{'gift': False, 'delivery_days': 5, 'price_value': 'pretty affordable!'}"
461
+ ]
462
+ },
463
+ "execution_count": 48,
464
+ "metadata": {},
465
+ "output_type": "execute_result"
466
+ }
467
+ ],
468
+ "source": [
469
+ "{\n",
470
+ " \"gift\": False,\n",
471
+ " \"delivery_days\": 5,\n",
472
+ " \"price_value\": \"pretty affordable!\"\n",
473
+ "}"
474
+ ]
475
+ },
476
+ {
477
+ "cell_type": "code",
478
+ "execution_count": 27,
479
+ "metadata": {},
480
+ "outputs": [],
481
+ "source": [
482
+ "customer_review = \"\"\" \\\n",
483
+ "This leaf blower is pretty amazing. It has four settings: \\\n",
484
+ "candle blower, gentle breeze, windy city, and tornado. \\\n",
485
+ "It arrived in two days, just in time for my wife's \\\n",
486
+ "anniversary present. \\\n",
487
+ "I think my wife liked it so much she was speechless. \\\n",
488
+ "So far I've been the only one using it, and I've been \\\n",
489
+ "using it every other morning to clear the leaves on our lawn \\\n",
490
+ "It's slightly more expensive than the other leaf blowers \\\n",
491
+ "out there, But I think its worth it for the extra features.\n",
492
+ "\"\"\"\n",
493
+ "\n",
494
+ "review_template = \"\"\"\n",
495
+ "For the following text, extra the following information: \\\n",
496
+ "\n",
497
+ "gift: Was the item purchased as a gift for someone else? Ans should be in boolean\n",
498
+ "delivery_days: How many days it take for the product to deliver?\n",
499
+ "price_value: Extract any sentence about the value or price,\n",
500
+ "\n",
501
+ "format the output as JSON with the following keys:\n",
502
+ "gift\n",
503
+ "delivery_days\n",
504
+ "price_value\n",
505
+ "\n",
506
+ "text = {text}\n",
507
+ "\"\"\""
508
+ ]
509
+ },
510
+ {
511
+ "cell_type": "code",
512
+ "execution_count": 28,
513
+ "metadata": {},
514
+ "outputs": [
515
+ {
516
+ "name": "stdout",
517
+ "output_type": "stream",
518
+ "text": [
519
+ "input_variables=['text'] output_parser=None partial_variables={} messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['text'], output_parser=None, partial_variables={}, template='\\nFor the following text, extra the following information: \\ngift: Was the item purchased as a gift for someone else? Ans should be in boolean\\ndelivery_days: How many days it take for the product to deliver?\\nprice_value: Extract any sentence about the value or price,\\n\\nformat the output as JSON with the following keys:\\ngift\\ndelivery_days\\nprice_value\\n\\ntext = {text}\\n', template_format='f-string', validate_template=True), additional_kwargs={})]\n"
520
+ ]
521
+ }
522
+ ],
523
+ "source": [
524
+ "promp_temp = ChatPromptTemplate.from_template(review_template)\n",
525
+ "print(promp_temp)"
526
+ ]
527
+ },
528
+ {
529
+ "cell_type": "code",
530
+ "execution_count": 29,
531
+ "metadata": {},
532
+ "outputs": [
533
+ {
534
+ "name": "stderr",
535
+ "output_type": "stream",
536
+ "text": [
537
+ "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised Timeout: Request timed out: HTTPSConnectionPool(host='api.openai.com', port=443): Read timed out. (read timeout=600).\n"
538
+ ]
539
+ },
540
+ {
541
+ "name": "stdout",
542
+ "output_type": "stream",
543
+ "text": [
544
+ "{\n",
545
+ " \"gift\": false,\n",
546
+ " \"delivery_days\": 2,\n",
547
+ " \"price_value\": \"It's slightly more expensive than the other leaf blowers out there, But I think its worth it for the extra features.\"\n",
548
+ "}\n"
549
+ ]
550
+ }
551
+ ],
552
+ "source": [
553
+ "messages = promp_temp.format_messages(text=customer_review)\n",
554
+ "chat = ChatOpenAI(temperature=0.0)\n",
555
+ "response = chat(messages)\n",
556
+ "print(response.content)"
557
+ ]
558
+ },
559
+ {
560
+ "cell_type": "code",
561
+ "execution_count": 60,
562
+ "metadata": {},
563
+ "outputs": [
564
+ {
565
+ "data": {
566
+ "text/plain": [
567
+ "str"
568
+ ]
569
+ },
570
+ "execution_count": 60,
571
+ "metadata": {},
572
+ "output_type": "execute_result"
573
+ }
574
+ ],
575
+ "source": [
576
+ "type(response.content)"
577
+ ]
578
+ },
579
+ {
580
+ "cell_type": "code",
581
+ "execution_count": null,
582
+ "metadata": {},
583
+ "outputs": [],
584
+ "source": []
585
+ },
586
+ {
587
+ "cell_type": "code",
588
+ "execution_count": 30,
589
+ "metadata": {},
590
+ "outputs": [
591
+ {
592
+ "name": "stdout",
593
+ "output_type": "stream",
594
+ "text": [
595
+ "The output should be a markdown code snippet formatted in the following schema, including the leading and trailing \"```json\" and \"```\":\n",
596
+ "\n",
597
+ "```json\n",
598
+ "{\n",
599
+ "\t\"gift\": string // was the item purchased as a gift for someone else? Answer True if yes, false if not or unknown.\n",
600
+ "\t\"delivery_days\": string // How many days it take for the product to arrive?\n",
601
+ "\t\"price_value\": string // Extract any sentence about the value or price\n",
602
+ "}\n",
603
+ "```\n"
604
+ ]
605
+ }
606
+ ],
607
+ "source": [
608
+ "from langchain.output_parsers import ResponseSchema\n",
609
+ "from langchain.output_parsers import StructuredOutputParser\n",
610
+ "\n",
611
+ "gift_schema = ResponseSchema(name=\"gift\", description=\"was the item purchased as a gift for someone else? Answer True if yes, false if not or unknown.\")\n",
612
+ "delivery_days_schema = ResponseSchema(name=\"delivery_days\", description=\"How many days it take for the product to arrive?\")\n",
613
+ "price_value_schema = ResponseSchema(name=\"price_value\", description=\"Extract any sentence about the value or price\")\n",
614
+ "\n",
615
+ "response_schemas = [gift_schema, delivery_days_schema, price_value_schema]\n",
616
+ "output_parser = StructuredOutputParser.from_response_schemas(response_schemas)\n",
617
+ "format_instructions = output_parser.get_format_instructions()\n",
618
+ "print(format_instructions)"
619
+ ]
620
+ },
621
+ {
622
+ "cell_type": "code",
623
+ "execution_count": 5,
624
+ "metadata": {},
625
+ "outputs": [
626
+ {
627
+ "data": {
628
+ "text/plain": [
629
+ "'The output should be a markdown code snippet formatted in the following schema, including the leading and trailing \"```json\" and \"```\":\\n\\n```json\\n{\\n\\t\"gift\": string // was the item purchased as a gift for someone else? Answer True if yes, false if not or unknown.\\n\\t\"delivery_days\": string // How many days it take for the product to arrive?\\n\\t\"price_value\": string // Extract any sentence about the value or price\\n}\\n```'"
630
+ ]
631
+ },
632
+ "execution_count": 5,
633
+ "metadata": {},
634
+ "output_type": "execute_result"
635
+ }
636
+ ],
637
+ "source": [
638
+ "format_instructions"
639
+ ]
640
+ },
641
+ {
642
+ "cell_type": "code",
643
+ "execution_count": 32,
644
+ "metadata": {},
645
+ "outputs": [],
646
+ "source": [
647
+ "review_template_2 = \"\"\"\\\n",
648
+ "For the following text, extract the following information:\n",
649
+ "\n",
650
+ "gift: Was the item purchased as a gift for someone else? \\\n",
651
+ "Answer True if yes, False if not or unknown.\n",
652
+ "\n",
653
+ "delivery_days: How many days did it take for the product\\\n",
654
+ "to arrive? If this information is not found, output -1.\n",
655
+ "\n",
656
+ "price_value: Extract any sentences about the value or price,\\\n",
657
+ "and output them as a comma separated Python list.\n",
658
+ "\n",
659
+ "text: {text}\n",
660
+ "\n",
661
+ "{format_instructions}\n",
662
+ "\"\"\"\n",
663
+ "\n",
664
+ "prompt = ChatPromptTemplate.from_template(template=review_template_2)\n",
665
+ "\n"
666
+ ]
667
+ },
668
+ {
669
+ "cell_type": "code",
670
+ "execution_count": 38,
671
+ "metadata": {},
672
+ "outputs": [],
673
+ "source": [
674
+ "messages = promp_temp.format_messages(text=customer_review, \n",
675
+ " format_instructions=format_instructions)"
676
+ ]
677
+ },
678
+ {
679
+ "cell_type": "code",
680
+ "execution_count": 42,
681
+ "metadata": {},
682
+ "outputs": [
683
+ {
684
+ "name": "stdout",
685
+ "output_type": "stream",
686
+ "text": [
687
+ "For the following text, extract the following information:\n",
688
+ "\n",
689
+ "gift: Was the item purchased as a gift for someone else? Answer True if yes, False if not or unknown.\n",
690
+ "\n",
691
+ "delivery_days: How many days did it take for the productto arrive? If this information is not found, output -1.\n",
692
+ "\n",
693
+ "price_value: Extract any sentences about the value or price,and output them as a comma separated Python list.\n",
694
+ "\n",
695
+ "text: {text}\n",
696
+ "\n",
697
+ "{format_instructions}\n",
698
+ "\n"
699
+ ]
700
+ }
701
+ ],
702
+ "source": [
703
+ "print(review_template_2)"
704
+ ]
705
+ },
706
+ {
707
+ "cell_type": "code",
708
+ "execution_count": 40,
709
+ "metadata": {},
710
+ "outputs": [
711
+ {
712
+ "data": {
713
+ "text/plain": [
714
+ "[HumanMessage(content=\"\\nFor the following text, extra the following information: \\ngift: Was the item purchased as a gift for someone else? Ans should be in boolean\\ndelivery_days: How many days it take for the product to deliver?\\nprice_value: Extract any sentence about the value or price,\\n\\nformat the output as JSON with the following keys:\\ngift\\ndelivery_days\\nprice_value\\n\\ntext = This leaf blower is pretty amazing. It has four settings: candle blower, gentle breeze, windy city, and tornado. It arrived in two days, just in time for my wife's anniversary present. I think my wife liked it so much she was speechless. So far I've been the only one using it, and I've been using it every other morning to clear the leaves on our lawn It's slightly more expensive than the other leaf blowers out there, But I think its worth it for the extra features.\\n\\n\", additional_kwargs={}, example=False)]"
715
+ ]
716
+ },
717
+ "execution_count": 40,
718
+ "metadata": {},
719
+ "output_type": "execute_result"
720
+ }
721
+ ],
722
+ "source": [
723
+ "messages"
724
+ ]
725
+ },
726
+ {
727
+ "cell_type": "code",
728
+ "execution_count": null,
729
+ "metadata": {},
730
+ "outputs": [],
731
+ "source": []
732
+ }
733
+ ],
734
+ "metadata": {
735
+ "kernelspec": {
736
+ "display_name": "Python 3 (ipykernel)",
737
+ "language": "python",
738
+ "name": "python3"
739
+ },
740
+ "language_info": {
741
+ "codemirror_mode": {
742
+ "name": "ipython",
743
+ "version": 3
744
+ },
745
+ "file_extension": ".py",
746
+ "mimetype": "text/x-python",
747
+ "name": "python",
748
+ "nbconvert_exporter": "python",
749
+ "pygments_lexer": "ipython3",
750
+ "version": "3.11.4"
751
+ }
752
+ },
753
+ "nbformat": 4,
754
+ "nbformat_minor": 4
755
+ }
.ipynb_checkpoints/memory-checkpoint.ipynb ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "15d94dba-6acc-4d42-b4c9-5c417fae885d",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import os\n",
11
+ "\n",
12
+ "from dotenv import load_dotenv, find_dotenv\n",
13
+ "_ = load_dotenv(find_dotenv())\n",
14
+ "\n",
15
+ "from langchain.chains import ConversationChain\n",
16
+ "from langchain.chat_models import ChatOpenAI\n",
17
+ "from langchain.memory import ConversationBufferMemory\n",
18
+ "\n",
19
+ "llm = ChatOpenAI(temperature=0.0)\n",
20
+ "memory = ConversationBufferMemory()\n",
21
+ "conversion = ConversationChain(\n",
22
+ " llm=llm,\n",
23
+ " memory=memory,\n",
24
+ " verbose=False\n",
25
+ ")"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": 2,
31
+ "id": "29f611c2-3693-493b-91da-ad11a9191aeb",
32
+ "metadata": {},
33
+ "outputs": [
34
+ {
35
+ "data": {
36
+ "text/plain": [
37
+ "\"Hello Sushant! It's nice to meet you. How can I assist you today?\""
38
+ ]
39
+ },
40
+ "execution_count": 2,
41
+ "metadata": {},
42
+ "output_type": "execute_result"
43
+ }
44
+ ],
45
+ "source": [
46
+ "conversion.predict(input='Hi, My name is sushant')"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": 3,
52
+ "id": "5ebb0e23-9bde-49ad-b187-8a7b8765270a",
53
+ "metadata": {},
54
+ "outputs": [
55
+ {
56
+ "data": {
57
+ "text/plain": [
58
+ "'5 + 4 equals 9.'"
59
+ ]
60
+ },
61
+ "execution_count": 3,
62
+ "metadata": {},
63
+ "output_type": "execute_result"
64
+ }
65
+ ],
66
+ "source": [
67
+ "conversion.predict(input='whats is 5+4')"
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "code",
72
+ "execution_count": 4,
73
+ "id": "65e5b06b-3f08-43e2-8fd1-ac3d5106cf40",
74
+ "metadata": {},
75
+ "outputs": [
76
+ {
77
+ "data": {
78
+ "text/plain": [
79
+ "'Your name is Sushant.'"
80
+ ]
81
+ },
82
+ "execution_count": 4,
83
+ "metadata": {},
84
+ "output_type": "execute_result"
85
+ }
86
+ ],
87
+ "source": [
88
+ "conversion.predict(input='what is my name')"
89
+ ]
90
+ },
91
+ {
92
+ "cell_type": "code",
93
+ "execution_count": 5,
94
+ "id": "8fd49cb9-b0f8-494b-87b4-84762ced5ba5",
95
+ "metadata": {},
96
+ "outputs": [
97
+ {
98
+ "data": {
99
+ "text/plain": [
100
+ "\"Human: Hi, My name is sushant\\nAI: Hello Sushant! It's nice to meet you. How can I assist you today?\\nHuman: whats is 5+4\\nAI: 5 + 4 equals 9.\\nHuman: what is my name\\nAI: Your name is Sushant.\""
101
+ ]
102
+ },
103
+ "execution_count": 5,
104
+ "metadata": {},
105
+ "output_type": "execute_result"
106
+ }
107
+ ],
108
+ "source": [
109
+ "memory.buffer"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "code",
114
+ "execution_count": 6,
115
+ "id": "a326a54b-cdf3-4be1-ad6f-465433b60d6b",
116
+ "metadata": {},
117
+ "outputs": [
118
+ {
119
+ "data": {
120
+ "text/plain": [
121
+ "{'history': \"Human: Hi, My name is sushant\\nAI: Hello Sushant! It's nice to meet you. How can I assist you today?\\nHuman: whats is 5+4\\nAI: 5 + 4 equals 9.\\nHuman: what is my name\\nAI: Your name is Sushant.\"}"
122
+ ]
123
+ },
124
+ "execution_count": 6,
125
+ "metadata": {},
126
+ "output_type": "execute_result"
127
+ }
128
+ ],
129
+ "source": [
130
+ "memory.load_memory_variables({})"
131
+ ]
132
+ },
133
+ {
134
+ "cell_type": "code",
135
+ "execution_count": 7,
136
+ "id": "9b43105b-147f-4d45-bd12-11d04c5ce230",
137
+ "metadata": {},
138
+ "outputs": [],
139
+ "source": [
140
+ "memory.save_context({\"input\": \"Hi There!\"}, {\"output\": \"Hello\"})"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 8,
146
+ "id": "5be149da-ab1b-447d-87c2-17173aae4221",
147
+ "metadata": {},
148
+ "outputs": [
149
+ {
150
+ "data": {
151
+ "text/plain": [
152
+ "{'history': \"Human: Hi, My name is sushant\\nAI: Hello Sushant! It's nice to meet you. How can I assist you today?\\nHuman: whats is 5+4\\nAI: 5 + 4 equals 9.\\nHuman: what is my name\\nAI: Your name is Sushant.\\nHuman: Hi There!\\nAI: Hello\"}"
153
+ ]
154
+ },
155
+ "execution_count": 8,
156
+ "metadata": {},
157
+ "output_type": "execute_result"
158
+ }
159
+ ],
160
+ "source": [
161
+ "memory.load_memory_variables({})"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": 9,
167
+ "id": "99869539-72ae-4ee9-a5dc-2a490d4a3301",
168
+ "metadata": {},
169
+ "outputs": [
170
+ {
171
+ "data": {
172
+ "text/plain": [
173
+ "{'history': \"Human: I'm fine\\nAI: Ok\"}"
174
+ ]
175
+ },
176
+ "execution_count": 9,
177
+ "metadata": {},
178
+ "output_type": "execute_result"
179
+ }
180
+ ],
181
+ "source": [
182
+ "from langchain.memory import ConversationBufferWindowMemory\n",
183
+ "memory = ConversationBufferWindowMemory(k=1) # k=1 where k equals to no of conversion we want here 1.\n",
184
+ "\n",
185
+ "memory.save_context({\"input\": 'Hello'}, {\"output\": 'There'})\n",
186
+ "memory.save_context({\"input\": \"I'm fine\"}, {\"output\": 'Ok'})\n",
187
+ "\n",
188
+ "memory.load_memory_variables({})"
189
+ ]
190
+ },
191
+ {
192
+ "cell_type": "code",
193
+ "execution_count": 10,
194
+ "id": "eb7f4dca-2fd7-4980-91ab-ee3db6532ce2",
195
+ "metadata": {},
196
+ "outputs": [],
197
+ "source": [
198
+ "llm = ChatOpenAI(temperature=0.0)\n",
199
+ "memory = ConversationBufferMemory()\n",
200
+ "conversation = ConversationChain(\n",
201
+ " llm=llm,\n",
202
+ " memory=memory,\n",
203
+ " verbose=True\n",
204
+ ")"
205
+ ]
206
+ },
207
+ {
208
+ "cell_type": "code",
209
+ "execution_count": 11,
210
+ "id": "4da61d3b-a455-42a3-9654-5d54abb7ee76",
211
+ "metadata": {},
212
+ "outputs": [
213
+ {
214
+ "name": "stdout",
215
+ "output_type": "stream",
216
+ "text": [
217
+ "\n",
218
+ "\n",
219
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
220
+ "Prompt after formatting:\n",
221
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
222
+ "\n",
223
+ "Current conversation:\n",
224
+ "\n",
225
+ "Human: Hello I'm Sushant\n",
226
+ "AI:\u001b[0m\n",
227
+ "\n",
228
+ "\u001b[1m> Finished chain.\u001b[0m\n"
229
+ ]
230
+ },
231
+ {
232
+ "data": {
233
+ "text/plain": [
234
+ "'Hello Sushant! How can I assist you today?'"
235
+ ]
236
+ },
237
+ "execution_count": 11,
238
+ "metadata": {},
239
+ "output_type": "execute_result"
240
+ }
241
+ ],
242
+ "source": [
243
+ "conversation.predict(input=\"Hello I'm Sushant\")"
244
+ ]
245
+ },
246
+ {
247
+ "cell_type": "code",
248
+ "execution_count": 12,
249
+ "id": "3cbafc44-0668-44bc-9cec-869b01f6f315",
250
+ "metadata": {},
251
+ "outputs": [
252
+ {
253
+ "name": "stdout",
254
+ "output_type": "stream",
255
+ "text": [
256
+ "\n",
257
+ "\n",
258
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
259
+ "Prompt after formatting:\n",
260
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
261
+ "\n",
262
+ "Current conversation:\n",
263
+ "Human: Hello I'm Sushant\n",
264
+ "AI: Hello Sushant! How can I assist you today?\n",
265
+ "Human: who is armstrong\n",
266
+ "AI:\u001b[0m\n",
267
+ "\n",
268
+ "\u001b[1m> Finished chain.\u001b[0m\n"
269
+ ]
270
+ },
271
+ {
272
+ "data": {
273
+ "text/plain": [
274
+ "'Armstrong can refer to several different people. One famous person named Armstrong is Neil Armstrong, who was an American astronaut and the first person to walk on the moon. Another famous Armstrong is Louis Armstrong, who was a jazz musician and trumpeter. There are also many other people with the last name Armstrong, so it would depend on which specific Armstrong you are referring to.'"
275
+ ]
276
+ },
277
+ "execution_count": 12,
278
+ "metadata": {},
279
+ "output_type": "execute_result"
280
+ }
281
+ ],
282
+ "source": [
283
+ "conversation.predict(input=\"who is armstrong\")"
284
+ ]
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": 13,
289
+ "id": "b30e8043-3df1-42c0-873b-958aaea17f04",
290
+ "metadata": {},
291
+ "outputs": [
292
+ {
293
+ "name": "stdout",
294
+ "output_type": "stream",
295
+ "text": [
296
+ "\n",
297
+ "\n",
298
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
299
+ "Prompt after formatting:\n",
300
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
301
+ "\n",
302
+ "Current conversation:\n",
303
+ "Human: Hello I'm Sushant\n",
304
+ "AI: Hello Sushant! How can I assist you today?\n",
305
+ "Human: who is armstrong\n",
306
+ "AI: Armstrong can refer to several different people. One famous person named Armstrong is Neil Armstrong, who was an American astronaut and the first person to walk on the moon. Another famous Armstrong is Louis Armstrong, who was a jazz musician and trumpeter. There are also many other people with the last name Armstrong, so it would depend on which specific Armstrong you are referring to.\n",
307
+ "Human: okay whats my name.?\n",
308
+ "AI:\u001b[0m\n",
309
+ "\n",
310
+ "\u001b[1m> Finished chain.\u001b[0m\n"
311
+ ]
312
+ },
313
+ {
314
+ "data": {
315
+ "text/plain": [
316
+ "\"I'm sorry, but I don't have access to personal information about individuals unless it has been shared with me in the course of our conversation. I am designed to respect user privacy and confidentiality.\""
317
+ ]
318
+ },
319
+ "execution_count": 13,
320
+ "metadata": {},
321
+ "output_type": "execute_result"
322
+ }
323
+ ],
324
+ "source": [
325
+ "\n",
326
+ "conversation.predict(input=\"okay whats my name.?\")"
327
+ ]
328
+ },
329
+ {
330
+ "cell_type": "code",
331
+ "execution_count": 16,
332
+ "id": "d8c620c0-4ff3-4013-aada-c75982d0cc8f",
333
+ "metadata": {},
334
+ "outputs": [],
335
+ "source": [
336
+ "from langchain.memory import ConversationTokenBufferMemory\n",
337
+ "from langchain.llms import OpenAI\n",
338
+ "llm = ChatOpenAI(temperature=0.0)\n",
339
+ "\n",
340
+ "memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n",
341
+ "memory.save_context({\"input\": \"Hi There can I ask you something\"}, {\"Output\": \"Yes You can please ask now\"})\n",
342
+ "memory.save_context({\"input\": \"Hi There ng\"}, {\"Output\": \"ask now\"})"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "code",
347
+ "execution_count": 17,
348
+ "id": "1586a789-dfbb-4077-a5ae-d8031738ae40",
349
+ "metadata": {},
350
+ "outputs": [
351
+ {
352
+ "data": {
353
+ "text/plain": [
354
+ "{'history': 'AI: ask now'}"
355
+ ]
356
+ },
357
+ "execution_count": 17,
358
+ "metadata": {},
359
+ "output_type": "execute_result"
360
+ }
361
+ ],
362
+ "source": [
363
+ "memory.load_memory_variables({})"
364
+ ]
365
+ },
366
+ {
367
+ "cell_type": "code",
368
+ "execution_count": 19,
369
+ "id": "78037aaa-080c-4dd4-aa96-bea8c8644efb",
370
+ "metadata": {},
371
+ "outputs": [],
372
+ "source": [
373
+ "from langchain.memory import ConversationSummaryBufferMemory\n",
374
+ "schedule = \"There is a meeting at 8am with your product team. \\\n",
375
+ "You will need your powerpoint presentation prepared. \\\n",
376
+ "9am-12pm have time to work on your LangChain \\\n",
377
+ "project which will go quickly because Langchain is such a powerful tool. \\\n",
378
+ "At Noon, lunch at the italian resturant with a customer who is driving \\\n",
379
+ "from over an hour away to meet you to understand the latest in AI. \\\n",
380
+ "Be sure to bring your laptop to show the latest LLM demo.\"\n",
381
+ "\n",
382
+ "memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)\n",
383
+ "memory.save_context({\"input\": \"Hello\"}, {\"output\": \"What's up\"})\n",
384
+ "memory.save_context({\"input\": \"Not much, just hanging\"},\n",
385
+ " {\"output\": \"Cool\"})\n",
386
+ "memory.save_context({\"input\": \"What is on the schedule today?\"}, \n",
387
+ " {\"output\": f\"{schedule}\"})\n"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": 20,
393
+ "id": "f326d9da-da8d-41a8-a449-c23c8fc08601",
394
+ "metadata": {},
395
+ "outputs": [
396
+ {
397
+ "data": {
398
+ "text/plain": [
399
+ "{'history': 'System: The human and AI exchange greetings. The human mentions that they are not doing much. The AI informs the human about their schedule for the day, including a meeting with the product team, working on the LangChain project, and having lunch with a customer to discuss the latest in AI. The AI also reminds the human to bring their laptop to show a demo.'}"
400
+ ]
401
+ },
402
+ "execution_count": 20,
403
+ "metadata": {},
404
+ "output_type": "execute_result"
405
+ }
406
+ ],
407
+ "source": [
408
+ "memory.load_memory_variables({})"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "code",
413
+ "execution_count": 21,
414
+ "id": "57c3a5fd-356a-42de-965b-7513a1f686c4",
415
+ "metadata": {},
416
+ "outputs": [],
417
+ "source": [
418
+ "conversation = ConversationChain(\n",
419
+ " llm=llm, \n",
420
+ " memory = memory,\n",
421
+ " verbose=True\n",
422
+ ")"
423
+ ]
424
+ },
425
+ {
426
+ "cell_type": "code",
427
+ "execution_count": 22,
428
+ "id": "f5286d9b-cb56-4530-b532-c1b90f8f4e28",
429
+ "metadata": {},
430
+ "outputs": [
431
+ {
432
+ "name": "stdout",
433
+ "output_type": "stream",
434
+ "text": [
435
+ "\n",
436
+ "\n",
437
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
438
+ "Prompt after formatting:\n",
439
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
440
+ "\n",
441
+ "Current conversation:\n",
442
+ "System: The human and AI exchange greetings. The human mentions that they are not doing much. The AI informs the human about their schedule for the day, including a meeting with the product team, working on the LangChain project, and having lunch with a customer to discuss the latest in AI. The AI also reminds the human to bring their laptop to show a demo.\n",
443
+ "Human: What would be a good demo to show?\n",
444
+ "AI:\u001b[0m\n",
445
+ "\n",
446
+ "\u001b[1m> Finished chain.\u001b[0m\n"
447
+ ]
448
+ },
449
+ {
450
+ "data": {
451
+ "text/plain": [
452
+ "'A good demo to show would be a live demonstration of the LangChain project. You can showcase the features and functionality of the language translation software, highlighting its accuracy and efficiency. Additionally, you could also demonstrate any recent updates or improvements made to the project. This would give the customer a firsthand experience of the capabilities of the AI technology and its potential benefits for their business.'"
453
+ ]
454
+ },
455
+ "execution_count": 22,
456
+ "metadata": {},
457
+ "output_type": "execute_result"
458
+ }
459
+ ],
460
+ "source": [
461
+ "conversation.predict(input=\"What would be a good demo to show?\")"
462
+ ]
463
+ },
464
+ {
465
+ "cell_type": "code",
466
+ "execution_count": 23,
467
+ "id": "620520ef-7cf1-421b-be81-b7f0c93ae6d4",
468
+ "metadata": {},
469
+ "outputs": [
470
+ {
471
+ "data": {
472
+ "text/plain": [
473
+ "{'history': 'System: The human and AI exchange greetings. The human mentions that they are not doing much. The AI informs the human about their schedule for the day, including a meeting with the product team, working on the LangChain project, and having lunch with a customer to discuss the latest in AI. The AI also reminds the human to bring their laptop to show a demo.\\nHuman: What would be a good demo to show?\\nAI: A good demo to show would be a live demonstration of the LangChain project. You can showcase the features and functionality of the language translation software, highlighting its accuracy and efficiency. Additionally, you could also demonstrate any recent updates or improvements made to the project. This would give the customer a firsthand experience of the capabilities of the AI technology and its potential benefits for their business.'}"
474
+ ]
475
+ },
476
+ "execution_count": 23,
477
+ "metadata": {},
478
+ "output_type": "execute_result"
479
+ }
480
+ ],
481
+ "source": [
482
+ "memory.load_memory_variables({})"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": null,
488
+ "id": "8583b417-2698-408a-b84d-6ec92f91d494",
489
+ "metadata": {},
490
+ "outputs": [],
491
+ "source": []
492
+ }
493
+ ],
494
+ "metadata": {
495
+ "kernelspec": {
496
+ "display_name": "Python 3 (ipykernel)",
497
+ "language": "python",
498
+ "name": "python3"
499
+ },
500
+ "language_info": {
501
+ "codemirror_mode": {
502
+ "name": "ipython",
503
+ "version": 3
504
+ },
505
+ "file_extension": ".py",
506
+ "mimetype": "text/x-python",
507
+ "name": "python",
508
+ "nbconvert_exporter": "python",
509
+ "pygments_lexer": "ipython3",
510
+ "version": "3.11.4"
511
+ }
512
+ },
513
+ "nbformat": 4,
514
+ "nbformat_minor": 5
515
+ }
.ipynb_checkpoints/questions_and_answers-checkpoint.ipynb ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 12,
6
+ "id": "5be86be3-be9d-4707-ba0f-d4d82518bc2b",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import os\n",
11
+ "\n",
12
+ "from dotenv import load_dotenv, find_dotenv\n",
13
+ "_ = load_dotenv(find_dotenv()) # read local .env file"
14
+ ]
15
+ },
16
+ {
17
+ "cell_type": "code",
18
+ "execution_count": 13,
19
+ "id": "3653c1eb-db1c-4546-9357-b86c12ff7347",
20
+ "metadata": {},
21
+ "outputs": [],
22
+ "source": [
23
+ "from langchain.chains import RetrievalQA\n",
24
+ "from langchain.chat_models import ChatOpenAI\n",
25
+ "from langchain.document_loaders import CSVLoader\n",
26
+ "from langchain.vectorstores import DocArrayInMemorySearch\n",
27
+ "from IPython.display import display, Markdown"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": 14,
33
+ "id": "cb4b06e3-f3ba-4210-86e0-69cf1fbab9ff",
34
+ "metadata": {},
35
+ "outputs": [],
36
+ "source": [
37
+ "file = 'Data.csv'\n",
38
+ "loader = CSVLoader(file_path=file)"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": 18,
44
+ "id": "09c5e552-32a3-4aad-9a16-d45eb42a8197",
45
+ "metadata": {},
46
+ "outputs": [
47
+ {
48
+ "ename": "ImportError",
49
+ "evalue": "cannot import name 'URL' from 'sqlalchemy' (/home/sushantgo/Downloads/conda/lib/python3.11/site-packages/sqlalchemy/__init__.py)",
50
+ "output_type": "error",
51
+ "traceback": [
52
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
53
+ "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
54
+ "Cell \u001b[0;32mIn[18], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mindexes\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m VectorstoreIndexCreator\n",
55
+ "File \u001b[0;32m~/Downloads/conda/lib/python3.11/site-packages/langchain/indexes/__init__.py:17\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;124;03m\"\"\"Code to support various indexing workflows.\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \n\u001b[1;32m 3\u001b[0m \u001b[38;5;124;03mProvides code to:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[38;5;124;03mdocuments that were derived from parent documents by chunking.)\u001b[39;00m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mindexes\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_api\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m IndexingResult, index\n\u001b[0;32m---> 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mindexes\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_sql_record_manager\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m SQLRecordManager\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mindexes\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mgraph\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m GraphIndexCreator\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mindexes\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mvectorstore\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m VectorstoreIndexCreator\n",
56
+ "File \u001b[0;32m~/Downloads/conda/lib/python3.11/site-packages/langchain/indexes/_sql_record_manager.py:21\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01muuid\u001b[39;00m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Any, Dict, Generator, List, Optional, Sequence, Union\n\u001b[0;32m---> 21\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01msqlalchemy\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m (\n\u001b[1;32m 22\u001b[0m URL,\n\u001b[1;32m 23\u001b[0m Column,\n\u001b[1;32m 24\u001b[0m Engine,\n\u001b[1;32m 25\u001b[0m Float,\n\u001b[1;32m 26\u001b[0m Index,\n\u001b[1;32m 27\u001b[0m String,\n\u001b[1;32m 28\u001b[0m UniqueConstraint,\n\u001b[1;32m 29\u001b[0m and_,\n\u001b[1;32m 30\u001b[0m create_engine,\n\u001b[1;32m 31\u001b[0m text,\n\u001b[1;32m 32\u001b[0m )\n\u001b[1;32m 33\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01msqlalchemy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mext\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdeclarative\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m declarative_base\n\u001b[1;32m 34\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01msqlalchemy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01morm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Session, sessionmaker\n",
57
+ "\u001b[0;31mImportError\u001b[0m: cannot import name 'URL' from 'sqlalchemy' (/home/sushantgo/Downloads/conda/lib/python3.11/site-packages/sqlalchemy/__init__.py)"
58
+ ]
59
+ }
60
+ ],
61
+ "source": [
62
+ "from langchain.indexes import VectorstoreIndexCreator"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": null,
68
+ "id": "eaee53eb-59f2-4c95-b68f-30d35c2268f7",
69
+ "metadata": {},
70
+ "outputs": [],
71
+ "source": []
72
+ }
73
+ ],
74
+ "metadata": {
75
+ "kernelspec": {
76
+ "display_name": "Python 3 (ipykernel)",
77
+ "language": "python",
78
+ "name": "python3"
79
+ },
80
+ "language_info": {
81
+ "codemirror_mode": {
82
+ "name": "ipython",
83
+ "version": 3
84
+ },
85
+ "file_extension": ".py",
86
+ "mimetype": "text/x-python",
87
+ "name": "python",
88
+ "nbconvert_exporter": "python",
89
+ "pygments_lexer": "ipython3",
90
+ "version": "3.11.4"
91
+ }
92
+ },
93
+ "nbformat": 4,
94
+ "nbformat_minor": 5
95
+ }
Data.csv ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Product,markdown
2
+ Queen Size sheet Set, glasgow is sun protection shirt
3
+ Waterproof Phone Pcuch, I loved the waterproof sac
4
+ Luxury Air Mattress, This mattress had a small hole in the top
5
+ Pillows Insert, this is the best pillow filters on amazon
6
+ Milk Frother Handheld\nm, I loved this product But they only seem to I
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  title: Simple_Langchain_Gradio_App
3
- app_file: langchain-app.py
4
  sdk: gradio
5
  sdk_version: 3.46.0
6
  ---
 
1
  ---
2
  title: Simple_Langchain_Gradio_App
3
+ app_file: langchain.ipynb
4
  sdk: gradio
5
  sdk_version: 3.46.0
6
  ---
Untitled.ipynb CHANGED
@@ -2,61 +2,135 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 4,
6
- "id": "34b50665-3063-4b26-b496-ca1633e8a38e",
7
  "metadata": {},
8
  "outputs": [
9
  {
10
- "name": "stdin",
11
- "output_type": "stream",
12
- "text": [
13
- "Enter your message who is sushant godse?\n"
14
- ]
15
- },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  {
17
  "name": "stdout",
18
  "output_type": "stream",
19
  "text": [
20
- "I'm sorry, but I couldn't find any information about a person named Sushant Godse. It's possible that this person may not be widely known or may not have a significant online presence.\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  ]
22
  }
23
  ],
24
  "source": [
25
- "import openai\n",
26
- "import os\n",
27
- "\n",
28
- "openai.api_key = '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'\n",
29
- "MODEL_NAME = 'gpt-3.5-turbo'\n",
30
- "\n",
31
- "class Assigment3:\n",
32
- " input_val = ''\n",
33
- " def __init__(self, model):\n",
34
- " self.model = model\n",
35
- " \n",
36
- " def get_input(self):\n",
37
- " self.input_val = input('Enter your message')\n",
38
- "\n",
39
- " def get_completion(self):\n",
40
- " messages = [{\n",
41
- " 'role': 'user',\n",
42
- " 'content': self.input_val\n",
43
- " }]\n",
44
- " response = openai.ChatCompletion.create(\n",
45
- " model=self.model,\n",
46
- " messages=messages,\n",
47
- " temperature=0\n",
48
- " )\n",
49
- " return response.choices[0].message['content']\n",
50
- " \n",
51
- "obj = Assigment3(MODEL_NAME)\n",
52
- "obj.get_input()\n",
53
- "print(obj.get_completion())"
54
  ]
55
  },
56
  {
57
  "cell_type": "code",
58
  "execution_count": null,
59
- "id": "575f233a-32e5-4700-8874-b7bf9428a51b",
60
  "metadata": {},
61
  "outputs": [],
62
  "source": []
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "cc06fe17-c615-453a-9d54-b04f861ee732",
7
  "metadata": {},
8
  "outputs": [
9
  {
10
+ "data": {
11
+ "text/plain": [
12
+ "True"
13
+ ]
14
+ },
15
+ "execution_count": 1,
16
+ "metadata": {},
17
+ "output_type": "execute_result"
18
+ }
19
+ ],
20
+ "source": [
21
+ "from dotenv import load_dotenv, find_dotenv\n",
22
+ "load_dotenv(find_dotenv())"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 2,
28
+ "id": "9495c8de-b218-436b-8145-78b222ed4545",
29
+ "metadata": {},
30
+ "outputs": [
31
+ {
32
+ "data": {
33
+ "text/plain": [
34
+ "'\\n\\nA large language model is a type of artificial intelligence (AI) system that uses a large set of text data to generate text-based predictions and generate new data.'"
35
+ ]
36
+ },
37
+ "execution_count": 2,
38
+ "metadata": {},
39
+ "output_type": "execute_result"
40
+ }
41
+ ],
42
+ "source": [
43
+ "from langchain.llms import OpenAI\n",
44
+ "llm = OpenAI(model_name=\"text-davinci-003\")\n",
45
+ "llm(\"explain large language model in one sentence\")"
46
+ ]
47
+ },
48
+ {
49
+ "cell_type": "code",
50
+ "execution_count": 5,
51
+ "id": "3cf04b99-b937-4fec-a5f1-093b638245b9",
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": [
55
+ "from langchain.schema import (\n",
56
+ " AIMessage,\n",
57
+ " HumanMessage,\n",
58
+ " SystemMessage\n",
59
+ ")\n",
60
+ "from langchain.chat_models import ChatOpenAI"
61
+ ]
62
+ },
63
+ {
64
+ "cell_type": "code",
65
+ "execution_count": 6,
66
+ "id": "6b42d0b0-d353-483f-994c-0ae45af8e7f8",
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "chat = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0.3)\n",
71
+ "messages= [\n",
72
+ " SystemMessage(content=\"You are an expert data scientist\"),\n",
73
+ " HumanMessage(content=\"Write a python script that trains a neural network on stimulated data\")\n",
74
+ "]\n",
75
+ "response=chat(messages)"
76
+ ]
77
+ },
78
+ {
79
+ "cell_type": "code",
80
+ "execution_count": 7,
81
+ "id": "3b07b556-21cc-4c36-9d80-4f1fd2d83cba",
82
+ "metadata": {},
83
+ "outputs": [
84
  {
85
  "name": "stdout",
86
  "output_type": "stream",
87
  "text": [
88
+ "Sure! Here's an example of a Python script that trains a neural network on simulated data using the Keras library:\n",
89
+ "\n",
90
+ "```python\n",
91
+ "import numpy as np\n",
92
+ "from keras.models import Sequential\n",
93
+ "from keras.layers import Dense\n",
94
+ "\n",
95
+ "# Generate simulated data\n",
96
+ "np.random.seed(0)\n",
97
+ "X = np.random.rand(100, 2)\n",
98
+ "y = np.random.randint(2, size=100)\n",
99
+ "\n",
100
+ "# Define the neural network model\n",
101
+ "model = Sequential()\n",
102
+ "model.add(Dense(10, input_dim=2, activation='relu'))\n",
103
+ "model.add(Dense(1, activation='sigmoid'))\n",
104
+ "\n",
105
+ "# Compile the model\n",
106
+ "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
107
+ "\n",
108
+ "# Train the model\n",
109
+ "model.fit(X, y, epochs=10, batch_size=10)\n",
110
+ "\n",
111
+ "# Evaluate the model\n",
112
+ "loss, accuracy = model.evaluate(X, y)\n",
113
+ "print(f\"Loss: {loss}, Accuracy: {accuracy}\")\n",
114
+ "```\n",
115
+ "\n",
116
+ "In this script, we first generate simulated data using `np.random.rand()` and `np.random.randint()` functions. The input data `X` is a 2-dimensional array of random values between 0 and 1, and the target labels `y` are binary values (0 or 1).\n",
117
+ "\n",
118
+ "We then define a simple neural network model using the `Sequential` class from Keras. It consists of a single hidden layer with 10 neurons and a ReLU activation function, followed by an output layer with a single neuron and a sigmoid activation function.\n",
119
+ "\n",
120
+ "The model is compiled with the binary cross-entropy loss function and the Adam optimizer. We then train the model using the `fit()` function, specifying the number of epochs and batch size.\n",
121
+ "\n",
122
+ "Finally, we evaluate the trained model on the same data and print the loss and accuracy.\n"
123
  ]
124
  }
125
  ],
126
  "source": [
127
+ "print(response.content, end='\\n')"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  ]
129
  },
130
  {
131
  "cell_type": "code",
132
  "execution_count": null,
133
+ "id": "30193256-290b-4c90-b91c-33ae893a271d",
134
  "metadata": {},
135
  "outputs": [],
136
  "source": []
Untitled1.ipynb CHANGED
@@ -1,75 +1,9 @@
1
  {
2
  "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 2,
6
- "id": "a05e9e6e-7a2b-4e02-a37a-678f065092f9",
7
- "metadata": {},
8
- "outputs": [
9
- {
10
- "name": "stdout",
11
- "output_type": "stream",
12
- "text": [
13
- "To guide a model towards the desired output and avoid irrelevant or incorrect responses, it is important to provide clear and specific instructions, even if they are longer and provide more clarity and context.\n"
14
- ]
15
- }
16
- ],
17
- "source": [
18
- "\n",
19
- "import openai\n",
20
- "import os\n",
21
- "\n",
22
- "openai.api_key = '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'\n",
23
- "MODEL_NAME = 'gpt-3.5-turbo'\n",
24
- "\n",
25
- "TEXT = f\"\"\"\n",
26
- "You should express what you want a model to do by \\\n",
27
- "providing instructions that are as clear and \\\n",
28
- "specific as you can possibly make them. \\\n",
29
- "This will guide the model towards the desired output, \\\n",
30
- "and reduce the chances of receiving irrelevent \\\n",
31
- "or incorrec5 responses. Don't confuse writing a \\\n",
32
- "clear prompt with writing a short prompt. \\\n",
33
- "In many case, longer prompts provide more clearity \\\n",
34
- "and context for the model, which can lead to \\\n",
35
- "more detailed and relevant outputs.\n",
36
- "\"\"\"\n",
37
- "\n",
38
- "PROMPT = f\"\"\"\n",
39
- "summarize the text delimited by triple backticks\\\n",
40
- "into a single sentence.\n",
41
- "```{TEXT}```\n",
42
- "\"\"\"\n",
43
- "\n",
44
- "class Assigment4:\n",
45
- " input_val = ''\n",
46
- " def __init__(self, model):\n",
47
- " self.model = model\n",
48
- " \n",
49
- " def get_input(self):\n",
50
- " self.input_val = input('Enter your message')\n",
51
- "\n",
52
- " def get_completion(self, prompt):\n",
53
- " messages = [{\n",
54
- " 'role': 'user',\n",
55
- " 'content': prompt\n",
56
- " }]\n",
57
- " response = openai.ChatCompletion.create(\n",
58
- " model=self.model,\n",
59
- " messages=messages,\n",
60
- " temperature=0\n",
61
- " )\n",
62
- " return response.choices[0].message['content']\n",
63
- "\n",
64
- " \n",
65
- "obj = Assigment4(MODEL_NAME)\n",
66
- "print(obj.get_completion(PROMPT))"
67
- ]
68
- },
69
  {
70
  "cell_type": "code",
71
  "execution_count": null,
72
- "id": "d3bb3a9b-d418-484b-81c0-33515dba32c9",
73
  "metadata": {},
74
  "outputs": [],
75
  "source": []
 
1
  {
2
  "cells": [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  {
4
  "cell_type": "code",
5
  "execution_count": null,
6
+ "id": "7340944d-f451-47e1-b939-f83936a54ee5",
7
  "metadata": {},
8
  "outputs": [],
9
  "source": []
chains.ipynb ADDED
@@ -0,0 +1,783 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 31,
6
+ "id": "1a305246-59d9-453e-9903-ca462611f1eb",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "data": {
11
+ "text/html": [
12
+ "<div>\n",
13
+ "<style scoped>\n",
14
+ " .dataframe tbody tr th:only-of-type {\n",
15
+ " vertical-align: middle;\n",
16
+ " }\n",
17
+ "\n",
18
+ " .dataframe tbody tr th {\n",
19
+ " vertical-align: top;\n",
20
+ " }\n",
21
+ "\n",
22
+ " .dataframe thead th {\n",
23
+ " text-align: right;\n",
24
+ " }\n",
25
+ "</style>\n",
26
+ "<table border=\"1\" class=\"dataframe\">\n",
27
+ " <thead>\n",
28
+ " <tr style=\"text-align: right;\">\n",
29
+ " <th></th>\n",
30
+ " <th>Product</th>\n",
31
+ " <th>Review</th>\n",
32
+ " </tr>\n",
33
+ " </thead>\n",
34
+ " <tbody>\n",
35
+ " <tr>\n",
36
+ " <th>0</th>\n",
37
+ " <td>Queen Size sheet Set</td>\n",
38
+ " <td>I ordered a king size set.</td>\n",
39
+ " </tr>\n",
40
+ " <tr>\n",
41
+ " <th>1</th>\n",
42
+ " <td>Waterproof Phone Pcuch</td>\n",
43
+ " <td>I loved the waterproof sac</td>\n",
44
+ " </tr>\n",
45
+ " <tr>\n",
46
+ " <th>2</th>\n",
47
+ " <td>Luxury Air Mattress</td>\n",
48
+ " <td>This mattress had a small hole in the top</td>\n",
49
+ " </tr>\n",
50
+ " <tr>\n",
51
+ " <th>3</th>\n",
52
+ " <td>Pillows Insert</td>\n",
53
+ " <td>this is the best pillow filters on amazon</td>\n",
54
+ " </tr>\n",
55
+ " <tr>\n",
56
+ " <th>4</th>\n",
57
+ " <td>Milk Frother Handheld\\nm</td>\n",
58
+ " <td>I loved this product But they only seem to I</td>\n",
59
+ " </tr>\n",
60
+ " </tbody>\n",
61
+ "</table>\n",
62
+ "</div>"
63
+ ],
64
+ "text/plain": [
65
+ " Product Review\n",
66
+ "0 Queen Size sheet Set I ordered a king size set.\n",
67
+ "1 Waterproof Phone Pcuch I loved the waterproof sac\n",
68
+ "2 Luxury Air Mattress This mattress had a small hole in the top\n",
69
+ "3 Pillows Insert this is the best pillow filters on amazon\n",
70
+ "4 Milk Frother Handheld\\nm I loved this product But they only seem to I"
71
+ ]
72
+ },
73
+ "execution_count": 31,
74
+ "metadata": {},
75
+ "output_type": "execute_result"
76
+ }
77
+ ],
78
+ "source": [
79
+ "import openai\n",
80
+ "import os\n",
81
+ "\n",
82
+ "from dotenv import load_dotenv, find_dotenv\n",
83
+ "_ = load_dotenv(find_dotenv())\n",
84
+ "import pandas as pd\n",
85
+ "\n",
86
+ "df = pd.read_csv('Data.csv')\n",
87
+ "df.head()\n"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": 32,
93
+ "id": "8f75bedd-2b00-466a-a00f-bc7c47f2f7a8",
94
+ "metadata": {},
95
+ "outputs": [],
96
+ "source": [
97
+ "from langchain.chat_models import ChatOpenAI\n",
98
+ "from langchain.prompts import ChatPromptTemplate\n",
99
+ "from langchain.chains import LLMChain"
100
+ ]
101
+ },
102
+ {
103
+ "cell_type": "code",
104
+ "execution_count": 33,
105
+ "id": "edc76aa3-1d1f-41d2-93a9-8297268817ea",
106
+ "metadata": {},
107
+ "outputs": [],
108
+ "source": [
109
+ "llm = ChatOpenAI(temperature=0.9)"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "code",
114
+ "execution_count": 34,
115
+ "id": "27f2d3c9-9601-4b31-8d35-bae9538ee2a0",
116
+ "metadata": {},
117
+ "outputs": [],
118
+ "source": [
119
+ "# Prompt template 1\n",
120
+ "prompt = ChatPromptTemplate.from_template(\"what is the best name to describe a company that makes {product}\")"
121
+ ]
122
+ },
123
+ {
124
+ "cell_type": "code",
125
+ "execution_count": 35,
126
+ "id": "03ef9080-77ba-4406-a402-e4d5b889be47",
127
+ "metadata": {},
128
+ "outputs": [],
129
+ "source": [
130
+ "# Chain 1\n",
131
+ "chain = LLMChain(llm=llm, prompt=prompt)"
132
+ ]
133
+ },
134
+ {
135
+ "cell_type": "code",
136
+ "execution_count": 36,
137
+ "id": "0a172ea1-18ce-407b-bcd9-b4146c5f5610",
138
+ "metadata": {},
139
+ "outputs": [
140
+ {
141
+ "data": {
142
+ "text/plain": [
143
+ "'1. Royal Comfort Linens\\n2. Majestic Bedding Co.\\n3. Crowned Sleep Essentials\\n4. Regal Dreams Bedding\\n5. Luxurious Queen Linens\\n6. Elite Serenity Bedding\\n7. Imperial Queen Bedding\\n8. Opulence Bed Linens\\n9. Grandeur Sleep Essentials\\n10. Prestige Queen Linens'"
144
+ ]
145
+ },
146
+ "execution_count": 36,
147
+ "metadata": {},
148
+ "output_type": "execute_result"
149
+ }
150
+ ],
151
+ "source": [
152
+ "product = \"Queen Size Sheet Set\"\n",
153
+ "chain.run(product)"
154
+ ]
155
+ },
156
+ {
157
+ "cell_type": "code",
158
+ "execution_count": 37,
159
+ "id": "b0ff950c-1c09-4ba5-8661-626fb69b24ca",
160
+ "metadata": {},
161
+ "outputs": [
162
+ {
163
+ "name": "stdout",
164
+ "output_type": "stream",
165
+ "text": [
166
+ "\n",
167
+ "\n",
168
+ "\u001b[1m> Entering new SimpleSequentialChain chain...\u001b[0m\n",
169
+ "\u001b[36;1m\u001b[1;3mRoyal Comfort Linens\u001b[0m\n",
170
+ "\u001b[33;1m\u001b[1;3mRoyal Comfort Linens is a luxurious bedding company that offers high-quality and stylish linens for a truly regal sleep experience.\u001b[0m\n",
171
+ "\n",
172
+ "\u001b[1m> Finished chain.\u001b[0m\n"
173
+ ]
174
+ },
175
+ {
176
+ "data": {
177
+ "text/plain": [
178
+ "'Royal Comfort Linens is a luxurious bedding company that offers high-quality and stylish linens for a truly regal sleep experience.'"
179
+ ]
180
+ },
181
+ "execution_count": 37,
182
+ "metadata": {},
183
+ "output_type": "execute_result"
184
+ }
185
+ ],
186
+ "source": [
187
+ "from langchain.chains import SimpleSequentialChain\n",
188
+ "# Prompt Template 2\n",
189
+ "prompt_2 = ChatPromptTemplate.from_template(\"Write a 20-word description for the following company: {company_name}\")\n",
190
+ "\n",
191
+ "# chain 2\n",
192
+ "chain_two = LLMChain(llm=llm, prompt=prompt_2)\n",
193
+ "\n",
194
+ "overall_simple_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)\n",
195
+ "overall_simple_chain.run(product)"
196
+ ]
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "execution_count": 38,
201
+ "id": "8ab602a8-9871-4881-b3eb-fbc1e9a90e08",
202
+ "metadata": {},
203
+ "outputs": [],
204
+ "source": [
205
+ "from langchain.chains import SequentialChain"
206
+ ]
207
+ },
208
+ {
209
+ "cell_type": "code",
210
+ "execution_count": 39,
211
+ "id": "7101b29c-cc62-48ed-925c-962c9da6c40b",
212
+ "metadata": {},
213
+ "outputs": [],
214
+ "source": [
215
+ "llm_model = 'gpt-3.5-turbo'\n",
216
+ "llm = ChatOpenAI(temperature=0.9, model=llm_model)\n",
217
+ "\n",
218
+ "# prompt template 1: translate to english\n",
219
+ "first_prompt = ChatPromptTemplate.from_template(\n",
220
+ " \"Translate the following review to english:\"\n",
221
+ " \"\\n\\n{Review}\"\n",
222
+ ")\n",
223
+ "# chain 1: input= Review and output= English_Review\n",
224
+ "chain_one = LLMChain(llm=llm, prompt=first_prompt, \n",
225
+ " output_key=\"English_Review\"\n",
226
+ " )\n"
227
+ ]
228
+ },
229
+ {
230
+ "cell_type": "code",
231
+ "execution_count": 40,
232
+ "id": "cd8f164e-e91e-4fa5-a27a-40714aacdc32",
233
+ "metadata": {},
234
+ "outputs": [],
235
+ "source": [
236
+ "second_prompt = ChatPromptTemplate.from_template(\n",
237
+ " \"Can you summarize the following review in 1 sentence:\"\n",
238
+ " \"\\n\\n{English_Review}\"\n",
239
+ ")\n",
240
+ "# chain 2: input= English_Review and output= summary\n",
241
+ "chain_two = LLMChain(llm=llm, prompt=second_prompt, \n",
242
+ " output_key=\"summary\"\n",
243
+ " )"
244
+ ]
245
+ },
246
+ {
247
+ "cell_type": "code",
248
+ "execution_count": 41,
249
+ "id": "6a77ec8a-d5da-4889-9803-1676a03d046b",
250
+ "metadata": {},
251
+ "outputs": [],
252
+ "source": [
253
+ "# prompt template 3: translate to english\n",
254
+ "third_prompt = ChatPromptTemplate.from_template(\n",
255
+ " \"What language is the following review:\\n\\n{Review}\"\n",
256
+ ")\n",
257
+ "# chain 3: input= Review and output= language\n",
258
+ "chain_three = LLMChain(llm=llm, prompt=third_prompt,\n",
259
+ " output_key=\"language\"\n",
260
+ " )\n"
261
+ ]
262
+ },
263
+ {
264
+ "cell_type": "code",
265
+ "execution_count": 42,
266
+ "id": "5fd5d216-74f1-4c73-9ba5-008a57398d65",
267
+ "metadata": {},
268
+ "outputs": [],
269
+ "source": [
270
+ "# prompt template 4: follow up message\n",
271
+ "fourth_prompt = ChatPromptTemplate.from_template(\n",
272
+ " \"Write a follow up response to the following \"\n",
273
+ " \"summary in the specified language:\"\n",
274
+ " \"\\n\\nSummary: {summary}\\n\\nLanguage: {language}\"\n",
275
+ ")\n",
276
+ "# chain 4: input= summary, language and output= followup_message\n",
277
+ "chain_four = LLMChain(llm=llm, prompt=fourth_prompt,\n",
278
+ " output_key=\"followup_message\"\n",
279
+ " )"
280
+ ]
281
+ },
282
+ {
283
+ "cell_type": "code",
284
+ "execution_count": 43,
285
+ "id": "6217843a-4244-48da-8622-0f753984a8f4",
286
+ "metadata": {},
287
+ "outputs": [],
288
+ "source": [
289
+ "# overall_chain: input= Review \n",
290
+ "# and output= English_Review,summary, followup_message\n",
291
+ "overall_chain = SequentialChain(\n",
292
+ " chains=[chain_one, chain_two, chain_three, chain_four],\n",
293
+ " input_variables=[\"Review\"],\n",
294
+ " output_variables=[\"English_Review\", \"summary\",\"followup_message\"],\n",
295
+ " verbose=True\n",
296
+ ")"
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": 48,
302
+ "id": "2c58c69a-e943-4257-a61c-579ca6eb8f4f",
303
+ "metadata": {},
304
+ "outputs": [
305
+ {
306
+ "name": "stdout",
307
+ "output_type": "stream",
308
+ "text": [
309
+ "\n",
310
+ "\n",
311
+ "\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
312
+ "\n",
313
+ "\u001b[1m> Finished chain.\u001b[0m\n"
314
+ ]
315
+ },
316
+ {
317
+ "data": {
318
+ "text/plain": [
319
+ "{'Review': ' this is the best pillow filters on amazon',\n",
320
+ " 'English_Review': 'Este es el mejor filtro de almohadas en Amazon.',\n",
321
+ " 'summary': 'This is the best pillow filter on Amazon.',\n",
322
+ " 'followup_message': \"Response: Thank you for your review! We appreciate your feedback and we're glad to hear that you think our pillow filter is the best on Amazon. We strive to provide high-quality products to our customers, and your satisfaction is our top priority. If you have any further questions or need assistance, please don't hesitate to reach out.\"}"
323
+ ]
324
+ },
325
+ "execution_count": 48,
326
+ "metadata": {},
327
+ "output_type": "execute_result"
328
+ }
329
+ ],
330
+ "source": [
331
+ "review = df.Review[3]\n",
332
+ "overall_chain(review)"
333
+ ]
334
+ },
335
+ {
336
+ "cell_type": "code",
337
+ "execution_count": 45,
338
+ "id": "c5f9c68b-020b-418a-842f-f2c32f958389",
339
+ "metadata": {},
340
+ "outputs": [
341
+ {
342
+ "data": {
343
+ "text/html": [
344
+ "<div>\n",
345
+ "<style scoped>\n",
346
+ " .dataframe tbody tr th:only-of-type {\n",
347
+ " vertical-align: middle;\n",
348
+ " }\n",
349
+ "\n",
350
+ " .dataframe tbody tr th {\n",
351
+ " vertical-align: top;\n",
352
+ " }\n",
353
+ "\n",
354
+ " .dataframe thead th {\n",
355
+ " text-align: right;\n",
356
+ " }\n",
357
+ "</style>\n",
358
+ "<table border=\"1\" class=\"dataframe\">\n",
359
+ " <thead>\n",
360
+ " <tr style=\"text-align: right;\">\n",
361
+ " <th></th>\n",
362
+ " <th>Product</th>\n",
363
+ " <th>Review</th>\n",
364
+ " </tr>\n",
365
+ " </thead>\n",
366
+ " <tbody>\n",
367
+ " <tr>\n",
368
+ " <th>0</th>\n",
369
+ " <td>Queen Size sheet Set</td>\n",
370
+ " <td>I ordered a king size set.</td>\n",
371
+ " </tr>\n",
372
+ " <tr>\n",
373
+ " <th>1</th>\n",
374
+ " <td>Waterproof Phone Pcuch</td>\n",
375
+ " <td>I loved the waterproof sac</td>\n",
376
+ " </tr>\n",
377
+ " <tr>\n",
378
+ " <th>2</th>\n",
379
+ " <td>Luxury Air Mattress</td>\n",
380
+ " <td>This mattress had a small hole in the top</td>\n",
381
+ " </tr>\n",
382
+ " <tr>\n",
383
+ " <th>3</th>\n",
384
+ " <td>Pillows Insert</td>\n",
385
+ " <td>this is the best pillow filters on amazon</td>\n",
386
+ " </tr>\n",
387
+ " <tr>\n",
388
+ " <th>4</th>\n",
389
+ " <td>Milk Frother Handheld\\nm</td>\n",
390
+ " <td>I loved this product But they only seem to I</td>\n",
391
+ " </tr>\n",
392
+ " </tbody>\n",
393
+ "</table>\n",
394
+ "</div>"
395
+ ],
396
+ "text/plain": [
397
+ " Product Review\n",
398
+ "0 Queen Size sheet Set I ordered a king size set.\n",
399
+ "1 Waterproof Phone Pcuch I loved the waterproof sac\n",
400
+ "2 Luxury Air Mattress This mattress had a small hole in the top\n",
401
+ "3 Pillows Insert this is the best pillow filters on amazon\n",
402
+ "4 Milk Frother Handheld\\nm I loved this product But they only seem to I"
403
+ ]
404
+ },
405
+ "execution_count": 45,
406
+ "metadata": {},
407
+ "output_type": "execute_result"
408
+ }
409
+ ],
410
+ "source": [
411
+ "df"
412
+ ]
413
+ },
414
+ {
415
+ "cell_type": "code",
416
+ "execution_count": 46,
417
+ "id": "27355bcf-2787-470d-a41a-ea5b5565a68a",
418
+ "metadata": {},
419
+ "outputs": [
420
+ {
421
+ "data": {
422
+ "text/plain": [
423
+ "0 Queen Size sheet Set\n",
424
+ "1 Waterproof Phone Pcuch\n",
425
+ "2 Luxury Air Mattress\n",
426
+ "3 Pillows Insert\n",
427
+ "4 Milk Frother Handheld\\nm\n",
428
+ "Name: Product, dtype: object"
429
+ ]
430
+ },
431
+ "execution_count": 46,
432
+ "metadata": {},
433
+ "output_type": "execute_result"
434
+ }
435
+ ],
436
+ "source": [
437
+ "df['Product']"
438
+ ]
439
+ },
440
+ {
441
+ "cell_type": "code",
442
+ "execution_count": 49,
443
+ "id": "8de4ae55-2cab-43fd-a2e9-542157919cbb",
444
+ "metadata": {},
445
+ "outputs": [],
446
+ "source": [
447
+ "physics_template = \"\"\"You are a very smart physics professor. \\\n",
448
+ "You are great at answering questions about physics in a concise\\\n",
449
+ "and easy to understand manner. \\\n",
450
+ "When you don't know the answer to a question you admit\\\n",
451
+ "that you don't know.\n",
452
+ "\n",
453
+ "Here is a question:\n",
454
+ "{input}\"\"\"\n",
455
+ "\n",
456
+ "\n",
457
+ "math_template = \"\"\"You are a very good mathematician. \\\n",
458
+ "You are great at answering math questions. \\\n",
459
+ "You are so good because you are able to break down \\\n",
460
+ "hard problems into their component parts, \n",
461
+ "answer the component parts, and then put them together\\\n",
462
+ "to answer the broader question.\n",
463
+ "\n",
464
+ "Here is a question:\n",
465
+ "{input}\"\"\"\n",
466
+ "\n",
467
+ "history_template = \"\"\"You are a very good historian. \\\n",
468
+ "You have an excellent knowledge of and understanding of people,\\\n",
469
+ "events and contexts from a range of historical periods. \\\n",
470
+ "You have the ability to think, reflect, debate, discuss and \\\n",
471
+ "evaluate the past. You have a respect for historical evidence\\\n",
472
+ "and the ability to make use of it to support your explanations \\\n",
473
+ "and judgements.\n",
474
+ "\n",
475
+ "Here is a question:\n",
476
+ "{input}\"\"\"\n",
477
+ "\n",
478
+ "\n",
479
+ "computerscience_template = \"\"\" You are a successful computer scientist.\\\n",
480
+ "You have a passion for creativity, collaboration,\\\n",
481
+ "forward-thinking, confidence, strong problem-solving capabilities,\\\n",
482
+ "understanding of theories and algorithms, and excellent communication \\\n",
483
+ "skills. You are great at answering coding questions. \\\n",
484
+ "You are so good because you know how to solve a problem by \\\n",
485
+ "describing the solution in imperative steps \\\n",
486
+ "that a machine can easily interpret and you know how to \\\n",
487
+ "choose a solution that has a good balance between \\\n",
488
+ "time complexity and space complexity. \n",
489
+ "\n",
490
+ "Here is a question:\n",
491
+ "{input}\"\"\""
492
+ ]
493
+ },
494
+ {
495
+ "cell_type": "code",
496
+ "execution_count": 50,
497
+ "id": "ec18c579-c130-490f-ad56-a2acd4f8bd60",
498
+ "metadata": {},
499
+ "outputs": [],
500
+ "source": [
501
+ "prompt_infos = [\n",
502
+ " {\n",
503
+ " \"name\": \"physics\", \n",
504
+ " \"description\": \"Good for answering questions about physics\", \n",
505
+ " \"prompt_template\": physics_template\n",
506
+ " },\n",
507
+ " {\n",
508
+ " \"name\": \"math\", \n",
509
+ " \"description\": \"Good for answering math questions\", \n",
510
+ " \"prompt_template\": math_template\n",
511
+ " },\n",
512
+ " {\n",
513
+ " \"name\": \"History\", \n",
514
+ " \"description\": \"Good for answering history questions\", \n",
515
+ " \"prompt_template\": history_template\n",
516
+ " },\n",
517
+ " {\n",
518
+ " \"name\": \"computer science\", \n",
519
+ " \"description\": \"Good for answering computer science questions\", \n",
520
+ " \"prompt_template\": computerscience_template\n",
521
+ " }\n",
522
+ "]"
523
+ ]
524
+ },
525
+ {
526
+ "cell_type": "code",
527
+ "execution_count": 51,
528
+ "id": "b38acbf5-d22c-4951-b2e0-c2beed545474",
529
+ "metadata": {},
530
+ "outputs": [],
531
+ "source": [
532
+ "from langchain.chains.router import MultiPromptChain\n",
533
+ "from langchain.chains.router.llm_router import LLMRouterChain,RouterOutputParser\n",
534
+ "from langchain.prompts import PromptTemplate\n",
535
+ "\n",
536
+ "llm = ChatOpenAI(temperature=0, model=llm_model)"
537
+ ]
538
+ },
539
+ {
540
+ "cell_type": "code",
541
+ "execution_count": 52,
542
+ "id": "eecb95b3-38ed-4433-a918-8febfe1945d0",
543
+ "metadata": {},
544
+ "outputs": [],
545
+ "source": [
546
+ "destination_chains = {}\n",
547
+ "for p_info in prompt_infos:\n",
548
+ " name = p_info[\"name\"]\n",
549
+ " prompt_template = p_info[\"prompt_template\"]\n",
550
+ " prompt = ChatPromptTemplate.from_template(template=prompt_template)\n",
551
+ " chain = LLMChain(llm=llm, prompt=prompt)\n",
552
+ " destination_chains[name] = chain \n",
553
+ " \n",
554
+ "destinations = [f\"{p['name']}: {p['description']}\" for p in prompt_infos]\n",
555
+ "destinations_str = \"\\n\".join(destinations)"
556
+ ]
557
+ },
558
+ {
559
+ "cell_type": "code",
560
+ "execution_count": 53,
561
+ "id": "228a3439-008d-47cd-ac10-fca1eccba51c",
562
+ "metadata": {},
563
+ "outputs": [],
564
+ "source": [
565
+ "default_prompt = ChatPromptTemplate.from_template(\"{input}\")\n",
566
+ "default_chain = LLMChain(llm=llm, prompt=default_prompt)"
567
+ ]
568
+ },
569
+ {
570
+ "cell_type": "code",
571
+ "execution_count": 54,
572
+ "id": "8b393708-04d1-40ab-a8ed-a1862d00b326",
573
+ "metadata": {},
574
+ "outputs": [],
575
+ "source": [
576
+ "MULTI_PROMPT_ROUTER_TEMPLATE = \"\"\"Given a raw text input to a \\\n",
577
+ "language model select the model prompt best suited for the input. \\\n",
578
+ "You will be given the names of the available prompts and a \\\n",
579
+ "description of what the prompt is best suited for. \\\n",
580
+ "You may also revise the original input if you think that revising\\\n",
581
+ "it will ultimately lead to a better response from the language model.\n",
582
+ "\n",
583
+ "<< FORMATTING >>\n",
584
+ "Return a markdown code snippet with a JSON object formatted to look like:\n",
585
+ "```json\n",
586
+ "{{{{\n",
587
+ " \"destination\": string \\ name of the prompt to use or \"DEFAULT\"\n",
588
+ " \"next_inputs\": string \\ a potentially modified version of the original input\n",
589
+ "}}}}\n",
590
+ "```\n",
591
+ "\n",
592
+ "REMEMBER: \"destination\" MUST be one of the candidate prompt \\\n",
593
+ "names specified below OR it can be \"DEFAULT\" if the input is not\\\n",
594
+ "well suited for any of the candidate prompts.\n",
595
+ "REMEMBER: \"next_inputs\" can just be the original input \\\n",
596
+ "if you don't think any modifications are needed.\n",
597
+ "\n",
598
+ "<< CANDIDATE PROMPTS >>\n",
599
+ "{destinations}\n",
600
+ "\n",
601
+ "<< INPUT >>\n",
602
+ "{{input}}\n",
603
+ "\n",
604
+ "<< OUTPUT (remember to include the ```json)>>\"\"\""
605
+ ]
606
+ },
607
+ {
608
+ "cell_type": "code",
609
+ "execution_count": 55,
610
+ "id": "2f40b20c-89ed-4217-9176-60a9ab5d45d2",
611
+ "metadata": {},
612
+ "outputs": [],
613
+ "source": [
614
+ "router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(\n",
615
+ " destinations=destinations_str\n",
616
+ ")\n",
617
+ "router_prompt = PromptTemplate(\n",
618
+ " template=router_template,\n",
619
+ " input_variables=[\"input\"],\n",
620
+ " output_parser=RouterOutputParser(),\n",
621
+ ")\n",
622
+ "\n",
623
+ "router_chain = LLMRouterChain.from_llm(llm, router_prompt)"
624
+ ]
625
+ },
626
+ {
627
+ "cell_type": "code",
628
+ "execution_count": 56,
629
+ "id": "f89d7a92-3696-4fb9-890a-36f5bd6d3752",
630
+ "metadata": {},
631
+ "outputs": [],
632
+ "source": [
633
+ "chain = MultiPromptChain(router_chain=router_chain, \n",
634
+ " destination_chains=destination_chains, \n",
635
+ " default_chain=default_chain, verbose=True\n",
636
+ " )"
637
+ ]
638
+ },
639
+ {
640
+ "cell_type": "code",
641
+ "execution_count": 57,
642
+ "id": "449208c8-2b58-4770-bb45-ae3c0778b432",
643
+ "metadata": {},
644
+ "outputs": [
645
+ {
646
+ "name": "stdout",
647
+ "output_type": "stream",
648
+ "text": [
649
+ "\n",
650
+ "\n",
651
+ "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n"
652
+ ]
653
+ },
654
+ {
655
+ "name": "stderr",
656
+ "output_type": "stream",
657
+ "text": [
658
+ "/home/sushantgo/Downloads/conda/lib/python3.11/site-packages/langchain/chains/llm.py:280: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n",
659
+ " warnings.warn(\n"
660
+ ]
661
+ },
662
+ {
663
+ "name": "stdout",
664
+ "output_type": "stream",
665
+ "text": [
666
+ "physics: {'input': 'What is black body radiation?'}\n",
667
+ "\u001b[1m> Finished chain.\u001b[0m\n"
668
+ ]
669
+ },
670
+ {
671
+ "data": {
672
+ "text/plain": [
673
+ "'Black body radiation refers to the electromagnetic radiation emitted by an object that absorbs all incident radiation and reflects or transmits none. It is called \"black body\" because it absorbs all wavelengths of light, appearing black at room temperature. \\n\\nAccording to Planck\\'s law, black body radiation is characterized by a continuous spectrum of wavelengths and intensities, which depend on the temperature of the object. As the temperature increases, the peak intensity of the radiation shifts to shorter wavelengths, resulting in a change in color from red to orange, yellow, white, and eventually blue at very high temperatures.\\n\\nBlack body radiation is a fundamental concept in physics and has various applications, including understanding the behavior of stars, explaining the cosmic microwave background radiation, and developing technologies like incandescent light bulbs and thermal imaging devices.'"
674
+ ]
675
+ },
676
+ "execution_count": 57,
677
+ "metadata": {},
678
+ "output_type": "execute_result"
679
+ }
680
+ ],
681
+ "source": [
682
+ "chain.run(\"What is black body radiation?\")"
683
+ ]
684
+ },
685
+ {
686
+ "cell_type": "code",
687
+ "execution_count": 58,
688
+ "id": "5c204f5e-5d20-4061-b08c-ad61999d3107",
689
+ "metadata": {},
690
+ "outputs": [
691
+ {
692
+ "name": "stdout",
693
+ "output_type": "stream",
694
+ "text": [
695
+ "\n",
696
+ "\n",
697
+ "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
698
+ "math: {'input': 'what is 2 + 2'}\n",
699
+ "\u001b[1m> Finished chain.\u001b[0m\n"
700
+ ]
701
+ },
702
+ {
703
+ "data": {
704
+ "text/plain": [
705
+ "\"Thank you for your kind words! As a mathematician, I am happy to help with any math questions, no matter how simple or complex they may be.\\n\\nNow, let's solve the problem at hand. The question is asking for the sum of 2 and 2. To find the answer, we can simply add the two numbers together:\\n\\n2 + 2 = 4\\n\\nTherefore, the sum of 2 and 2 is 4.\""
706
+ ]
707
+ },
708
+ "execution_count": 58,
709
+ "metadata": {},
710
+ "output_type": "execute_result"
711
+ }
712
+ ],
713
+ "source": [
714
+ "chain.run(\"what is 2 + 2\")"
715
+ ]
716
+ },
717
+ {
718
+ "cell_type": "code",
719
+ "execution_count": 59,
720
+ "id": "60de9868-1821-414e-9564-3847c62dbef9",
721
+ "metadata": {},
722
+ "outputs": [
723
+ {
724
+ "name": "stdout",
725
+ "output_type": "stream",
726
+ "text": [
727
+ "\n",
728
+ "\n",
729
+ "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
730
+ "biology: {'input': 'Why does every cell in our body contain DNA?'}"
731
+ ]
732
+ },
733
+ {
734
+ "ename": "ValueError",
735
+ "evalue": "Received invalid destination chain name 'biology'",
736
+ "output_type": "error",
737
+ "traceback": [
738
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
739
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
740
+ "Cell \u001b[0;32mIn[59], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhy does every cell in our body contain DNA?\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
741
+ "File \u001b[0;32m~/Downloads/conda/lib/python3.11/site-packages/langchain/chains/base.py:487\u001b[0m, in \u001b[0;36mChain.run\u001b[0;34m(self, callbacks, tags, metadata, *args, **kwargs)\u001b[0m\n\u001b[1;32m 485\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(args) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 486\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`run` supports only one positional argument.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 487\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetadata\u001b[49m\u001b[43m)\u001b[49m[\n\u001b[1;32m 488\u001b[0m _output_key\n\u001b[1;32m 489\u001b[0m ]\n\u001b[1;32m 491\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m args:\n\u001b[1;32m 492\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m(kwargs, callbacks\u001b[38;5;241m=\u001b[39mcallbacks, tags\u001b[38;5;241m=\u001b[39mtags, metadata\u001b[38;5;241m=\u001b[39mmetadata)[\n\u001b[1;32m 493\u001b[0m _output_key\n\u001b[1;32m 494\u001b[0m ]\n",
742
+ "File \u001b[0;32m~/Downloads/conda/lib/python3.11/site-packages/langchain/chains/base.py:292\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)\u001b[0m\n\u001b[1;32m 290\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 291\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[0;32m--> 292\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 293\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[1;32m 294\u001b[0m final_outputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(\n\u001b[1;32m 295\u001b[0m inputs, outputs, return_only_outputs\n\u001b[1;32m 296\u001b[0m )\n",
743
+ "File \u001b[0;32m~/Downloads/conda/lib/python3.11/site-packages/langchain/chains/base.py:286\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)\u001b[0m\n\u001b[1;32m 279\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[1;32m 280\u001b[0m dumpd(\u001b[38;5;28mself\u001b[39m),\n\u001b[1;32m 281\u001b[0m inputs,\n\u001b[1;32m 282\u001b[0m name\u001b[38;5;241m=\u001b[39mrun_name,\n\u001b[1;32m 283\u001b[0m )\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 285\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 286\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 288\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[1;32m 289\u001b[0m )\n\u001b[1;32m 290\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 291\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
744
+ "File \u001b[0;32m~/Downloads/conda/lib/python3.11/site-packages/langchain/chains/router/base.py:105\u001b[0m, in \u001b[0;36mMultiRouteChain._call\u001b[0;34m(self, inputs, run_manager)\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdefault_chain(route\u001b[38;5;241m.\u001b[39mnext_inputs, callbacks\u001b[38;5;241m=\u001b[39mcallbacks)\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 105\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 106\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mReceived invalid destination chain name \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mroute\u001b[38;5;241m.\u001b[39mdestination\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 107\u001b[0m )\n",
745
+ "\u001b[0;31mValueError\u001b[0m: Received invalid destination chain name 'biology'"
746
+ ]
747
+ }
748
+ ],
749
+ "source": [
750
+ "chain.run(\"Why does every cell in our body contain DNA?\")"
751
+ ]
752
+ },
753
+ {
754
+ "cell_type": "code",
755
+ "execution_count": null,
756
+ "id": "d9cbed99-9f90-4065-9b8f-a12de403c15a",
757
+ "metadata": {},
758
+ "outputs": [],
759
+ "source": []
760
+ }
761
+ ],
762
+ "metadata": {
763
+ "kernelspec": {
764
+ "display_name": "Python 3 (ipykernel)",
765
+ "language": "python",
766
+ "name": "python3"
767
+ },
768
+ "language_info": {
769
+ "codemirror_mode": {
770
+ "name": "ipython",
771
+ "version": 3
772
+ },
773
+ "file_extension": ".py",
774
+ "mimetype": "text/x-python",
775
+ "name": "python",
776
+ "nbconvert_exporter": "python",
777
+ "pygments_lexer": "ipython3",
778
+ "version": "3.11.4"
779
+ }
780
+ },
781
+ "nbformat": 4,
782
+ "nbformat_minor": 5
783
+ }
langchain.ipynb ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os\n",
10
+ "import gradio as gr\n",
11
+ "from dotenv import load_dotenv, find_dotenv\n",
12
+ "_ = load_dotenv(find_dotenv())\n",
13
+ "\n",
14
+ "from langchain.chains import ConversationChain\n",
15
+ "from langchain.chat_models import ChatOpenAI\n",
16
+ "from langchain.memory import ConversationBufferMemory\n",
17
+ "\n",
18
+ "llm = ChatOpenAI(temperature=0.0)\n",
19
+ "memory = ConversationBufferMemory()\n",
20
+ "conversion = ConversationChain(\n",
21
+ " llm=llm,\n",
22
+ " memory=memory,\n",
23
+ " verbose=False\n",
24
+ ")\n",
25
+ "\n",
26
+ "def takeinput(name):\n",
27
+ " output_str = conversion.predict(input=name)\n",
28
+ " return output_str\n",
29
+ "\n",
30
+ "demo = gr.Interface(\n",
31
+ " fn=takeinput,\n",
32
+ " inputs=[\"text\"],\n",
33
+ " outputs=[\"text\"]\n",
34
+ ")\n",
35
+ "\n",
36
+ "demo.launch(share=True)\n"
37
+ ]
38
+ }
39
+ ],
40
+ "metadata": {
41
+ "language_info": {
42
+ "name": "python"
43
+ },
44
+ "orig_nbformat": 4
45
+ },
46
+ "nbformat": 4,
47
+ "nbformat_minor": 2
48
+ }
llm-app.ipynb ADDED
@@ -0,0 +1,755 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 9,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import openai\n",
10
+ "import os\n",
11
+ "\n",
12
+ "from dotenv import load_dotenv, find_dotenv\n",
13
+ "_ = load_dotenv(find_dotenv())\n",
14
+ "openai.api_key = os.environ['OPENAI_API_KEY']\n",
15
+ "\n",
16
+ "def get_completion(prompt, model = 'gpt-3.5-turbo'):\n",
17
+ " messages = [{\n",
18
+ " \"role\": \"user\",\n",
19
+ " \"content\": prompt\n",
20
+ " }]\n",
21
+ " response = openai.ChatCompletion.create(\n",
22
+ " model=model,\n",
23
+ " messages=messages,\n",
24
+ " temperature=0\n",
25
+ " )\n",
26
+ " return response.choices[0].message['content']"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": 11,
32
+ "metadata": {},
33
+ "outputs": [
34
+ {
35
+ "data": {
36
+ "text/plain": [
37
+ "'4 + 4 equals 8.'"
38
+ ]
39
+ },
40
+ "execution_count": 11,
41
+ "metadata": {},
42
+ "output_type": "execute_result"
43
+ }
44
+ ],
45
+ "source": [
46
+ "get_completion('what is 4+4?')"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": 12,
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": [
55
+ "email_text = \"\"\"\n",
56
+ "Arrr, I be fuming that me blender lid \\\n",
57
+ "flew off and splattered the kitchen walls \\\n",
58
+ "with smoothie! And to make matters worse, \\\n",
59
+ "the warranty don't cover the cost of cleaning up me kitchen. I need yer help \\\n",
60
+ "right now, matey!\n",
61
+ "\"\"\""
62
+ ]
63
+ },
64
+ {
65
+ "cell_type": "code",
66
+ "execution_count": 13,
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "style = \"\"\"American English \\\n",
71
+ "in a calm and respectful tone\"\"\""
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "code",
76
+ "execution_count": 14,
77
+ "metadata": {},
78
+ "outputs": [
79
+ {
80
+ "name": "stdout",
81
+ "output_type": "stream",
82
+ "text": [
83
+ "Translate the text that into delimated by triple backticks \n",
84
+ "into a style that is American English in a calm and respectful tone.\n",
85
+ "text: ```\n",
86
+ "Arrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\n",
87
+ "```\n",
88
+ "\n"
89
+ ]
90
+ }
91
+ ],
92
+ "source": [
93
+ "prompt = f\"\"\"Translate the text \\\n",
94
+ "that into delimated by triple backticks \n",
95
+ "into a style that is {style}.\n",
96
+ "text: ```{email_text}```\n",
97
+ "\"\"\"\n",
98
+ "print(prompt)"
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "code",
103
+ "execution_count": 15,
104
+ "metadata": {},
105
+ "outputs": [],
106
+ "source": [
107
+ "response = get_completion(prompt)"
108
+ ]
109
+ },
110
+ {
111
+ "cell_type": "code",
112
+ "execution_count": 8,
113
+ "metadata": {},
114
+ "outputs": [
115
+ {
116
+ "data": {
117
+ "text/plain": [
118
+ "\"Arrgh, I'm quite frustrated that my blender lid flew off and made a mess of the kitchen walls with smoothie! And to add insult to injury, the warranty doesn't cover the expenses of cleaning up my kitchen. I kindly request your assistance at this moment, my friend!\""
119
+ ]
120
+ },
121
+ "execution_count": 8,
122
+ "metadata": {},
123
+ "output_type": "execute_result"
124
+ }
125
+ ],
126
+ "source": [
127
+ "response"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 16,
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "from langchain.chat_models import ChatOpenAI"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "code",
141
+ "execution_count": 17,
142
+ "metadata": {},
143
+ "outputs": [],
144
+ "source": [
145
+ "chat = ChatOpenAI(temperature=0.0)"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "code",
150
+ "execution_count": 7,
151
+ "metadata": {},
152
+ "outputs": [
153
+ {
154
+ "data": {
155
+ "text/plain": [
156
+ "ChatOpenAI(cache=None, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, model_name='gpt-3.5-turbo', temperature=0.0, model_kwargs={}, openai_api_key='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', openai_api_base='', openai_organization='', openai_proxy='', request_timeout=None, max_retries=6, streaming=False, n=1, max_tokens=None, tiktoken_model_name=None)"
157
+ ]
158
+ },
159
+ "execution_count": 7,
160
+ "metadata": {},
161
+ "output_type": "execute_result"
162
+ }
163
+ ],
164
+ "source": [
165
+ "chat"
166
+ ]
167
+ },
168
+ {
169
+ "cell_type": "code",
170
+ "execution_count": 18,
171
+ "metadata": {},
172
+ "outputs": [],
173
+ "source": [
174
+ "template_string = \"\"\"Translate the text \\\n",
175
+ "that into delimited by triple backticks \\\n",
176
+ "into a style that is {style}. \\\n",
177
+ "text: ```{text}```\n",
178
+ "\"\"\""
179
+ ]
180
+ },
181
+ {
182
+ "cell_type": "code",
183
+ "execution_count": 19,
184
+ "metadata": {},
185
+ "outputs": [],
186
+ "source": [
187
+ "from langchain.prompts import ChatPromptTemplate\n",
188
+ "from langchain.chains import LLMChain\n",
189
+ "from langchain.chat_models import ChatOpenAI\n",
190
+ "prompt_template = ChatPromptTemplate.from_template(template_string)\n",
191
+ "\n"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": 18,
197
+ "metadata": {},
198
+ "outputs": [
199
+ {
200
+ "data": {
201
+ "text/plain": [
202
+ "PromptTemplate(input_variables=['style', 'text'], output_parser=None, partial_variables={}, template='Translate the text that into delimited by triple backticks into a style that is {style}. text: ```{text}```\\n', template_format='f-string', validate_template=True)"
203
+ ]
204
+ },
205
+ "execution_count": 18,
206
+ "metadata": {},
207
+ "output_type": "execute_result"
208
+ }
209
+ ],
210
+ "source": [
211
+ "prompt_template.messages[0].prompt"
212
+ ]
213
+ },
214
+ {
215
+ "cell_type": "code",
216
+ "execution_count": 19,
217
+ "metadata": {},
218
+ "outputs": [
219
+ {
220
+ "data": {
221
+ "text/plain": [
222
+ "['style', 'text']"
223
+ ]
224
+ },
225
+ "execution_count": 19,
226
+ "metadata": {},
227
+ "output_type": "execute_result"
228
+ }
229
+ ],
230
+ "source": [
231
+ "prompt_template.messages[0].prompt.input_variables"
232
+ ]
233
+ },
234
+ {
235
+ "cell_type": "code",
236
+ "execution_count": 20,
237
+ "metadata": {},
238
+ "outputs": [],
239
+ "source": [
240
+ "customer_style = \"\"\"American English \\\n",
241
+ "in a calm and respectful tone\"\"\""
242
+ ]
243
+ },
244
+ {
245
+ "cell_type": "code",
246
+ "execution_count": null,
247
+ "metadata": {},
248
+ "outputs": [],
249
+ "source": []
250
+ },
251
+ {
252
+ "cell_type": "code",
253
+ "execution_count": 23,
254
+ "metadata": {},
255
+ "outputs": [
256
+ {
257
+ "data": {
258
+ "text/plain": [
259
+ "\"\\nArrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\\n\""
260
+ ]
261
+ },
262
+ "execution_count": 23,
263
+ "metadata": {},
264
+ "output_type": "execute_result"
265
+ }
266
+ ],
267
+ "source": [
268
+ "email_text"
269
+ ]
270
+ },
271
+ {
272
+ "cell_type": "code",
273
+ "execution_count": 21,
274
+ "metadata": {},
275
+ "outputs": [],
276
+ "source": [
277
+ "customer_messages = prompt_template.format_messages(style=customer_style, text=email_text)\n"
278
+ ]
279
+ },
280
+ {
281
+ "cell_type": "code",
282
+ "execution_count": 26,
283
+ "metadata": {},
284
+ "outputs": [
285
+ {
286
+ "data": {
287
+ "text/plain": [
288
+ "HumanMessage(content=\"Translate the text that into delimited by triple backticks into a style that is American English in a calm and respectful tone. text: ```\\nArrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\\n```\\n\", additional_kwargs={}, example=False)"
289
+ ]
290
+ },
291
+ "execution_count": 26,
292
+ "metadata": {},
293
+ "output_type": "execute_result"
294
+ }
295
+ ],
296
+ "source": [
297
+ "customer_messages[0]"
298
+ ]
299
+ },
300
+ {
301
+ "cell_type": "code",
302
+ "execution_count": 22,
303
+ "metadata": {},
304
+ "outputs": [
305
+ {
306
+ "name": "stdout",
307
+ "output_type": "stream",
308
+ "text": [
309
+ "<class 'langchain.schema.messages.HumanMessage'>\n",
310
+ "<class 'list'>\n"
311
+ ]
312
+ }
313
+ ],
314
+ "source": [
315
+ "print(type(customer_messages[0]))\n",
316
+ "print(type(customer_messages))"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "code",
321
+ "execution_count": 33,
322
+ "metadata": {},
323
+ "outputs": [
324
+ {
325
+ "name": "stdout",
326
+ "output_type": "stream",
327
+ "text": [
328
+ "content=\"Translate the text that into delimited by triple backticks into a style that is American English in a calm and respectful tone. text: ```\\nArrr, I be fuming that me blender lid flew off and splattered the kitchen walls with smoothie! And to make matters worse, the warranty don't cover the cost of cleaning up me kitchen. I need yer help right now, matey!\\n```\\n\" additional_kwargs={} example=False\n"
329
+ ]
330
+ }
331
+ ],
332
+ "source": [
333
+ "print(customer_messages[0])"
334
+ ]
335
+ },
336
+ {
337
+ "cell_type": "code",
338
+ "execution_count": 34,
339
+ "metadata": {},
340
+ "outputs": [],
341
+ "source": [
342
+ "customer_response = chat(customer_messages)"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "code",
347
+ "execution_count": 37,
348
+ "metadata": {},
349
+ "outputs": [
350
+ {
351
+ "name": "stdout",
352
+ "output_type": "stream",
353
+ "text": [
354
+ "I'm really frustrated that my blender lid flew off and made a mess of the kitchen walls with smoothie! And to add to my frustration, the warranty doesn't cover the cost of cleaning up my kitchen. I could really use your help at this moment, my friend!\n"
355
+ ]
356
+ }
357
+ ],
358
+ "source": [
359
+ "print(customer_response.content)"
360
+ ]
361
+ },
362
+ {
363
+ "cell_type": "code",
364
+ "execution_count": 23,
365
+ "metadata": {},
366
+ "outputs": [],
367
+ "source": [
368
+ "service_reply = \"\"\"Hey There Customer, \\\n",
369
+ "the warranty does not cover \\\n",
370
+ "cleaning expenses for your kitchen \\\n",
371
+ "because it's your fault that \\\n",
372
+ "you misused your blender \\\n",
373
+ "by forgotting to put the lid on before \\\n",
374
+ "starting the blender. \\\n",
375
+ "Tough luck! see ya!\n",
376
+ "\"\"\"\n"
377
+ ]
378
+ },
379
+ {
380
+ "cell_type": "code",
381
+ "execution_count": 24,
382
+ "metadata": {},
383
+ "outputs": [],
384
+ "source": [
385
+ "service_reply_pirate = \"\"\"\\\n",
386
+ "a polite tone \\\n",
387
+ "that speaks in English Pirate\\\n",
388
+ "\"\"\""
389
+ ]
390
+ },
391
+ {
392
+ "cell_type": "code",
393
+ "execution_count": null,
394
+ "metadata": {},
395
+ "outputs": [],
396
+ "source": []
397
+ },
398
+ {
399
+ "cell_type": "code",
400
+ "execution_count": 25,
401
+ "metadata": {},
402
+ "outputs": [],
403
+ "source": [
404
+ "service_messages = prompt_template.format_messages(style=service_reply, text=service_reply_pirate)"
405
+ ]
406
+ },
407
+ {
408
+ "cell_type": "code",
409
+ "execution_count": 44,
410
+ "metadata": {},
411
+ "outputs": [
412
+ {
413
+ "name": "stdout",
414
+ "output_type": "stream",
415
+ "text": [
416
+ "Translate the text that into delimited by triple backticks into a style that is Hey There Customer, the warranty does not cover cleaning expenses for your kitchen because it's your fault that you misused your blender by forgotting to put the lid on before starting the blender. Tough luck! see ya!\n",
417
+ ". text: ```a polite tone that speaks in English Pirate```\n",
418
+ "\n"
419
+ ]
420
+ }
421
+ ],
422
+ "source": [
423
+ "print(service_messages[0].content)"
424
+ ]
425
+ },
426
+ {
427
+ "cell_type": "code",
428
+ "execution_count": 26,
429
+ "metadata": {},
430
+ "outputs": [],
431
+ "source": [
432
+ "service_response = chat(service_messages)"
433
+ ]
434
+ },
435
+ {
436
+ "cell_type": "code",
437
+ "execution_count": 47,
438
+ "metadata": {},
439
+ "outputs": [
440
+ {
441
+ "name": "stdout",
442
+ "output_type": "stream",
443
+ "text": [
444
+ "Ahoy there, matey! The warranty be not coverin' the costs o' cleanin' yer galley due to yer own folly. Ye be misusin' yer blender by forgettin' to secure the lid afore startin' it. Tough luck, me heartie! Fare thee well!\n"
445
+ ]
446
+ }
447
+ ],
448
+ "source": [
449
+ "print(service_response.content)"
450
+ ]
451
+ },
452
+ {
453
+ "cell_type": "code",
454
+ "execution_count": 48,
455
+ "metadata": {},
456
+ "outputs": [
457
+ {
458
+ "data": {
459
+ "text/plain": [
460
+ "{'gift': False, 'delivery_days': 5, 'price_value': 'pretty affordable!'}"
461
+ ]
462
+ },
463
+ "execution_count": 48,
464
+ "metadata": {},
465
+ "output_type": "execute_result"
466
+ }
467
+ ],
468
+ "source": [
469
+ "{\n",
470
+ " \"gift\": False,\n",
471
+ " \"delivery_days\": 5,\n",
472
+ " \"price_value\": \"pretty affordable!\"\n",
473
+ "}"
474
+ ]
475
+ },
476
+ {
477
+ "cell_type": "code",
478
+ "execution_count": 27,
479
+ "metadata": {},
480
+ "outputs": [],
481
+ "source": [
482
+ "customer_review = \"\"\" \\\n",
483
+ "This leaf blower is pretty amazing. It has four settings: \\\n",
484
+ "candle blower, gentle breeze, windy city, and tornado. \\\n",
485
+ "It arrived in two days, just in time for my wife's \\\n",
486
+ "anniversary present. \\\n",
487
+ "I think my wife liked it so much she was speechless. \\\n",
488
+ "So far I've been the only one using it, and I've been \\\n",
489
+ "using it every other morning to clear the leaves on our lawn \\\n",
490
+ "It's slightly more expensive than the other leaf blowers \\\n",
491
+ "out there, But I think its worth it for the extra features.\n",
492
+ "\"\"\"\n",
493
+ "\n",
494
+ "review_template = \"\"\"\n",
495
+ "For the following text, extra the following information: \\\n",
496
+ "\n",
497
+ "gift: Was the item purchased as a gift for someone else? Ans should be in boolean\n",
498
+ "delivery_days: How many days it take for the product to deliver?\n",
499
+ "price_value: Extract any sentence about the value or price,\n",
500
+ "\n",
501
+ "format the output as JSON with the following keys:\n",
502
+ "gift\n",
503
+ "delivery_days\n",
504
+ "price_value\n",
505
+ "\n",
506
+ "text = {text}\n",
507
+ "\"\"\""
508
+ ]
509
+ },
510
+ {
511
+ "cell_type": "code",
512
+ "execution_count": 28,
513
+ "metadata": {},
514
+ "outputs": [
515
+ {
516
+ "name": "stdout",
517
+ "output_type": "stream",
518
+ "text": [
519
+ "input_variables=['text'] output_parser=None partial_variables={} messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['text'], output_parser=None, partial_variables={}, template='\\nFor the following text, extra the following information: \\ngift: Was the item purchased as a gift for someone else? Ans should be in boolean\\ndelivery_days: How many days it take for the product to deliver?\\nprice_value: Extract any sentence about the value or price,\\n\\nformat the output as JSON with the following keys:\\ngift\\ndelivery_days\\nprice_value\\n\\ntext = {text}\\n', template_format='f-string', validate_template=True), additional_kwargs={})]\n"
520
+ ]
521
+ }
522
+ ],
523
+ "source": [
524
+ "promp_temp = ChatPromptTemplate.from_template(review_template)\n",
525
+ "print(promp_temp)"
526
+ ]
527
+ },
528
+ {
529
+ "cell_type": "code",
530
+ "execution_count": 29,
531
+ "metadata": {},
532
+ "outputs": [
533
+ {
534
+ "name": "stderr",
535
+ "output_type": "stream",
536
+ "text": [
537
+ "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised Timeout: Request timed out: HTTPSConnectionPool(host='api.openai.com', port=443): Read timed out. (read timeout=600).\n"
538
+ ]
539
+ },
540
+ {
541
+ "name": "stdout",
542
+ "output_type": "stream",
543
+ "text": [
544
+ "{\n",
545
+ " \"gift\": false,\n",
546
+ " \"delivery_days\": 2,\n",
547
+ " \"price_value\": \"It's slightly more expensive than the other leaf blowers out there, But I think its worth it for the extra features.\"\n",
548
+ "}\n"
549
+ ]
550
+ }
551
+ ],
552
+ "source": [
553
+ "messages = promp_temp.format_messages(text=customer_review)\n",
554
+ "chat = ChatOpenAI(temperature=0.0)\n",
555
+ "response = chat(messages)\n",
556
+ "print(response.content)"
557
+ ]
558
+ },
559
+ {
560
+ "cell_type": "code",
561
+ "execution_count": 60,
562
+ "metadata": {},
563
+ "outputs": [
564
+ {
565
+ "data": {
566
+ "text/plain": [
567
+ "str"
568
+ ]
569
+ },
570
+ "execution_count": 60,
571
+ "metadata": {},
572
+ "output_type": "execute_result"
573
+ }
574
+ ],
575
+ "source": [
576
+ "type(response.content)"
577
+ ]
578
+ },
579
+ {
580
+ "cell_type": "code",
581
+ "execution_count": null,
582
+ "metadata": {},
583
+ "outputs": [],
584
+ "source": []
585
+ },
586
+ {
587
+ "cell_type": "code",
588
+ "execution_count": 30,
589
+ "metadata": {},
590
+ "outputs": [
591
+ {
592
+ "name": "stdout",
593
+ "output_type": "stream",
594
+ "text": [
595
+ "The output should be a markdown code snippet formatted in the following schema, including the leading and trailing \"```json\" and \"```\":\n",
596
+ "\n",
597
+ "```json\n",
598
+ "{\n",
599
+ "\t\"gift\": string // was the item purchased as a gift for someone else? Answer True if yes, false if not or unknown.\n",
600
+ "\t\"delivery_days\": string // How many days it take for the product to arrive?\n",
601
+ "\t\"price_value\": string // Extract any sentence about the value or price\n",
602
+ "}\n",
603
+ "```\n"
604
+ ]
605
+ }
606
+ ],
607
+ "source": [
608
+ "from langchain.output_parsers import ResponseSchema\n",
609
+ "from langchain.output_parsers import StructuredOutputParser\n",
610
+ "\n",
611
+ "gift_schema = ResponseSchema(name=\"gift\", description=\"was the item purchased as a gift for someone else? Answer True if yes, false if not or unknown.\")\n",
612
+ "delivery_days_schema = ResponseSchema(name=\"delivery_days\", description=\"How many days it take for the product to arrive?\")\n",
613
+ "price_value_schema = ResponseSchema(name=\"price_value\", description=\"Extract any sentence about the value or price\")\n",
614
+ "\n",
615
+ "response_schemas = [gift_schema, delivery_days_schema, price_value_schema]\n",
616
+ "output_parser = StructuredOutputParser.from_response_schemas(response_schemas)\n",
617
+ "format_instructions = output_parser.get_format_instructions()\n",
618
+ "print(format_instructions)"
619
+ ]
620
+ },
621
+ {
622
+ "cell_type": "code",
623
+ "execution_count": 5,
624
+ "metadata": {},
625
+ "outputs": [
626
+ {
627
+ "data": {
628
+ "text/plain": [
629
+ "'The output should be a markdown code snippet formatted in the following schema, including the leading and trailing \"```json\" and \"```\":\\n\\n```json\\n{\\n\\t\"gift\": string // was the item purchased as a gift for someone else? Answer True if yes, false if not or unknown.\\n\\t\"delivery_days\": string // How many days it take for the product to arrive?\\n\\t\"price_value\": string // Extract any sentence about the value or price\\n}\\n```'"
630
+ ]
631
+ },
632
+ "execution_count": 5,
633
+ "metadata": {},
634
+ "output_type": "execute_result"
635
+ }
636
+ ],
637
+ "source": [
638
+ "format_instructions"
639
+ ]
640
+ },
641
+ {
642
+ "cell_type": "code",
643
+ "execution_count": 32,
644
+ "metadata": {},
645
+ "outputs": [],
646
+ "source": [
647
+ "review_template_2 = \"\"\"\\\n",
648
+ "For the following text, extract the following information:\n",
649
+ "\n",
650
+ "gift: Was the item purchased as a gift for someone else? \\\n",
651
+ "Answer True if yes, False if not or unknown.\n",
652
+ "\n",
653
+ "delivery_days: How many days did it take for the product\\\n",
654
+ "to arrive? If this information is not found, output -1.\n",
655
+ "\n",
656
+ "price_value: Extract any sentences about the value or price,\\\n",
657
+ "and output them as a comma separated Python list.\n",
658
+ "\n",
659
+ "text: {text}\n",
660
+ "\n",
661
+ "{format_instructions}\n",
662
+ "\"\"\"\n",
663
+ "\n",
664
+ "prompt = ChatPromptTemplate.from_template(template=review_template_2)\n",
665
+ "\n"
666
+ ]
667
+ },
668
+ {
669
+ "cell_type": "code",
670
+ "execution_count": 38,
671
+ "metadata": {},
672
+ "outputs": [],
673
+ "source": [
674
+ "messages = promp_temp.format_messages(text=customer_review, \n",
675
+ " format_instructions=format_instructions)"
676
+ ]
677
+ },
678
+ {
679
+ "cell_type": "code",
680
+ "execution_count": 42,
681
+ "metadata": {},
682
+ "outputs": [
683
+ {
684
+ "name": "stdout",
685
+ "output_type": "stream",
686
+ "text": [
687
+ "For the following text, extract the following information:\n",
688
+ "\n",
689
+ "gift: Was the item purchased as a gift for someone else? Answer True if yes, False if not or unknown.\n",
690
+ "\n",
691
+ "delivery_days: How many days did it take for the productto arrive? If this information is not found, output -1.\n",
692
+ "\n",
693
+ "price_value: Extract any sentences about the value or price,and output them as a comma separated Python list.\n",
694
+ "\n",
695
+ "text: {text}\n",
696
+ "\n",
697
+ "{format_instructions}\n",
698
+ "\n"
699
+ ]
700
+ }
701
+ ],
702
+ "source": [
703
+ "print(review_template_2)"
704
+ ]
705
+ },
706
+ {
707
+ "cell_type": "code",
708
+ "execution_count": 40,
709
+ "metadata": {},
710
+ "outputs": [
711
+ {
712
+ "data": {
713
+ "text/plain": [
714
+ "[HumanMessage(content=\"\\nFor the following text, extra the following information: \\ngift: Was the item purchased as a gift for someone else? Ans should be in boolean\\ndelivery_days: How many days it take for the product to deliver?\\nprice_value: Extract any sentence about the value or price,\\n\\nformat the output as JSON with the following keys:\\ngift\\ndelivery_days\\nprice_value\\n\\ntext = This leaf blower is pretty amazing. It has four settings: candle blower, gentle breeze, windy city, and tornado. It arrived in two days, just in time for my wife's anniversary present. I think my wife liked it so much she was speechless. So far I've been the only one using it, and I've been using it every other morning to clear the leaves on our lawn It's slightly more expensive than the other leaf blowers out there, But I think its worth it for the extra features.\\n\\n\", additional_kwargs={}, example=False)]"
715
+ ]
716
+ },
717
+ "execution_count": 40,
718
+ "metadata": {},
719
+ "output_type": "execute_result"
720
+ }
721
+ ],
722
+ "source": [
723
+ "messages"
724
+ ]
725
+ },
726
+ {
727
+ "cell_type": "code",
728
+ "execution_count": null,
729
+ "metadata": {},
730
+ "outputs": [],
731
+ "source": []
732
+ }
733
+ ],
734
+ "metadata": {
735
+ "kernelspec": {
736
+ "display_name": "Python 3 (ipykernel)",
737
+ "language": "python",
738
+ "name": "python3"
739
+ },
740
+ "language_info": {
741
+ "codemirror_mode": {
742
+ "name": "ipython",
743
+ "version": 3
744
+ },
745
+ "file_extension": ".py",
746
+ "mimetype": "text/x-python",
747
+ "name": "python",
748
+ "nbconvert_exporter": "python",
749
+ "pygments_lexer": "ipython3",
750
+ "version": "3.11.4"
751
+ }
752
+ },
753
+ "nbformat": 4,
754
+ "nbformat_minor": 4
755
+ }
memory.ipynb ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "15d94dba-6acc-4d42-b4c9-5c417fae885d",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import os\n",
11
+ "\n",
12
+ "from dotenv import load_dotenv, find_dotenv\n",
13
+ "_ = load_dotenv(find_dotenv())\n",
14
+ "\n",
15
+ "from langchain.chains import ConversationChain\n",
16
+ "from langchain.chat_models import ChatOpenAI\n",
17
+ "from langchain.memory import ConversationBufferMemory\n",
18
+ "\n",
19
+ "llm = ChatOpenAI(temperature=0.0)\n",
20
+ "memory = ConversationBufferMemory()\n",
21
+ "conversion = ConversationChain(\n",
22
+ " llm=llm,\n",
23
+ " memory=memory,\n",
24
+ " verbose=False\n",
25
+ ")"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": 2,
31
+ "id": "29f611c2-3693-493b-91da-ad11a9191aeb",
32
+ "metadata": {},
33
+ "outputs": [
34
+ {
35
+ "data": {
36
+ "text/plain": [
37
+ "\"The average distance between the Sun and Earth is about 93 million miles (150 million kilometers). This distance is known as an astronomical unit (AU). However, it's important to note that the distance between the Sun and Earth varies slightly throughout the year due to the elliptical shape of Earth's orbit around the Sun.\""
38
+ ]
39
+ },
40
+ "execution_count": 2,
41
+ "metadata": {},
42
+ "output_type": "execute_result"
43
+ }
44
+ ],
45
+ "source": [
46
+ "conversion.predict(input='what is the distance between sun and earth?')"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": 3,
52
+ "id": "5ebb0e23-9bde-49ad-b187-8a7b8765270a",
53
+ "metadata": {},
54
+ "outputs": [
55
+ {
56
+ "data": {
57
+ "text/plain": [
58
+ "'5 + 4 equals 9.'"
59
+ ]
60
+ },
61
+ "execution_count": 3,
62
+ "metadata": {},
63
+ "output_type": "execute_result"
64
+ }
65
+ ],
66
+ "source": [
67
+ "conversion.predict(input='whats is 5+4')"
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "code",
72
+ "execution_count": 4,
73
+ "id": "65e5b06b-3f08-43e2-8fd1-ac3d5106cf40",
74
+ "metadata": {},
75
+ "outputs": [
76
+ {
77
+ "data": {
78
+ "text/plain": [
79
+ "'Your name is Sushant.'"
80
+ ]
81
+ },
82
+ "execution_count": 4,
83
+ "metadata": {},
84
+ "output_type": "execute_result"
85
+ }
86
+ ],
87
+ "source": [
88
+ "conversion.predict(input='what is my name')"
89
+ ]
90
+ },
91
+ {
92
+ "cell_type": "code",
93
+ "execution_count": 5,
94
+ "id": "8fd49cb9-b0f8-494b-87b4-84762ced5ba5",
95
+ "metadata": {},
96
+ "outputs": [
97
+ {
98
+ "data": {
99
+ "text/plain": [
100
+ "\"Human: Hi, My name is sushant\\nAI: Hello Sushant! It's nice to meet you. How can I assist you today?\\nHuman: whats is 5+4\\nAI: 5 + 4 equals 9.\\nHuman: what is my name\\nAI: Your name is Sushant.\""
101
+ ]
102
+ },
103
+ "execution_count": 5,
104
+ "metadata": {},
105
+ "output_type": "execute_result"
106
+ }
107
+ ],
108
+ "source": [
109
+ "memory.buffer"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "code",
114
+ "execution_count": 6,
115
+ "id": "a326a54b-cdf3-4be1-ad6f-465433b60d6b",
116
+ "metadata": {},
117
+ "outputs": [
118
+ {
119
+ "data": {
120
+ "text/plain": [
121
+ "{'history': \"Human: Hi, My name is sushant\\nAI: Hello Sushant! It's nice to meet you. How can I assist you today?\\nHuman: whats is 5+4\\nAI: 5 + 4 equals 9.\\nHuman: what is my name\\nAI: Your name is Sushant.\"}"
122
+ ]
123
+ },
124
+ "execution_count": 6,
125
+ "metadata": {},
126
+ "output_type": "execute_result"
127
+ }
128
+ ],
129
+ "source": [
130
+ "memory.load_memory_variables({})"
131
+ ]
132
+ },
133
+ {
134
+ "cell_type": "code",
135
+ "execution_count": 7,
136
+ "id": "9b43105b-147f-4d45-bd12-11d04c5ce230",
137
+ "metadata": {},
138
+ "outputs": [],
139
+ "source": [
140
+ "memory.save_context({\"input\": \"Hi There!\"}, {\"output\": \"Hello\"})"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 8,
146
+ "id": "5be149da-ab1b-447d-87c2-17173aae4221",
147
+ "metadata": {},
148
+ "outputs": [
149
+ {
150
+ "data": {
151
+ "text/plain": [
152
+ "{'history': \"Human: Hi, My name is sushant\\nAI: Hello Sushant! It's nice to meet you. How can I assist you today?\\nHuman: whats is 5+4\\nAI: 5 + 4 equals 9.\\nHuman: what is my name\\nAI: Your name is Sushant.\\nHuman: Hi There!\\nAI: Hello\"}"
153
+ ]
154
+ },
155
+ "execution_count": 8,
156
+ "metadata": {},
157
+ "output_type": "execute_result"
158
+ }
159
+ ],
160
+ "source": [
161
+ "memory.load_memory_variables({})"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": 9,
167
+ "id": "99869539-72ae-4ee9-a5dc-2a490d4a3301",
168
+ "metadata": {},
169
+ "outputs": [
170
+ {
171
+ "data": {
172
+ "text/plain": [
173
+ "{'history': \"Human: I'm fine\\nAI: Ok\"}"
174
+ ]
175
+ },
176
+ "execution_count": 9,
177
+ "metadata": {},
178
+ "output_type": "execute_result"
179
+ }
180
+ ],
181
+ "source": [
182
+ "from langchain.memory import ConversationBufferWindowMemory\n",
183
+ "memory = ConversationBufferWindowMemory(k=1) # k=1 where k equals to no of conversion we want here 1.\n",
184
+ "\n",
185
+ "memory.save_context({\"input\": 'Hello'}, {\"output\": 'There'})\n",
186
+ "memory.save_context({\"input\": \"I'm fine\"}, {\"output\": 'Ok'})\n",
187
+ "\n",
188
+ "memory.load_memory_variables({})"
189
+ ]
190
+ },
191
+ {
192
+ "cell_type": "code",
193
+ "execution_count": 10,
194
+ "id": "eb7f4dca-2fd7-4980-91ab-ee3db6532ce2",
195
+ "metadata": {},
196
+ "outputs": [],
197
+ "source": [
198
+ "llm = ChatOpenAI(temperature=0.0)\n",
199
+ "memory = ConversationBufferMemory()\n",
200
+ "conversation = ConversationChain(\n",
201
+ " llm=llm,\n",
202
+ " memory=memory,\n",
203
+ " verbose=True\n",
204
+ ")"
205
+ ]
206
+ },
207
+ {
208
+ "cell_type": "code",
209
+ "execution_count": 11,
210
+ "id": "4da61d3b-a455-42a3-9654-5d54abb7ee76",
211
+ "metadata": {},
212
+ "outputs": [
213
+ {
214
+ "name": "stdout",
215
+ "output_type": "stream",
216
+ "text": [
217
+ "\n",
218
+ "\n",
219
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
220
+ "Prompt after formatting:\n",
221
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
222
+ "\n",
223
+ "Current conversation:\n",
224
+ "\n",
225
+ "Human: Hello I'm Sushant\n",
226
+ "AI:\u001b[0m\n",
227
+ "\n",
228
+ "\u001b[1m> Finished chain.\u001b[0m\n"
229
+ ]
230
+ },
231
+ {
232
+ "data": {
233
+ "text/plain": [
234
+ "'Hello Sushant! How can I assist you today?'"
235
+ ]
236
+ },
237
+ "execution_count": 11,
238
+ "metadata": {},
239
+ "output_type": "execute_result"
240
+ }
241
+ ],
242
+ "source": [
243
+ "conversation.predict(input=\"Hello I'm Sushant\")"
244
+ ]
245
+ },
246
+ {
247
+ "cell_type": "code",
248
+ "execution_count": 12,
249
+ "id": "3cbafc44-0668-44bc-9cec-869b01f6f315",
250
+ "metadata": {},
251
+ "outputs": [
252
+ {
253
+ "name": "stdout",
254
+ "output_type": "stream",
255
+ "text": [
256
+ "\n",
257
+ "\n",
258
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
259
+ "Prompt after formatting:\n",
260
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
261
+ "\n",
262
+ "Current conversation:\n",
263
+ "Human: Hello I'm Sushant\n",
264
+ "AI: Hello Sushant! How can I assist you today?\n",
265
+ "Human: who is armstrong\n",
266
+ "AI:\u001b[0m\n",
267
+ "\n",
268
+ "\u001b[1m> Finished chain.\u001b[0m\n"
269
+ ]
270
+ },
271
+ {
272
+ "data": {
273
+ "text/plain": [
274
+ "'Armstrong can refer to several different people. One famous person named Armstrong is Neil Armstrong, who was an American astronaut and the first person to walk on the moon. Another famous Armstrong is Louis Armstrong, who was a jazz musician and trumpeter. There are also many other people with the last name Armstrong, so it would depend on which specific Armstrong you are referring to.'"
275
+ ]
276
+ },
277
+ "execution_count": 12,
278
+ "metadata": {},
279
+ "output_type": "execute_result"
280
+ }
281
+ ],
282
+ "source": [
283
+ "conversation.predict(input=\"who is armstrong\")"
284
+ ]
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": 13,
289
+ "id": "b30e8043-3df1-42c0-873b-958aaea17f04",
290
+ "metadata": {},
291
+ "outputs": [
292
+ {
293
+ "name": "stdout",
294
+ "output_type": "stream",
295
+ "text": [
296
+ "\n",
297
+ "\n",
298
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
299
+ "Prompt after formatting:\n",
300
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
301
+ "\n",
302
+ "Current conversation:\n",
303
+ "Human: Hello I'm Sushant\n",
304
+ "AI: Hello Sushant! How can I assist you today?\n",
305
+ "Human: who is armstrong\n",
306
+ "AI: Armstrong can refer to several different people. One famous person named Armstrong is Neil Armstrong, who was an American astronaut and the first person to walk on the moon. Another famous Armstrong is Louis Armstrong, who was a jazz musician and trumpeter. There are also many other people with the last name Armstrong, so it would depend on which specific Armstrong you are referring to.\n",
307
+ "Human: okay whats my name.?\n",
308
+ "AI:\u001b[0m\n",
309
+ "\n",
310
+ "\u001b[1m> Finished chain.\u001b[0m\n"
311
+ ]
312
+ },
313
+ {
314
+ "data": {
315
+ "text/plain": [
316
+ "\"I'm sorry, but I don't have access to personal information about individuals unless it has been shared with me in the course of our conversation. I am designed to respect user privacy and confidentiality.\""
317
+ ]
318
+ },
319
+ "execution_count": 13,
320
+ "metadata": {},
321
+ "output_type": "execute_result"
322
+ }
323
+ ],
324
+ "source": [
325
+ "\n",
326
+ "conversation.predict(input=\"okay whats my name.?\")"
327
+ ]
328
+ },
329
+ {
330
+ "cell_type": "code",
331
+ "execution_count": 16,
332
+ "id": "d8c620c0-4ff3-4013-aada-c75982d0cc8f",
333
+ "metadata": {},
334
+ "outputs": [],
335
+ "source": [
336
+ "from langchain.memory import ConversationTokenBufferMemory\n",
337
+ "from langchain.llms import OpenAI\n",
338
+ "llm = ChatOpenAI(temperature=0.0)\n",
339
+ "\n",
340
+ "memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n",
341
+ "memory.save_context({\"input\": \"Hi There can I ask you something\"}, {\"Output\": \"Yes You can please ask now\"})\n",
342
+ "memory.save_context({\"input\": \"Hi There ng\"}, {\"Output\": \"ask now\"})"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "code",
347
+ "execution_count": 17,
348
+ "id": "1586a789-dfbb-4077-a5ae-d8031738ae40",
349
+ "metadata": {},
350
+ "outputs": [
351
+ {
352
+ "data": {
353
+ "text/plain": [
354
+ "{'history': 'AI: ask now'}"
355
+ ]
356
+ },
357
+ "execution_count": 17,
358
+ "metadata": {},
359
+ "output_type": "execute_result"
360
+ }
361
+ ],
362
+ "source": [
363
+ "memory.load_memory_variables({})"
364
+ ]
365
+ },
366
+ {
367
+ "cell_type": "code",
368
+ "execution_count": 19,
369
+ "id": "78037aaa-080c-4dd4-aa96-bea8c8644efb",
370
+ "metadata": {},
371
+ "outputs": [],
372
+ "source": [
373
+ "from langchain.memory import ConversationSummaryBufferMemory\n",
374
+ "schedule = \"There is a meeting at 8am with your product team. \\\n",
375
+ "You will need your powerpoint presentation prepared. \\\n",
376
+ "9am-12pm have time to work on your LangChain \\\n",
377
+ "project which will go quickly because Langchain is such a powerful tool. \\\n",
378
+ "At Noon, lunch at the italian resturant with a customer who is driving \\\n",
379
+ "from over an hour away to meet you to understand the latest in AI. \\\n",
380
+ "Be sure to bring your laptop to show the latest LLM demo.\"\n",
381
+ "\n",
382
+ "memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)\n",
383
+ "memory.save_context({\"input\": \"Hello\"}, {\"output\": \"What's up\"})\n",
384
+ "memory.save_context({\"input\": \"Not much, just hanging\"},\n",
385
+ " {\"output\": \"Cool\"})\n",
386
+ "memory.save_context({\"input\": \"What is on the schedule today?\"}, \n",
387
+ " {\"output\": f\"{schedule}\"})\n"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": 20,
393
+ "id": "f326d9da-da8d-41a8-a449-c23c8fc08601",
394
+ "metadata": {},
395
+ "outputs": [
396
+ {
397
+ "data": {
398
+ "text/plain": [
399
+ "{'history': 'System: The human and AI exchange greetings. The human mentions that they are not doing much. The AI informs the human about their schedule for the day, including a meeting with the product team, working on the LangChain project, and having lunch with a customer to discuss the latest in AI. The AI also reminds the human to bring their laptop to show a demo.'}"
400
+ ]
401
+ },
402
+ "execution_count": 20,
403
+ "metadata": {},
404
+ "output_type": "execute_result"
405
+ }
406
+ ],
407
+ "source": [
408
+ "memory.load_memory_variables({})"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "code",
413
+ "execution_count": 21,
414
+ "id": "57c3a5fd-356a-42de-965b-7513a1f686c4",
415
+ "metadata": {},
416
+ "outputs": [],
417
+ "source": [
418
+ "conversation = ConversationChain(\n",
419
+ " llm=llm, \n",
420
+ " memory = memory,\n",
421
+ " verbose=True\n",
422
+ ")"
423
+ ]
424
+ },
425
+ {
426
+ "cell_type": "code",
427
+ "execution_count": 22,
428
+ "id": "f5286d9b-cb56-4530-b532-c1b90f8f4e28",
429
+ "metadata": {},
430
+ "outputs": [
431
+ {
432
+ "name": "stdout",
433
+ "output_type": "stream",
434
+ "text": [
435
+ "\n",
436
+ "\n",
437
+ "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
438
+ "Prompt after formatting:\n",
439
+ "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
440
+ "\n",
441
+ "Current conversation:\n",
442
+ "System: The human and AI exchange greetings. The human mentions that they are not doing much. The AI informs the human about their schedule for the day, including a meeting with the product team, working on the LangChain project, and having lunch with a customer to discuss the latest in AI. The AI also reminds the human to bring their laptop to show a demo.\n",
443
+ "Human: What would be a good demo to show?\n",
444
+ "AI:\u001b[0m\n",
445
+ "\n",
446
+ "\u001b[1m> Finished chain.\u001b[0m\n"
447
+ ]
448
+ },
449
+ {
450
+ "data": {
451
+ "text/plain": [
452
+ "'A good demo to show would be a live demonstration of the LangChain project. You can showcase the features and functionality of the language translation software, highlighting its accuracy and efficiency. Additionally, you could also demonstrate any recent updates or improvements made to the project. This would give the customer a firsthand experience of the capabilities of the AI technology and its potential benefits for their business.'"
453
+ ]
454
+ },
455
+ "execution_count": 22,
456
+ "metadata": {},
457
+ "output_type": "execute_result"
458
+ }
459
+ ],
460
+ "source": [
461
+ "conversation.predict(input=\"What would be a good demo to show?\")"
462
+ ]
463
+ },
464
+ {
465
+ "cell_type": "code",
466
+ "execution_count": 23,
467
+ "id": "620520ef-7cf1-421b-be81-b7f0c93ae6d4",
468
+ "metadata": {},
469
+ "outputs": [
470
+ {
471
+ "data": {
472
+ "text/plain": [
473
+ "{'history': 'System: The human and AI exchange greetings. The human mentions that they are not doing much. The AI informs the human about their schedule for the day, including a meeting with the product team, working on the LangChain project, and having lunch with a customer to discuss the latest in AI. The AI also reminds the human to bring their laptop to show a demo.\\nHuman: What would be a good demo to show?\\nAI: A good demo to show would be a live demonstration of the LangChain project. You can showcase the features and functionality of the language translation software, highlighting its accuracy and efficiency. Additionally, you could also demonstrate any recent updates or improvements made to the project. This would give the customer a firsthand experience of the capabilities of the AI technology and its potential benefits for their business.'}"
474
+ ]
475
+ },
476
+ "execution_count": 23,
477
+ "metadata": {},
478
+ "output_type": "execute_result"
479
+ }
480
+ ],
481
+ "source": [
482
+ "memory.load_memory_variables({})"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": null,
488
+ "id": "8583b417-2698-408a-b84d-6ec92f91d494",
489
+ "metadata": {},
490
+ "outputs": [],
491
+ "source": []
492
+ }
493
+ ],
494
+ "metadata": {
495
+ "kernelspec": {
496
+ "display_name": "Python 3 (ipykernel)",
497
+ "language": "python",
498
+ "name": "python3"
499
+ },
500
+ "language_info": {
501
+ "codemirror_mode": {
502
+ "name": "ipython",
503
+ "version": 3
504
+ },
505
+ "file_extension": ".py",
506
+ "mimetype": "text/x-python",
507
+ "name": "python",
508
+ "nbconvert_exporter": "python",
509
+ "pygments_lexer": "ipython3",
510
+ "version": "3.11.4"
511
+ }
512
+ },
513
+ "nbformat": 4,
514
+ "nbformat_minor": 5
515
+ }
questions_and_answers.ipynb ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 13,
6
+ "id": "5be86be3-be9d-4707-ba0f-d4d82518bc2b",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import os\n",
11
+ "\n",
12
+ "from dotenv import load_dotenv, find_dotenv\n",
13
+ "_ = load_dotenv(find_dotenv()) # read local .env file"
14
+ ]
15
+ },
16
+ {
17
+ "cell_type": "code",
18
+ "execution_count": 14,
19
+ "id": "3653c1eb-db1c-4546-9357-b86c12ff7347",
20
+ "metadata": {},
21
+ "outputs": [],
22
+ "source": [
23
+ "from langchain.chains import RetrievalQA\n",
24
+ "from langchain.chat_models import ChatOpenAI\n",
25
+ "from langchain.document_loaders import CSVLoader\n",
26
+ "from langchain.vectorstores import DocArrayInMemorySearch\n",
27
+ "from IPython.display import display, Markdown"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": 15,
33
+ "id": "cb4b06e3-f3ba-4210-86e0-69cf1fbab9ff",
34
+ "metadata": {},
35
+ "outputs": [],
36
+ "source": [
37
+ "file = 'Data.csv'\n",
38
+ "loader = CSVLoader(file_path=file)"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": 16,
44
+ "id": "09c5e552-32a3-4aad-9a16-d45eb42a8197",
45
+ "metadata": {},
46
+ "outputs": [],
47
+ "source": [
48
+ "from langchain.indexes import VectorstoreIndexCreator"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": 17,
54
+ "id": "eaee53eb-59f2-4c95-b68f-30d35c2268f7",
55
+ "metadata": {},
56
+ "outputs": [],
57
+ "source": [
58
+ "index = VectorstoreIndexCreator(\n",
59
+ " vectorstore_cls=DocArrayInMemorySearch\n",
60
+ ").from_loaders([loader])"
61
+ ]
62
+ },
63
+ {
64
+ "cell_type": "code",
65
+ "execution_count": 18,
66
+ "id": "28358e5c-9fb0-475c-914d-e752d3f03293",
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "query =\"Please list all your shirts with sun protection \\\n",
71
+ "in a table in markdown and summarize each one.\""
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "code",
76
+ "execution_count": 19,
77
+ "id": "4e417607-fb44-4841-b6b7-da2f72e31742",
78
+ "metadata": {},
79
+ "outputs": [],
80
+ "source": [
81
+ "response = index.query(query)"
82
+ ]
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "execution_count": 21,
87
+ "id": "091b6e7e-bd97-4beb-a812-fc1d34f1a369",
88
+ "metadata": {},
89
+ "outputs": [
90
+ {
91
+ "data": {
92
+ "text/plain": [
93
+ "<langchain.document_loaders.csv_loader.CSVLoader at 0x7feb9054d950>"
94
+ ]
95
+ },
96
+ "execution_count": 21,
97
+ "metadata": {},
98
+ "output_type": "execute_result"
99
+ }
100
+ ],
101
+ "source": [
102
+ "loader"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "code",
107
+ "execution_count": null,
108
+ "id": "fd3139bd-7fae-4747-adf9-9eb93515a19c",
109
+ "metadata": {},
110
+ "outputs": [],
111
+ "source": []
112
+ }
113
+ ],
114
+ "metadata": {
115
+ "kernelspec": {
116
+ "display_name": "Python 3 (ipykernel)",
117
+ "language": "python",
118
+ "name": "python3"
119
+ },
120
+ "language_info": {
121
+ "codemirror_mode": {
122
+ "name": "ipython",
123
+ "version": 3
124
+ },
125
+ "file_extension": ".py",
126
+ "mimetype": "text/x-python",
127
+ "name": "python",
128
+ "nbconvert_exporter": "python",
129
+ "pygments_lexer": "ipython3",
130
+ "version": "3.11.4"
131
+ }
132
+ },
133
+ "nbformat": 4,
134
+ "nbformat_minor": 5
135
+ }
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ python-dotenv==1.0.0
2
+ langchain==0.0.137
3
+ pinecone-client==2.2.1