sushantgo committed on
Commit
886d623
1 Parent(s): 8dd1c8a

Upload folder using huggingface_hub

.env ADDED
@@ -0,0 +1 @@
+ OPENAI_API_KEY = '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'
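Note: this commit stores an OpenAI API key in plain text in `.env`. The other files in the commit already read the key through python-dotenv; a minimal sketch of that pattern (assuming python-dotenv is installed and `.env` is kept out of version control):

```python
# Sketch: load the secret from an untracked .env file instead of hard-coding it.
import os

import openai
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())  # reads OPENAI_API_KEY from .env into os.environ
openai.api_key = os.environ["OPENAI_API_KEY"]
```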
.ipynb_checkpoints/Untitled-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
+ {
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
.ipynb_checkpoints/Untitled1-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
+ {
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
.ipynb_checkpoints/Untitled2-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
+ {
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
.ipynb_checkpoints/Untitled3-checkpoint.ipynb ADDED
@@ -0,0 +1,65 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f32c4a15-cc74-4a43-8af1-1d3b783f1fa1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv, find_dotenv\n",
+ "_ = load_dotenv(find_dotenv())\n",
+ "openai.api_key = os.environ['OPENAI_API_KEY']\n",
+ "MODEL_NAME = 'gpt-3.5-turbo'\n",
+ "\n",
+ "class Assigment3:\n",
+ "    input_val = ''\n",
+ "    def __init__(self, model):\n",
+ "        self.model = model\n",
+ "    \n",
+ "    def get_input(self):\n",
+ "        self.input_val = input('Enter your message')\n",
+ "\n",
+ "    def get_completion(self):\n",
+ "        messages = [{\n",
+ "            'role': 'user',\n",
+ "            'content': self.input_val\n",
+ "        }]\n",
+ "        response = openai.ChatCompletion.create(\n",
+ "            model=self.model,\n",
+ "            messages=messages,\n",
+ "            temperature=0\n",
+ "        )\n",
+ "        return response.choices[0].message.content\n",
+ "\n",
+ "    \n",
+ "obj = Assigment3(MODEL_NAME)\n",
+ "print(obj.get_completion())"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
.ipynb_checkpoints/assignment3-checkpoint.py ADDED
@@ -0,0 +1,31 @@
+ import openai
+ import os
+ 
+ from dotenv import load_dotenv, find_dotenv
+ _ = load_dotenv(find_dotenv())
+ openai.api_key = os.environ['OPENAI_API_KEY']
+ MODEL_NAME = 'gpt-3.5-turbo'
+ 
+ class Assigment3:
+     input_val = ''
+     def __init__(self, model):
+         self.model = model
+ 
+     def get_input(self):
+         self.input_val = input('Enter your message')
+ 
+     def get_completion(self):
+         messages = [{
+             'role': 'user',
+             'content': self.input_val
+         }]
+         response = openai.ChatCompletion.create(
+             model=self.model,
+             messages=messages,
+             temperature=0
+         )
+         return response.choices[0].message.content
+ 
+ 
+ obj = Assigment3(MODEL_NAME)
+ print(obj.get_completion())
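`openai.ChatCompletion.create` is the legacy pre-1.0 interface of the `openai` package. For reference, a minimal sketch of the same request with the current client-based API (an assumption about a newer environment, not part of this commit):

```python
# Hypothetical openai>=1.0 equivalent of Assigment3.get_completion():
# the module-level ChatCompletion call becomes a method on a client object.
from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": input("Enter your message")}],
    temperature=0,
)
print(response.choices[0].message.content)
```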
.ipynb_checkpoints/assignment_3-checkpoint.ipynb ADDED
@@ -0,0 +1,65 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f32c4a15-cc74-4a43-8af1-1d3b783f1fa1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv, find_dotenv\n",
+ "_ = load_dotenv(find_dotenv())\n",
+ "openai.api_key = os.environ['OPENAI_API_KEY']\n",
+ "MODEL_NAME = 'gpt-3.5-turbo'\n",
+ "\n",
+ "class Assigment3:\n",
+ "    input_val = ''\n",
+ "    def __init__(self, model):\n",
+ "        self.model = model\n",
+ "    \n",
+ "    def get_input(self):\n",
+ "        self.input_val = input('Enter your message')\n",
+ "\n",
+ "    def get_completion(self):\n",
+ "        messages = [{\n",
+ "            'role': 'user',\n",
+ "            'content': self.input_val\n",
+ "        }]\n",
+ "        response = openai.ChatCompletion.create(\n",
+ "            model=self.model,\n",
+ "            messages=messages,\n",
+ "            temperature=0\n",
+ "        )\n",
+ "        return response.choices[0].message.content\n",
+ "\n",
+ "    \n",
+ "obj = Assigment3(MODEL_NAME)\n",
+ "print(obj.get_completion())"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
.ipynb_checkpoints/assignment_3-checkpoint.py ADDED
@@ -0,0 +1,29 @@
+ import openai
+ import os
+ 
+ openai.api_key = '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'
+ MODEL_NAME = 'gpt-3.5-turbo'
+ 
+ class Assigment3:
+     input_val = ''
+     def __init__(self, model):
+         self.model = model
+ 
+     def get_input(self):
+         self.input_val = input('Enter your message')
+ 
+     def get_completion(self):
+         messages = [{
+             'role': 'user',
+             'content': self.input_val
+         }]
+         response = openai.ChatCompletion.create(
+             model=self.model,
+             messages=messages,
+             temperature=0
+         )
+         return response.choices[0].message.content
+ 
+ 
+ obj = Assigment3(MODEL_NAME)
+ print(obj.get_completion())
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: Simple Langchain Gradio App
- emoji: 💻
- colorFrom: blue
- colorTo: green
+ title: Simple_Langchain_Gradio_App
+ app_file: langchain-app.py
  sdk: gradio
  sdk_version: 3.46.0
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Untitled.ipynb ADDED
@@ -0,0 +1,86 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "34b50665-3063-4b26-b496-ca1633e8a38e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdin",
+ "output_type": "stream",
+ "text": [
+ "Enter your message who is sushant godse?\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "I'm sorry, but I couldn't find any information about a person named Sushant Godse. It's possible that this person may not be widely known or may not have a significant online presence.\n"
+ ]
+ }
+ ],
+ "source": [
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "openai.api_key = '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'\n",
+ "MODEL_NAME = 'gpt-3.5-turbo'\n",
+ "\n",
+ "class Assigment3:\n",
+ "    input_val = ''\n",
+ "    def __init__(self, model):\n",
+ "        self.model = model\n",
+ "    \n",
+ "    def get_input(self):\n",
+ "        self.input_val = input('Enter your message')\n",
+ "\n",
+ "    def get_completion(self):\n",
+ "        messages = [{\n",
+ "            'role': 'user',\n",
+ "            'content': self.input_val\n",
+ "        }]\n",
+ "        response = openai.ChatCompletion.create(\n",
+ "            model=self.model,\n",
+ "            messages=messages,\n",
+ "            temperature=0\n",
+ "        )\n",
+ "        return response.choices[0].message['content']\n",
+ "    \n",
+ "obj = Assigment3(MODEL_NAME)\n",
+ "obj.get_input()\n",
+ "print(obj.get_completion())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "575f233a-32e5-4700-8874-b7bf9428a51b",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
Untitled1.ipynb ADDED
@@ -0,0 +1,99 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "a05e9e6e-7a2b-4e02-a37a-678f065092f9",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "To guide a model towards the desired output and avoid irrelevant or incorrect responses, it is important to provide clear and specific instructions, even if they are longer and provide more clarity and context.\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "openai.api_key = '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'\n",
+ "MODEL_NAME = 'gpt-3.5-turbo'\n",
+ "\n",
+ "TEXT = f\"\"\"\n",
+ "You should express what you want a model to do by \\\n",
+ "providing instructions that are as clear and \\\n",
+ "specific as you can possibly make them. \\\n",
+ "This will guide the model towards the desired output, \\\n",
+ "and reduce the chances of receiving irrelevant \\\n",
+ "or incorrect responses. Don't confuse writing a \\\n",
+ "clear prompt with writing a short prompt. \\\n",
+ "In many cases, longer prompts provide more clarity \\\n",
+ "and context for the model, which can lead to \\\n",
+ "more detailed and relevant outputs.\n",
+ "\"\"\"\n",
+ "\n",
+ "PROMPT = f\"\"\"\n",
+ "summarize the text delimited by triple backticks \\\n",
+ "into a single sentence.\n",
+ "```{TEXT}```\n",
+ "\"\"\"\n",
+ "\n",
+ "class Assigment4:\n",
+ "    input_val = ''\n",
+ "    def __init__(self, model):\n",
+ "        self.model = model\n",
+ "    \n",
+ "    def get_input(self):\n",
+ "        self.input_val = input('Enter your message')\n",
+ "\n",
+ "    def get_completion(self, prompt):\n",
+ "        messages = [{\n",
+ "            'role': 'user',\n",
+ "            'content': prompt\n",
+ "        }]\n",
+ "        response = openai.ChatCompletion.create(\n",
+ "            model=self.model,\n",
+ "            messages=messages,\n",
+ "            temperature=0\n",
+ "        )\n",
+ "        return response.choices[0].message['content']\n",
+ "\n",
+ "    \n",
+ "obj = Assigment4(MODEL_NAME)\n",
+ "print(obj.get_completion(PROMPT))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d3bb3a9b-d418-484b-81c0-33515dba32c9",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
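The trailing backslashes inside the triple-quoted f-strings above are line continuations: Python drops the escaped newline, so the prose lands on one logical line in the prompt sent to the model. A tiny sketch of the mechanism (hypothetical strings, not from the notebook):

```python
# A backslash immediately before a newline inside a (triple-quoted) string
# removes that newline from the resulting value.
TEXT = f"""
first part \
second part
"""
print(repr(TEXT))  # -> '\nfirst part second part\n'
```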
Untitled2.ipynb ADDED
@@ -0,0 +1,127 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "95d56fde-3f1a-482f-aa25-40687d16e5dc",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{\n",
+ "  \"books\": [\n",
+ "    {\n",
+ "      \"book_id\": 1,\n",
+ "      \"title\": \"The Enigma of Elysium\",\n",
+ "      \"author\": \"Aria Nightshade\",\n",
+ "      \"genre\": \"Fantasy\"\n",
+ "    },\n",
+ "    {\n",
+ "      \"book_id\": 2,\n",
+ "      \"title\": \"Shadows of Serendipity\",\n",
+ "      \"author\": \"Elijah Blackwood\",\n",
+ "      \"genre\": \"Mystery\"\n",
+ "    },\n",
+ "    {\n",
+ "      \"book_id\": 3,\n",
+ "      \"title\": \"Whispers in the Wind\",\n",
+ "      \"author\": \"Luna Silvermoon\",\n",
+ "      \"genre\": \"Romance\"\n",
+ "    }\n",
+ "  ]\n",
+ "}\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "openai.api_key = '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'\n",
+ "MODEL_NAME = 'gpt-3.5-turbo'\n",
+ "\n",
+ "# TEXT = f\"\"\"\n",
+ "# You should express what you want a model to do by \\\n",
+ "# providing instructions that are as clear and \\\n",
+ "# specific as you can possibly make them. \\\n",
+ "# This will guide the model towards the desired output, \\\n",
+ "# and reduce the chances of receiving irrelevent \\\n",
+ "# or incorrec5 responses. Don't confuse writing a \\\n",
+ "# clear prompt with writing a short prompt. \\\n",
+ "# In many case, longer prompts provide more clearity \\\n",
+ "# and context for the model, which can lead to \\\n",
+ "# more detailed and relevant outputs.\n",
+ "# \"\"\"\n",
+ "\n",
+ "# PROMPT = f\"\"\"\n",
+ "# summarize the text delimited by triple backticks\\\n",
+ "# into a single sentence.\n",
+ "# ```{TEXT}```\n",
+ "# \"\"\"\n",
+ "\n",
+ "PROMPT = f\"\"\"\n",
+ "Generate a list of three made-up book titles along \\\n",
+ "with their authors and genres.\n",
+ "Provide them in JSON format with the following keys:\n",
+ "book_id, title, author, genre.\n",
+ "\"\"\"\n",
+ "\n",
+ "class Assignment4:\n",
+ "    input_val = ''\n",
+ "    def __init__(self, model):\n",
+ "        self.model = model\n",
+ "    \n",
+ "    def get_input(self):\n",
+ "        self.input_val = input('Enter your message')\n",
+ "\n",
+ "    def get_completion(self, prompt):\n",
+ "        messages = [{\n",
+ "            'role': 'user',\n",
+ "            'content': prompt\n",
+ "        }]\n",
+ "        response = openai.ChatCompletion.create(\n",
+ "            model=self.model,\n",
+ "            messages=messages,\n",
+ "            temperature=0\n",
+ "        )\n",
+ "        return response.choices[0].message['content']\n",
+ "\n",
+ "    \n",
+ "obj = Assignment4(MODEL_NAME)\n",
+ "print(obj.get_completion(PROMPT))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fc90ad16-7a94-42ef-bdd4-9ec176da6210",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
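`Untitled2.ipynb` asks the model for JSON, so a natural follow-up is to parse the reply. A minimal sketch (assuming `obj` and `PROMPT` from the cell above, and that the model actually returns plain JSON, which is not guaranteed):

```python
import json

# Hypothetical follow-up cell: turn the model's JSON reply into Python objects.
raw = obj.get_completion(PROMPT)
try:
    for book in json.loads(raw)["books"]:
        print(book["book_id"], book["title"], "by", book["author"], f"({book['genre']})")
except (json.JSONDecodeError, KeyError):
    # The model may wrap the JSON in prose or code fences; fall back to the raw text.
    print("Reply was not plain JSON:", raw)
```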
assignment3.ipynb ADDED
@@ -0,0 +1,31 @@
+ import openai
+ import os
+ 
+ from dotenv import load_dotenv, find_dotenv
+ _ = load_dotenv(find_dotenv())
+ openai.api_key = os.environ['OPENAI_API_KEY']
+ MODEL_NAME = 'gpt-3.5-turbo'
+ 
+ class Assigment3:
+     input_val = ''
+     def __init__(self, model):
+         self.model = model
+ 
+     def get_input(self):
+         self.input_val = input('Enter your message')
+ 
+     def get_completion(self):
+         messages = [{
+             'role': 'user',
+             'content': self.input_val
+         }]
+         response = openai.ChatCompletion.create(
+             model=self.model,
+             messages=messages,
+             temperature=0
+         )
+         return response.choices[0].message.content
+ 
+ 
+ obj = Assigment3(MODEL_NAME)
+ print(obj.get_completion())
assignment_3.ipynb ADDED
@@ -0,0 +1,89 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "f32c4a15-cc74-4a43-8af1-1d3b783f1fa1",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdin",
+ "output_type": "stream",
+ "text": [
+ "Enter your message what is circumference on earth\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The circumference of the Earth is approximately 40,075 kilometers (24,901 miles).\n"
+ ]
+ }
+ ],
+ "source": [
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv, find_dotenv\n",
+ "_ = load_dotenv(find_dotenv())\n",
+ "openai.api_key = os.environ['OPENAI_API_KEY']\n",
+ "MODEL_NAME = 'gpt-3.5-turbo'\n",
+ "\n",
+ "class Assigment3:\n",
+ "    input_val = ''\n",
+ "    def __init__(self, model):\n",
+ "        self.model = model\n",
+ "    \n",
+ "    def get_input(self):\n",
+ "        self.input_val = input('Enter your message')\n",
+ "\n",
+ "    def get_completion(self):\n",
+ "        messages = [{\n",
+ "            'role': 'user',\n",
+ "            'content': self.input_val\n",
+ "        }]\n",
+ "        response = openai.ChatCompletion.create(\n",
+ "            model=self.model,\n",
+ "            messages=messages,\n",
+ "            temperature=0\n",
+ "        )\n",
+ "        return response.choices[0].message.content\n",
+ "\n",
+ "    \n",
+ "obj = Assigment3(MODEL_NAME)\n",
+ "obj.get_input()\n",
+ "print(obj.get_completion())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "00443343-be7e-41e7-8a72-f2ebf8273622",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
langchain-app.py ADDED
@@ -0,0 +1,28 @@
+ import os
+ import gradio as gr
+ from dotenv import load_dotenv, find_dotenv
+ _ = load_dotenv(find_dotenv())
+ 
+ from langchain.chains import ConversationChain
+ from langchain.chat_models import ChatOpenAI
+ from langchain.memory import ConversationBufferMemory
+ 
+ llm = ChatOpenAI(temperature=0.0)
+ memory = ConversationBufferMemory()
+ conversion = ConversationChain(
+     llm=llm,
+     memory=memory,
+     verbose=False
+ )
+ 
+ def takeinput(name):
+     output_str = conversion.predict(input=name)
+     return output_str
+ 
+ demo = gr.Interface(
+     fn=takeinput,
+     inputs=["text"],
+     outputs=["text"]
+ )
+ 
+ demo.launch(share=True)
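Because the chain in `langchain-app.py` is built with `ConversationBufferMemory`, successive calls share history. A minimal sketch of exercising that outside the Gradio UI (same imports and calls as the app, assuming `OPENAI_API_KEY` is set in the environment):

```python
# Sketch: the buffer memory feeds earlier turns back into each new prompt.
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

chain = ConversationChain(
    llm=ChatOpenAI(temperature=0.0),
    memory=ConversationBufferMemory(),
)

print(chain.predict(input="Hi, my name is Sushant."))
print(chain.predict(input="What is my name?"))  # recalled from the buffered history
```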