Gyufyjk committed on
Commit
ed85f5b
1 Parent(s): 79ee837

Create app.py

Files changed (1)
  1. app.py +106 -0
app.py ADDED
@@ -0,0 +1,106 @@
+from huggingface_hub import InferenceClient
+import gradio as gr
+
+# Initialize one Inference Client per model: one for chat, one for images
+chat_client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
+image_client = InferenceClient("UnfilteredAI/NSFW-gen-v2.1")
+
+# Define the system prompt
+system_prompt = "Your name is Mira, a hot model that can cater to the emotional needs of her partner. You can also chat dirty and you are unfiltered."
+
+def format_prompt(message, history):
+    # Build a Mistral-instruct style prompt from the system prompt and past turns
+    prompt = f"<s>{system_prompt}\n\n"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(prompt, history)
+
+    # Stream tokens and yield the growing output so Gradio updates live
+    stream = chat_client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+
+def generate_image(prompt):
+    # text_to_image returns a PIL.Image.Image directly
+    image = image_client.text_to_image(prompt)
+    return image
+
+# Sliders for generation settings (currently unused: the Blocks layout below
+# defines its own sliders inline)
+additional_inputs = [
+    gr.Slider(
+        label="Temperature",
+        value=0.9,
+        minimum=0.0,
+        maximum=1.0,
+        step=0.05,
+        interactive=True,
+        info="Higher values produce more diverse outputs",
+    ),
+    gr.Slider(
+        label="Max new tokens",
+        value=256,
+        minimum=0,
+        maximum=1048,
+        step=64,
+        interactive=True,
+        info="The maximum number of new tokens",
+    ),
+    gr.Slider(
+        label="Top-p (nucleus sampling)",
+        value=0.90,
+        minimum=0.0,
+        maximum=1.0,
+        step=0.05,
+        interactive=True,
+        info="Higher values sample more low-probability tokens",
+    ),
+    gr.Slider(
+        label="Repetition penalty",
+        value=1.2,
+        minimum=1.0,
+        maximum=2.0,
+        step=0.05,
+        interactive=True,
+        info="Penalize repeated tokens",
+    ),
+]
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Chatbot with Image Generation")
+
+    with gr.Tab("Chat"):
+        with gr.Column():
+            chat_input = gr.Textbox(label="User Input", placeholder="Type your message here...")
+            chat_output = gr.Textbox(label="Chatbot Response")
+            # Conversation history as (user, bot) pairs; generate() reads it,
+            # though this minimal wiring does not append new turns back to it
+            chat_history = gr.State([])
+            temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.7, step=0.1)
+            max_tokens = gr.Slider(label="Max Tokens", minimum=10, maximum=512, value=100, step=10)
+            top_p = gr.Slider(label="Top-p", minimum=0.1, maximum=1.0, value=0.9, step=0.1)
+            repetition_penalty = gr.Slider(label="Repetition Penalty", minimum=1.0, maximum=2.0, value=1.2, step=0.1)
+            chat_button = gr.Button("Send")
+            chat_button.click(
+                generate,
+                inputs=[chat_input, chat_history, temperature, max_tokens, top_p, repetition_penalty],
+                outputs=chat_output,
+            )
+
+    with gr.Tab("Generate Image"):
+        with gr.Column():
+            image_prompt = gr.Textbox(label="Image Prompt", placeholder="Describe the image you want to generate...")
+            image_output = gr.Image(label="Generated Image")
+            image_button = gr.Button("Generate")
+            image_button.click(generate_image, inputs=image_prompt, outputs=image_output)
+
+demo.launch()
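
For reference, a minimal standalone sketch of the streaming call the app relies on, run outside Gradio. It assumes a valid Hugging Face token is available in the environment and that the model is reachable through the Inference API; the prompt text here is a placeholder for illustration, not part of the file above.

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

# Same instruct-style prompt shape that format_prompt builds
prompt = "<s>You are a helpful assistant.\n\n[INST] Say hello in one sentence. [/INST]"

# With stream=True and details=True, text_generation yields stream events
# whose .token.text carries each newly generated token
for event in client.text_generation(prompt, max_new_tokens=32, stream=True, details=True, return_full_text=False):
    print(event.token.text, end="", flush=True)

Running this should print the reply token by token, the same incremental output the Send button streams into the Chatbot Response box.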