File size: 1,008 Bytes
e54945c
9c3145b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e54945c
 
9c3145b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
# NOTE: this runs at import time and pulls a ~7B-parameter checkpoint from the
# HuggingFace Hub — expect a long first run and significant RAM usage.
model_name = "FreedomIntelligence/HuatuoGPT-Vision-7B"
# NOTE(review): no device_map/torch_dtype is given, so the model loads on CPU
# in the default dtype; presumably a GPU is intended for a 7B model — confirm.
# NOTE(review): HuatuoGPT-Vision is a multimodal checkpoint and may need
# trust_remote_code=True to load its custom architecture — verify.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Wrap model + tokenizer in a text-generation pipeline used by the UI callback.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Function to generate a response using the model
def generate_response(user_input):
    """Generate the assistant's reply for a single user message.

    Parameters
    ----------
    user_input : str
        Raw text entered by the user in the Gradio textbox.

    Returns
    -------
    str
        The assistant's reply text (empty string for blank input).
    """
    # Guard: don't invoke the pipeline on empty/whitespace-only input.
    if not user_input or not user_input.strip():
        return ""
    messages = [{"role": "user", "content": user_input}]
    generated = pipe(messages)[0]['generated_text']
    # With chat-style `messages` input, current transformers pipelines return
    # the whole conversation (input turns + the new assistant turn) as a list
    # of {"role", "content"} dicts under 'generated_text'. The original code
    # returned that list directly, which Gradio's text output cannot render
    # cleanly — extract only the final assistant message's content.
    if isinstance(generated, list) and generated:
        last = generated[-1]
        if isinstance(last, dict):
            return last.get("content", "")
    # Fallback: older/plain-prompt behavior returns a string already.
    return generated

# Build the web UI: one text box in, one text box out, wired to the
# generation callback above.
interface_config = {
    "fn": generate_response,  # callback invoked on each submission
    "inputs": "text",         # single free-form text field
    "outputs": "text",        # single text field for the model's reply
    "title": "HuatuoGPT-Vision-7B",
    "description": "A text generation model powered by HuatuoGPT-Vision-7B. Ask anything!",
}
iface = gr.Interface(**interface_config)

# Start the local Gradio server (blocks until shut down).
iface.launch()