|
import gradio as gr |
|
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
# Hugging Face Hub model ID to load.
# NOTE(review): this repo is a *vision-language* model; a plain
# "text-generation" pipeline may not expose its image inputs, and custom
# architectures often need trust_remote_code=True — confirm it loads.
model_name = "FreedomIntelligence/HuatuoGPT-Vision-7B"

# Download (on first run) and load the model weights from the Hub.
model = AutoModelForCausalLM.from_pretrained(model_name)

# Matching tokenizer for the same checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Text-generation pipeline wrapping the loaded model/tokenizer;
# used by generate_response() below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
def generate_response(user_input):
    """Generate a reply to *user_input* using the loaded chat pipeline.

    Args:
        user_input: The user's message as plain text.

    Returns:
        The model's reply as a plain string.
    """
    messages = [{"role": "user", "content": user_input}]
    # BUG FIX: with chat-format input, the pipeline's `generated_text` is
    # the full conversation — a list of {"role", "content"} dicts — not a
    # string. The original returned that whole list, so the UI displayed
    # the raw message structure (including the user's own prompt) instead
    # of just the model's answer. Extract the final (assistant) message.
    output = pipe(messages)[0]['generated_text']
    if isinstance(output, str):
        # Some pipeline/input combinations return plain text directly.
        return output
    return output[-1]['content']
|
|
|
|
|
# Wire the generator into a minimal web UI: one text box in, text out.
interface_settings = dict(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="HuatuoGPT-Vision-7B",
    description="A text generation model powered by HuatuoGPT-Vision-7B. Ask anything!",
)
iface = gr.Interface(**interface_settings)

# Start the local Gradio server (blocks until interrupted).
iface.launch()
|
|