"""Gradio demo: YouTube audio transcription (Whisper) plus extractive QA.

Builds two transformers pipelines at import time:
  * ``transcribe_pipe`` — openai/whisper-small speech recognition, English.
  * ``qa_pipe``        — deepset/tinyroberta-squad2 question answering.
and a minimal Gradio Blocks UI.
"""

import gradio as gr
import torch
import pytube as pt
from transformers import pipeline
from huggingface_hub import model_info

transcribe_model_ckpt = "openai/whisper-small"
lang = "en"

# FIX: original referenced undefined name `model_ckpt`; use the checkpoint
# constant declared just above.
transcribe_pipe = pipeline(
    task="automatic-speech-recognition",
    model=transcribe_model_ckpt,
    chunk_length_s=30,  # Whisper processes long audio in 30 s chunks
)
# Force English transcription output regardless of detected language.
transcribe_pipe.model.config.forced_decoder_ids = (
    transcribe_pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
)


def _return_yt_html_embed(yt_url):
    """Return an HTML iframe embed for the given YouTube watch URL.

    FIX: this helper was called by ``yt_transcribe`` but never defined.
    Assumes a standard ``...watch?v=<id>`` URL — TODO confirm against callers.
    """
    video_id = yt_url.split("?v=")[-1]
    return (
        f'<center><iframe width="500" height="320" '
        f'src="https://www.youtube.com/embed/{video_id}"></iframe></center>'
    )


def yt_transcribe(yt_url):
    """Download the audio of a YouTube video and transcribe it.

    Parameters:
        yt_url: YouTube watch URL.
    Returns:
        (html_embed_str, text): an HTML embed of the video and the transcript.
    """
    yt = pt.YouTube(yt_url)
    html_embed_str = _return_yt_html_embed(yt_url)
    # First audio-only stream; pytube serves audio as mp4/webm — the .mp3
    # filename is just a label, ffmpeg inside the pipeline handles decoding.
    stream = yt.streams.filter(only_audio=True)[0]
    stream.download(filename="audio.mp3")
    text = transcribe_pipe("audio.mp3")["text"]
    return html_embed_str, text


qa_model_ckpt = "deepset/tinyroberta-squad2"
# FIX: original referenced undefined name `model_ckpt`; use the QA checkpoint.
qa_pipe = pipeline("question-answering", model=qa_model_ckpt, tokenizer=qa_model_ckpt)


def get_answer(query, context):
    """Run extractive QA: answer *query* from the supplied *context* string."""
    QA_input = {
        "question": query,
        "context": context,
    }
    # FIX: original called undefined name `nlp`; the pipeline is `qa_pipe`.
    res = qa_pipe(QA_input)["answer"]
    return res


def update(name):
    """Demo callback: greet the user by name."""
    return f"Welcome to Gradio, {name}!"


with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")
        out = gr.Textbox()
    btn = gr.Button("Run")
    btn.click(fn=update, inputs=inp, outputs=out)

# FIX: guard launch so importing this module (e.g. for tests) does not
# start a web server.
if __name__ == "__main__":
    demo.launch()