# Install Whisper from its GitHub repo at startup so the demo environment has it.
import os

os.system("pip install git+https://github.com/openai/whisper.git")


import gradio as gr
import whisper
from datetime import datetime

import assets

def sendToWhisper(audio_record, audio_upload, task, models_selected, language_toggle, language_selected, without_timestamps):
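    """Transcribe/translate the chosen audio with every selected Whisper
    checkpoint and return one row per model:
    [model, text, language, language confidence, runtime in seconds]."""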
    results = []

    # Prefer the microphone recording; fall back to the uploaded file.
    if audio_record is not None:
        audio = audio_record
    elif audio_upload is not None:
        audio = audio_upload
    else:
        return [["Invalid input"] * 5]

    # Decode the file and pad/trim it to Whisper's 30-second context window.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    for model_name in models_selected:
        start = datetime.now()
        model = whisper.load_model(model_name)
        mel = whisper.log_mel_spectrogram(audio).to(model.device)

        # fp16 is disabled so decoding also runs on CPU-only hardware.
        options = whisper.DecodingOptions(fp16=False, without_timestamps=without_timestamps, task=task)
        if language_toggle == "Manual":
            options = whisper.DecodingOptions(fp16=False, without_timestamps=without_timestamps, task=task, language=language_selected)

        language = ""
        prob = 0
        if model_name in assets.lang_detect:
            # Multilingual checkpoints can identify the spoken language.
            _, probs = model.detect_language(mel)
            language = max(probs, key=probs.get)
            prob = probs[language]
        else:
            # English-only (".en") checkpoints: skip detection and force English.
            language = "en"
            options = whisper.DecodingOptions(fp16=False, without_timestamps=without_timestamps, task=task, language="en")

        output_text = whisper.decode(model, mel, options)
        results.append([model_name, output_text.text, language, str(prob), str((datetime.now() - start).total_seconds())])
    return results

avail_models = whisper.available_models()
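# available_models() lists every checkpoint name, e.g. "tiny", "base.en",
# "small", "medium", "large"; the ".en" variants are English-only.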



with gr.Blocks(css=assets.css) as demo:
    gr.Markdown("This is a demo to use Open AI's Speech to Text (ASR) Model: Whisper. Learn more about the models here on [Github](https://github.com/openai/whisper/search?q=DecodingOptions&type=) FYI: The larger models take a lot longer to transcribe the text :)")
    gr.Markdown("Here are sample audio files to try out: [Sample Audio](https://drive.google.com/drive/folders/1qYek06ZVeKr9f5Jf35eqi-9CnjNIp98u?usp=sharing)")
    gr.Markdown("Built by:[@davidtsong](https://twitter.com/davidtsong)")
    
    with gr.Column():
        gr.Markdown("## Input")

        with gr.Row():
            audio_record = gr.Audio(source="microphone", label="Audio to transcribe", type="filepath", elem_id="audio_inputs")
            audio_upload = gr.Audio(source="upload", type="filepath", interactive=True, elem_id="audio_inputs")

        models_selected = gr.CheckboxGroup(avail_models, label="Models to use")
        with gr.Accordion("Settings", open=False):
            task = gr.Dropdown(["transcribe", "translate"], label="Task", value="transcribe")
            language_toggle = gr.Dropdown(["Automatic", "Manual"], label="Language Selection", value="Automatic")
            language_selected = gr.Dropdown(list(assets.LANGUAGES.keys()), label="Language")
            without_timestamps = gr.Checkbox(label="Without timestamps", value=True)
        submit = gr.Button("Run")
        
        gr.Markdown("## Output")
        output = gr.Dataframe(headers=["Model", "Text", "Language", "Language Confidence", "Time (s)"], label="Results", wrap=True)

    submit.click(fn=sendToWhisper, inputs=[audio_record, audio_upload, task, models_selected, language_toggle, language_selected, without_timestamps], outputs=output)

demo.launch()
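# launch() starts a local web server, on http://127.0.0.1:7860 by default.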