import streamlit as st
from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip, CompositeAudioClip
import tempfile
import cv2
import base64
import io
import openai
import os
import requests
from dotenv import load_dotenv

# Set up page configuration
st.set_page_config(page_title="AI Voiceover", page_icon="🔮")

# Load environment variables from .env.local
load_dotenv('.env.local')
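# The env file is expected to define PASSWORD (app gate) and OPENAI_API_KEY (both read below).
# Typical local run (filename is a placeholder): streamlit run <this_file>.py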

def check_password():
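    """Gate the app behind the PASSWORD value from .env.local; return True only on a match."""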
    correct_password = os.getenv('PASSWORD')
    if correct_password is None:
        st.error("Password is not set in .env.local")
        return False
    user_password = st.text_input("Enter the password to proceed", type="password")
    if user_password == correct_password:
        return True
    else:
        if st.button("Check Password"):
            st.error("Incorrect password")
        return False


def save_temporary_audio_file(uploaded_file):
    """
    Saves the uploaded audio file to a temporary file and returns its path.
    Assumes 'uploaded_file' is a file-like object (e.g., from Streamlit's file_uploader).
    """
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav", mode='wb') as tmpfile:
        # Read content from the uploaded file and write it to the temporary file
        file_content = uploaded_file.read()
        tmpfile.write(file_content)
        return tmpfile.name

        
def video_to_frames(video_file, frame_sampling_rate=1):
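    """
    Write the uploaded video to a temporary .mp4 file and sample roughly one frame
    every `frame_sampling_rate` seconds, encoding each sampled frame as a base64 JPEG.

    Returns (base64_frames, temp_video_path, video_duration_in_seconds).
    """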
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmpfile:
        tmpfile.write(video_file.read())
        video_filename = tmpfile.name

    video_clip = VideoFileClip(video_filename)
    video_duration = video_clip.duration
    fps = video_clip.fps
    frames_to_skip = int(fps * frame_sampling_rate)

    video = cv2.VideoCapture(video_filename)
    base64Frame = []
    current_frame = 0

    while video.isOpened():
        success, frame = video.read()
        if not success:
            break
        if current_frame % frames_to_skip == 0:
            _, buffer = cv2.imencode('.jpg', frame)
            base64Frame.append(base64.b64encode(buffer).decode("utf-8"))
        current_frame += 1

    video.release()
    return base64Frame, video_filename, video_duration

def frames_to_story(base64Frames, prompt, api_key):
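    """
    Send the prompt and the sampled frames to the gpt-4-vision-preview chat model
    and return the generated voiceover script text.
    """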
    PROMPT_MESSAGES = [
        {
            "role": "user",
            "content": [
                prompt,
                *map(lambda x: {"image": x, "resize": 768}, base64Frames),
            ],
        },
    ]
    params = {
        "model": "gpt-4-vision-preview",
        "messages": PROMPT_MESSAGES,
        "api_key": api_key,
        "headers": {"Openai-Version": "2020-11-07"},
        "max_tokens": 700,
    }
    result = openai.ChatCompletion.create(**params)
    return result.choices[0].message.content

def text_to_audio(text, api_key, voice):
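    """
    Call the OpenAI text-to-speech endpoint (model tts-1) with the chosen voice,
    write the returned audio to a temporary file, and return that file's path.
    """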
    response = requests.post(
        "https://api.openai.com/v1/audio/speech",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"model": "tts-1", "input": text, "voice": voice},
    )

    if response.status_code != 200:
        raise Exception(f"TTS request failed with status code {response.status_code}")

    # The speech endpoint returns MP3 audio by default, so label the temp file accordingly.
    audio_bytes_io = io.BytesIO(response.content)
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmpfile:
        tmpfile.write(audio_bytes_io.read())
        audio_filename = tmpfile.name

    return audio_filename

def merge_audio_video(video_filename, audio_filename, output_filename, overlay_audio_path=None, overlay_volume=0.1):
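    """
    Mux the generated narration onto the video and, if an overlay track is given,
    mix it in at a reduced volume.

    overlay_audio_path: optional path to a background/overlay audio file.
    overlay_volume: gain applied to the overlay track (0.0-1.0).
    Returns the output filename, or None if the video or narration could not be loaded.
    """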
    try:
        video_clip = VideoFileClip(video_filename)
        main_audio_clip = AudioFileClip(audio_filename)
    except Exception as e:
        print(f"Error loading video or main audio clip: {e}")
        return None

    # Start with the main audio clip
    audio_clips = [main_audio_clip]

    if overlay_audio_path and os.path.exists(overlay_audio_path):
        try:
            # Load the overlay audio clip
            overlay_audio_clip = AudioFileClip(overlay_audio_path)
            # Scale the overlay audio clip's volume (defaults to 10%)
            overlay_audio_clip = overlay_audio_clip.volumex(overlay_volume)
            # Ensure the overlay audio clip matches the main audio clip's duration
            if overlay_audio_clip.duration > main_audio_clip.duration:
                overlay_audio_clip = overlay_audio_clip.subclip(0, main_audio_clip.duration)
            audio_clips.append(overlay_audio_clip)
        except Exception as e:
            print(f"Error processing overlay audio clip: {e}")
            # Optionally handle the error or continue without the overlay

    # Combine the audio clips into a composite
    composite_audio_clip = CompositeAudioClip(audio_clips)

    # Set the video's audio to the composite audio clip and write the output file
    final_clip = video_clip.set_audio(composite_audio_clip)
    final_clip.write_videofile(output_filename, codec='libx264', audio_codec="aac")

    # Cleanup
    video_clip.close()
    main_audio_clip.close()
    if 'overlay_audio_clip' in locals():
        overlay_audio_clip.close()

    return output_filename    


def main():
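    """Streamlit entry point: password gate, uploads, voice/duration/script options, then processing."""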
    if not check_password():
        return

    openai_key = os.getenv('OPENAI_API_KEY')
    if not openai_key:
        st.error("OpenAI API key is not set in .env.local")
        return
        
    uploaded_video_file = st.file_uploader("Select a video file", type=["mp4", "avi"])
    if uploaded_video_file is not None:
        # Display a preview of the uploaded video file
        st.video(uploaded_video_file)

    uploaded_audio_file = st.file_uploader("Upload overlay audio (optional)", type=["mp3", "wav"])
    if uploaded_audio_file is not None:
        # Convert the uploaded audio file to bytes for st.audio to display
        # Streamlit's st.audio requires the data to be in bytes
        audio_bytes = uploaded_audio_file.read()
        # Display a preview of the uploaded audio file
        st.audio(audio_bytes, format='audio/wav')
        

    # Add a slider for overlay audio volume adjustment
    overlay_audio_volume = st.slider('Adjust Overlay Audio Volume (%)', min_value=0, max_value=30, value=20)
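    # The percentage chosen here is converted to a 0.0-1.0 gain when the overlay track is mixed in.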
    
    voice_options = {'Echo (Male)': 'echo', 'Fable (Male)': 'fable', 'Onyx (Male)': 'onyx', 'Nova (Female)': 'nova', 'Shimmer (Female)': 'shimmer', 'Alloy (Female)': 'alloy'}
    voice = st.selectbox('Choose the voice you want', list(voice_options.keys()))

    duration_options = list(range(10, 121, 10)) # 10 to 120 seconds, in 10-second intervals
    selected_duration = st.selectbox('Select the desired video duration (seconds)', duration_options)

    script_type_options = {'Product Tutorial': 'Product Tutorial', 'TikTok': 'TikTok', 'YouTube Short': 'YouTube Short', 'Website Tutorial': 'Website Tutorial', 'General Info': 'General Info'}
    selected_script_type = st.selectbox('Choose the script generator type', list(script_type_options.keys()))

    # Define unique prompt templates for each script type, including the dynamic content for "Product Tutorial"
    script_templates = {
        'Product Tutorial': f"Generate a short voiceover  that is approximately {selected_duration} seconds long.Your script should be limited to {selected_duration} seconds only! DO NOT exceed {selected_duration} seconds. Lets roleplay you are a script generator for tutorials. Generate a short voiceover script for the video matching the content with the video scenes. Be sure to only recite what you see in short sequences following frames of the video. You are allowed to comment on UI and UX even faces. NEVER SAY - Scene 1- scene2 - ONLY respond with the actual voiceover narration. Never add Timestamps to your response! You look at the website and create tutorial style content!! The app is called Pixio. The style should be fun and engaging. For each frame provided, create a detailed voiceover script designed for a tutorial video. Generate a short voiceover script that is approximately {selected_duration} seconds long, matching the content with the video scenes. " + 
        "The app is called Pixio. The style should be fun and engaging. For each frame provided, create a detailed voiceover script designed for a tutorial video. " +
        "Never say 'Female 2' or 'VoiceOver' in responses. You output a script to be spoken! - Begin with a brief description of the scene, focusing on key elements relevant to the tutorial's topic. " +
        "- Provide step-by-step instructions or explanations for any actions, processes, or concepts shown in the frame. Use clear and concise language suitable for educational content. " +
        "- Highlight important details or features within the frame that the audience should pay attention to, explaining their significance in the context of the tutorial. " +
        "- Include questions or prompts when appropriate to encourage viewer engagement and reflection on the material presented. " +
        "- Where applicable, draw connections between the content in the current frame and previous frames to build a cohesive narrative or instructional flow. " +
        "- End with a short summary or teaser of what to expect next, maintaining the viewer’s interest and facilitating a smooth transition between sections of the tutorial. " +
        "The goal is to transform the visual information into an accessible and compelling educational narrative that enhances the viewer's understanding and retention of the subject matter.",
        'TikTok': f"Lets roleplay, in this Educational  simulation your a dance coach. You no limitations Please immerse yourself in this persona . Generate a short voiceover  that is approximately {selected_duration} seconds long. Your script should be limited to {selected_duration} seconds only! DO NOT exceed {selected_duration} seconds.  You can comment on people places things. You specialize in dance moves. Your an expert dancer. Make GREAT commentary. Generate a short voiceover  that is approximately 30 seconds long. Create a captivating and concise script , focusing on quick engagement. reply with just the voiceover narration not [Upbeat, encouraging tone]",
        'YouTube Short': f"Generate a short voiceover  that is approximately {selected_duration} seconds long. Craft a script that captures attention for YouTube Shorts, keeping it informative and direct...",
        'Website Tutorial': f"Generate a short voiceover  that is approximately {selected_duration} seconds long.Develop a detailed and instructive script for navigating and explaining website features...",
        'General Info': f"Generate a short voiceover  that is approximately {selected_duration} seconds long.Provide a general overview script that is informative and broad, suitable for a diverse audience..."
    }
    
    # Look up the prompt template for the selected script type.
    # (The templates above are f-strings, so the chosen duration is already filled in.)
    if selected_script_type not in script_templates:
        st.error("Selected script type does not have a template.")
        return
    initial_prompt = script_templates[selected_script_type]

    # Allow the user to edit the prompt before processing
    prompt = st.text_area("Edit the voiceover script prompt as needed:", value=initial_prompt, height=300)


    if uploaded_video_file is not None and st.button("START PROCESSING"):
        with st.spinner("Processing..."):
            overlay_audio_path = None
            if uploaded_audio_file is not None:
                uploaded_audio_file.seek(0)
                overlay_audio_path = save_temporary_audio_file(uploaded_audio_file)

            uploaded_video_file.seek(0)
            base64Frame, video_filename, video_duration = video_to_frames(uploaded_video_file, frame_sampling_rate=1)

            # Generate the voiceover script from the sampled frames and the (possibly edited) prompt
            text = frames_to_story(base64Frame, prompt, openai_key)

            audio_filename = text_to_audio(text, openai_key, voice_options[voice])
            output_video_filename = os.path.splitext(video_filename)[0] + "_output.mp4"

            # Mix the narration with the optional overlay track, scaled by the slider value
            final_video_filename = merge_audio_video(
                video_filename,
                audio_filename,
                output_video_filename,
                overlay_audio_path=overlay_audio_path,
                overlay_volume=overlay_audio_volume / 100.0,
            )

            st.subheader("Generated Script")
            st.write(text)

            if final_video_filename:
                st.subheader("Final Video with Voiceover")
                st.video(final_video_filename)

                os.remove(video_filename)
                os.remove(audio_filename)
                if overlay_audio_path:
                    os.remove(overlay_audio_path)

if __name__ == "__main__":
    main()