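"""Gradio app: cross-model sentiment analysis for healthcare videos.

Pipeline: sample one video frame per second and score facial emotions through
the Hugging Face Inference API; transcribe the audio track with faster-whisper
and score each transcript segment with a go_emotions text classifier; then
fuse the per-segment text and frame scores into one report.
"""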
import math
import os
from io import BytesIO
import gradio as gr
import cv2
import requests
from pydub import AudioSegment
from faster_whisper import WhisperModel
theme = gr.themes.Base(
    primary_hue="cyan",
    secondary_hue="blue",
    neutral_hue="slate",
)
# Whisper (via faster-whisper) runs locally on CPU; the two emotion models are
# called through the Hugging Face Inference API.
model = WhisperModel("small", device="cpu", compute_type="int8")

API_KEY = os.getenv("API_KEY")
FACE_API_URL = "https://api-inference.huggingface.co/models/dima806/facial_emotions_image_detection"
TEXT_API_URL = "https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions"
headers = {"Authorization": f"Bearer {API_KEY}"}

# Every label that can appear across the two emotion models, used to
# initialize running totals before averaging.
EMOTION_LABELS = [
    'admiration', 'amusement', 'angry', 'annoyance', 'approval', 'caring',
    'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval',
    'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief',
    'happy', 'love', 'nervousness', 'optimism', 'pride', 'realization',
    'relief', 'remorse', 'sad', 'surprise', 'neutral',
]
def extract_frames(video_path):
    """Sample one frame per second and score each with the facial-emotion model."""
    cap = cv2.VideoCapture(video_path)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    interval = fps  # one sampled frame per second of video

    images = []
    result = []
    for i in range(0, total_frames, interval):
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = cap.read()
        if ret:
            _, img_encoded = cv2.imencode('.jpg', frame)
            img_bytes = img_encoded.tobytes()

            response = requests.post(FACE_API_URL, headers=headers, data=img_bytes)
            temp = {item['label']: item['score'] for item in response.json()}
            result.append(temp)
            # The gallery expects RGB; OpenCV decodes frames as BGR.
            images.append((cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), f"Sentiments: {temp}"))

    print("Frame extraction completed.")
    cap.release()
    return images, result
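# Note: the face model's Inference API response is a flat list of
# {"label", "score"} dicts per image (labels such as 'happy', 'sad', 'angry',
# 'neutral'), which the dict comprehension above collapses into one mapping.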
def analyze_sentiment(text):
    """Score a text snippet with the go_emotions classifier; returns {label: score}."""
    response = requests.post(TEXT_API_URL, headers=headers, json=text)
    sentiment_list = response.json()[0]
    sentiment_results = {item['label']: item['score'] for item in sentiment_list}
    return sentiment_results
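# Note: the text classifier returns a nested list, one inner list of
# {"label", "score"} dicts per input, hence the [0] above. Example shape
# (illustrative values): [[{"label": "neutral", "score": 0.85}, ...]]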
def video_to_audio(input_video):
    """Full pipeline: transcribe the audio, score text and frame emotions, fuse per segment."""
    cap = cv2.VideoCapture(input_video)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    cap.release()

    # Extract the audio track with pydub and transcribe it in memory.
    audio = AudioSegment.from_file(input_video)
    audio_binary = audio.export(format="wav").read()
    audio_bytesio = BytesIO(audio_binary)

    segments, info = model.transcribe(audio_bytesio, beam_size=5)
    print("Detected language '%s' with probability %f" % (info.language, info.language_probability))

    frames_images, frames_sentiments = extract_frames(input_video)

    transcript = ''
    audio_divide_sentiment = ''
    video_sentiment_markdown = ''
    video_sentiment_final = []
    final_output = []

    for segment in segments:
        transcript = transcript + segment.text + " "
        transcript_segment_sentiment = analyze_sentiment(segment.text)
        audio_divide_sentiment += "[%.2fs -> %.2fs] %s : %s\n" % (segment.start, segment.end, segment.text, transcript_segment_sentiment)
        # Accumulate the per-second frame emotion scores over this segment.
        emotion_totals = dict.fromkeys(EMOTION_LABELS, 0.0)
        counter = 0
        for i in range(math.ceil(segment.start), math.floor(segment.end)):
            # Frames were sampled at one per second, so second i maps to index i.
            if i < len(frames_sentiments):
                for emotion, score in frames_sentiments[i].items():
                    emotion_totals[emotion] += score
                counter += 1
        if counter > 0:
            for emotion in emotion_totals:
                emotion_totals[emotion] /= counter
        video_sentiment_final.append(emotion_totals)

        video_segment_sentiment = {key: value for key, value in emotion_totals.items() if value != 0.0}
        video_sentiment_markdown += f"Frame {fps*math.ceil(segment.start)} - Frame {fps*math.floor(segment.end)} : {video_segment_sentiment}\n"

        segment_finals = {segment.id: (segment.text, segment.start, segment.end, transcript_segment_sentiment, video_segment_sentiment)}
        final_output.append(segment_finals)
    total_transcript_sentiment = {key: value for key, value in analyze_sentiment(transcript).items() if value >= 0.01}

    # Average the per-segment video emotion scores over the whole video.
    emotion_finals = dict.fromkeys(EMOTION_LABELS, 0.0)
    for segment_scores in video_sentiment_final:
        for emotion, score in segment_scores.items():
            emotion_finals[emotion] += score
    if video_sentiment_final:
        for emotion in emotion_finals:
            emotion_finals[emotion] /= len(video_sentiment_final)
    emotion_finals = {key: value for key, value in emotion_finals.items() if value != 0.0}

    print("Processing Completed!!")
    return str(final_output), frames_images, total_transcript_sentiment, audio_divide_sentiment, video_sentiment_markdown, emotion_finals
with gr.Blocks(theme=theme, css=".gradio-container { background: rgba(255, 255, 255, 0.2) !important; box-shadow: 0 8px 32px 0 rgba( 31, 38, 135, 0.37 ) !important; backdrop-filter: blur( 10px ) !important; -webkit-backdrop-filter: blur( 10px ) !important; border-radius: 10px !important; border: 1px solid rgba( 0, 0, 0, 0.5 ) !important;}") as Video:
    with gr.Column():
        gr.Markdown("""# Cross-Model Machine Learning Model""")
        with gr.Row():
            gr.Markdown("""
            ### 🤖 A cross-model ML pipeline for healthcare video sentiment analysis combines several machine learning models to analyze the sentiments expressed in healthcare-related videos.
            - Facial Expression Recognition Model [Google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) 😊😒😰
            - Speech Recognition Model [OpenAI/Whisper](https://github.com/openai/whisper) 🗣️🎤
            - Text Analysis Model [RoBERTa-base-go-emotions](https://huggingface.co/SamLowe/roberta-base-go_emotions) 📝📜
            - Contextual Understanding Model (Sentiment Analysis) 🔄🌐
            """)
        gr.Markdown("""### By combining the outputs of these models, the cross-model approach aims to capture a more comprehensive view of the sentiment within a healthcare-related video, giving healthcare providers insight into patient experiences and emotions and supporting improvements in healthcare services. 👩‍⚕️📈👨‍⚕️""")

        with gr.Row():
            with gr.Column():
                input_video = gr.Video(sources=["upload", "webcam"])
                button = gr.Button("Process", variant="primary")
                gr.Examples(inputs=input_video, examples=[os.path.join(os.path.dirname(__file__), "test_video_1.mp4")])
                with gr.Row():
                    overall_score = gr.Label(label="Overall Score")
                    video_sentiment_final = gr.Label(label="Video Sentiment Score")
            with gr.Column():
                frames_gallery = gr.Gallery(label="Video Frames", show_label=True, elem_id="gallery", columns=[3], rows=[1], object_fit="contain", height="auto")

        with gr.Accordion(label="Detailed JSON Responses", open=False):
            json_output = gr.Textbox(label="JSON Output", info="Overall scores of the above video in segments.", show_label=True, lines=5, show_copy_button=True, interactive=False)
            audio_sentiment = gr.Textbox(label="Audio Sentiments", info="Outputs of audio processing from the video.", show_label=True, lines=5, show_copy_button=True, interactive=False)
            video_sentiment_markdown = gr.Textbox(label="Video Sentiments", info="Outputs of video-frame processing from the video.", show_label=True, lines=5, show_copy_button=True, interactive=False)

    button.click(
        fn=video_to_audio,
        inputs=input_video,
        outputs=[json_output, frames_gallery, overall_score, audio_sentiment, video_sentiment_markdown, video_sentiment_final]
    )

Video.launch()
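# To run locally (illustrative; assumes this file is saved as app.py, ffmpeg is
# available for pydub, and a Hugging Face token is exported):
#   API_KEY=hf_xxx python app.py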