abhicodes committed on
Commit 8c9e75a • 1 Parent(s): f528991

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +216 -0
  3. requirements.txt +3 -0
  4. test_video_1.mp4 +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+test_video_1.mp4 filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,216 @@
+import math
+import os
+from io import BytesIO
+
+import gradio as gr
+import cv2
+import requests
+from pydub import AudioSegment
+from faster_whisper import WhisperModel
+
+theme = gr.themes.Base(
+    primary_hue="cyan",
+    secondary_hue="blue",
+    neutral_hue="slate",
+)
+
+model = WhisperModel("small", device="cpu", compute_type="int8")
+
+API_KEY = os.getenv("API_KEY")
+
+FACE_API_URL = "https://api-inference.huggingface.co/models/dima806/facial_emotions_image_detection"
+TEXT_API_URL = "https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions"
+headers = {"Authorization": f"Bearer {API_KEY}"}
+
+# Union of the go_emotions text labels and the facial-emotion labels
+# ('angry', 'happy', 'sad' come from the face model; the rest from go_emotions).
+EMOTION_LABELS = (
+    'admiration', 'amusement', 'angry', 'annoyance', 'approval', 'caring',
+    'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval',
+    'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief',
+    'happy', 'love', 'nervousness', 'optimism', 'pride', 'realization',
+    'relief', 'remorse', 'sad', 'surprise', 'neutral',
+)
+
+
+def extract_frames(video_path):
+    """Sample one frame per second and score each with the facial-emotion API."""
+    cap = cv2.VideoCapture(video_path)
+    fps = int(cap.get(cv2.CAP_PROP_FPS))
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    interval = fps  # one sampled frame per second of video
+
+    images = []
+    result = []  # local, so repeated runs don't accumulate frames from earlier videos
+
+    for i in range(0, total_frames, interval):
+        cap.set(cv2.CAP_PROP_POS_FRAMES, i)
+        ret, frame = cap.read()
+        if ret:
+            _, img_encoded = cv2.imencode('.jpg', frame)
+            img_bytes = img_encoded.tobytes()
+
+            response = requests.post(FACE_API_URL, headers=headers, data=img_bytes)
+            temp = {item['label']: item['score'] for item in response.json()}
+            result.append(temp)
+
+            images.append((cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), f"Sentiments: {temp}"))
+
+    print("Frame extraction completed.")
+
+    cap.release()
+    return images, result
+
+
+def analyze_sentiment(text):
+    # The hosted Inference API expects an {"inputs": ...} JSON payload.
+    response = requests.post(TEXT_API_URL, headers=headers, json={"inputs": text})
+    sentiment_list = response.json()[0]
+    sentiment_results = {results['label']: results['score'] for results in sentiment_list}
+    return sentiment_results
+
+
+def video_to_audio(input_video):
+    """Full pipeline: transcribe the audio, then score text and frames per segment."""
+    cap = cv2.VideoCapture(input_video)
+    fps = int(cap.get(cv2.CAP_PROP_FPS))
+    audio = AudioSegment.from_file(input_video)
+    audio_binary = audio.export(format="wav").read()
+    audio_bytesio = BytesIO(audio_binary)
+
+    segments, info = model.transcribe(audio_bytesio, beam_size=5)
+
+    print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
+
+    frames_images, frames_sentiments = extract_frames(input_video)
+
+    transcript = ''
+    audio_divide_sentiment = ''
+    video_sentiment_markdown = ''
+    video_sentiment_final = []
+    final_output = []
+
+    for segment in segments:
+        transcript = transcript + segment.text + " "
+        transcript_segment_sentiment = analyze_sentiment(segment.text)
+        audio_divide_sentiment += "[%.2fs -> %.2fs] %s : %s\n" % (segment.start, segment.end, segment.text, transcript_segment_sentiment)
+
+        # Average the facial scores of the once-per-second frames that fall
+        # inside this transcript segment (frames_sentiments[i] is second i).
+        emotion_totals = dict.fromkeys(EMOTION_LABELS, 0.0)
+        counter = 0
+        for i in range(math.ceil(segment.start), min(math.floor(segment.end), len(frames_sentiments))):
+            for emotion in frames_sentiments[i]:
+                emotion_totals[emotion] += frames_sentiments[i][emotion]
+            counter += 1  # count frames, not individual labels
+
+        if counter:  # segments shorter than a second contain no sampled frame
+            for emotion in emotion_totals:
+                emotion_totals[emotion] /= counter
+
+        video_sentiment_final.append(emotion_totals)
+
+        video_segment_sentiment = {key: value for key, value in emotion_totals.items() if value != 0.0}
+
+        video_sentiment_markdown += f"Frame {fps*math.ceil(segment.start)} - Frame {fps*math.floor(segment.end)} : {video_segment_sentiment}\n"
+
+        segment_finals = {segment.id: (segment.text, segment.start, segment.end, transcript_segment_sentiment, video_segment_sentiment)}
+        final_output.append(segment_finals)
+
+    total_transcript_sentiment = {key: value for key, value in analyze_sentiment(transcript).items() if value >= 0.01}
+
+    # Average the per-segment facial scores over all segments.
+    emotion_finals = dict.fromkeys(EMOTION_LABELS, 0.0)
+    for segment_scores in video_sentiment_final:
+        for emotion, score in segment_scores.items():
+            emotion_finals[emotion] += score
+    for emotion in emotion_finals:
+        emotion_finals[emotion] /= len(video_sentiment_final)
+
+    emotion_finals = {key: value for key, value in emotion_finals.items() if value != 0.0}
+
+    print("Processing Completed!!")
+
+    return str(final_output), frames_images, total_transcript_sentiment, audio_divide_sentiment, video_sentiment_markdown, emotion_finals
+
+
+with gr.Blocks(theme=theme, css=".gradio-container { background: rgba(255, 255, 255, 0.2) !important; box-shadow: 0 8px 32px 0 rgba( 31, 38, 135, 0.37 ) !important; backdrop-filter: blur( 10px ) !important; -webkit-backdrop-filter: blur( 10px ) !important; border-radius: 10px !important; border: 1px solid rgba( 0, 0, 0, 0.5 ) !important;}") as Video:
+    with gr.Column():
+        gr.Markdown("""# Cross-Model Machine Learning Model""")
+        with gr.Row():
+            gr.Markdown("""
+            ### 🤖 A cross-model ML model for video processing in healthcare sentiment analysis combines different machine learning models to analyze the sentiments expressed in healthcare-related videos.
+            - Facial Expression Recognition Model [dima806/facial_emotions_image_detection](https://huggingface.co/dima806/facial_emotions_image_detection), a ViT fine-tune of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) 😊😢😰
+            - Speech Recognition Model [OpenAI/Whisper](https://github.com/openai/whisper) 🗣️🎤
+            - Text Analysis Model [roberta-base-go_emotions](https://huggingface.co/SamLowe/roberta-base-go_emotions) 📝📜
+            - Contextual Understanding Model (Sentiment Analysis) 🔄🌍
+            """)
+            gr.Markdown("""### By combining the outputs of these models, the cross-model approach aims to capture a more comprehensive view of the sentiment in a healthcare-related video. This way, healthcare providers can gain insight into patient experiences and emotions, supporting better understanding and improvements in healthcare services. 👩‍⚕️📈👨‍⚕️""")
+
+    with gr.Row():
+        with gr.Column():
+            input_video = gr.Video(sources=["upload", "webcam"])
+            button = gr.Button("Process", variant="primary")
+            gr.Examples(inputs=input_video, examples=[os.path.join(os.path.dirname(__file__), "test_video_1.mp4")])
+            with gr.Row():
+                overall_score = gr.Label(label="Overall Score")
+                video_sentiment_final = gr.Label(label="Video Sentiment Score")
+
+        with gr.Column():
+            frames_gallery = gr.Gallery(label="Video Frames", show_label=True, elem_id="gallery", columns=[3], rows=[1], object_fit="contain", height="auto")
+            with gr.Accordion(label="Detailed JSON Responses", open=False):
+                json_output = gr.Textbox(label="JSON Output", info="Overall scores of the above video in segments.", show_label=True, lines=5, show_copy_button=True, interactive=False)
+                audio_sentiment = gr.Textbox(label="Audio Sentiments", info="Outputs of audio processing from the video.", show_label=True, lines=5, show_copy_button=True, interactive=False)
+                video_sentiment_markdown = gr.Textbox(label="Video Sentiments", info="Outputs of video-frame processing from the video.", show_label=True, lines=5, show_copy_button=True, interactive=False)
+
+    button.click(
+        fn=video_to_audio,
+        inputs=input_video,
+        outputs=[json_output, frames_gallery, overall_score, audio_sentiment, video_sentiment_markdown, video_sentiment_final]
+    )
+
+Video.launch()
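
For context on the parsing in app.py: below is a minimal sketch (not part of the commit) of the two Inference API round-trips it makes. The frame file name and the sample sentence are hypothetical, and the response shapes shown are the standard Inference API formats for image classification (a flat list of label/score pairs) and text classification (one inner list per input, hence the `[0]` in `analyze_sentiment`).

```python
import os
import requests

HEADERS = {"Authorization": f"Bearer {os.getenv('API_KEY')}"}

# Image classification: raw JPEG bytes in, flat list of {label, score} out.
with open("frame.jpg", "rb") as f:  # hypothetical sample frame
    face = requests.post(
        "https://api-inference.huggingface.co/models/dima806/facial_emotions_image_detection",
        headers=HEADERS,
        data=f.read(),
    ).json()
print({item["label"]: item["score"] for item in face})
# e.g. {'happy': 0.93, 'neutral': 0.04, ...}

# Text classification: {"inputs": ...} in, nested list of {label, score} out.
text = requests.post(
    "https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions",
    headers=HEADERS,
    json={"inputs": "I feel much better after the treatment."},  # hypothetical input
).json()
print({item["label"]: item["score"] for item in text[0]})
# e.g. {'gratitude': 0.41, 'relief': 0.22, ...}
```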
requirements.txt ADDED
@@ -0,0 +1,3 @@
+opencv-python
+pydub
+faster_whisper
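
One note on dependencies: app.py also imports gradio and requests. A Hugging Face Space supplies those through its Gradio SDK image, but a local run would need them installed as well; a sketch of a fuller requirements set under that assumption:

```
opencv-python
pydub
faster_whisper
gradio
requests
```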
test_video_1.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9aea0f64701df624fdab94c311a38b51bbefe1dea785f4ff079c1d755850233
+size 10982596
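
To make the per-segment aggregation in `video_to_audio` concrete, here is a small self-contained sketch with made-up scores showing how a transcript segment is mapped onto the once-per-second frame scores and averaged:

```python
import math

# Hypothetical per-second facial scores as returned by extract_frames
# (one dict per sampled frame, i.e. one per second of video).
frames_sentiments = [
    {"happy": 0.8, "neutral": 0.2},  # second 0
    {"happy": 0.6, "neutral": 0.4},  # second 1
    {"sad": 0.7, "neutral": 0.3},    # second 2
    {"sad": 0.9, "neutral": 0.1},    # second 3
]

# A transcript segment spanning 0.4s -> 3.2s covers whole seconds 1 and 2:
# range(math.ceil(0.4), math.floor(3.2)) == range(1, 3).
start, end = 0.4, 3.2
totals, counter = {}, 0
for i in range(math.ceil(start), min(math.floor(end), len(frames_sentiments))):
    for emotion, score in frames_sentiments[i].items():
        totals[emotion] = totals.get(emotion, 0.0) + score
    counter += 1  # one per frame, so the result is a per-frame average

averages = {emotion: total / counter for emotion, total in totals.items()}
print({emotion: round(score, 2) for emotion, score in averages.items()})
# -> {'happy': 0.3, 'neutral': 0.35, 'sad': 0.35}
```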