kushagra124 committed on
Commit 2677815
1 Parent(s): 3caebd7

adding app with CLIP image segmentation

Files changed (1):
  1. app.py +9 -15
app.py CHANGED
@@ -52,35 +52,29 @@ def detect_using_clip(image,prompts=[],threshould=0.4):
         prompt = prompt.lower()
 
         model_detections[prompt] = [rescale_bbox(prop.bbox,orig_image_shape=image.shape[:2],model_shape=predicted_image.shape[0]) for prop in props]
-        a = np.expand_dims(predicted_image,axis=-1)
-        print(a.shape,image.shape[:2])
-        predicted_images[prompt]= cv2.resize(np.expand_dims(predicted_image,axis=-1),(h,w),interpolation = cv2.INTER_LINEAR)
-
+        predicted_images[prompt]= predicted_image
     return model_detections , predicted_images
 
-def visualize_images(image,detections,predicted_image,prompt):
+def visualize_images(image,detections,predicted_images,prompt):
     alpha = 0.7
-    H,W = image.shape[:2]
+    # H,W = image.shape[:2]
     prompt = prompt.lower()
-    image_copy = image.copy()
-    mask_image = create_mask(image=image_copy,image_mask=predicted_image)
+    image_resize = cv2.resize(image,(352,352))
+    mask_image = create_mask(image=image_resize,image_mask=predicted_images[prompt])
 
     if prompt not in detections.keys():
         print("prompt not in query ..")
-        return image_copy
-    for bbox in detections[prompt]:
-        cv2.rectangle(image_copy, (int(bbox[1]), int(bbox[0])), (int(bbox[3]), int(bbox[2])), (255, 0, 0), 2)
-        cv2.putText(image_copy,str(prompt),(int(bbox[1]), int(bbox[0])),cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
-    final_image = cv2.addWeighted(image_copy,alpha,mask_image,1-alpha,0)
+        return image_resize
+    final_image = cv2.addWeighted(image_resize,alpha,mask_image,1-alpha,0)
     return final_image
 
 def shot(image, labels_text,selected_categoty):
     prompts = labels_text.split(',')
     prompts = list(map(lambda x: x.strip(),prompts))
-
     model_detections,predicted_images = detect_using_clip(image,prompts=prompts)
 
-    category_image = visualize_images(image=image,detections=model_detections,predicted_image=predicted_images,prompt=selected_categoty)
+    category_image = visualize_images(image=image,detections=model_detections,predicted_images=predicted_images,prompt=selected_categoty)
+
     return category_image
 
 iface = gr.Interface(fn=shot,
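The overlay path after this change keeps each per-prompt mask at the resolution the segmentation model produces and resizes the input image to 352×352 before blending. Below is a minimal standalone sketch of that path, assuming the mask is a 352×352 float map in roughly [0, 1] and using a hypothetical overlay_mask helper in place of the app's create_mask/visualize_images pair:

```python
import cv2
import numpy as np

def overlay_mask(image, mask, alpha=0.7, threshold=0.4):
    # Hypothetical stand-in for the app's create_mask + cv2.addWeighted path.
    # `mask` is assumed to be the raw 352x352 float map for one prompt.
    image_resize = cv2.resize(image, (352, 352))      # shrink image to match the mask
    binary = (mask > threshold).astype(np.uint8)      # threshold the segmentation map
    color_mask = np.zeros_like(image_resize)
    color_mask[:, :, 1] = binary * 255                # paint the segmented region green
    return cv2.addWeighted(image_resize, alpha, color_mask, 1 - alpha, 0)

# Usage with made-up data, mirroring visualize_images after this commit:
dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
dummy_mask = np.random.rand(352, 352).astype(np.float32)
blended = overlay_mask(dummy_image, dummy_mask)
print(blended.shape)  # (352, 352, 3)
```

Blending at mask resolution and shrinking the image, rather than upscaling every mask to the original image size as the previous code did, removes one interpolation step per prompt; the trade-off is that the returned visualization is 352×352 instead of the original resolution.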