From f4e402f8e30bee63a7c1f3c264b0229b4cc752ec Mon Sep 17 00:00:00 2001
From: "Hailu, Dawit Abiy" <dawit.hailu@uni-hamburg.de>
Date: Fri, 11 Mar 2022 12:11:53 +0000
Subject: [PATCH] Upload New File

---
 pipe.py | 132 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 132 insertions(+)
 create mode 100644 pipe.py

diff --git a/pipe.py b/pipe.py
new file mode 100644
index 0000000..134e732
--- /dev/null
+++ b/pipe.py
@@ -0,0 +1,132 @@
+import cv2
+import mediapipe as mp
+import os
+import time
+import deepface  # imported but never used below; the usual form is "from deepface import DeepFace"
+
+mp_face_detection = mp.solutions.face_detection 
+mp_drawing = mp.solutions.drawing_utils
+
+# For static images:
+# IMAGE_FILES = []
+# with mp_face_detection.FaceDetection(
+#     model_selection=1, min_detection_confidence=0.5) as face_detection:
+#   for idx, file in enumerate(IMAGE_FILES):
+#     image = cv2.imread(file)
+#     # Convert the BGR image to RGB and process it with MediaPipe Face Detection.
+#     results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+#     # Draw face detections of each face.
+#     if not results.detections:
+#       continue
+#     annotated_image = image.copy()
+#     for detection in results.detections:
+#       print('Nose tip:')
+#       print(mp_face_detection.get_key_point(
+#           detection, mp_face_detection.FaceKeyPoint.NOSE_TIP))
+#       mp_drawing.draw_detection(annotated_image, detection)
+#     cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
+
+# For webcam input:
+# findcounter = 0
+# notfindcounter = 0
+# totalcounter = 0
+
+# filename = 'video2.mp4'
+# frames_per_second = 24.0
+# res = '720p'
+
+# # Set resolution for the video capture
+# # Function adapted from https://kirr.co/0l6qmh
+# def change_res(cap, width, height):
+#     cap.set(3, width)
+#     cap.set(4, height)
+
+# # Standard Video Dimensions Sizes
+# STD_DIMENSIONS =  {
+#     "480p": (640, 480),
+#     "720p": (1280, 720),
+#     "1080p": (1920, 1080),
+#     "4k": (3840, 2160),
+# }
+
+
+# # Grab resolution dimensions and set the video capture to them.
+# def get_dims(cap, res='720p'):
+#     width, height = STD_DIMENSIONS["480p"]
+#     if res in STD_DIMENSIONS:
+#         width, height = STD_DIMENSIONS[res]
+#     ## change the current capture device
+#     ## to the resulting resolution
+#     change_res(cap, width, height)
+#     return width, height
+
+# # Video Encoding, might require additional installs
+# # Types of Codecs: http://www.fourcc.org/codecs.php
+# VIDEO_TYPE = {
+#     'avi': cv2.VideoWriter_fourcc(*'XVID'),
+#     #'mp4': cv2.VideoWriter_fourcc(*'H264'),
+#     'mp4': cv2.VideoWriter_fourcc(*'XVID'),
+# }
+
+# def get_video_type(filename):
+#     filename, ext = os.path.splitext(filename)
+#     if ext in VIDEO_TYPE:
+#       return  VIDEO_TYPE[ext]
+#     return VIDEO_TYPE['avi']
+
+# # FPS = 1/30
+# # FPS_MS = int(FPS * 1000)
+# num_frames = 120
+
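+# Open the default webcam (device index 0).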
+cap = cv2.VideoCapture(0)
+# cap = cv2.VideoCapture("/home/beyondem/Downloads/VID_20220225_170957.mp4")
+# out = cv2.VideoWriter(filename, get_video_type(filename), 30, get_dims(cap, res))
+# cap = cv2.VideoCapture(1)
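+# model_selection=1 picks MediaPipe's full-range detection model (faces up to
+# roughly five metres away); min_detection_confidence=0.4 keeps fairly weak
+# detections rather than discarding them.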
+with mp_face_detection.FaceDetection(
+    min_detection_confidence=0.4, model_selection=1) as face_detection:
+  while cap.isOpened():
+    start = time.time()
+    success, image = cap.read()
+    # time.sleep(0.5)
+    
+    if not success:
+      print("Ignoring empty camera frame.")
+      # If loading a video, use 'break' instead of 'continue'.
+      continue
+
+
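+    # MediaPipe expects RGB input, while OpenCV captures frames in BGR.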
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    # Mark the frame as read-only so MediaPipe can pass it by reference.
+    image.flags.writeable = False
+    results = face_detection.process(image)
+    
+    end = time.time()
+    # Time elapsed
+    seconds = end - start
+    fps = 1/seconds
+
+    # Draw the face detection annotations on the image.
+    image.flags.writeable = True
+    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+    if results.detections:
+      # print(len(results.detections))
+      for detection in results.detections:
+        mp_drawing.draw_detection(image, detection)
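+
+    # Hypothetical extension (not part of the original script): since deepface is
+    # imported above, frames with detections could also be passed to DeepFace.analyze
+    # for attribute estimation. Sketch only, kept commented out:
+    #   from deepface import DeepFace
+    #   analysis = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
+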
+    # Flip the image horizontally for a selfie-view display, then overlay the FPS.
+    image = cv2.flip(image, 1)
+    font = cv2.FONT_HERSHEY_SIMPLEX
+    cv2.putText(image, f'{fps:.1f}', (100, 351), font, 1, (110, 110, 255), 2, cv2.LINE_AA)
+
+    cv2.imshow('MediaPipe Face Detection', image)
+
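+    # Exit when the Esc key (ASCII 27) is pressed.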
+    if cv2.waitKey(5) & 0xFF == 27:
+      break
+cap.release()
+# out.release()
+cv2.destroyAllWindows()
+
-- 
GitLab