diff --git a/retinafacefinal_copy.ipynb b/retinafacefinal_copy.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..fd1dd7beadf184d6485dc2591b4e596b6114f5c3
--- /dev/null
+++ b/retinafacefinal_copy.ipynb
@@ -0,0 +1,273 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from retinaface import RetinaFace\n",
+    "from deepface import DeepFace\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import cv2\n",
+    "\n",
+    "import numpy as np\n",
+    "from utilpack.util import *\n",
+    "import os\n",
+    "\n",
+    "import mediapipe as mp\n",
+    "\n",
+    "import time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "from keras.preprocessing import image\n",
+    "def resize_img(img,target_size,grayscale=False, return_region = False):\n",
+    "\n",
+    "    if img.shape[0] > 0 and img.shape[1] > 0:\n",
+    "        factor_0 = target_size[0] / img.shape[0]\n",
+    "        factor_1 = target_size[1] / img.shape[1]\n",
+    "        factor = min(factor_0, factor_1)\n",
+    "\n",
+    "        dsize = (int(img.shape[1] * factor), int(img.shape[0] * factor))\n",
+    "\t\t\n",
+    "        img = cv2.resize(img, dsize)\n",
+    "\n",
+    "\t\t# Then pad the other side to the target size by adding black pixels\n",
+    "\t\t\n",
+    "        diff_0 = target_size[0] - img.shape[0]\n",
+    "\t\t\n",
+    "        diff_1 = target_size[1] - img.shape[1]\n",
+    "\t\t\n",
+    "        if grayscale == False:\n",
+    "\t\t\t# Put the base image in the middle of the padded image\n",
+    "\t\t\t\n",
+    "            img = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)), 'constant')\n",
+    "\t\t\n",
+    "        else:\n",
+    "\t\t\t\n",
+    "            img = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), 'constant')\n",
+    "\n",
+    "\t#------------------------------------------\n",
+    "\n",
+    "\t#double check: if target image is not still the same size with target.\n",
+    "\t\n",
+    "    if img.shape[0:2] != target_size:\n",
+    "\t\t\n",
+    "        img = cv2.resize(img, target_size)\n",
+    "\n",
+    "\t#---------------------------------------------------\n",
+    "\n",
+    "\t#normalizing the image pixels\n",
+    "\n",
+    "\t\n",
+    "    img_pixels = image.img_to_array(img) #what this line doing? must?\n",
+    "\t\n",
+    "    img_pixels = np.expand_dims(img_pixels, axis = 0)\n",
+    "\t\n",
+    "    img_pixels /= 255 #normalize input in [0, 1]\n",
+    "\n",
+    "\t#---------------------------------------------------\n",
+    "\n",
+    "\n",
+    "\t\n",
+    "    if return_region == True:\n",
+    "        return img_pixels, region\n",
+    "    else:\n",
+    "\t\t\n",
+    "        return img_pixels"
+   ]
+  },
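+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sanity check for `resize_img`, assuming the helper above: a synthetic,\n",
+    "non-square BGR image should come back as a normalized `(1, 224, 224, 3)` batch\n",
+    "plus the full-frame region. The random image is purely illustrative."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative only: a random 640x480 image stands in for a real frame\n",
+    "dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)\n",
+    "\n",
+    "pixels, region = resize_img(dummy, (224, 224), return_region=True)\n",
+    "\n",
+    "print(pixels.shape)                # (1, 224, 224, 3): batch of one padded image\n",
+    "print(pixels.min(), pixels.max())  # values normalized into [0, 1]\n",
+    "print(region)                      # [0, 0, 640, 480]: the full input frame"
+   ]
+  },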
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ValueError",
+     "evalue": "invalid detector_backend passed - mediapipe",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "\u001b[0;32m<ipython-input-35-302a201e2cc2>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      4\u001b[0m \u001b[0mimg1_path\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"images/frame_screenshot_07.02.2022_1.png\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0mimg2_path\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"images/frame_screenshot_07.02.2022.png\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mface\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDeepFace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetectFace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg1_path\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m224\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m224\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mbackends\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0menforce_detection\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      7\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDeepFace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mverify\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg1_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimg2_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmodels\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      8\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mface\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m/home/beyondem/miniconda3/envs/compare/lib/python3.6/site-packages/deepface/DeepFace.py\u001b[0m in \u001b[0;36mdetectFace\u001b[0;34m(img_path, target_size, detector_backend, enforce_detection, align)\u001b[0m\n\u001b[1;32m    816\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    817\u001b[0m \timg = functions.preprocess_face(img = img_path, target_size = target_size, detector_backend = detector_backend\n\u001b[0;32m--> 818\u001b[0;31m \t\t, enforce_detection = enforce_detection, align = align)[0] #preprocess_face returns (1, 224, 224, 3)\n\u001b[0m\u001b[1;32m    819\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;31m#bgr to rgb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    820\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m/home/beyondem/miniconda3/envs/compare/lib/python3.6/site-packages/deepface/commons/functions.py\u001b[0m in \u001b[0;36mpreprocess_face\u001b[0;34m(img, target_size, grayscale, enforce_detection, detector_backend, return_region, align)\u001b[0m\n\u001b[1;32m    176\u001b[0m         \u001b[0mbase_img\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    177\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 178\u001b[0;31m         \u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mregion\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdetect_face\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdetector_backend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdetector_backend\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrayscale\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgrayscale\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0menforce_detection\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0menforce_detection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0malign\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0malign\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    179\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    180\u001b[0m         \u001b[0;31m#--------------------------\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m/home/beyondem/miniconda3/envs/compare/lib/python3.6/site-packages/deepface/commons/functions.py\u001b[0m in \u001b[0;36mdetect_face\u001b[0;34m(img, detector_backend, grayscale, enforce_detection, align)\u001b[0m\n\u001b[1;32m    108\u001b[0m         \u001b[0;31m#this call should be completed very fast because it will return found in memory\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    109\u001b[0m         \u001b[0;31m#it will not build face detector model in each call (consider for loops)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 110\u001b[0;31m         \u001b[0mface_detector\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFaceDetector\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdetector_backend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    111\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    112\u001b[0m         \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m/home/beyondem/miniconda3/envs/compare/lib/python3.6/site-packages/deepface/detectors/FaceDetector.py\u001b[0m in \u001b[0;36mbuild_model\u001b[0;34m(detector_backend)\u001b[0m\n\u001b[1;32m     28\u001b[0m             \u001b[0;31m#print(detector_backend,\" built\")\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     29\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 30\u001b[0;31m             \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"invalid detector_backend passed - \"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mdetector_backend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     32\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mface_detector_obj\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdetector_backend\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;31mValueError\u001b[0m: invalid detector_backend passed - mediapipe"
+     ]
+    }
+   ],
+   "source": [
+    "#conda install -c conda-forge dlib\n",
+    "backends = ['opencv', 'ssd', 'Dlib', 'mtcnn', 'retinaface', 'mediapipe']\n",
+    "models = [\"VGG-Face\", \"Facenet\", \"openFace\", \"DeepFace\", \"DeepID\", \"ArcFace\", \"Dlib\"]\n",
+    "img1_path = \"images/frame_screenshot_07.02.2022_1.png\"\n",
+    "img2_path = \"images/frame_screenshot_07.02.2022.png\"\n",
+    "face = DeepFace.detectFace(img1_path,(224,224),backends[5], enforce_detection=False)\n",
+    "result = DeepFace.verify(img1_path, img2_path, model_name=models[5])\n",
+    "plt.imshow(face)\n",
+    "print(result)\n",
+    "face.shape\n",
+    "\n"
+   ]
+  },
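+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A hedged sketch of comparing the detector backends: this deepface build raises\n",
+    "`ValueError` for backends it does not know (as `mediapipe` did above), so the\n",
+    "loop simply reports and skips those. Paths reuse the cell above; the timings\n",
+    "are only indicative."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Run every detector backend on the same image, skipping unsupported ones\n",
+    "for backend in backends:\n",
+    "    try:\n",
+    "        start = time.time()\n",
+    "        face = DeepFace.detectFace(img1_path, (224, 224), backend, enforce_detection=False)\n",
+    "        print(backend, face.shape, round(time.time() - start, 2), 'seconds')\n",
+    "    except ValueError as err:\n",
+    "        print(backend, 'skipped:', err)"
+   ]
+  },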
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "<class 'numpy.ndarray'>\n",
+      "(1, 220, 220, 3)\n",
+      "145200\n"
+     ]
+    },
+    {
+     "ename": "ValueError",
+     "evalue": "Input image needs to have 3 channels at must not be empty.",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "\u001b[0;32m<ipython-input-4-a2440682971e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     57\u001b[0m     \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     58\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 59\u001b[0;31m     \u001b[0mobj\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mRetinaFace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetect_faces\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     61\u001b[0m     \u001b[0;31m# obj, region = DeepFace.functions.initialize_input(img)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m/home/beyondem/miniconda3/envs/compare/lib/python3.6/site-packages/retinaface/RetinaFace.py\u001b[0m in \u001b[0;36mdetect_faces\u001b[0;34m(img_path, threshold, model, allow_upscaling)\u001b[0m\n\u001b[1;32m     61\u001b[0m     \"\"\"\n\u001b[1;32m     62\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m     \u001b[0mimg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     64\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     65\u001b[0m     \u001b[0;31m#---------------------------\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m/home/beyondem/miniconda3/envs/compare/lib/python3.6/site-packages/retinaface/RetinaFace.py\u001b[0m in \u001b[0;36mget_image\u001b[0;34m(img_path)\u001b[0m\n\u001b[1;32m     52\u001b[0m     \u001b[0;31m# Validate image shape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     53\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m3\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprod\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 54\u001b[0;31m         \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Input image needs to have 3 channels at must not be empty.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     55\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     56\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;31mValueError\u001b[0m: Input image needs to have 3 channels at must not be empty."
+     ]
+    }
+   ],
+   "source": [
+    "filename = 'video.mp4'\n",
+    "frames_per_second = 24.0\n",
+    "res = '720p'\n",
+    "\n",
+    "# Set resolution for the video capture\n",
+    "# Function adapted from https://kirr.co/0l6qmh\n",
+    "def change_res(cap, width, height):\n",
+    "    cap.set(3, width)\n",
+    "    cap.set(4, height)\n",
+    "\n",
+    "# Standard Video Dimensions Sizes\n",
+    "STD_DIMENSIONS =  {\n",
+    "    \"480p\": (640, 480),\n",
+    "    \"720p\": (1280, 720),\n",
+    "    \"1080p\": (1920, 1080),\n",
+    "    \"4k\": (3840, 2160),\n",
+    "}\n",
+    "\n",
+    "\n",
+    "# grab resolution dimensions and set video capture to it.\n",
+    "def get_dims(cap, res='720'):\n",
+    "    width, height = STD_DIMENSIONS[\"480p\"]\n",
+    "    if res in STD_DIMENSIONS:\n",
+    "        width,height = STD_DIMENSIONS[res]\n",
+    "    ## change the current caputre device\n",
+    "    ## to the resulting resolution\n",
+    "    change_res(cap, width, height)\n",
+    "    return width, height\n",
+    "\n",
+    "# Video Encoding, might require additional installs\n",
+    "# Types of Codes: http://www.fourcc.org/codecs.php\n",
+    "VIDEO_TYPE = {\n",
+    "    'avi': cv2.VideoWriter_fourcc(*'XVID'),\n",
+    "    #'mp4': cv2.VideoWriter_fourcc(*'H264'),\n",
+    "    'mp4': cv2.VideoWriter_fourcc(*'XVID'),\n",
+    "}\n",
+    "\n",
+    "def get_video_type(filename):\n",
+    "    filename, ext = os.path.splitext(filename)\n",
+    "    if ext in VIDEO_TYPE:\n",
+    "      return  VIDEO_TYPE[ext]\n",
+    "    return VIDEO_TYPE['avi']\n",
+    "\n",
+    "# FPS = 1/30\n",
+    "# FPS_MS = int(FPS * 1000)\n",
+    "\n",
+    "cap = cv2.VideoCapture(0)\n",
+    "out = cv2.VideoWriter(filename, get_video_type(filename), 25, get_dims(cap, res))\n",
+    "\n",
+    "while True:\n",
+    "    ret, frame = cap.read()\n",
+    "    out.write(frame)\n",
+    "    \n",
+    "    img = resize_img(frame,(220,220))\n",
+    "    print(type(img))\n",
+    "    print(img.shape)\n",
+    "    print(img.size)\n",
+    "\n",
+    "    obj = RetinaFace.detect_faces(img)\n",
+    "    \n",
+    "    # obj, region = DeepFace.functions.initialize_input(img)\n",
+    "    # img = np.asfarray(obj)\n",
+    "\n",
+    "    # dsize = (int(img.shape[2] * 224), int(img.shape[3] * 224))\n",
+    "    # img = cv2.resize(img, (1,(img.shape[2] * 224), (img.shape[2] * 224), )\n",
+    "\n",
+    "    # print(type(img))\n",
+    "    # print(img.shape)\n",
+    "    # print(img.size)\n",
+    "\n",
+    "    \n",
+    "\n",
+    "    # img = DeepFace.functions.preprocess_face(img,(224,224),False,False)\n",
+    "    # print(img)\n",
+    "\n",
+    "    # DeepFace.functions.detect_face(np.asfarray(img_list))\n",
+    "\n",
+    "    # for key in obj.keys():\n",
+    "    #     identity = obj[key]\n",
+    "    #     # print(identity)\n",
+    "    #     facial_area = identity[\"facial_area\"]\n",
+    "    #     cv2.rectangle(frame, (facial_area[2],facial_area[3]),(facial_area[0],facial_area[1]),(255,255,255),1)\n",
+    "\n",
+    "    # # plt.figure(figsize=(20,20))\n",
+    "    # plt.imshow(frame[:,:,::-1])\n",
+    "    # plt.show\n",
+    "    \n",
+    "    img_cv = cv2.imshow('frame',frame)\n",
+    "    if cv2.waitKey(1) & 0xFF == ord('q'):\n",
+    "        break\n",
+    "\n",
+    "\n",
+    "cap.release()\n",
+    "out.release()\n",
+    "cv2.destroyAllWindows()"
+   ]
+  },
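+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before debugging the live loop it can help to test RetinaFace on a single\n",
+    "still image. A minimal sketch, assuming `img1_path` from the earlier cell:\n",
+    "`detect_faces` returns a dict keyed `face_1`, `face_2`, ... whose\n",
+    "`facial_area` entries are `[x1, y1, x2, y2]` boxes."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Offline variant of the detection step in the capture loop above\n",
+    "img = cv2.imread(img1_path)\n",
+    "faces = RetinaFace.detect_faces(img)\n",
+    "\n",
+    "if isinstance(faces, dict):\n",
+    "    for identity in faces.values():\n",
+    "        x1, y1, x2, y2 = identity[\"facial_area\"]\n",
+    "        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n",
+    "\n",
+    "plt.imshow(img[:, :, ::-1])  # BGR -> RGB for matplotlib\n",
+    "plt.show()"
+   ]
+  }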
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "e13dd8a849d904006525f725a94fd9290dba9eb122ef777785557ae33676f1f8"
+  },
+  "kernelspec": {
+   "display_name": "compare",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}