"""Drowsiness detection: watches a webcam feed, computes the eye and
mouth aspect ratios from dlib's 68 facial landmarks, and raises an
audible alarm plus a push notification when the person appears to be
sleeping or yawning."""
import time
import multiprocessing

import cv2
import dlib
import imutils
from imutils import face_utils
from playsound import playsound
from notify_run import Notify

from utilities import eye_aspect_ratio, mouth_aspect_ratio
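
# The EAR/MAR helpers above are imported from the project's utilities
# module, which is not part of this file. As a reference, here is a
# minimal sketch of what they are assumed to compute, following the
# standard formulations (EAR after Soukupova and Cech, 2016); the names
# are kept private so they do not shadow the real imports, and the
# actual utilities.py may differ.
from scipy.spatial import distance as _distance

def _eye_aspect_ratio_sketch(eye):
    # eye: six (x, y) landmarks around one eye, in dlib's 68-point order
    A = _distance.euclidean(eye[1], eye[5])  # vertical lid distance, inner pair
    B = _distance.euclidean(eye[2], eye[4])  # vertical lid distance, outer pair
    C = _distance.euclidean(eye[0], eye[3])  # horizontal corner-to-corner distance
    return (A + B) / (2.0 * C)

def _mouth_aspect_ratio_sketch(mouth):
    # mouth: twenty (x, y) landmarks (points 48-67 of the 68-point model)
    A = _distance.euclidean(mouth[14], mouth[18])  # inner-lip vertical gap
    C = _distance.euclidean(mouth[12], mouth[16])  # inner-lip horizontal span
    return A / C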
def sound_alarm_and_notify(message):
    # Plays the alarm in a separate process for about six seconds
    # (playsound blocks, so terminating the process stops the sound),
    # then sends a push notification through notify-run
    time.sleep(3)
    p = multiprocessing.Process(target=playsound, args=("Alarm.wav",))
    p.start()
    time.sleep(6)
    p.terminate()
    notify = Notify()
    notify.send(message)

def helper():
    # Threshold values for the eye and mouth aspect ratios
    eyeThresh = 0.25
    mouthThresh = 0.60
    # Number of consecutive frames a condition must hold before alerting
    frame_check_eye = 5
    frame_check_mouth = 5
    # Initializing the face detector
    detect = dlib.get_frontal_face_detector()
    # Loading the pretrained 68-point facial landmark model
    predict = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # Landmark index ranges for the eyes and mouth
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"]
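    # For reference (assumption based on imutils' standard 68-point
    # mapping): left_eye covers points 42-47, right_eye 36-41, and
    # mouth 48-67, so each name above unpacks to a (start, end) pair.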
    # Initializing the video capture object (default webcam)
    cap = cv2.VideoCapture(0)
    # Consecutive-frame counters for the eye and mouth conditions
    flag_eye = 0
    flag_mouth = 0
    # Main loop: grab frames, detect faces, and compute the eye and
    # mouth aspect ratios from the facial landmark distances
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # imutils.resize preserves the aspect ratio, so only the target
        # width is needed
        frame = imutils.resize(frame, width=1000)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = detect(gray, 0)
        for subject in subjects:
            shape = predict(gray, subject)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            mouth = shape[mStart:mEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            mar = mouth_aspect_ratio(mouth)
            mouthHull = cv2.convexHull(mouth)
            # Drawing the overlay on the face
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [mouthHull], -1, (255, 0, 0), 1)
            cv2.putText(frame, "Eye Aspect Ratio: {:.2f}".format(ear), (5, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
            cv2.putText(frame, "Mouth Aspect Ratio: {:.2f}".format(mar), (5, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
            # Comparing the Mouth Aspect Ratio (MAR) against its threshold
            if mar > mouthThresh:
                flag_mouth += 1
                if flag_mouth >= frame_check_mouth:
                    cv2.putText(frame, "****************** YOU ARE YAWNING *******************",
                                (10, 370), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    sound_alarm_and_notify("HELP!!! THIS PERSON IS FEELING DROWSY")
            else:
                flag_mouth = 0
            # Comparing the Eye Aspect Ratio (EAR) against its threshold
            if ear < eyeThresh:
                flag_eye += 1
                if flag_eye >= frame_check_eye:
                    cv2.putText(frame, "****************** YOU ARE SLEEPING *******************",
                                (10, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    sound_alarm_and_notify("HELP!! THIS PERSON IS FEELING DROWSY")
            else:
                flag_eye = 0
        # Displaying the annotated frame
        cv2.imshow("Frame", frame)
        # Exit when the "q" key is pressed
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    # Destroying all windows and releasing the camera
    cv2.destroyAllWindows()
    cap.release()
def main():
    helper()

if __name__ == '__main__':
    main()
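
# Usage sketch (assumptions noted inline): run this file directly with a
# webcam attached, e.g.
#
#   python detection.py
#
# It expects "shape_predictor_68_face_landmarks.dat" (dlib's pretrained
# landmark model) and "Alarm.wav" in the working directory, and notify_run
# is assumed to already have a registered channel so that send() has
# somewhere to post. Press "q" in the video window to quit.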