-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathvirtualMouseControl.py
More file actions
256 lines (205 loc) · 11.9 KB
/
virtualMouseControl.py
File metadata and controls
256 lines (205 loc) · 11.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
# -Program Name : Intelligent Teaching Web Based Application - virtualMouseControl.py
# -Description : Implements the virtual mouse control functions. Uses MediaPipe and a self-trained model to recognise hand gestures, then PyAutoGUI to control the mouse.
# -First Written on: 25 Feb 2023
# -Edited on: 1 May 2023
import cv2
import mediapipe as mp
import numpy as np
import sys
import pyautogui as pg
import os
import datetime
from keras.models import load_model
# MediaPipe hand-tracking pipeline, limited to a single hand.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(max_num_hands=1)
mp_drawing = mp.solutions.drawing_utils
screenWidth, screenHeight = pg.size() #get the size of screen
frameR=110 #margin (px) of the in-frame control box that maps to the screen
# Disable PyAutoGUI's move-to-corner abort so gesture moves can reach screen edges.
pg.FAILSAFE =False
tidId=[4,8,12,16,20] #finger tip landmark ids (thumb..pinky)
# Click latches used by gen(): >0 means the next pinch may fire a click,
# -1 means a click already fired and the pinch must be released first.
clk=1
clk2=1
dragLeft = False  # True while the left button is held down for a drag
SStime = 1        # screenshot debounce flag: 0 = armed, 1 = already captured
Snumber = 0       # counter appended to screenshot filenames
startAction=False # gate: all mouse actions locked until an open palm is shown
currentTime=datetime.datetime.now().strftime("%Y%m%d_%H%M")
# Output classes of the gesture model, indexed by argmax of its prediction.
label=['screenshot','none',"open"]
model = load_model('gesture_model_virtual_mouse.h5')
def landmark_to_vector(landmarks):
    """Flatten MediaPipe landmarks into a flat [x0, y0, z0, x1, y1, z1, ...] list.

    Args:
        landmarks: iterable of objects exposing ``.x``, ``.y`` and ``.z``.

    Returns:
        list[float]: the coordinates of every landmark, three per point,
        in input order.
    """
    return [coord for lm in landmarks for coord in (lm.x, lm.y, lm.z)]
def gen():
    """Yield an MJPEG frame stream while translating hand gestures into mouse actions.

    Reads frames from the default webcam, runs MediaPipe hand tracking plus the
    pre-trained gesture model, and drives the mouse with PyAutoGUI:
      * index finger up           -> move cursor (thumb raised too -> drag)
      * index+middle pinch        -> left click
      * thumb+index+middle pinch  -> right click
      * four fingers fold/unfold  -> scroll down / up
      * "screenshot" gesture      -> save a screenshot under ./screenshots
    All actions stay locked until an open palm (model class "open") is shown.

    Yields:
        bytes: multipart JPEG chunks suitable for an HTTP MJPEG response.
    """
    global clk, clk2, dragLeft, SStime, Snumber, startAction
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep latency low: buffer one frame only
    # Smoothing buffer of recent cursor targets. Fix: this must persist across
    # frames -- it was previously re-created every frame, so the moving average
    # only ever saw one sample and the jitter smoothing never took effect.
    mouse_positions = []
    while cap.isOpened():
        status, frame = cap.read()
        if status:
            frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches the user
            h, w, c = frame.shape
            # NOTE(review): MediaPipe expects RGB but OpenCV delivers BGR; left
            # unchanged because the gesture model was trained on this pipeline --
            # confirm before converting with cv2.cvtColor.
            outcomes = hands.process(frame)
            # draw the box that limits the active control area
            cv2.rectangle(frame, (frameR, frameR), (w - frameR, h - frameR), (0, 0, 255), 2)
            if outcomes.multi_hand_landmarks:
                landmarkList = []
                hand = outcomes.multi_handedness[0].classification[0].label
                for hand_landmarks in outcomes.multi_hand_landmarks:
                    # classify the gesture from the 21 (x, y, z) landmarks
                    vector = landmark_to_vector(hand_landmarks.landmark)
                    new_vector = np.array(vector).reshape(1, 63)
                    prediction = model.predict(new_vector)
                    index = np.argmax(prediction)
                    # pixel coordinates of every landmark
                    for id, landmarks in enumerate(hand_landmarks.landmark):
                        cx, cy = int(landmarks.x * w), int(landmarks.y * h)
                        landmarkList.append([id, cx, cy])
                    # ---------- count raised fingers ----------
                    fingers = []
                    # thumb: tip vs joint x-comparison flips with handedness
                    if hand == "Right":
                        fingers.append(1 if landmarkList[tidId[0]][1] < landmarkList[tidId[0] - 1][1] else 0)
                    else:
                        fingers.append(1 if landmarkList[tidId[0]][1] > landmarkList[tidId[0] - 1][1] else 0)
                    for id in range(1, 5):  # fingers 2-5: raised when tip is above the pip joint
                        fingers.append(1 if landmarkList[tidId[id]][2] < landmarkList[tidId[id] - 3][2] else 0)
                    # an open palm plus the model's "open" class unlocks all actions
                    if label[index] == "open" and fingers == [1, 1, 1, 1, 1]:
                        startAction = True
                    if startAction:
                        # draw landmarks on the video feed
                        mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                        # hand size (wrist 0 -> index tip 8) normalises pinch
                        # distances so clicks work at any distance from the camera
                        size = ((landmarkList[8][1] - landmarkList[0][1]) ** 2
                                + (landmarkList[8][2] - landmarkList[0][2]) ** 2) ** 0.5
                        # ---------- actions ----------
                        # actions 1+2: move cursor / drag (index finger only)
                        if fingers[1] == 1 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
                            cv2.circle(frame, (landmarkList[8][1], landmarkList[8][2]), 10, (0, 0, 255), cv2.FILLED)
                            # map the control box to the full screen. Fix: y now
                            # uses the frame height and screen height (both
                            # previously used widths, distorting vertical motion).
                            x = np.interp(landmarkList[8][1], (frameR, w - frameR), (0, screenWidth))
                            y = np.interp(landmarkList[8][2], (frameR, h - frameR), (0, screenHeight))
                            mouse_positions.append((x, y))
                            if len(mouse_positions) > 7:
                                mouse_positions.pop(0)  # keep only the most recent samples
                            # average the buffered positions to damp jitter
                            x_avg = sum(pos[0] for pos in mouse_positions) / len(mouse_positions)
                            y_avg = sum(pos[1] for pos in mouse_positions) / len(mouse_positions)
                            pg.moveTo(x_avg, y_avg)
                            # thumb raised while pointing -> hold the left button (drag)
                            if fingers[0] == 1:
                                if not dragLeft:
                                    pg.mouseDown(button='left')
                                    dragLeft = True
                            else:
                                if dragLeft:
                                    pg.mouseUp(button='left')
                                    dragLeft = False
                        # action 3: left click (pinch index and middle fingers)
                        elif fingers == [0, 1, 1, 0, 0]:
                            cv2.circle(frame, (landmarkList[8][1], landmarkList[8][2]), 10, (255, 0, 0), cv2.FILLED)
                            cv2.line(frame, (landmarkList[8][1], landmarkList[8][2]),
                                     (landmarkList[12][1], landmarkList[12][2]), (255, 0, 0), 8)
                            cv2.circle(frame, (landmarkList[12][1], landmarkList[12][2]), 10, (255, 0, 0), cv2.FILLED)
                            length = abs(landmarkList[8][1] - landmarkList[12][1])
                            # clk latches negative so one pinch fires exactly one click
                            if (length / size) <= 0.15 and clk > 0:
                                pg.click()
                                clk = -1
                            else:
                                clk = 1
                        # action 4: right click (thumb up + index/middle pinch)
                        elif fingers == [1, 1, 1, 0, 0]:
                            cv2.circle(frame, (landmarkList[8][1], landmarkList[8][2]), 10, (255, 0, 0), cv2.FILLED)
                            cv2.line(frame, (landmarkList[8][1], landmarkList[8][2]),
                                     (landmarkList[12][1], landmarkList[12][2]), (255, 0, 0), 8)
                            cv2.circle(frame, (landmarkList[12][1], landmarkList[12][2]), 10, (255, 0, 0), cv2.FILLED)
                            length = abs(landmarkList[8][1] - landmarkList[12][1])
                            if (length / size) <= 0.15 and clk2 > 0:
                                pg.rightClick()
                                clk2 = -1
                            else:
                                clk2 = 1
                        # actions 5+6: scroll (four fingers shown, thumb tucked across palm)
                        elif fingers == [0, 1, 1, 1, 1] and (landmarkList[4][1] > landmarkList[5][1]):
                            folded = 0
                            fingers_folded = 0
                            for id in range(1, 5):  # count folded fingers 2-5
                                if landmarkList[tidId[id]][2] > landmarkList[tidId[id] - 1][2]:
                                    fingers_folded += 1
                            if fingers_folded == 4:    # all folded -> scroll down
                                folded = 1
                            elif fingers_folded == 0:  # all extended -> scroll up
                                folded = -1
                            else:
                                folded = 0
                            if folded == 1:
                                pg.scroll(-20)
                            elif folded == -1:
                                pg.scroll(20)
                        # action 7: screenshot
                        elif label[index] == "screenshot" and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
                            folded2 = False
                            for id in range(2, 4):  # middle and ring fingers (fix: old comment said 2-5)
                                if landmarkList[tidId[id]][2] > landmarkList[tidId[id] - 1][2]:
                                    folded2 = True
                                else:
                                    folded2 = False
                                    SStime = 0  # fingers re-extended: arm the next capture
                            if SStime == 0 and folded2:
                                screenshot = pg.screenshot()
                                filepath = os.path.dirname(os.path.abspath(__file__))
                                # Fix: build the path with os.path.join -- the old
                                # literal "\screenshots\screenshot" relied on the
                                # invalid "\s" escape and only worked on Windows.
                                screenshot.save(os.path.join(
                                    filepath, "screenshots",
                                    "screenshot" + currentTime + " (" + str(Snumber) + ")" + ".jpg"))
                                cv2.putText(frame, 'captured', (20, 40), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
                                SStime = 1
                                Snumber += 1
                            if SStime == 1:
                                # keep showing the banner until the fingers re-extend
                                cv2.putText(frame, 'captured', (20, 40), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
            else:
                # no hand in view: require the open-palm unlock gesture again
                startAction = False
            frame = cv2.resize(frame, (640, 480))
            status, jpeg = cv2.imencode('.jpg', frame)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    cap.release()