Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# HAND DETECTION_MOTOR_PUMP/Python
# By: Randy Canada
#
# Detects hand gestures from a webcam (MediaPipe) and sends start/stop
# commands to an Arduino-driven pump over a serial port.
import cv2
import mediapipe as mp
import serial
import time
# Sanity check that the OpenCV binding loaded; prints its version at startup.
print(cv2.__version__)
# NOTE(review): port and baud rate are hard-coded — 'COM3' is Windows-specific
# and 115200 must match the Arduino sketch's Serial.begin(). Opening the port
# happens at import time and raises serial.SerialException if the device is absent.
arduino = serial.Serial('COM3', 115200)
class poseTracker:
    """Webcam hand-gesture tracker that drives an Arduino pump over serial.

    Gestures recognized in :meth:`run`:
      * thumb up   -> sends ``b'Thumb\\r'``    (start pouring)
      * high five  -> sends ``b'HighFive\\r'`` (stop pouring)

    Commands are debounced: a gesture is only sent when it differs from the
    previously sent one and at least one second has elapsed.
    """

    def __init__(self, width=1280, height=720):
        """Open the webcam and build the MediaPipe hand/face-mesh pipelines.

        :param width:  capture frame width in pixels (default 1280)
        :param height: capture frame height in pixels (default 720)
        """
        self.width = width
        self.height = height
        # CAP_DSHOW: DirectShow backend — avoids slow MSMF startup on Windows.
        self.cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.cam.set(cv2.CAP_PROP_FPS, 30)
        # MJPG keeps USB bandwidth low enough for 720p@30 on most webcams.
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        self.hands = mp.solutions.hands.Hands(
            static_image_mode=False,
            max_num_hands=2,
            min_detection_confidence=0.3,
            min_tracking_confidence=0.3,
        )
        self.faceMesh = mp.solutions.face_mesh.FaceMesh(
            static_image_mode=False,
            max_num_faces=3,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        )
        self.mpDraw = mp.solutions.drawing_utils
        # Drawing styles are loop-invariant; build once instead of per frame
        # (originally rebuilt on every handDetection() call).
        self.drawSpecCircle = self.mpDraw.DrawingSpec(thickness=1, circle_radius=1, color=(255, 0, 0))
        self.drawSpecLine = self.mpDraw.DrawingSpec(thickness=2, circle_radius=2, color=(0, 255, 0))
        self.last_gesture = None               # last gesture recorded/sent (debounce state)
        self.last_gesture_time = time.time()   # timestamp of that record (debounce state)

    def mesh(self, frame):
        """Return face-mesh landmark pixel coords for all detected faces.

        :param frame: BGR image as produced by ``cv2.VideoCapture.read``
        :returns: flat list of ``(x, y)`` pixel tuples (empty if no face)
        """
        frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.faceMesh.process(frameRGB)
        meshBound = []
        if results.multi_face_landmarks:
            for faceLandmarks in results.multi_face_landmarks:
                # Landmarks are normalized [0, 1]; scale to pixel coordinates.
                for lm in faceLandmarks.landmark:
                    meshBound.append((int(lm.x * self.width), int(lm.y * self.height)))
        return meshBound

    def handDetection(self, frame):
        """Detect hands, draw their landmarks onto ``frame``, and return them.

        :param frame: BGR image; modified in place with landmark overlays
        :returns: ``(myHands, handsType)`` where ``myHands`` is a list of
                  21-point ``(x, y)`` pixel lists (one per hand) and
                  ``handsType`` the matching 'Left'/'Right' labels.
        """
        frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        hands_results = self.hands.process(frameRGB)
        myHands = []
        handsType = []
        if hands_results.multi_hand_landmarks:
            for hand_landmarks, handedness in zip(hands_results.multi_hand_landmarks,
                                                  hands_results.multi_handedness):
                # Normalized landmarks -> pixel coordinates.
                myHand = [(int(lm.x * self.width), int(lm.y * self.height))
                          for lm in hand_landmarks.landmark]
                myHands.append(myHand)
                handsType.append(handedness.classification[0].label)
                # Custom colors: blue landmark dots, green connection lines.
                self.mpDraw.draw_landmarks(frame, hand_landmarks,
                                           mp.solutions.hands.HAND_CONNECTIONS,
                                           self.drawSpecCircle, self.drawSpecLine)
        return myHands, handsType

    def run(self):
        """Main loop: grab frames, classify gestures, notify the Arduino.

        Runs until the camera fails or 'q' is pressed; releases the camera
        and closes all OpenCV windows on exit.
        """
        while True:
            ret, frame = self.cam.read()
            if not ret:
                break
            # meshLocation = self.mesh(frame)
            myHands1, handsType = self.handDetection(frame)
            right_hand_points = []
            left_hand_points = []
            if myHands1:
                for hand, handType in zip(myHands1, handsType):
                    if handType == 'Right':
                        right_hand_points.extend(hand)
                    elif handType == 'Left':
                        left_hand_points.extend(hand)
                print(handsType)
                # Landmark indices follow MediaPipe's 21-point hand model:
                # 0 wrist, 1-4 thumb, 5-8 index, 9-12 middle, 13-16 ring, 17-20 pinky.
                # Image y grows downward, so "a below b" is a[1] > b[1].
                rightThumbDown = (right_hand_points
                                  and right_hand_points[0][1] < right_hand_points[1][1]
                                  < right_hand_points[2][1] < right_hand_points[3][1]
                                  < right_hand_points[4][1]
                                  and right_hand_points[4][1] > right_hand_points[6][1])
                leftThumbDown = (left_hand_points
                                 and left_hand_points[0][1] < left_hand_points[1][1]
                                 < left_hand_points[2][1] < left_hand_points[3][1]
                                 < left_hand_points[4][1]
                                 and left_hand_points[4][1] > left_hand_points[6][1])
                rightThumbUp = (right_hand_points
                                and right_hand_points[4][1] < right_hand_points[3][1]
                                < right_hand_points[2][1] < right_hand_points[1][1]
                                and right_hand_points[6][1] > right_hand_points[4][1]
                                and right_hand_points[12][1] > right_hand_points[4][1]
                                and right_hand_points[16][1] > right_hand_points[4][1]
                                and right_hand_points[17][1] > right_hand_points[4][1])
                leftThumbUp = (left_hand_points
                               and left_hand_points[4][1] < left_hand_points[3][1]
                               < left_hand_points[2][1] < left_hand_points[1][1]
                               and left_hand_points[6][1] > left_hand_points[4][1]
                               and left_hand_points[12][1] > left_hand_points[4][1]
                               and left_hand_points[16][1] > left_hand_points[4][1]
                               and left_hand_points[17][1] > left_hand_points[4][1])
                # Parenthesized explicitly; matches the original's effective
                # evaluation (the left-hand test was relying on and/or precedence).
                highFive = ((right_hand_points
                             and right_hand_points[20][1] < right_hand_points[18][1]
                             and right_hand_points[8][1] < right_hand_points[0][1])
                            or (left_hand_points
                                and left_hand_points[20][1] < left_hand_points[18][1]
                                and left_hand_points[8][1] < left_hand_points[0][1]))
                if (rightThumbUp and not leftThumbDown) or (leftThumbUp and not rightThumbDown):
                    gesture = 'Thumb'
                    cv2.putText(frame, 'Start Pouring!', (500, 100), cv2.FONT_ITALIC,
                                fontScale=2, color=(255, 0, 0), thickness=3)
                elif highFive:
                    gesture = 'HighFive'
                    cv2.putText(frame, 'Stop Pouring!', (500, 100), cv2.FONT_ITALIC,
                                fontScale=2, color=(0, 0, 255), thickness=3)
                else:
                    gesture = 'Unknown'
                    cv2.putText(frame, 'Unknown Gesture', (500, 100), cv2.FONT_ITALIC,
                                fontScale=2, color=(0, 255, 0), thickness=3)
                current_time = time.time()
                # Debounce: only act when the gesture changed and >1 s has passed.
                if gesture != self.last_gesture and (current_time - self.last_gesture_time) > 1:
                    self.last_gesture = gesture
                    self.last_gesture_time = current_time
                    if gesture in ('Thumb', 'HighFive'):
                        # Commands are CR-terminated so the Arduino sketch can
                        # read them with readStringUntil('\r').
                        print(f"Sending gesture: {gesture}")
                        arduino.write((gesture + '\r').encode())
                    else:
                        # 'Unknown' is recorded for the debounce but not sent.
                        # (Bug fix: the original mutated `gesture` to 'Thumb\r'
                        # before the HighFive check, so its trailing `else`
                        # immediately overwrote last_gesture with "Unknown"
                        # after every Thumb send, defeating the debounce.)
                        print("Sending gesture: Unknown")
            cv2.imshow('my WEBcam', frame)
            cv2.moveWindow('my WEBcam', 0, 0)
            if cv2.waitKey(1) & 0xff == ord('q'):
                break
        self.cam.release()
        # Bug fix: original ended with `cv2.destroy`, which is not a valid
        # attribute and raised AttributeError instead of closing the window.
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Run the tracker; the finally-block guarantees the serial port is
    # released even if run() raises (the original leaked the port on error).
    pose_Tracker = poseTracker()
    try:
        pose_Tracker.run()
    finally:
        arduino.close()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement