import cv2
import face_recognition
from datetime import datetime

# Load the pre-trained Haar cascade face detector
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Pre-registered face encodings
registered_encodings = []

# Register faces from reference images
image1 = face_recognition.load_image_file('person1.jpg')
face_locations1 = face_recognition.face_locations(image1)
face_encodings1 = face_recognition.face_encodings(image1, face_locations1)
for encoding in face_encodings1:
    registered_encodings.append(encoding)

image2 = face_recognition.load_image_file('person2.jpg')
face_locations2 = face_recognition.face_locations(image2)
face_encodings2 = face_recognition.face_encodings(image2, face_locations2)
for encoding in face_encodings2:
    registered_encodings.append(encoding)

# Open the video file
cap = cv2.VideoCapture('recorded_movie.mp4')

# Set the output video codec, frame rate and dimensions to match the input
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) or 20.0
out = cv2.VideoWriter('output_movie.mp4', fourcc, fps, (width, height))

# Open the log file
log_file = open('recognized_persons.log', 'w')

# Loop through each frame of the video
while cap.isOpened():
    # Read the next frame
    ret, frame = cap.read()
    if not ret:
        break

    # Convert the frame to grayscale for Haar cascade face detection
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the frame
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    # face_recognition expects RGB images, while OpenCV frames are BGR
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Loop through each detected face
    for (x, y, w, h) in faces:
        # Encode the detected face; face_recognition uses (top, right, bottom, left) boxes
        encodings = face_recognition.face_encodings(rgb_frame, [(y, x + w, y + h, x)])
        if not encodings:
            continue
        encoding = encodings[0]

        # Compare the encoding against the pre-registered encodings
        matches = face_recognition.compare_faces(registered_encodings, encoding)
        for i, match in enumerate(matches):
            if match:
                # Draw a green rectangle around the recognized face
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                # Log the recognized person's index and the current timestamp
                timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                log_file.write(f'{timestamp}: Recognized person {i + 1}\n')
                break

    # Write the (possibly annotated) frame to the output video
    out.write(frame)

# Release the video capture and writer objects
cap.release()
out.release()
cv2.destroyAllWindows()

# Close the log file
log_file.close()