Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import logging
- import logging.handlers
- import os
- from Queue import Queue
- from threading import Thread, Event
- from timeit import default_timer as timer
- import cv2
- import numpy as np
- # ============================================================================
- QUEUE_MAXSIZE = 8
- # ============================================================================
def init_logging(log_to_console = True):
    """Configure the root logger with a rotating file handler (debug.log)
    and, optionally, a console handler writing to stdout.

    If debug.log already exists from a previous run, it is rolled over on
    startup so each run begins with a fresh file.
    """
    import sys

    log_path = 'debug.log'
    root = logging.getLogger()

    # An existing file means a previous run wrote logs -> roll it over below.
    roll_needed = os.path.isfile(log_path)

    fmt = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d %(levelname)-8s <%(threadName)s> [%(name)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    file_handler = logging.handlers.RotatingFileHandler(
        log_path, maxBytes=2**24, backupCount=10)
    file_handler.setFormatter(fmt)
    root.addHandler(file_handler)

    # Skip console output when running under pythonw.exe (no console attached).
    if log_to_console and not sys.executable.endswith("pythonw.exe"):
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(fmt)
        root.addHandler(console_handler)

    root.setLevel(logging.DEBUG)

    if roll_needed:
        # Roll over on application start
        file_handler.doRollover()
- # ============================================================================
def load_alpha(filename, q_alpha_fg, q_alpha_bg, stop_event):
    """Worker thread: read the alpha-mask video frame by frame, normalize each
    mask to float32 in [0, 1], and feed the mask to q_alpha_fg and its
    complement (1 - mask) to q_alpha_bg.

    A None sentinel is put on both queues when the video ends (or on error)
    so downstream consumers can shut down.

    filename    -- path of the alpha-mask video
    q_alpha_fg  -- bounded Queue receiving the normalized masks
    q_alpha_bg  -- bounded Queue receiving the inverted masks
    stop_event  -- threading.Event; when set, the loop exits early
    """
    logger = logging.getLogger('load_alpha')
    video = cv2.VideoCapture(filename)
    if not video.isOpened():
        # Do NOT call exit() here: in a worker thread SystemExit only kills
        # this thread and would leave consumers blocked forever on get().
        logger.error("Unable to open input video '%s'.", filename)
        q_alpha_fg.put(None)  # Signal end
        q_alpha_bg.put(None)  # Signal end
        return
    alpha = []      # Ring of pre-allocated float32 mask buffers (cache)
    alpha_inv = []  # Ring of pre-allocated inverted-mask buffers (cache)
    frame_count = 0
    total_time = 0
    try:
        while not stop_event.is_set():
            t0 = timer()
            res, alpha_raw = video.read()
            if not res:
                break  # End of video
            if frame_count == 0:  # Pre-allocate rings on first frame
                # maxsize + 2 slots: queue capacity plus one in flight on each
                # side, so a buffer is not overwritten while still in use.
                for i in range(q_alpha_fg.maxsize + 2):
                    alpha.append(np.zeros_like(alpha_raw, np.float32))
                for i in range(q_alpha_bg.maxsize + 2):
                    alpha_inv.append(np.zeros_like(alpha_raw, np.float32))
            k = frame_count % (q_alpha_fg.maxsize + 2)
            k_i = frame_count % (q_alpha_bg.maxsize + 2)
            # Normalize 8-bit mask into [0, 1] and compute its complement.
            np.multiply(alpha_raw, np.float32(1/255.0), alpha[k])
            cv2.subtract((1.0, 1.0, 1.0, 0.0), alpha[k], alpha_inv[k_i])
            q_alpha_fg.put(alpha[k])
            q_alpha_bg.put(alpha_inv[k_i])
            t1 = timer()
            logger.debug("Processed frame %d in %0.4f ms", frame_count, (t1-t0)*1000)
            frame_count += 1
            total_time += (t1-t0)
    finally:
        video.release()       # Release the capture even on unexpected errors
        q_alpha_fg.put(None)  # Signal end
        q_alpha_bg.put(None)  # Signal end
    if (frame_count > 0):
        logger.debug("Done (average iteration %0.4f ms).", (total_time / frame_count) * 1000)
    else:
        logger.debug("Done")
- # ============================================================================
def weigh_frames(filename, q_alpha, q_output, logname='weigh_frames'):
    """Worker thread: multiply each frame of *filename* by the matching alpha
    mask taken from *q_alpha* and push the weighted float32 frame to
    *q_output*.

    A None on q_alpha means upstream is done; a None is put on q_output when
    this stage finishes (or fails), so downstream can shut down.

    filename -- path of the input video
    q_alpha  -- bounded Queue of float32 alpha masks (None = end of stream)
    q_output -- bounded Queue receiving the weighted frames
    logname  -- logger name, so the two instances of this worker are distinct
    """
    logger = logging.getLogger(logname)
    video = cv2.VideoCapture(filename)
    if not video.isOpened():
        logger.error("Unable to open input video '%s'.", filename)
        # Do NOT call exit(): in a worker thread it only raises SystemExit in
        # this thread. Signal downstream and drain upstream instead so neither
        # the alpha producer nor the blender deadlocks on a full/empty queue.
        q_output.put(None)
        while True:
            item = q_alpha.get()
            q_alpha.task_done()
            if item is None:
                break
        return
    frame_weighed = []  # Ring of pre-allocated float32 output buffers (cache)
    frame_count = 0
    total_time = 0
    try:
        while True:
            t0 = timer()
            res, frame_raw = video.read()
            alpha = q_alpha.get()
            if (not res) or (alpha is None):
                if alpha is not None or res:
                    q_alpha.task_done()  # account for the item consumed above
                break  # End of video on either input
            if frame_count == 0:  # Pre-allocate ring on first frame
                # maxsize + 2 slots so a buffer is never overwritten while it
                # is still queued or being read downstream.
                for i in range(q_output.maxsize + 2):
                    frame_weighed.append(np.zeros_like(frame_raw, np.float32))
            k = frame_count % (q_output.maxsize + 2)
            cv2.multiply(frame_raw, alpha, frame_weighed[k], dtype=cv2.CV_32FC3)
            q_output.put(frame_weighed[k])
            q_alpha.task_done()
            t1 = timer()
            logger.debug("Processed frame %d in %0.4f ms", frame_count, (t1-t0)*1000)
            frame_count += 1
            total_time += (t1-t0)
    finally:
        video.release()    # Release the capture even on unexpected errors
        q_output.put(None) # Signal end
    if (frame_count > 0):
        logger.debug("Done (average iteration %0.4f ms).", (total_time / frame_count) * 1000)
    else:
        logger.debug("Done")
- # ============================================================================
def blend_frames(q_fq, q_bg, q_output):
    """Worker thread: add each weighted foreground frame from q_fq to the
    matching weighted background frame from q_bg and push the composited
    uint8 frame to q_output. A None on either input ends the stream; a None
    is put on q_output when done.
    """
    logger = logging.getLogger('blend_frames')
    out_ring = []  # Pre-allocated uint8 output buffers, reused round-robin
    processed = 0
    elapsed_total = 0
    while True:
        start = timer()
        fg = q_fq.get()
        bg = q_bg.get()
        if fg is None or bg is None:
            break  # End of video
        if processed == 0:
            # First frame: allocate enough slots that a frame is never
            # overwritten while still queued downstream.
            ring_size = q_output.maxsize + 2
            out_ring = [np.zeros_like(fg, np.uint8) for _ in range(ring_size)]
        slot = processed % (q_output.maxsize + 2)
        cv2.add(fg, bg, out_ring[slot], dtype=cv2.CV_8UC3)
        q_output.put(out_ring[slot])
        q_fq.task_done()
        q_bg.task_done()
        stop = timer()
        logger.debug("Processed frame %d in %0.4f ms", processed, (stop - start) * 1000)
        processed += 1
        elapsed_total += (stop - start)
    q_output.put(None)  # Signal end
    if processed > 0:
        logger.debug("Done (average iteration %0.4f ms).", (elapsed_total / processed) * 1000)
    else:
        logger.debug("Done")
- # ============================================================================
# Main: wire the four pipeline stages together with bounded queues and run
# the display loop on the main thread.
init_logging(False)  # File-only logging (console handler disabled)
logger = logging.getLogger('main')

# Bounded queues linking the stages; maxsize gives backpressure between threads.
q_alpha_fg = Queue(maxsize=QUEUE_MAXSIZE)    # alpha masks -> foreground weigher
q_alpha_bg = Queue(maxsize=QUEUE_MAXSIZE)    # inverted masks -> background weigher
q_weighed_fg = Queue(maxsize=QUEUE_MAXSIZE)  # weighted foreground -> blender
q_weighed_bg = Queue(maxsize=QUEUE_MAXSIZE)  # weighted background -> blender
q_result = Queue(maxsize=QUEUE_MAXSIZE)      # composited frames -> display loop
stop_event = Event()  # Asks the alpha loader to stop early (it polls this)

FOREGROUND_FILENAME = 'circle.mp4'
BACKGROUND_FILENAME = 'video.mp4'
ALPHA_FILENAME = 'circle_alpha.mp4'
try:
    workers = []
    worker = Thread(target=load_alpha, args=(ALPHA_FILENAME, q_alpha_fg, q_alpha_bg, stop_event))
    workers.append(worker)
    worker = Thread(target=weigh_frames, args=(FOREGROUND_FILENAME, q_alpha_fg, q_weighed_fg, 'weigh_fg'))
    workers.append(worker)
    worker = Thread(target=weigh_frames, args=(BACKGROUND_FILENAME, q_alpha_bg, q_weighed_bg, 'weigh_bg'))
    workers.append(worker)
    worker = Thread(target=blend_frames, args=(q_weighed_fg, q_weighed_bg, q_result))
    workers.append(worker)
    # Start all threads
    for worker in workers:
        worker.start()
    frame_count = 0
    total_time = 0
    while True:
        t0 = timer()
        frame = q_result.get()
        if frame is None:
            break  # End of video (sentinel from blend_frames)
        if not q_result.empty():
            # Display is falling behind: drop this frame rather than lag.
            logger.debug("Skip frame, more waiting...")
        else:
            cv2.imshow('My Image', frame)
            if cv2.waitKey(1) == ord('q'):
                logger.debug("Exit requested, setting stop event.")
                # NOTE(review): only load_alpha polls stop_event; the other
                # workers stop when its None sentinel propagates through the
                # queues. Confirm this drains fast enough for a clean exit.
                stop_event.set()
        q_result.task_done()
        t1 = timer()
        logger.debug("Processed frame %d in %0.4f ms", frame_count, (t1-t0)*1000)
        frame_count += 1
        total_time += (t1-t0)
    if (frame_count > 0):
        logger.debug("Done (average iteration %0.4f ms).", (total_time / frame_count) * 1000)
    else:
        logger.debug("Done")
    # Wait for every worker to finish before exiting.
    for worker in workers:
        worker.join()
        logger.debug("Worker joined.")
    logger.debug("Done")
except KeyboardInterrupt:
    # NOTE(review): workers blocked on put() into a full queue never see this
    # event, so Ctrl+C may leave non-daemon threads alive — verify shutdown.
    stop_event.set()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement