Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import cv2
- import numpy as np
- import multiprocessing
- from yolo import Yolo
- import time
def multiproces(frame, end_frame):
    """Detect-and-track worker for the frame range [frame, end_frame].

    Each worker process opens its own handle on 'k.m4v', runs a fresh YOLO
    detection (outer loop), then follows the detected boxes with a CSRT
    multi-tracker on every 5th frame (inner loop) before re-detecting.

    Relies on module-level globals: ``detect``, ``createTracker``, ``draw``,
    ``fxy`` (down-scale factor for tracking), ``target_class_id`` and ``mas``.

    NOTE(review): ``mas`` is a plain module-level list; writes made here in a
    child process are NOT visible to the parent or to sibling workers —
    confirm whether a ``multiprocessing`` shared structure was intended.
    NOTE(review): the source's indentation was lost in the paste; the loop
    nesting below is a best-effort reconstruction — confirm with the author.
    """
    video = cv2.VideoCapture('k.m4v')  # per-process capture handle
    video.set(cv2.CAP_PROP_POS_FRAMES, frame)  # seek to this worker's start
    success, kadr = video.read()
    while frame <= end_frame:
        # Fresh detection on the current full-resolution frame.
        bboxes, idxs, indeks = detect(kadr, target_class_id)
        # Scale detection boxes down to the resized frame used for tracking.
        for box in bboxes:
            box[0] = round(box[0] * fxy)
            box[1] = round(box[1] * fxy)
            box[2] = round(box[2] * fxy)
            box[3] = round(box[3] * fxy)
        # New multi-tracker per detection cycle, seeded with the fresh boxes.
        multiTracker = cv2.legacy.MultiTracker_create()
        kadr_res = cv2.resize(kadr, None, fx=fxy, fy=fxy)
        for bbox in bboxes:
            multiTracker.add(createTracker(), kadr_res, bbox)
        j = 0
        # Track for up to 35 frames (j = 0,5,...,30), stepping 5 frames at a
        # time, before falling back to the outer loop's re-detection.
        while j <= 30:
            _, bboxes = multiTracker.update(kadr_res)
            j += 5
            frame += 5
            video.set(cv2.CAP_PROP_POS_FRAMES, frame)
            success, kadr = video.read()
            kadr_res = cv2.resize(kadr, None, fx=fxy, fy=fxy)
            # Keep only the boxes that survived NMS during detection.
            boxes = []
            for i in idxs:
                boxes.append(bboxes[i[0]])
            # Store the same boxes for the 6 most recent frame indices.
            for i in range(6):
                mas[frame-i] = boxes
            draw(kadr,boxes)
    # index and saving
    # drawing and multithreading
def draw(kadr, boxes):
    """Scale tracker boxes back up to the original frame and display them.

    Boxes are (x, y, w, h) in the down-scaled tracking resolution; dividing
    by the module-level ``fxy`` restores original-frame coordinates. Blocks
    on ``cv2.waitKey(0)`` until a key is pressed.
    """
    for box in boxes:
        x = round(box[0] / fxy)
        y = round(box[1] / fxy)
        w = round(box[2] / fxy)
        h = round(box[3] / fxy)
        cv2.rectangle(kadr, (x, y), (x + w, y + h), colors, 2)
    cv2.imshow("output", kadr)
    cv2.waitKey(0)
# --- Module-level configuration (runs in every process on import) ---------
video = cv2.VideoCapture('k.m4v')
length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
# Per-frame slot for detected/tracked boxes, pre-filled with placeholder
# frame indices. NOTE(review): a plain list is not shared between
# multiprocessing workers — confirm whether a Manager list was intended.
mas = list(range(length))
fxy = 1/8  # down-scale factor applied to frames before tracking
colors = [255, 255, 255]  # bounding-box color (BGR white)
# YOLOv3 weights, network config and class-label files.
weights = "yolov3.weights"
config = "yolov3.cfg"
labels = "yolov3.txt"
target_class_id = 0  # class id to keep (0 = person in COCO ordering — verify)
conf_thresh = 0.5  # minimum detection confidence
nms_thresh = 0.5  # non-maximum-suppression threshold
net = Yolo(config, weights, labels, conf_thresh, nms_thresh)
def createTracker():
    """Return a fresh CSRT tracker instance (one per tracked object)."""
    return cv2.legacy.TrackerCSRT_create()
def detect(kadr, target_class_id):
    """Run detection on a frame via the module-level ``net``.

    Returns the (boxes, nms_indices, class_ids) triple produced by
    ``Yolo.detect``.
    """
    return net.detect(kadr, target_class_id)
if __name__ == '__main__':
    # Probe the video once to learn its length, then release the handle —
    # each worker opens its own capture inside multiproces().
    video = cv2.VideoCapture('k.m4v')
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    video.release()
    delenie = round(length / 4)
    # Split [0, length) into 4 contiguous ranges, one worker process each
    # (replaces the copy-pasted p1..p4 blocks).
    bounds = [0, delenie, delenie * 2, delenie * 3, length]
    workers = [
        multiprocessing.Process(target=multiproces, args=(start, end))
        for start, end in zip(bounds, bounds[1:])
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
- import cv2
- import numpy as np
class Yolo:
    """Thin wrapper around the OpenCV DNN module for YOLOv3 detection."""

    def __init__(self, cfg, weights, names, conf_thresh, nms_thresh):
        """Load the network weights and resolve the output layer names.

        cfg/weights: YOLO model definition and trained weights paths.
        names: class-label file path (currently unused — kept for interface
               compatibility; confirm whether labels should be loaded).
        conf_thresh/nms_thresh: confidence and NMS thresholds.
        """
        self.ct = conf_thresh
        self.nmst = nms_thresh
        self.net = cv2.dnn.readNet(weights, cfg)
        print("Finished: " + str(weights))
        self.classes = [0]
        layer_names = self.net.getLayerNames()
        # BUGFIX: getUnconnectedOutLayers() returns Nx1 rows ([[i], ...]) on
        # OpenCV < 4.5.4 but a flat 1-D array on >= 4.5.4; the original
        # `i[0]` indexing crashes on newer builds. Flatten to support both.
        out_idx = np.asarray(self.net.getUnconnectedOutLayers()).flatten()
        self.output_layers = [layer_names[int(i) - 1] for i in out_idx]

    def detect(self, img, target_id):
        """Return (boxes, nms_indices, class_ids) for one image."""
        b, c, ids, idxs = self.get_detection_data(img, target_id)
        return b, idxs, ids

    def get_detection_data(self, img, target_id):
        """Run inference and threshold the raw outputs for one image."""
        layer_outputs = self.get_inf(img)
        height, width = img.shape[:2]
        b, c, ids, idxs = self.thresh(layer_outputs, width, height, target_id)
        return b, c, ids, idxs

    def get_inf(self, img):
        """Resize/normalize the image into a blob and run the forward pass."""
        blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        self.net.setInput(blob)
        layer_outputs = self.net.forward(self.output_layers)
        return layer_outputs

    def thresh(self, layer_outputs, width, height, target_id):
        """Filter raw YOLO outputs by confidence/class and apply NMS.

        Returns (boxes, confidences, class_ids, nms_indices) where boxes are
        [x, y, w, h] in pixel coordinates of the original image.
        """
        boxes = []
        confidences = []
        class_ids = []
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > self.ct and class_id == target_id:
                    # detection[0:4] is (cx, cy, w, h) normalized to [0, 1];
                    # scale to pixels and convert center to top-left corner.
                    box = detection[0:4] * np.array([width, height, width, height])
                    (cx, cy, w, h) = box.astype('int')
                    tx = int(cx - (w / 2))
                    ty = int(cy - (h / 2))
                    boxes.append([tx, ty, int(w), int(h)])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.ct, self.nmst)
        return boxes, confidences, class_ids, idxs
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement