adding trackers that might work
							parent
							
								
									bbc3fefdfc
								
							
						
					
					
						commit
						e1a2324b0c
					
				| @ -0,0 +1,149 @@ | ||||
| # importing libraries | ||||
| import cv2 | ||||
| import numpy as np | ||||
| from scipy import stats | ||||
| 
 | ||||
| 
 | ||||
# State for the click-and-drag ROI selection: `drawing` toggles on left-click,
# and the rectangle corners are captured in point1 / point2.
drawing = False
point1 = ()
point2 = ()
| 
 | ||||
def mouse_drawing(event, x, y, flags, params):
    """Mouse callback for selecting a rectangle.

    First left-click anchors point1 and starts tracking; while tracking,
    mouse movement updates point2; a second left-click stops tracking.
    """
    global point1, point2, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        if drawing:
            # Second click: freeze the current rectangle.
            drawing = False
        else:
            # First click: anchor the rectangle and start following the mouse.
            drawing = True
            point1 = (x, y)
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        point2 = (x, y)
| 
 | ||||
# ROI corner points (unset until the user has clicked twice) and the click
# state machine: 0 = waiting for first corner, 1 = waiting for second, 2 = done.
p1 = None
p2 = None
state = 0
| 
 | ||||
# Called on every mouse event.
def on_mouse(event, x, y, flags, userdata):
    """Two-click point selection: the first left-button release stores the
    first point, the second release stores the second point.

    NOTE(review): despite the comment above describing p1/p2, this writes
    point1/point2 (shared with mouse_drawing), and it is never registered
    as a callback in this script — looks like dead/experimental code; verify.
    """
    global state, point1, point2

    if event != cv2.EVENT_LBUTTONUP:
        return

    if state == 0:
        # First release: record the first corner.
        point1 = (x, y)
        state += 1
    elif state == 1:
        # Second release: record the second corner; state sticks at 2 after.
        point2 = (x, y)
        state += 1
| 
 | ||||
| 
 | ||||
# Default ROI rectangles as (x1, y1, x2, y2); originally specified as
# (x, y, width, height), hence the additions.
xFine = (848, 187, 848 + 225, 187 + 21.0)
yFine = (604, 402, 604 + 20.5, 402 + 276)

frameCountMod = 0
centroidX = [0, 0]  # cached centroid for the horizontal tracker
centroidY = [0, 0]  # cached centroid for the vertical tracker
| 
 | ||||
def track(frame, ROI, centroid, update):
    """Locate the bright blob inside ROI and draw its centroid on the frame.

    frame   : full BGR video frame; modified in place (a marker is drawn).
    ROI     : (x1, y1, x2, y2) region of interest, frame coordinates.
    centroid: two-element list [cx, cy], updated in place with the blob
              centre relative to the ROI origin.
    update  : when False, only redraws the marker at the cached centroid.
    """
    if update:
        crop = frame[int(ROI[1]):int(ROI[3]), int(ROI[0]):int(ROI[2])]
        # BUGFIX: OpenCV frames are BGR; the original used COLOR_RGB2GRAY,
        # which weights the channels wrongly (the rest of this file uses
        # COLOR_BGR2GRAY).
        crop = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
        crop = cv2.GaussianBlur(crop, (7, 7), cv2.BORDER_DEFAULT)

        ret, thresh = cv2.threshold(crop, 50, 255, 0)

        M = cv2.moments(thresh)

        # Centre of mass of the thresholded blob. Keep the previous centroid
        # when the region is empty (m00 == 0) to avoid a division by zero.
        if M["m00"] != 0:
            centroid[0] = int(M["m10"] / M["m00"])
            centroid[1] = int(M["m01"] / M["m00"])
    # Mark the (frame-relative) centroid with a small filled green dot.
    cv2.circle(frame, (int(ROI[0]) + centroid[0], int(ROI[1]) + centroid[1]), 3, (0, 255, 0), -1)
| 
 | ||||
# UI windows: "Frame" shows the annotated video, "Process" the cropped ROI.
cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame", mouse_drawing)
cv2.namedWindow("Process")

# Create a VideoCapture object and read from input file
cap = cv2.VideoCapture("/home/mwinter/Portfolio/a_history_of_the_domino_problem/a_history_of_the_domino_problem_source/recs/a_history_of_the_domino_problem_final_documentation_hq.mp4")
cap.set(cv2.CAP_PROP_POS_FRAMES, 10000)  # skip ahead in the recording

# Check if the capture opened successfully (idiomatic form of `== False`).
if not cap.isOpened():
    print("Error opening video file")

# NOTE: the duplicate re-initialisation of frameCountMod / centroidX /
# centroidY that used to sit here was removed — the same values are already
# assigned at module level above.
| 
 | ||||
# Read frames until the video is completed or the user quits.
while cap.isOpened():

    # Capture frame-by-frame; stop on end-of-stream (guard clause replaces
    # the original `if ret == True: ... else: break` pyramid).
    ret, frame = cap.read()
    if not ret:
        break

    # A user-drawn rectangle (via mouse_drawing) overrides the default ROI.
    if point1 and point2:
        px = sorted([point1[0], point2[0]])
        py = sorted([point1[1], point2[1]])
        xFine = (px[0], py[0], px[1], py[1])

    crop = frame[int(xFine[1]):int(xFine[3]), int(xFine[0]):int(xFine[2])]
    gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    # Variance of the Laplacian: a standard single-number focus measure.
    fm = cv2.Laplacian(gray, cv2.CV_64F).var()

    kernel = np.ones((30, 50), np.uint8)

    text = "Not Blurry"
    # BUGFIX: removed the unused `blur = cv2.GaussianBlur(...)` computation —
    # its result was never read.
    dilation = cv2.dilate(gray, kernel, iterations=1)
    ret, dilation = cv2.threshold(dilation, 20, 255, cv2.THRESH_BINARY_INV)

    # Cubing the mean exaggerates brightness differences between frames.
    mean = pow(dilation.mean(), 3)

    # If the focus measure is below the threshold, call the image "blurry".
    if fm < 100.0:
        text = "Blurry"

    # BUGFIX: the identical cv2.rectangle call was issued twice; once is enough.
    cv2.rectangle(frame, (int(xFine[0]), int(xFine[1])), (int(xFine[2]), int(xFine[3])), (0, 255, 0))
    cv2.putText(frame, "{}: {:.2f}".format(text, fm), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
    cv2.putText(frame, "{}: {:.2f}".format("Brightness", mean), (10, 100),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)

    cv2.imshow("Frame", frame)
    cv2.imshow("Process", crop)

    key = cv2.waitKey(100)
    if key == 32:              # space: pause until any key is pressed
        cv2.waitKey()
    elif key == ord('q'):      # q: quit
        break

# When everything is done, release the capture object and close all windows.
cap.release()
cv2.destroyAllWindows()
| @ -0,0 +1,148 @@ | ||||
| # importing libraries | ||||
| import cv2 | ||||
| import numpy as np | ||||
| from scipy import stats | ||||
| 
 | ||||
| 
 | ||||
# Which ROI group the mouse currently repositions: 'x' or 'y' (key-toggled).
rectToSet = 'x'
moving = False  # True while the left mouse button is held down

# Centres of the horizontal ('x') and vertical ('y') tracking rectangles.
roiXCenter = (960, 195)
roiYCenter = (615, 530)

# Half-extents: w = half thickness, l1 / l2 = half lengths of the inner and
# outer fine rectangles, l3 = half length of the coarse rectangle.
w = 10
l1 = 50
l2 = 150
l3 = 20

# Rectangles stored as (x1, y1, x2, y2). The coarse rectangle sits just
# outside the fine ones (below for x, to the right for y).
roiXInner = (roiXCenter[0] - l1, roiXCenter[1] - w, roiXCenter[0] + l1, roiXCenter[1] + w)
roiXOuter = (roiXCenter[0] - l2, roiXCenter[1] - w, roiXCenter[0] + l2, roiXCenter[1] + w)
roiXCourse = (roiXCenter[0] - l3, roiXCenter[1] + w, roiXCenter[0] + l3, roiXCenter[1] + 3 * w)

roiYInner = (roiYCenter[0] - w, roiYCenter[1] - l1, roiYCenter[0] + w, roiYCenter[1] + l1)
roiYOuter = (roiYCenter[0] - w, roiYCenter[1] - l2, roiYCenter[0] + w, roiYCenter[1] + l2)
roiYCourse = (roiYCenter[0] + w, roiYCenter[1] - l3, roiYCenter[0] + 3 * w, roiYCenter[1] + l3)

# Dilation kernel size used by track(); adjusted at runtime with +/- keys.
dilationVal = 75
| 
 | ||||
def roiRects(cx, cy, w, l1, l2, l3, horizontal):
    """Build the (inner, outer, course) ROI rectangles centred on (cx, cy).

    Each rectangle is (x1, y1, x2, y2). With horizontal=True the long axis
    is x and the coarse rectangle sits below the centre; with
    horizontal=False the layout is transposed (long axis y, coarse
    rectangle to the right). Factored out to remove the formula triplication
    in the original callback.
    """
    if horizontal:
        inner = (cx - l1, cy - w, cx + l1, cy + w)
        outer = (cx - l2, cy - w, cx + l2, cy + w)
        course = (cx - l3, cy + w, cx + l3, cy + (w * 3))
    else:
        inner = (cx - w, cy - l1, cx + w, cy + l1)
        outer = (cx - w, cy - l2, cx + w, cy + l2)
        course = (cx + w, cy - l3, cx + (w * 3), cy + l3)
    return inner, outer, course


def moveROI(event, x, y, flags, params):
    """Mouse callback: while the left button is held, dragging repositions
    the ROI group selected by the module-level rectToSet flag ('x' or 'y').
    """
    global roiXCenter, roiYCenter, roiXInner, roiXOuter, roiXCourse, roiYInner, roiYOuter, roiYCourse, moving
    if event == cv2.EVENT_LBUTTONDOWN:
        moving = True

    elif event == cv2.EVENT_MOUSEMOVE:
        if moving:
            # Half-extents are read from the module globals at call time,
            # exactly as the original inline formulas did.
            if rectToSet == 'x':
                roiXCenter = (x, y)
                roiXInner, roiXOuter, roiXCourse = roiRects(x, y, w, l1, l2, l3, True)
            elif rectToSet == 'y':
                roiYCenter = (x, y)
                roiYInner, roiYOuter, roiYCourse = roiRects(x, y, w, l1, l2, l3, False)

    elif event == cv2.EVENT_LBUTTONUP:
        moving = False
|      | ||||
| 
 | ||||
# Preview window; dragging in it repositions the active ROI (see moveROI).
cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame", moveROI)
#cv2.namedWindow("Process")

# Create a VideoCapture object and read from input file
cap = cv2.VideoCapture("/home/mwinter/Portfolio/a_history_of_the_domino_problem/a_history_of_the_domino_problem_source/recs/a_history_of_the_domino_problem_final_documentation_hq.mp4")
cap.set(cv2.CAP_PROP_POS_FRAMES, 10000)  # skip ahead in the recording

# Check if the capture opened successfully (idiomatic form of `== False`).
if not cap.isOpened():
    print("Error opening video file")
| 
 | ||||
| 
 | ||||
def track(frame, roiInner, roiOuter, roiCourse):
    """Compute a 'distance' score for one tracker axis.

    frame    : full BGR frame. The roiOuter region is OVERWRITTEN in place
               with the dilated + thresholded crop (visual feedback).
    roiInner : (x1, y1, x2, y2) fine measurement window (inside roiOuter).
    roiOuter : enclosing window that is processed and written back.
    roiCourse: coarse gate window; when its mean is <= 10 the fine reading
               is discarded and the maximum distance is returned.

    Returns pow(255, 2) - mean, so a brighter inner region yields a
    smaller distance.
    """
    # CLEANUP: the original assigned local w/l1/l2 (shadowing the module
    # constants) and a cropCourse slice, none of which were ever used.
    cropFine = frame[roiOuter[1]:roiOuter[3], roiOuter[0]:roiOuter[2]]

    # Dilate then binarise the outer crop, and paste the result back into
    # the frame so the processing is visible in the preview window.
    kernel = np.ones((dilationVal, dilationVal), np.uint8)
    dilation = cv2.dilate(cropFine, kernel, iterations=1)
    ret, thresh = cv2.threshold(dilation, 20, 255, cv2.THRESH_BINARY)
    frame[roiOuter[1]:roiOuter[3], roiOuter[0]:roiOuter[2]] = thresh

    # Fine reading comes from the now-thresholded inner window; squaring
    # exaggerates brightness differences. The coarse window gates it.
    meanFine = pow(frame[roiInner[1]:roiInner[3], roiInner[0]:roiInner[2]].mean(), 2)
    meanCourse = frame[roiCourse[1]:roiCourse[3], roiCourse[0]:roiCourse[2]].mean()

    mean = meanFine if meanCourse > 10 else 0

    return pow(255, 2) - mean
| 
 | ||||
def drawRects(frame):
    """Draw every tracking rectangle onto the frame in place (all green).

    The inner fine rectangles are drawn as two halves split at the centre
    line so the midpoint stays visible.
    """
    green = (0, 255, 0)

    # Horizontal tracker: outer, inner (two halves), coarse.
    cv2.rectangle(frame, (roiXOuter[0], roiXOuter[1]), (roiXOuter[2], roiXOuter[3]), green)
    cv2.rectangle(frame, (roiXCenter[0] - l1, roiXInner[1]), (roiXCenter[0], roiXInner[3]), green)
    cv2.rectangle(frame, (roiXCenter[0], roiXInner[1]), (roiXCenter[0] + l1, roiXInner[3]), green)
    cv2.rectangle(frame, (roiXCourse[0], roiXCourse[1]), (roiXCourse[2], roiXCourse[3]), green)

    # Vertical tracker: outer, inner (two halves), coarse.
    cv2.rectangle(frame, (roiYOuter[0], roiYOuter[1]), (roiYOuter[2], roiYOuter[3]), green)
    cv2.rectangle(frame, (roiYInner[0], roiYCenter[1] - l1), (roiYInner[2], roiYCenter[1]), green)
    cv2.rectangle(frame, (roiYInner[0], roiYCenter[1]), (roiYInner[2], roiYCenter[1] + l1), green)
    cv2.rectangle(frame, (roiYCourse[0], roiYCourse[1]), (roiYCourse[2], roiYCourse[3]), green)
| 
 | ||||
# Read frames until the video is completed or the user quits.
while cap.isOpened():

    # Capture frame-by-frame; stop on end-of-stream (guard clause replaces
    # the original `if ret == True: ... else: break` pyramid).
    ret, frame = cap.read()
    if not ret:
        break

    # Score both tracker axes (track() also paints its processed crops
    # into the frame), then overlay the ROI rectangles and readouts.
    distanceX = track(frame, roiXInner, roiXOuter, roiXCourse)
    distanceY = track(frame, roiYInner, roiYOuter, roiYCourse)

    drawRects(frame)

    cv2.putText(frame, "{}: {:.2f}".format("distance x", distanceX), (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
    cv2.putText(frame, "{}: {:.2f}".format("distance y", distanceY), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)

    cv2.imshow("Frame", frame)

    key = cv2.waitKey(1)
    if key == 32:             # space: pause until any key is pressed
        cv2.waitKey()
    elif key == ord('+'):     # grow the dilation kernel
        dilationVal += 1
    elif key == ord('-'):     # shrink it, but never go negative
        # NOTE(review): this allows dilationVal to reach 0, making track()
        # build an empty np.ones((0, 0)) kernel — consider stopping at 1.
        if dilationVal > 0:
            dilationVal -= 1
    elif key == ord('x'):     # mouse drags now move the horizontal ROI
        rectToSet = 'x'
    elif key == ord('y'):     # ...or the vertical ROI
        rectToSet = 'y'
    elif key == ord('q'):     # quit
        break

# When everything is done, release the capture object and close all windows.
cap.release()
cv2.destroyAllWindows()
					Loading…
					
					
				
		Reference in New Issue