Messy code, loads of shit commented out, it actually computes stuff though

This commit is contained in:
2019-03-26 00:35:03 +01:00
commit b3537ee781
3 changed files with 185 additions and 0 deletions

BIN
IMG_2070.jpeg Normal file (778 KiB)

BIN
pls_bo33.jpg Normal file (302 KiB)

185
runner.py Normal file

@@ -0,0 +1,185 @@
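# runner.py: load two input images, detect SIFT keypoints in both, match the
# descriptors, estimate a homography with RANSAC, and warp the first image onto
# the second. The triple-quoted blocks further down are earlier, disabled experiments.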
import cv2
import numpy as np
out_height, out_width = 500, 500
dstPoints = np.array([(out_height, 0), (0, 0), (0, out_width), (out_height, out_width)])
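# NOTE: dstPoints, out_height and out_width are only referenced by the
# commented-out contour/homography experiment near the end of the file.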
img = cv2.imread("IMG_2070.jpeg")
img2 = cv2.imread("pls_bo33.jpg")
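# img (IMG_2070.jpeg) is the image that gets warped; img2 (pls_bo33.jpg) is the
# reference it is aligned against further down.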
img_tmp = img.copy()
gray_tmp = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray_tmp = np.float32(gray_tmp)
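# gray_tmp was prepared for the Harris-corner experiment kept below as a
# commented-out block; it is unused while that block stays disabled.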
'''
dst = cv2.cornerHarris(gray_tmp, 20, 3, 0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
# Threshold for an optimal value; it may vary depending on the image.
img_tmp[dst>0.01*dst.max()]=[0,0,255]
cv2.imwrite('fuck.jpg',img_tmp)
img = cv2.GaussianBlur(img,(5,5),0)
kernel = np.ones((3,3),np.float32)
kernel[0,1] = 0
kernel[0,2] = -1
kernel[1,0] = 3
kernel[1,1] = 0
kernel[1,2] = -3
kernel[2,1] = 0
kernel[2,2] = -1
img = cv2.filter2D(img,-1,kernel)
'''
img_tmp_tmp = img.copy()
gray_2 = cv2.cvtColor(img_tmp_tmp, cv2.COLOR_BGR2GRAY)
gray_3 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.002
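# MAX_FEATURES is currently unused (SIFT_create() is called with no feature limit);
# GOOD_MATCH_PERCENT keeps only the best 0.2% of matches after sorting.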
cv2.imwrite('pls_lasse.jpg', img)
img_tmp = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray_2, None)
kp2 = sift.detect(gray_3, None)
kp, des = sift.compute(gray_2, kp)
kp2, des2 = sift.compute(gray_3, kp2)
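# detect() and compute() are run as two steps here; sift.detectAndCompute(gray_2, None)
# would return keypoints and descriptors in a single call.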
cv2.drawKeypoints(img_tmp_tmp, keypoints=kp, outImage=img_tmp_tmp)
cv2.imwrite('keypoints_img.jpg', img_tmp_tmp)
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_SL2)
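# Brute-force matcher with squared-L2 distance, the appropriate norm for
# float descriptors such as SIFT.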
matches = matcher.match(des, des2, None)
# Sort matches by distance (lower is better)
matches = sorted(matches, key=lambda x: x.distance)
# Remove poor matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw the top matches
imMatches = cv2.drawMatches(img_tmp_tmp, kp, img2, kp2, matches, None)
cv2.imwrite("matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
    points1[i, :] = kp[match.queryIdx].pt
    points2[i, :] = kp2[match.trainIdx].pt
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
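# h maps points in img (the query image) onto img2 (the train image); mask marks
# which matches RANSAC accepted as inliers.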
height, width, channels = img2.shape
im1Reg = cv2.warpPerspective(img_tmp_tmp, h, (width, height))
cv2.imwrite('homo_pls_fuck.jpg', im1Reg)
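# Everything below is commented out: Harris corners, findChessboardCorners,
# Canny edges, Hough lines and a contour-based search for quadrilaterals.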
'''
gray_tmp = gray.copy()
gray_tmp = np.float32(gray_tmp)
dst = cv2.cornerHarris(gray_tmp,10,17,0.1)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
# Threshold for an optimal value; it may vary depending on the image.
img_tmp[dst>0.07*dst.max()]=[0,0,255]
cv2.imwrite('fuck.jpg',img_tmp)
'''
'''
ret, corners = cv2.findChessboardCorners(gray, (3,3), None)
imgpoints = []
print(ret)
if ret == True:
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    imgpoints.append(corners)
    # Draw and display the corners
    cv2.drawChessboardCorners(img, (3,3), corners2, ret)
    cv2.imwrite('corners_chess.jpg', img)
'''
'''
# Detect edges using Canny
canny_output = cv2.Canny(gray, 140, 160)
cv2.imwrite('canny_out.jpg', canny_output)
'''
'''
ret, thresholded = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)
cv2.imwrite('threshold_out.jpg', thresholded)
lines = cv2.HoughLinesP(canny_output, 0.1, np.pi/60, 1, minLineLength=30, maxLineGap=20)
for line in lines:
    print(line)
    cv2.line(img, (line[0][0], line[0][1]), (line[0][2], line[0][3]), (0, 0, 255), 2, 8)
cv2.imwrite('lined_chess.jpg', img)
_, contours, hierarchy = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
'''
#cv2.drawContours(img, contours, -1, (255, 0, 0), 2)
'''
pls_square = []
prev_max = -1
for contour in contours:
    approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
    if len(approx) == 4:
        point_set = [(x[0,0], x[0,1]) for x in approx]
        max_x = max([x[0] for x in point_set])
        min_x = min([x[0] for x in point_set])
        max_y = max([x[1] for x in point_set])
        min_y = min([x[1] for x in point_set])
        print(((max_x - min_x) * (max_y - min_y)))
        pls_square.append(approx)
        #if ((max_x - min_x) * (max_y - min_y)) > prev_max:
        #    prev_max = ((max_x - min_x) * (max_y - min_y))
        #    pls_square = approx
print(pls_square)
#h, mask = cv2.findHomography(pls_square, dstPoints, cv2.RANSAC)
#height, width, channels = img.shape
#warped = cv2.warpPerspective(img, h, (out_height, out_width))
cv2.drawContours(img, contours, -1, (255, 0, 0), 2)
cv2.imwrite('contours_chess.jpg', img)
#cv2.imwrite('homo_img_chess.jpg', warped)
'''