Now using FLANN-based feature matching instead of the brute-force matcher. Still using SIFT as the feature detector/descriptor

This commit is contained in:
2019-03-26 21:58:38 +01:00
parent b3537ee781
commit 8f01dabb66
3 changed files with 70 additions and 6 deletions
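
(For reference: a minimal sketch of the pipeline this commit switches to. SIFT still does detection and description; only the matching moves from brute force to a FLANN KD-tree index. File names and parameters are copied from the diff below; the rest is an illustrative assumption, not the exact script.)

import cv2

# Images from this commit: the photo to register and the new baseline board.
img = cv2.imread("IMG_2086.jpeg")
img2 = cv2.imread("new_baseline_board.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# SIFT detector/descriptor (xfeatures2d module in OpenCV 3.x).
sift = cv2.xfeatures2d.SIFT_create()
kp, des = sift.detectAndCompute(gray, None)
kp2, des2 = sift.detectAndCompute(gray2, None)

# FLANN matcher with a KD-tree index, as in the diff (trees=8, checks=100).
FLANN_INDEX_KDTREE = 0
flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=8),
                              dict(checks=100))
matches = flann.knnMatch(des, des2, k=2)

# Lowe's ratio test keeps a match only when it is clearly better than the
# runner-up; the commit uses an aggressive 0.5 ratio.
good = [m for m, n in matches if m.distance < 0.5 * n.distance]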

IMG_2086.jpeg: new binary file (723 KiB, not shown)

new_baseline_board.png: new binary file (2.2 MiB, not shown)


@@ -6,8 +6,8 @@ dstPoints = np.array([(out_height, 0), (0, 0), (0, out_width), (out_height, out
-img = cv2.imread("IMG_2070.jpeg")
-img2 = cv2.imread("pls_bo33.jpg")
+img = cv2.imread("IMG_2086.jpeg")
+img2 = cv2.imread("new_baseline_board.png")
 img_tmp = img.copy()
 gray_tmp = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
@@ -48,7 +48,7 @@ gray_2 = cv2.cvtColor(img_tmp_tmp, cv2.COLOR_BGR2GRAY)
 gray_3 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
 MAX_FEATURES = 500
-GOOD_MATCH_PERCENT = 0.002
+GOOD_MATCH_PERCENT = 0.0005
 cv2.imwrite('pls_lasse.jpg', img)
@@ -57,6 +57,8 @@ img_tmp = img.copy()
 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
 sift = cv2.xfeatures2d.SIFT_create()
 #sift = cv2.ORB_create(MAX_FEATURES)
+#sift = cv2.xfeatures2d.SURF_create()
 kp = sift.detect(gray_2, None)
 kp2 = sift.detect(gray_3, None)
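
(Side note, not part of this commit: the commented-out lines suggest ORB was tried earlier, and a brute-force Hamming matcher is still commented out below. If ORB's binary descriptors were ever fed to FLANN, the KD-tree index would be the wrong choice; binary descriptors need the LSH index. A sketch with the parameter values suggested in the OpenCV docs; make_flann is a hypothetical helper:)

import cv2

FLANN_INDEX_KDTREE = 0   # float descriptors: SIFT, SURF
FLANN_INDEX_LSH = 6      # binary descriptors: ORB, BRISK

def make_flann(binary_descriptors):
    # Hypothetical helper: pick the FLANN index to match the descriptor type.
    if binary_descriptors:
        index_params = dict(algorithm=FLANN_INDEX_LSH,
                            table_number=6, key_size=12, multi_probe_level=1)
    else:
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=8)
    return cv2.FlannBasedMatcher(index_params, dict(checks=100))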
@@ -66,11 +68,68 @@ kp2, des2 = sift.compute(gray_3, kp2)
 cv2.drawKeypoints(img_tmp_tmp, keypoints=kp, outImage=img_tmp_tmp)
 cv2.imwrite('keypoints_img.jpg', img_tmp_tmp)
-matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_SL2)
-matches = matcher.match(des, des2, None)
+# FLANN parameters
+FLANN_INDEX_KDTREE = 0
+index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 8)
+search_params = dict(checks=100)   # or pass empty dictionary
+#matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
+#matches = matcher.match(des, des2, None)
+flann = cv2.FlannBasedMatcher(index_params,search_params)
+matches = flann.knnMatch(des, des2, k=2)
+# Need to draw only good matches, so create a mask
+matchesMask = [[0,0] for i in range(len(matches))]
+good_matches = []
+# ratio test as per Lowe's paper
+for i,(m,n) in enumerate(matches):
+    if m.distance < 0.5*n.distance:
+        matchesMask[i]=[1,0]
+        good_matches.append([m,n])
+draw_params = dict(matchColor = (0,255,0),
+                   singlePointColor = (255,0,0),
+                   matchesMask = matchesMask,
+                   flags = 0)
+img3 = cv2.drawMatchesKnn(img_tmp_tmp, kp, img2, kp2, matches, None, **draw_params)
+cv2.imwrite("matches.jpg", img3)
+matches.sort(key=lambda x: x[0].distance, reverse=False)
+# Remove poor matches
+numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
+#good_matches = matches[:numGoodMatches]
+# Extract location of good matches
+points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
+points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
+for i, (m, n) in enumerate(good_matches):
+    points1[i, :] = kp[m.queryIdx].pt
+    points2[i, :] = kp2[m.trainIdx].pt
+print(points1)
+print(len(points2))
+h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
+height, width, channels = img2.shape
+im1Reg = cv2.warpPerspective(img_tmp_tmp, h, (width, height))
+cv2.imwrite('homo_pls_fuck.jpg', im1Reg)
+'''
 # Sort matches by score
-matches.sort(key=lambda x: x.distance, reverse=False)
+matches.sort(key=lambda x: x[0].distance, reverse=False)
 # Remove poor matches
 numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
@@ -88,13 +147,18 @@ for i, match in enumerate(matches):
     points1[i, :] = kp[match.queryIdx].pt
     points2[i, :] = kp2[match.trainIdx].pt
 print(len(points1))
 print(len(points2))
 '''
 '''
 h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
 height, width, channels = img2.shape
 im1Reg = cv2.warpPerspective(img_tmp_tmp, h, (width, height))
 cv2.imwrite('homo_pls_fuck.jpg', im1Reg)
 '''
 '''
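
(The new code never looks at the mask that cv2.findHomography returns, and GOOD_MATCH_PERCENT is still computed even though the slice that used it is commented out; the ratio test now decides what counts as a good match. A minimal sketch of how the RANSAC inlier mask could sanity-check the fit, reusing points1/points2/img_tmp_tmp/width/height from the diff above; the 5.0 reprojection threshold and MIN_INLIERS value are assumptions, not from the commit:)

# Sketch: count RANSAC inliers before trusting the homography.
# findHomography returns an Nx1 uint8 mask where 1 marks an inlier pair.
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC, 5.0)
num_inliers = int(mask.sum()) if mask is not None else 0
print("RANSAC inliers: %d / %d" % (num_inliers, len(points1)))

MIN_INLIERS = 10   # assumed threshold, not from the commit
if h is None or num_inliers < MIN_INLIERS:
    raise RuntimeError("homography looks unreliable; too few inlier matches")
im1Reg = cv2.warpPerspective(img_tmp_tmp, h, (width, height))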