Alexander Munch-Hansen 2019-04-04 12:59:37 +02:00
parent 8f01dabb66
commit 6d8ffccf7d
9388 changed files with 953 additions and 164 deletions

main.py Normal file

@@ -0,0 +1,538 @@
from functools import lru_cache
import cv2
import runner
from sklearn.externals import joblib
import numpy as np
import operator
import glob
import os
import heapq
import math
pieces = ['rook', 'knight']
#piece_to_symbol = {'rook': 1, 'knight': 2, 'empty': 0}
piece_to_symbol = {'rook': 1, 'knight': 2}
colors = ['black', 'white']
def classify(image, sift: cv2.xfeatures2d_SIFT, file, rank, empty_bias=False):
    centers = np.load("training_data/centers.npy")
    probs = {'rook': {'black': 0, 'white': 0}, 'knight': {'black': 0, 'white': 0}, 'empty': {'black': 0, 'white': 0}}
    # The bag-of-words features depend only on the image, so compute them once
    # instead of once per classifier.
    features = runner.generate_bag_of_words(image, centers, sift)
    for piece in pieces:
        for color in colors:
            classifier = joblib.load(f"classifiers/classifier_{piece}/{color}.pkl")
            prob = classifier.predict_proba(features)
            probs[piece][color] = prob[0, 1]
    if empty_bias:
        # probs['empty'] is a per-color dict, so bias each entry rather than
        # multiplying the dict itself.
        for color in colors:
            probs['empty'][color] *= 1.2
    return probs
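# Usage sketch (assuming the trained classifiers and centers exist on disk):
#   sift = cv2.xfeatures2d.SIFT_create()
#   square = cv2.imread("training_images/rook/white/rook_training_D4_2.png")
#   classify(square, sift, "D", 4)
#   # -> nested probabilities, e.g. {'rook': {'black': 0.1, 'white': 0.8}, ...}
#   # (numbers illustrative)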
def pred_test(file, rank, mystery_image=None, empty_bias=False):
sift = cv2.xfeatures2d.SIFT_create()
if mystery_image is None:
mystery_image = cv2.imread("training_images/rook/white/rook_training_D4_2.png")
probs = classify(mystery_image, sift, file, rank, empty_bias=empty_bias)
return probs
def pre_process_and_train():
runner.do_pre_processing()
runner.train_pieces_svm()
def build_board_from_dict(board_dict: dict):
    sift = cv2.xfeatures2d.SIFT_create()
    board = [[0]*8 for _ in range(8)]
    counter = 0
    for idx, (position, image) in enumerate(board_dict.items()):
        # Keys produced by runner.get_squares are of the form "A1" .. "H8".
        file, rank = position[0], int(position[1:])
        probs = classify(image, sift, file, rank)
        # Pick the piece whose best per-color probability is highest.
        likely_piece = max(probs, key=lambda piece: max(probs[piece].values()))
        symbol = piece_to_symbol.get(likely_piece, 0)
        column = idx // 8
        row = idx % 8
        board[row][column] = symbol
        print(probs)
        if likely_piece != 'empty':
            counter += 1
    print(counter)
    return board
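# The returned board is an 8x8 grid of piece_to_symbol values, for example
# (an illustrative run, matching the sample board further down this file):
# [[0, 0, 1, 2, 0, 0, 0, 2],
#  [0, 1, 2, 2, 1, 0, 0, 1],
#  ...]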
def detect_using_nn(spec_image):
    # The feature vector depends only on the image, so resize and flatten once.
    resized = cv2.resize(spec_image, (64, 128))
    features = np.reshape(resized, (1, np.prod(resized.shape)))
    for piece in pieces:
        classifier = joblib.load("classifiers/neural_net_" + piece + ".pkl")
        prob = classifier.predict_proba(features)
        print(piece)
        print(prob[0, 1])
def test_entire_board():
board = cv2.imread("homo_pls_fuck.jpg")
warped = runner.warp_board(board)
board_dict = runner.get_squares(warped)
board = build_board_from_dict(board_dict)
print(board)
def lel_test():
# img = cv2.imread('training_images/rook/white/rook_training_D4_2.png')
counter = 0
for filename in glob.glob(os.path.join("training_images", "empty", "*", "*.png")):
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 3)
# binarize the image
#ret, bw = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# find connected components
connectivity = 4
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(ret, connectivity, cv2.CV_32S)
sizes = stats[1:, -1]
nb_components = nb_components - 1
        min_size = 250  # threshold value for objects in scene
img2 = np.zeros((img.shape), np.uint8)
for i in range(0, nb_components + 1):
# use if sizes[i] >= min_size: to identify your objects
color = np.random.randint(255, size=3)
            # draw the bounding rectangle around each object
cv2.rectangle(img2, (stats[i][0], stats[i][1]), (stats[i][0] + stats[i][2], stats[i][1] + stats[i][3]),
(0, 255, 0), 2)
img2[output == i + 1] = color
#print(nb_components+1)
if nb_components+1 >= 4:
counter += 1
print(filename)
cv2.imshow("lel", img2)
cv2.waitKey(0)
print(counter)
def selective_search(image, use_fast=False, use_slow=False):
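    # Heuristic overview of what follows: collect up to numShowRects proposals
    # with their areas, keep the ten largest, drop any covering more than ~80%
    # or less than 25% of the image, and return the corners of the remaining
    # proposal whose center lies closest to the (slightly offset) image center.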
# speed-up using multithreads
cv2.setUseOptimized(True)
cv2.setNumThreads(4)
    if isinstance(image, str):
        # read image
        im = cv2.imread(image)
    else:
        im = image
# resize image
#newHeight = 200
#newWidth = int(im.shape[1] * 150 / im.shape[0])
#im = cv2.resize(im, (newWidth, newHeight))
#im = cv2.imread(image)
#lel, im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY)
# create Selective Search Segmentation Object using default parameters
ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
# set input image on which we will run segmentation
ss.setBaseImage(im)
# Switch to fast but low recall Selective Search method
ss.switchToSingleStrategy()
if (use_fast):
ss.switchToSelectiveSearchFast()
# Switch to high recall but slow Selective Search method
elif (use_slow):
ss.switchToSelectiveSearchQuality()
# run selective search segmentation on input image
rects = ss.process()
#print('Total Number of Region Proposals: {}'.format(len(rects)))
# number of region proposals to show
numShowRects = 150
# increment to increase/decrease total number
    # of region proposals to be shown
increment = 1
best_proposals = []
while True:
# create a copy of original image
        # iterate over all the region proposals
for i, rect in enumerate(rects):
imOut = im.copy()
# draw rectangle for region proposal till numShowRects
if (i < numShowRects):
x, y, w, h = rect
# cv2.rectangle(imOut, (x, y), (x + w, y + h), (0, 255, 0), 1, cv2.LINE_AA)
# size = (max(w, x) - min(w, x)) * ((max(h, y) - min(h, y)))
top_left = (x,y)
bottom_left = (x, y+h)
top_right = (x+w, y)
bottom_right = (x+w, y+h)
rect_width = bottom_right[0] - bottom_left[0]
rect_height = bottom_right[1] - top_right[1]
size = rect_width * rect_height
#print(f"({x}, {y}), ({w}, {h})\n Of size: { size }")
#cv2.rectangle(imOut, (x, y), (x + w, y + h), (0, 255, 0), 1, cv2.LINE_AA)
#cv2.imshow("lel", imOut)
#cv2.waitKey(0)
best_proposals.append((rect, size))
#if size > biggest_size:
# biggest_rect = (x, y, w, h)
# biggest_size = size
# print(f"New biggest: \n({x}, {y}), ({w}, {h})\nOf size: {biggest_size}")
else:
break
height, width, channels = im.shape
center_x = width // 2
center_y = (height // 2)+5
dists = []
#print(f"Amount of best proposals:\n{len(best_proposals)}")
#print(f"lel: {len(heapq.nlargest(10, best_proposals, key=lambda x: x[1]))}")
for i in heapq.nlargest(10, best_proposals, key=lambda x: x[1]):
            height, width, channels = im.shape
#print(width * height)
#print(i[1])
x, y, w, h = i[0]
if i[1] <= (width*height)*0.8 and i[1] > (width*height)*0.25:
imCop = imOut.copy()
#cv2.rectangle(imCop, (x, y), (x + w, y + h), (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imshow("lel", imCop)
#cv2.waitKey(0)
#cv2.rectangle(imCop, (x, y), (x + w, y + h), (0, 255, 0), 4, cv2.LINE_AA)
top_left = (x,y)
bottom_left = (x, y+h)
top_right = (x+w, y)
bottom_right = (x+w, y+h)
box_center_x = (top_left[0]+bottom_left[0]+top_right[0]+bottom_right[0]) // 4
box_center_y = (top_left[1]+bottom_left[1]+top_right[1]+bottom_right[1]) // 4
#print(f"{box_center_x}, {box_center_y}, {center_x}, {center_y}")
dist = (center_x - box_center_x) ** 2 + (center_y - box_center_y) ** 2
print(dist)
dists.append([i, dist])
cv2.drawMarker(imCop, position=(x+w, h+y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x+w, y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x, y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x, y+h), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(box_center_x, box_center_y), color=(0, 255, 0), thickness=3)
cv2.drawMarker(imCop, position=(center_x, center_y), color=(0, 0, 255), thickness=3)
#cv2.imshow("lel", imCop)
#cv2.waitKey(0)
#print("-------"*5)
for pls in dists:
imCop = imOut.copy()
x, y, w, h = pls[0][0]
#print(x,y,w,h)
#print(pls[1])
top_left = (x, y)
bottom_left = (x, y + h)
top_right = (x + w, y)
bottom_right = (x + w, y + h)
cv2.drawMarker(imCop, position=(x + w, h + y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x + w, y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x, y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x, y + h), color=(255, 0, 0), thickness=3)
box_center_x = (top_left[0] + bottom_left[0] + top_right[0] + bottom_right[0]) // 4
box_center_y = (top_left[1] + bottom_left[1] + top_right[1] + bottom_right[1]) // 4
cv2.drawMarker(imCop, position=(box_center_x, box_center_y), color=(0, 255, 0), thickness=3)
            cv2.drawMarker(imCop, position=(center_x, center_y), color=(0, 0, 255), thickness=3)
cv2.rectangle(imCop, (x, y), (x + w, y + h), (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imshow("lel", imCop)
#cv2.waitKey(0)
imCop = imOut.copy()
best = heapq.nsmallest(1, dists, key=lambda x: x[1])
if (len(best) == 0):
return ((0, 0), (0, height), (width, 0), (width, height))
x, y, w, h = best[0][0][0]
cv2.rectangle(imCop, (x, y), (x + w, y + h), (0, 255, 0), 4, cv2.LINE_AA)
top_left = (x, y)
bottom_left = (x, y + h)
top_right = (x + w, y)
bottom_right = (x + w, y + h)
cv2.drawMarker(imCop, position=(x + w, h + y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x + w, y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x, y), color=(255, 0, 0), thickness=3)
cv2.drawMarker(imCop, position=(x, y + h), color=(255, 0, 0), thickness=3)
box_center_x = (top_left[0] + bottom_left[0] + top_right[0] + bottom_right[0]) // 4
box_center_y = (top_left[1] + bottom_left[1] + top_right[1] + bottom_right[1]) // 4
cv2.drawMarker(imCop, position=(box_center_x, box_center_y), color=(0, 255, 0), thickness=3)
cv2.drawMarker(imCop, position=(center_x, center_y), color=(0, 0, 255), thickness=3)
#cv2.imshow("lel", imCop)
#cv2.waitKey(0)
return (top_left, bottom_left, top_right, bottom_right)
# show output
cv2.imshow("Output", imOut)
# record key press
k = cv2.waitKey(0) & 0xFF
# m is pressed
if k == 109:
# increase total number of rectangles to show by increment
numShowRects += increment
# l is pressed
elif k == 108 and numShowRects > increment:
# decrease total number of rectangles to show by increment
numShowRects -= increment
# q is pressed
elif k == 113:
break
# close image show window
cv2.destroyAllWindows()
def predict(square, file, rank):
color = runner.compute_color(file, rank)
empty_var_classifier = load_classifier(f"classifiers/classifier_empty_var/{color}.pkl")
magnitude_of_var = np.linalg.norm(cv2.meanStdDev(square)[1])
prob = empty_var_classifier.predict_proba(np.array(magnitude_of_var).reshape(-1, 1))
print(prob[0, 1])
    if prob[0, 1] > 0.5:
return 'empty'
return None
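# The variance-based "empty" detector above uses a single scalar feature: the
# norm of the per-channel standard deviation of the square. Empty squares are
# nearly uniform, so the norm is small; occupied squares vary more. Sketch of
# the feature (numbers illustrative):
#   _, std = cv2.meanStdDev(square)   # std has shape (3, 1) for a BGR image
#   magnitude = np.linalg.norm(std)   # small for empty squares, larger for pieces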
@lru_cache()
def load_classifier(filename):
return joblib.load(filename)
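# lru_cache keeps each unpickled classifier in memory, so repeated predict()
# calls across all 64 squares re-read each .pkl file at most once per process.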
if __name__ == '__main__':
board = cv2.imread("whole_boards/board_102_1554110461.608167_.png")
warped = runner.warp_board(board)
files = "ABCDEFGH"
ranks = [1,2,3,4,5,6,7,8]
counter = 0
for file in files:
for rank in ranks:
square = runner.get_square(warped, file, rank)
if predict(square, file, rank) == 'empty':
counter += 1
print(counter)
exit()
square = runner.get_square(warped, "D", 2)
gray_square = cv2.cvtColor(square, cv2.COLOR_BGR2GRAY)
print(cv2.meanStdDev(gray_square)[1])
print(cv2.meanStdDev(square)[1])
cv2.imshow("square", square)
cv2.waitKey(0)
print(pred_test("C", 2, square))
sift: cv2.xfeatures2d_SIFT = cv2.xfeatures2d.SIFT_create()
gray = cv2.cvtColor(square, cv2.COLOR_BGR2GRAY)
kp, desc = sift.detectAndCompute(gray, None)
cv2.drawKeypoints(square, kp, square)
cv2.imshow("kp", square)
cv2.waitKey(0)
exit()
board = cv2.imread("whole_boards/board_202_1554154094.001122_.png")
runner.fetch_empty_fields(board)
exit()
warped = runner.warp_board(board)
counter = 0
#square = runner.get_square(warped, "A", 3)
#top_left, bottom_left, top_right, bottom_right = selective_search(square, use_fast=True)
#cropped = square[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
for file in files:
for rank in ranks:
square = runner.get_square(warped, file, rank)
top_left, bottom_left, top_right, bottom_right = selective_search(square, use_fast=True)
cropped = square[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
rect_width = bottom_right[0] - bottom_left[0]
rect_height = bottom_right[1] - top_right[1]
size = rect_width * rect_height
square_height, square_width, channels = square.shape
empty_bias = (size == square_height*square_width)
if size == square_height*square_width:
print(f"{file}{rank} is likely empty")
res = pred_test(file, rank, mystery_image=square, empty_bias=empty_bias)
print(res)
            if max(res, key=lambda piece: max(res[piece].values())) == 'empty':
counter += 1
print(f"Amount of empty fields: {counter}")
#print("Non-cropped:\t",pred_test(square))
#print("Cropped:\t",pred_test(cropped))
#cv2.imshow("square", square)
#cv2.waitKey(0)
#runner.do_pre_processing()
#runner.train()
#img = "warped_square_B5.png"
#detect_using_nn(img)
#selective_search("training_images/empty/colorless/warped_square_A6.png", use_fast=True)
#selective_search("warped_square_B5.png", use_fast=True)
img = "training_images/rook/white/rook_training_D4_7.png"
#img = "training_images/rook/white_square/rook_training_E4_10.png"
#img = "training_images/knight/white_square/training_D5_134.png"
#top_left, bottom_left, top_right, bottom_right = selective_search(img, use_fast=True)
#cropped = cv2.imread(img)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
#cv2.imshow("output", cropped)
#print(pred_test(cropped))
#cv2.waitKey(0)
#lel_test()
# test_entire_board()
#board = [[0, 0, 1, 2, 0, 0, 0, 2], [0, 1, 2, 2, 1, 0, 0, 1], [0, 0, 0, 0, 1, 0, 2, 0], [0, 2, 2, 1, 1, 2, 2, 0], [0, 1, 0, 0, 1, 2, 0, 0], [0, 0, 0, 0, 0, 2, 2, 0], [0, 0, 0, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
#for i in board:
# print(i)
#warped = cv2.imread("homo_pls_fuck.jpg")
#square = runner.get_square(warped, "D", 4)
#print(pred_test(square))
#cv2.imshow("lel", square)
#cv2.waitKey(0)

opencv_video.py Normal file

@@ -0,0 +1,55 @@
import itertools
from pathlib import Path
from threading import Thread
from time import sleep
import numpy as np
import cv2
import runner
from datetime import datetime
import utils
#cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture("rtsp://10.192.49.108:8080/h264_ulaw.sdp")
cap = cv2.VideoCapture(0)
piece = "knight"
color = "black"
rank = 8
pieces = {
'knight': [("E", rank), ("H", rank)],
'rook': [("A", rank), ("F", rank)],
'bishop': [("C", rank), ("D", rank)],
'king': [("G", rank)],
'queen': [("B", rank)]
}
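# The mapping above assumes the pieces being photographed are staged on the
# squares listed for `rank` (here rank 8), so each captured frame yields one
# training crop per listed position; `skip_rank` below keeps that rank out of
# the empty-square training data.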
while True:
# Capture frame-by-frame
ret, frame = cap.read()
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(100) & 0xFF == ord('c'):
print(f"capturing frame")
# cv2.imwrite(f"single_frame_{counter}.png", frame)
utils.imwrite(f"whole_boards/boards_for_empty/board_{datetime.utcnow().timestamp()}_.png", frame)
warped = runner.warp_board(frame)
runner.save_empty_fields(warped, skip_rank=rank)
for piece, positions in pieces.items():
for position in positions:
square = runner.get_square(warped, position[0], position[1])
x, y = position
utils.imwrite(f"training_images/{piece}/{runner.compute_color(x, y)}_square/training_{x}{str(y)}_{datetime.utcnow().timestamp()}.png", square)
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()

runner.py

@@ -1,249 +1,438 @@
import cv2
import numpy as np
import glob
import os
from sklearn import cluster
from sklearn import metrics
from sklearn import svm
from sklearn.externals import joblib
from sklearn import neural_network
import heapq
from datetime import datetime
import utils

out_height, out_width = 500, 500
dstPoints = np.array([(out_height, 0), (0, 0), (0, out_width), (out_height, out_width)])

pieces = ["rook", "knight"]
colors = ['black', 'white']


def selective_search(image, use_fast=False, use_slow=False, image_name=None):
    # speed-up using multithreads
    cv2.setUseOptimized(True)
    cv2.setNumThreads(4)

    im = image
    img_out = im.copy()

    # create Selective Search Segmentation Object using default parameters
    ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    # set input image on which we will run segmentation
    ss.setBaseImage(im)
    ss.switchToSingleStrategy()

    if use_fast:
        ss.switchToSelectiveSearchFast()
    elif use_slow:
        ss.switchToSelectiveSearchQuality()

    # run selective search segmentation on input image
    rects = ss.process()

    # number of region proposals to consider
    numShowRects = 150

    best_proposals = []
    while True:
        # iterate over all the region proposals
        for i, rect in enumerate(rects):
            imOut = im.copy()
            # collect region proposals up to numShowRects
            if i < numShowRects:
                x, y, w, h = rect
                top_left = (x, y)
                bottom_left = (x, y + h)
                top_right = (x + w, y)
                bottom_right = (x + w, y + h)

                rect_width = bottom_right[0] - bottom_left[0]
                rect_height = bottom_right[1] - top_right[1]
                size = rect_width * rect_height

                best_proposals.append((rect, size))
            else:
                break

        height, width, channels = im.shape
        center_x = width // 2
        center_y = (height // 2) + 5
        dists = []

        # keep the ten largest proposals whose area is between 25% and 90% of
        # the image, ranked by distance from the image center
        for i in heapq.nlargest(10, best_proposals, key=lambda x: x[1]):
            x, y, w, h = i[0]
            if (width * height) * 0.25 < i[1] < (width * height) * 0.90:
                top_left = (x, y)
                bottom_left = (x, y + h)
                top_right = (x + w, y)
                bottom_right = (x + w, y + h)

                box_center_x = (top_left[0] + bottom_left[0] + top_right[0] + bottom_right[0]) // 4
                box_center_y = (top_left[1] + bottom_left[1] + top_right[1] + bottom_right[1]) // 4

                dist = (center_x - box_center_x) ** 2 + (center_y - box_center_y) ** 2
                dists.append([i, dist])

        imCop = imOut.copy()

        print(image_name)
        best = heapq.nsmallest(1, dists, key=lambda x: x[1])
        x, y, w, h = best[0][0][0]
        cv2.rectangle(imCop, (x, y), (x + w, y + h), (0, 255, 0), 4, cv2.LINE_AA)
        top_left = (x, y)
        bottom_right = (x + w, y + h)

        cropped = img_out[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
        return cropped


def generate_centers(number_of_clusters, sift: cv2.xfeatures2d_SIFT):
    features = None
    for piece in pieces:
        for color in colors:
            for filename in glob.glob(os.path.join("training_images", piece, f"{color}_square", "*.png")):
                image = cv2.imread(filename)
                #image = selective_search(image, use_fast=True)
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                kp, desc = sift.detectAndCompute(gray, None)
                if features is None:
                    features = np.array(desc)
                else:
                    print(f"{piece}, {color}, {filename}")
                    features = np.vstack((features, desc))

    k_means = cluster.KMeans(number_of_clusters)
    k_means.fit(features)
    return k_means.cluster_centers_


def generate_bag_of_words(image, centers, sift: cv2.xfeatures2d_SIFT):
    num_centers = centers.shape[0]
    histogram = np.zeros((1, num_centers))

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kp, desc = sift.detectAndCompute(gray_image, None)

    if not kp:
        return histogram

    distances = metrics.pairwise.pairwise_distances(desc, centers)
    best_centers = np.argmin(distances, axis=1)

    for i in best_centers:
        histogram[0, i] = histogram[0, i] + 1
    histogram = histogram / np.sum(histogram)
    return histogram


def do_pre_processing():
    sift = cv2.xfeatures2d.SIFT_create()
    centers = generate_centers(8, sift)
    np.save("training_data/centers", centers)

    for piece in pieces:
        for color in colors:
            for filename in glob.glob(os.path.join("training_images", piece, f"{color}_square", "*.png")):
                image = cv2.imread(filename)
                #image = selective_search(image, image_name=filename, use_fast=True)
                bow_features = generate_bag_of_words(image, centers, sift)
                np.save(f"training_data/{piece}/{color}_square/" + os.path.basename(filename), bow_features)


def load_training_data(spec_piece, color):
    X = None
    Y = None
    for piece in pieces:
        piece_class = int(spec_piece == piece)
        for filename in glob.glob(os.path.join("training_data", piece, f"{color}_square", "*.npy")):
            data = np.load(filename)
            if X is None:
                X = np.array(data)
                Y = np.array([piece_class])
            else:
                X = np.vstack((X, data))
                Y = np.vstack((Y, [piece_class]))
    return X, Y


def train_empty_or_piece_var():
    pieces = ['empty', 'knight', 'rook']
    for color in colors:
        X = None
        Y = None

        total_weight = 0
        for piece in pieces:
            total_weight += len(glob.glob(os.path.join("training_images", f"{piece}", f"{color}_square", "*.png")))

        current_weight = len(glob.glob(os.path.join("training_images", 'empty', f"{color}_square", "*.png")))

        for piece in pieces:
            piece_class = int('empty' == piece)
            for filename in glob.glob(os.path.join("training_images", piece, f"{color}_square", "*.png")):
                img = cv2.imread(filename)
                magnitude_of_var = np.linalg.norm(cv2.meanStdDev(img)[1])

                if X is None:
                    X = np.array(magnitude_of_var)
                    Y = np.array([piece_class])
                else:
                    X = np.vstack((X, magnitude_of_var))
                    Y = np.vstack((Y, [piece_class]))

        classifier = svm.SVC(class_weight={0: current_weight, 1: total_weight - current_weight}, probability=True)
        classifier.fit(X, Y)
        joblib.dump(classifier, f"classifiers/classifier_empty_var/{color}.pkl")


def train_pieces_svm():
    for piece in pieces:
        for color in colors:
            # TODO: Consider removing empty from total_weights, so all classifiers do not consider empty pieces
            total_weights = len(glob.glob(os.path.join("training_images", "*", f"{color}_square", "*.png")))
            current_weight = len(glob.glob(os.path.join("training_images", piece, f"{color}_square", "*.png")))

            print(f"Training for piece: {piece}")
            X, Y = load_training_data(piece, color)
            classifier = svm.SVC(class_weight={0: current_weight, 1: total_weights - current_weight}, probability=True)
            classifier.fit(X, Y)
            joblib.dump(classifier, f"classifiers/classifier_{piece}/{color}.pkl")


def compute_features(training_image):
    sift = cv2.xfeatures2d.SIFT_create()
    gray_training_image = cv2.cvtColor(training_image, cv2.COLOR_BGR2GRAY)
    kp = sift.detect(gray_training_image)
    kp, desc = sift.compute(gray_training_image, kp)
    cv2.drawKeypoints(training_image, kp, training_image)
    return training_image
def warp_board(camera_image, debug_image=None):
#cv2.imwrite('camera_image.png', camera_image)
baseline = cv2.imread("new_baseline_board.png")
camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
baseline_gray = cv2.cvtColor(baseline, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
camera_image_keypoints = sift.detect(camera_image_gray, None)
baseline_keypoints = sift.detect(baseline_gray, None)
camera_image_keypoints, des = sift.compute(camera_image_gray, camera_image_keypoints)
baseline_keypoints, des2 = sift.compute(baseline_gray, baseline_keypoints)
if debug_image is not None:
cv2.drawKeypoints(camera_image, keypoints=camera_image_keypoints, outImage=debug_image)
cv2.imwrite('keypoints_img.jpg', camera_image)
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=8)
search_params = dict(checks=100) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des, des2, k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]
good_matches = []
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
if m.distance < 0.55*n.distance:
matchesMask[i]=[1,0]
good_matches.append([m,n])
draw_params = dict(matchColor=(0,255,0),
singlePointColor=(255,0,0),
matchesMask=matchesMask,
flags=0)
img3 = cv2.drawMatchesKnn(camera_image,
camera_image_keypoints,
baseline,
baseline_keypoints,
matches,
None,
**draw_params)
cv2.imwrite("matches.jpg", img3)
# Extract location of good matches
points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
for i, (m, n) in enumerate(good_matches):
points1[i, :] = camera_image_keypoints[m.queryIdx].pt
points2[i, :] = baseline_keypoints[m.trainIdx].pt
# print(len(points2))
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
height, width, channels = baseline.shape
im1Reg = cv2.warpPerspective(camera_image, h, (width, height))
# cv2.imwrite('homo_pls_fuck.jpg', im1Reg)
return im1Reg
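# Intended call chain (paths taken from main.py in this commit):
#   board = cv2.imread("whole_boards/board_102_1554110461.608167_.png")
#   warped = warp_board(board)     # board is now axis-aligned, baseline-sized
#   squares = get_squares(warped)  # dict mapping "A1".."H8" to square images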
def get_square(warped_board, file, rank):
files = "ABCDEFGH"
file = files.index(file)
rank = 8 - rank
width, _, _ = warped_board.shape # board is square anyway
side = int(width * 0.04)
size = width - 2 * side
square_size = size // 8
padding = 0
x1 = side + (square_size * file)
x2 = x1 + square_size
y1 = max(0, side + (square_size * rank) - padding)
y2 = min(width, y1 + square_size + padding)
square = warped_board[y1:y2, x1:x2]
return square
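# Geometry note: 4% of the warped board's width is treated as border on each
# side and the rest is split into an 8x8 grid. For a 500x500 warped board (the
# out_height/out_width used elsewhere in this file) each square would be
# (500 - 2 * 20) // 8 = 57 pixels on a side.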
def get_squares(warped_board):
result = {}
for file in "ABCDEFGH":
for rank in range(1, 9):
square = get_square(warped_board, file, rank)
result[f"{file}{rank}"] = square
# cv2.imwrite(f"warped_square_{file}{rank}.png", square)
return result
def load_data_nn(spec_piece):
X = None
Y = None
for piece in pieces:
piece_class = int(spec_piece == piece)
for filename in glob.glob(os.path.join("training_images", piece, "*", "*.png")):
image = cv2.imread(filename)
image = cv2.resize(image, (64, 128))
            data = np.reshape(image, (1, np.prod(image.shape)))
if X is None:
if piece_class == 1:
for _ in range(10):
X = np.array(data)
Y = np.array([piece_class])
else:
X = np.array(data)
Y = np.array([piece_class])
else:
if piece_class == 1:
for _ in range(10):
X = np.vstack((X, data))
Y = np.vstack((Y, [piece_class]))
else:
X = np.vstack((X, data))
Y = np.vstack((Y, [piece_class]))
return (X, Y)
def train_nn():
for piece in pieces:
X, Y = load_data_nn(piece)
classifier = neural_network.MLPClassifier(hidden_layer_sizes=64)
classifier.fit(X, Y)
joblib.dump(classifier, "classifiers/neural_net_" + piece + ".pkl")
def letter_to_int(letter):
alphabet = list('ABCDEFGH')
return alphabet.index(letter) + 1
def compute_color(file, rank):
if ((letter_to_int(file)+rank) % 2):
return 'white'
else:
return 'black'
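# Example: compute_color("A", 1) -> letter_to_int("A") + 1 = 2, even, so A1 is
# 'black'; compute_color("H", 1) -> 8 + 1 = 9, odd, so H1 is 'white'.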
def save_empty_fields(warped, skip_rank=None):
alpha = "ABCDEFGH"
ranks = [1, 2, 3, 4, 5, 6, 7, 8]
if skip_rank is not None:
ranks.remove(skip_rank)
for file in alpha:
for rank in ranks:
square = get_square(warped, file, rank)
color = compute_color(file, rank)
utils.imwrite(f"training_images/empty/{color}_square/training_{file}{rank}_{datetime.utcnow().timestamp()}.png", square)

Binary files not shown. (The remaining changed files in this commit are added binary image files, each roughly 38-45 KiB; they are omitted because too many files changed in this diff.)