# advancedskrald/runner.py


import glob
import os
from datetime import datetime
from typing import Tuple

import cv2
import joblib  # sklearn.externals.joblib is deprecated; the standalone joblib package provides the same API
import numpy as np
from sklearn import cluster, metrics, neural_network, svm
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

from util import RANK, POSITION, imwrite, PIECE, COLOR, Squares, OUR_PIECES


def generate_centers(number_of_clusters, sift: cv2.xfeatures2d_SIFT):
    # Build the bag-of-words vocabulary: collect SIFT descriptors from every
    # training image and cluster them with k-means.
    features = []
    for piece in OUR_PIECES:
        for color in COLOR:
            for filename in glob.glob(f"training_images/{piece}/{color}_square/*.png"):
                image = cv2.imread(filename)
                #image = selective_search(image, use_fast=True)
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                kp, desc = sift.detectAndCompute(gray, None)
                print(f"{piece}, {color}, {filename}")
                if desc is not None:  # images without keypoints yield no descriptors
                    features.append(desc)
    features = np.vstack(features)

    k_means = cluster.KMeans(number_of_clusters)
    k_means.fit(features)
    return k_means.cluster_centers_


def generate_bag_of_words(image, centers, sift: cv2.xfeatures2d_SIFT):
    num_centers = centers.shape[0]
    histogram = np.zeros((1, num_centers))

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kp, desc = sift.detectAndCompute(gray_image, None)

    if not kp:
        return histogram

    distances = metrics.pairwise.pairwise_distances(desc, centers)
    best_centers = np.argmin(distances, axis=1)
    for i in best_centers:  # TODO: could be vectorised; see the np.bincount sketch below
        histogram[0, i] += 1
    return histogram / np.sum(histogram)
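

# A minimal vectorised sketch of the TODO above: np.bincount tallies the
# nearest-centre indices in one call. Illustrative only, not part of the
# original pipeline.
def generate_bag_of_words_fast(image, centers, sift: cv2.xfeatures2d_SIFT):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kp, desc = sift.detectAndCompute(gray, None)
    if not kp:
        return np.zeros((1, centers.shape[0]))
    distances = metrics.pairwise.pairwise_distances(desc, centers)
    counts = np.bincount(np.argmin(distances, axis=1), minlength=centers.shape[0])
    histogram = counts[np.newaxis, :].astype(float)
    return histogram / histogram.sum()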


def do_pre_processing() -> None:
    sift = cv2.xfeatures2d.SIFT_create()
    centers = generate_centers(8, sift)
    np.save("training_data/centers", centers)

    for piece in OUR_PIECES:
        for color in COLOR:
            for filename in glob.glob(f"training_images/{piece}/{color}_square/*.png"):
                image = cv2.imread(filename)
                #image = selective_search(image, image_name=filename, use_fast=True)
                bow_features = generate_bag_of_words(image, centers, sift)
                np.save(f"training_data/{piece}/{color}_square/{os.path.basename(filename)}", bow_features)


def load_training_data(piece: PIECE, color: COLOR) -> Tuple[np.ndarray, np.ndarray]:
    X = []
    Y = []
    for p in OUR_PIECES:
        # Glob over p, not piece: the other pieces supply the negative examples.
        for filename in glob.glob(f"training_data/{p}/{color}_square/*.npy"):
            data = np.load(filename)
            X.append(data[0])
            Y.append(p == piece)
    return np.array(X), np.array(Y)


def train_empty_or_piece_hist() -> None:
    for square_color in COLOR:
        X = []
        Y = []
        for piece in OUR_PIECES + (PIECE.EMPTY,):
            for filename in glob.glob(f"training_images/{piece}/{square_color}_square/*.png"):
                img = cv2.imread(filename)
                y, x = np.histogram(img.ravel(), bins=32, range=[0, 256])
                X.append(y)
                Y.append(piece == PIECE.EMPTY)

        classifier = make_pipeline(StandardScaler(),
                                   svm.SVC(C=10.0, gamma=0.01, probability=True))
        classifier.fit(np.array(X), np.array(Y))

        joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")
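

# Hedged usage sketch: score a single square crop with the pickle saved above.
# `square` is any BGR crop such as one returned by get_square(); the 32-bin
# histogram over all channel values must match the training features.
# This helper is an illustration, not part of the original file.
def is_square_empty(square: np.ndarray, square_color: COLOR) -> bool:
    classifier = joblib.load(f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")
    hist, _ = np.histogram(square.ravel(), bins=32, range=[0, 256])
    return bool(classifier.predict(hist.reshape(1, -1))[0])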


def train_pieces_svm() -> None:
    # Total number of training images across all classes. The per-class counts
    # below are computed but not yet used; the class weights remain hard-coded.
    total_weights = sum(
        len(glob.glob(f"training_images/{piece}/{color}_square/*.png"))
        for piece in OUR_PIECES
        for color in COLOR
    )

    for piece in OUR_PIECES:
        for color in COLOR:
            current_weight = len(glob.glob(f"training_images/{piece}/{color}_square/*.png"))
            print(f"Training for piece: {piece}")
            X, Y = load_training_data(piece, color)
            classifier = svm.SVC(C=10, gamma=0.01, class_weight={0: 15, 1: 0.8}, probability=True)
            classifier.fit(X, Y)
            joblib.dump(classifier, f"classifiers/classifier_{piece}/{color}.pkl")
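

# Hedged sketch: derive the class weights from the image counts instead of the
# hard-coded {0: 15, 1: 0.8}. A hypothetical helper, not from the original file.
def class_weights_for(piece: PIECE, color: COLOR) -> dict:
    positives = len(glob.glob(f"training_images/{piece}/{color}_square/*.png"))
    negatives = sum(len(glob.glob(f"training_images/{p}/{color}_square/*.png"))
                    for p in OUR_PIECES if p != piece)
    # Weight each class inversely to its frequency so the rarer class counts more.
    return {0: 1.0, 1: negatives / max(positives, 1)}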


def train_pieces_svm_canny() -> None:
    # NOTE: despite the name, no cv2.Canny edge step is applied yet; this is
    # currently identical to train_empty_or_piece_hist and overwrites the same
    # pickle files.
    for square_color in COLOR:
        X = []
        Y = []
        for piece in OUR_PIECES + (PIECE.EMPTY,):
            for filename in glob.glob(f"training_images/{piece}/{square_color}_square/*.png"):
                img = cv2.imread(filename)
                y, x = np.histogram(img.ravel(), bins=32, range=[0, 256])
                X.append(y)
                Y.append(piece == PIECE.EMPTY)

        classifier = make_pipeline(StandardScaler(),
                                   svm.SVC(C=10.0, gamma=0.01, probability=True))
        classifier.fit(np.array(X), np.array(Y))
        joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")
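

# Hedged sketch of what the "canny" in the name presumably intends: histogram
# the Canny edge map rather than the raw pixels. The 50/150 thresholds are
# assumptions, not values from the original file.
def canny_histogram(img: np.ndarray) -> np.ndarray:
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)
    hist, _ = np.histogram(edges.ravel(), bins=32, range=[0, 256])
    return hist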


def warp_board(camera_image, debug_image=None, src_points: list = None, dst_points: list = None) -> np.ndarray:
    baseline = cv2.imread("new_baseline_board.png")

    camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
    baseline_gray = cv2.cvtColor(baseline, cv2.COLOR_BGR2GRAY)

    sift = cv2.xfeatures2d.SIFT_create()
    camera_image_keypoints = sift.detect(camera_image_gray, None)
    baseline_keypoints = sift.detect(baseline_gray, None)
    camera_image_keypoints, des = sift.compute(camera_image_gray, camera_image_keypoints)
    baseline_keypoints, des2 = sift.compute(baseline_gray, baseline_keypoints)

    if debug_image is not None:
        cv2.drawKeypoints(camera_image, keypoints=camera_image_keypoints, outImage=debug_image)
        cv2.imwrite("keypoints_img.jpg", debug_image)  # save the annotated copy

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=8)
    search_params = dict(checks=100)  # or pass an empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for _ in range(len(matches))]

    # Ratio test as per Lowe's paper
    good_matches = []
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.55 * n.distance:
            matchesMask[i] = [1, 0]
            good_matches.append([m, n])

    img3 = cv2.drawMatchesKnn(
        camera_image,
        camera_image_keypoints,
        baseline,
        baseline_keypoints,
        matches,
        None,
        matchColor=(0, 255, 0),
        singlePointColor=(255, 0, 0),
        matchesMask=matchesMask,
        flags=0,
    )
    cv2.imwrite("matches.jpg", img3)

    # Extract location of good matches
    points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
    points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
    for i, (m, n) in enumerate(good_matches):
        points1[i, :] = camera_image_keypoints[m.queryIdx].pt
        points2[i, :] = baseline_keypoints[m.trainIdx].pt

    if src_points is not None:
        src_points.extend(points1)
    if dst_points is not None:
        dst_points.extend(points2)

    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    height, width, channels = baseline.shape
    return cv2.warpPerspective(camera_image, h, (width, height))


def get_square(warped_board: np.ndarray, position: POSITION) -> np.ndarray:
    width, _, _ = warped_board.shape  # the warped board is square, so one side suffices

    side = int(width * 0.045)  # border around the playing area
    size = width - 2 * side
    square_size = size // 8
    padding = 2

    x1 = side + square_size * (position.file - 1)
    x2 = x1 + square_size + padding
    y1 = max(0, side + square_size * (8 - position.rank) - padding)  # 8 - rank: image rows run from rank 8 down to rank 1
    y2 = min(width, y1 + square_size + padding)

    return warped_board[y1:y2, x1:x2]


def get_squares(warped_board: np.ndarray) -> Squares:
    return {position: get_square(warped_board, position)
            for position in POSITION}
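

# Hedged end-to-end sketch tying the steps together: warp a camera frame to the
# baseline board, then crop every square. "camera.png" is a hypothetical input
# path, not one used by the original file.
def example_extract_squares() -> Squares:
    camera_image = cv2.imread("camera.png")
    warped = warp_board(camera_image)
    return get_squares(warped)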


def save_empty_fields(warped_board: np.ndarray, skip_rank: RANK = None) -> None:
    for position in POSITION:
        if position.rank == skip_rank:
            continue
        square = get_square(warped_board, position)
        imwrite(f"training_images/empty/{position.color}_square/training_{position}_{datetime.utcnow().timestamp()}.png", square)


def load_data_nn(spec_piece, color):
    # Oversample to counter class imbalance: 10 copies of each positive
    # example (the piece being trained for), 5 copies of each negative.
    xs = []
    ys = []
    for piece in OUR_PIECES:
        piece_class = int(spec_piece == piece)
        repeats = 10 if piece_class == 1 else 5
        for filename in glob.glob(f"training_images/{piece}/{color}_square/*.png"):
            image = cv2.imread(filename)
            data = np.reshape(image, (1, np.prod(image.shape)))
            for _ in range(repeats):
                xs.append(data)
                ys.append(piece_class)
    return np.vstack(xs), np.array(ys)


def train_nn():
    for piece in OUR_PIECES:
        for color in COLOR:
            X, Y = load_data_nn(piece, color)
            classifier = neural_network.MLPClassifier(hidden_layer_sizes=(256,))
            classifier.fit(X, Y)
            joblib.dump(classifier, f"classifiers/neural_net_{piece}/{color}.pkl")
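

# Hedged inference sketch: load one of the pickles saved above and return the
# probability that `square` contains `piece`. The crop must have the same shape
# as the training images for the reshape to line up. Illustrative only.
def predict_piece_nn(square: np.ndarray, piece: PIECE, color: COLOR) -> float:
    classifier = joblib.load(f"classifiers/neural_net_{piece}/{color}.pkl")
    features = np.reshape(square, (1, np.prod(square.shape)))
    return classifier.predict_proba(features)[0, 1]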


if __name__ == '__main__':
    train_nn()