# advancedskrald/runner.py

from pathlib import Path
import glob
import os
from datetime import datetime
from typing import Tuple

import cv2
import numpy as np
from sklearn import cluster, metrics, neural_network, svm
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

try:
    import joblib
except ImportError:  # older scikit-learn releases bundle joblib under sklearn.externals
    from sklearn.externals import joblib

from util import RANK, POSITION, imwrite, PIECE, COLOR, Squares, OUR_PIECES

here: Path = Path(__file__).parent

def generate_centers(number_of_clusters, sift: cv2.xfeatures2d_SIFT):
    # Collect SIFT descriptors from every training image and cluster them
    # into a visual vocabulary of `number_of_clusters` centers.
    features = None
    for piece in OUR_PIECES:
        for color in COLOR:
            for filename in glob.glob(f"training_images/{piece}/{color}_square/*.png"):
                image = cv2.imread(filename)
                #image = selective_search(image, use_fast=True)
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                kp, desc = sift.detectAndCompute(gray, None)
                print(f"{piece}, {color}, {filename}")
                if desc is None:  # no keypoints found in this image
                    continue
                if features is None:
                    features = np.array(desc)
                else:
                    features = np.vstack((features, desc))
    features = np.array(features)
    k_means = cluster.KMeans(number_of_clusters)
    k_means.fit(features)
    return k_means.cluster_centers_

def generate_bag_of_words(image, centers, sift: cv2.xfeatures2d_SIFT):
    num_centers = centers.shape[0]
    histogram = np.zeros((1, num_centers))
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kp, desc = sift.detectAndCompute(gray_image, None)
    if not kp:
        return histogram
    distances = metrics.pairwise.pairwise_distances(desc, centers)
    best_centers = np.argmin(distances, axis=1)
    for i in best_centers:  # TODO: could be vectorized; see the sketch below
        histogram[0, i] += 1
    return histogram / np.sum(histogram)
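
# Sketch (not part of the original pipeline): a possible vectorized variant of
# generate_bag_of_words, addressing the TODO above by replacing the
# per-descriptor loop with np.bincount. Kept separate so the original function
# stays unchanged.
def _generate_bag_of_words_vectorized(image, centers, sift: cv2.xfeatures2d_SIFT):
    num_centers = centers.shape[0]
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kp, desc = sift.detectAndCompute(gray_image, None)
    if not kp:
        return np.zeros((1, num_centers))
    distances = metrics.pairwise.pairwise_distances(desc, centers)
    best_centers = np.argmin(distances, axis=1)
    counts = np.bincount(best_centers, minlength=num_centers).astype(np.float64)
    return (counts / counts.sum()).reshape(1, num_centers)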

def do_pre_processing() -> None:
    sift = cv2.xfeatures2d.SIFT_create()
    centers = generate_centers(8, sift)
    np.save("training_data/centers", centers)
    for piece in OUR_PIECES:
        for color in COLOR:
            for filename in glob.glob(f"training_images/{piece}/{color}_square/*.png"):
                image = cv2.imread(filename)
                #image = selective_search(image, image_name=filename, use_fast=True)
                bow_features = generate_bag_of_words(image, centers, sift)
                np.save(f"training_data/{piece}/{color}_square/{os.path.basename(filename)}", bow_features)

def load_training_data(piece: PIECE, color: COLOR) -> Tuple[np.ndarray, np.ndarray]:
    X = []
    Y = []
    for p in OUR_PIECES:
        # Load features for every piece type and label each sample by whether it matches `piece`.
        for filename in glob.glob(f"training_data/{p}/{color}_square/*.npy"):
            data = np.load(filename)
            X.append(data[0])
            Y.append(p == piece)
    return np.array(X), np.array(Y)

def train_empty_or_piece_hist() -> None:
    for square_color in COLOR:
        X = []
        Y = []
        for piece in OUR_PIECES + (PIECE.EMPTY,):
            for filename in glob.glob(f"training_images/{piece}/{square_color}_square/*.png"):
                img = cv2.imread(filename)
                y, x = np.histogram(img.ravel(), bins=32, range=[0, 256])
                X.append(y)
                Y.append(piece == PIECE.EMPTY)
        classifier = make_pipeline(StandardScaler(),
                                   svm.SVC(C=10.0, gamma=0.01, probability=True))
        classifier.fit(np.array(X), np.array(Y))
        joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")

def train_pieces_svm() -> None:
    # NOTE: total_weights and current_weight are computed but never used below;
    # the class weights passed to the SVC are hard-coded.
    for piece in OUR_PIECES:
        for color in COLOR:
            total_weights = len(glob.glob(f"training_images/{piece}/{color}_square/*.png"))
    for piece in OUR_PIECES:
        for color in COLOR:
            current_weight = len(glob.glob(f"training_images/{piece}/{color}_square/*.png"))
            print(f"Training for piece: {piece}")
            X, Y = load_training_data(piece, color)
            classifier = svm.SVC(C=10, gamma=0.01, class_weight={0: 15, 1: 0.8}, probability=True)
            classifier.fit(X, Y)
            joblib.dump(classifier, f"classifiers/classifier_{piece}/{color}.pkl")
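
# Sketch (assumption, not in the original file): one way the per-piece SVMs
# saved by train_pieces_svm might be loaded and applied to a single square
# image. The index into predict_proba assumes the classes are [False, True].
def _example_piece_probability(square: np.ndarray, piece: PIECE, color: COLOR,
                               centers: np.ndarray, sift: cv2.xfeatures2d_SIFT) -> float:
    classifier = joblib.load(f"classifiers/classifier_{piece}/{color}.pkl")
    bow_features = generate_bag_of_words(square, centers, sift)
    return classifier.predict_proba(bow_features)[0, 1]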

def train_pieces_svm_canny() -> None:
    # NOTE: this currently mirrors train_empty_or_piece_hist (no Canny edge
    # detection is applied) and writes to the same classifier paths.
    for square_color in COLOR:
        X = []
        Y = []
        for piece in OUR_PIECES + (PIECE.EMPTY,):
            for filename in glob.glob(f"training_images/{piece}/{square_color}_square/*.png"):
                img = cv2.imread(filename)
                y, x = np.histogram(img.ravel(), bins=32, range=[0, 256])
                X.append(y)
                Y.append(piece == PIECE.EMPTY)
        classifier = make_pipeline(StandardScaler(),
                                   svm.SVC(C=10.0, gamma=0.01, probability=True))
        classifier.fit(np.array(X), np.array(Y))
        joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")

def warp_board(camera_image, debug_image=None, src_points: list = None, dst_points: list = None, short_circuit=False) -> np.ndarray:
    baseline = cv2.imread(str(here.joinpath("new_baseline_board.png")))

    camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
    baseline_gray = cv2.cvtColor(baseline, cv2.COLOR_BGR2GRAY)

    sift = cv2.xfeatures2d.SIFT_create()
    camera_image_keypoints = sift.detect(camera_image_gray, None)
    baseline_keypoints = sift.detect(baseline_gray, None)
    camera_image_keypoints, des = sift.compute(camera_image_gray, camera_image_keypoints)
    baseline_keypoints, des2 = sift.compute(baseline_gray, baseline_keypoints)

    if debug_image is not None:
        cv2.drawKeypoints(camera_image, keypoints=camera_image_keypoints, outImage=debug_image)
        cv2.imwrite("keypoints_img.jpg", camera_image)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=8)
    search_params = dict(checks=100)  # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for _ in range(len(matches))]

    # Ratio test as per Lowe's paper
    good_matches = []
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.55 * n.distance:
            matchesMask[i] = [1, 0]
            good_matches.append([m, n])

    img3 = cv2.drawMatchesKnn(
        camera_image,
        camera_image_keypoints,
        baseline,
        baseline_keypoints,
        matches,
        None,
        matchColor=(0, 255, 0),
        singlePointColor=(255, 0, 0),
        matchesMask=matchesMask,
        flags=0
    )
    cv2.imwrite("matches.jpg", img3)

    # Extract location of good matches
    points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
    points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
    for i, (m, n) in enumerate(good_matches):
        points1[i, :] = camera_image_keypoints[m.queryIdx].pt
        points2[i, :] = baseline_keypoints[m.trainIdx].pt

    if src_points is not None:
        src_points.extend(points1)
    if dst_points is not None:
        dst_points.extend(points2)
    if short_circuit:
        return points1, points2

    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    height, width, channels = baseline.shape
    return cv2.warpPerspective(camera_image, h, (width, height))

def get_square(warped_board: np.ndarray, position: POSITION) -> np.ndarray:
    width, _, _ = warped_board.shape  # board is square anyway
    side = int(width * 0.045)
    size = width - 2 * side
    square_size = size // 8

    padding = 2

    x1 = side + (square_size * (position.file - 1))
    x2 = x1 + square_size + padding
    y1 = max(0, side + (square_size * (8 - position.rank)) - padding)  # 8 - rank because chessboard is from 8 to 1
    y2 = min(width, y1 + square_size + padding)

    square = warped_board[y1:y2, x1:x2]
    return square

def get_squares(warped_board: np.ndarray) -> Squares:
    # cv2.imwrite(f"warped_square_{square}.png", square)
    return {position: get_square(warped_board, position)
            for position in POSITION}
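
# Sketch (assumption): how warp_board and get_squares might be combined on a
# single camera frame. The file name "camera.png" is a placeholder.
def _example_extract_squares(camera_path: str = "camera.png") -> Squares:
    camera_image = cv2.imread(camera_path)
    warped = warp_board(camera_image)
    return get_squares(warped)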

def save_empty_fields(warped_board: np.ndarray, skip_rank: RANK = None) -> None:
    for position in POSITION:
        if position.rank == skip_rank:
            continue
        square = get_square(warped_board, position)
        imwrite(f"training_images/empty/{position.color}_square/training_{position}_{datetime.utcnow().timestamp()}.png", square)

def load_data_nn(spec_piece, color):
    # Oversample to counter class imbalance: positive examples (the target
    # piece) are repeated 10 times, negatives 5 times.
    X = []
    Y = []
    for piece in OUR_PIECES:
        piece_class = int(spec_piece == piece)
        repetitions = 10 if piece_class == 1 else 5
        for filename in glob.glob(f"training_images/{piece}/{color}_square/*.png"):
            image = cv2.imread(filename)
            data = np.reshape(image, (1, np.prod(image.shape)))
            for _ in range(repetitions):
                X.append(data[0])
                Y.append(piece_class)
    return np.array(X), np.array(Y)

def train_nn():
    for piece in OUR_PIECES:
        for color in COLOR:
            X, Y = load_data_nn(piece, color)
            classifier = neural_network.MLPClassifier(hidden_layer_sizes=(256,))
            classifier.fit(X, Y)
            joblib.dump(classifier, f"classifiers/neural_net_{piece}/{color}.pkl")

if __name__ == '__main__':
    train_nn()
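
# A full (re)training run would presumably chain the steps above in the order
# their inputs and outputs suggest; the original entry point only calls train_nn().
#
#     do_pre_processing()          # build the SIFT vocabulary and bag-of-words features
#     train_empty_or_piece_hist()  # empty-vs-occupied histogram classifiers
#     train_pieces_svm()           # per-piece SVMs on the bag-of-words features
#     train_nn()                   # per-piece neural networks on raw pixels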