diff --git a/adapter.py b/adapter.py
new file mode 100644
index 00000000..5cc70e78
--- /dev/null
+++ b/adapter.py
@@ -0,0 +1,28 @@
+import json
+import sys
+
+import cv2
+import numpy as np
+
+from runner import warp_board
+
+# Run this script as: 'python3 adapter.py < ~/Pictures/board.png' or pipe a binary image to stdin
+
+# Load binary image from stdin
+with open(0, "rb") as stdin:
+    array = np.frombuffer(stdin.read(), dtype='uint8')
+camera_img = cv2.imdecode(array, flags=cv2.IMREAD_COLOR)
+# cv2.imdecode signals failure by returning None rather than raising
+if camera_img is None:
+    sys.exit("error: stdin did not contain a decodable image")
+
+# Warp board, saving the homography (camera, baseline) point pairs as well
+points = []
+warped = warp_board(camera_img, dst_points=points)
+
+# Finally, output to stdout for unity to read
+result = {
+    "points": points
+}
+
+print(json.dumps(result))
diff --git a/runner.py b/runner.py
index a69ee945..7db425b0 100644
--- a/runner.py
+++ b/runner.py
@@ -131,7 +131,7 @@ def train_pieces_svm_canny() -> None:
     joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")
 
 
-def warp_board(camera_image, debug_image=None) -> np.ndarray:
+def warp_board(camera_image, debug_image=None, dst_points: "list | None" = None) -> np.ndarray:
     baseline = cv2.imread("new_baseline_board.png")
 
     camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
@@ -188,6 +188,12 @@ def warp_board(camera_image, debug_image=None) -> np.ndarray:
         points1[i, :] = camera_image_keypoints[m.queryIdx].pt
         points2[i, :] = baseline_keypoints[m.trainIdx].pt
 
+    # Optionally expose the matched (camera, baseline) point pairs to the
+    # caller; tolist() converts the numpy rows to plain Python lists so the
+    # pairs are JSON-serializable. NOTE(review): pairs are collected before
+    # RANSAC filtering, so they may include outlier matches.
+    if dst_points is not None:
+        dst_points.extend(zip(points1.tolist(), points2.tolist()))
+
     h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
 
     height, width, channels = baseline.shape