diff --git a/adapter.py b/adapter.py
new file mode 100644
--- /dev/null
+++ b/adapter.py
@@ -0,0 +1,29 @@
+import base64
+import json
+import sys
+
+import cv2
+import numpy as np
+
+from runner import warp_board
+
+
+# Load base64 encoded image from stdin
+stdin = sys.stdin.readline()
+stdin_decoded = base64.b64decode(stdin)
+img_array = np.frombuffer(stdin_decoded, dtype=np.uint8)
+camera_img = cv2.imdecode(img_array, flags=cv2.IMREAD_COLOR)
+
+# Warp board, saving the homography points as well.
+# NOTE: these must be two DISTINCT lists. A chained assignment
+# (`src_points = dst_points = []`) would bind both names to one shared
+# list, and warp_board would extend src and dst points into it, mixing
+# the two correspondence sets in the JSON output.
+src_points, dst_points = [], []
+warped = warp_board(camera_img, src_points=src_points, dst_points=dst_points)
+
+
+# Finally, output to stdout for unity to read
+result = {
+    "src_points": [p.tolist() for p in src_points],
+    "dst_points": [p.tolist() for p in dst_points],
+}
+
+print(json.dumps(result))
diff --git a/runner.py b/runner.py
index a69ee945..9c3d7fa8 100644
--- a/runner.py
+++ b/runner.py
@@ -131,7 +131,7 @@ def train_pieces_svm_canny() -> None:
         joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")
 
 
-def warp_board(camera_image, debug_image=None) -> np.ndarray:
+def warp_board(camera_image, debug_image=None, src_points: list = None, dst_points: list = None) -> np.ndarray:
     baseline = cv2.imread("new_baseline_board.png")
 
     camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
@@ -188,6 +188,11 @@ def warp_board(camera_image, debug_image=None) -> np.ndarray:
         points1[i, :] = camera_image_keypoints[m.queryIdx].pt
         points2[i, :] = baseline_keypoints[m.trainIdx].pt
 
+    if src_points is not None:
+        src_points.extend(points1)
+    if dst_points is not None:
+        dst_points.extend(points2)
+
     h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
 
     height, width, channels = baseline.shape