From d3d2d362a6566464521426cbc9ace3b9dbbac875 Mon Sep 17 00:00:00 2001
From: "Casper V. Kristensen"
Date: Thu, 11 Apr 2019 13:39:19 +0200
Subject: [PATCH 1/2] Python adapter.

---
 adapter.py | 25 +++++++++++++++++++++++++
 runner.py  |  5 ++++-
 2 files changed, 29 insertions(+), 1 deletion(-)
 create mode 100644 adapter.py

diff --git a/adapter.py b/adapter.py
new file mode 100644
index 00000000..5cc70e78
--- /dev/null
+++ b/adapter.py
@@ -0,0 +1,25 @@
+import json
+
+import cv2
+import numpy as np
+
+from runner import warp_board
+
+# Run this script as: 'python3 adapter.py < ~/Pictures/board.png' or pipe a binary image to stdin
+
+# Load binary image from stdin
+with open(0, "rb") as stdin:
+    array = np.frombuffer(stdin.read(), dtype='uint8')
+camera_img = cv2.imdecode(array, flags=cv2.IMREAD_COLOR)
+
+# Warp board, saving the homography points as well
+points = []
+warped = warp_board(camera_img, dst_points=points)
+
+
+# Finally, output to stdout for unity to read
+result = {
+    "points": points
+}
+
+print(json.dumps(result))
diff --git a/runner.py b/runner.py
index a69ee945..7db425b0 100644
--- a/runner.py
+++ b/runner.py
@@ -131,7 +131,7 @@ def train_pieces_svm_canny() -> None:
         joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")


-def warp_board(camera_image, debug_image=None) -> np.ndarray:
+def warp_board(camera_image, debug_image=None, dst_points: list = None) -> np.ndarray:
     baseline = cv2.imread("new_baseline_board.png")

     camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
@@ -188,6 +188,9 @@ def warp_board(camera_image, debug_image=None) -> np.ndarray:
         points1[i, :] = camera_image_keypoints[m.queryIdx].pt
         points2[i, :] = baseline_keypoints[m.trainIdx].pt

+    if dst_points is not None:
+        dst_points.extend(zip(points1, points2))
+
     h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

     height, width, channels = baseline.shape

From 7d0393752676ee9913c39f1b7f039496986f1ccb Mon Sep 17 00:00:00 2001
From: Alexander Munch-Hansen
Date: Thu, 11 Apr 2019 14:25:48 +0200
Subject: [PATCH 2/2] Adapter works. Btw when you look in the commit graph to
 see how much Alexander did, remember that this was Casper's commit.
---
 adapter.py | 19 +++++++++++--------
 runner.py  |  6 ++++--
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/adapter.py b/adapter.py
index 5cc70e78..e173f723 100644
--- a/adapter.py
+++ b/adapter.py
@@ -1,25 +1,28 @@
+import base64
 import json

 import cv2
+import sys
 import numpy as np

 from runner import warp_board

-# Run this script as: 'python3 adapter.py < ~/Pictures/board.png' or pipe a binary image to stdin

-# Load binary image from stdin
-with open(0, "rb") as stdin:
-    array = np.frombuffer(stdin.read(), dtype='uint8')
-camera_img = cv2.imdecode(array, flags=cv2.IMREAD_COLOR)
+# Load base64 encoded image from stdin
+stdin = sys.stdin.readline()
+stdin_decoded = base64.b64decode(stdin)
+img_array = np.frombuffer(stdin_decoded, dtype=np.uint8)
+camera_img = cv2.imdecode(img_array, flags=cv2.IMREAD_COLOR)

 # Warp board, saving the homography points as well
-points = []
-warped = warp_board(camera_img, dst_points=points)
+src_points, dst_points = [], []
+warped = warp_board(camera_img, src_points=src_points, dst_points=dst_points)


 # Finally, output to stdout for unity to read
 result = {
-    "points": points
+    "src_points": [p.tolist() for p in src_points],
+    "dst_points": [p.tolist() for p in dst_points],
 }

 print(json.dumps(result))
diff --git a/runner.py b/runner.py
index 7db425b0..9c3d7fa8 100644
--- a/runner.py
+++ b/runner.py
@@ -131,7 +131,7 @@ def train_pieces_svm_canny() -> None:
         joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")


-def warp_board(camera_image, debug_image=None, dst_points: list = None) -> np.ndarray:
+def warp_board(camera_image, debug_image=None, src_points: list = None, dst_points: list = None) -> np.ndarray:
     baseline = cv2.imread("new_baseline_board.png")

     camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
@@ -188,8 +188,10 @@ def warp_board(camera_image, debug_image=None, dst_points: list = None) -> np.nd
         points1[i, :] = camera_image_keypoints[m.queryIdx].pt
         points2[i, :] = baseline_keypoints[m.trainIdx].pt

+    if src_points is not None:
+        src_points.extend(points1)
     if dst_points is not None:
-        dst_points.extend(zip(points1, points2))
+        dst_points.extend(points2)

     h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
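
For context, a minimal sketch of how a caller (the Unity side, or a quick local test) might drive the adapter after this patch series. The single-line base64 framing and the "src_points"/"dst_points" JSON keys come from the diff above; the use of subprocess and the "board.png" sample path are illustrative assumptions, not part of the patches.

import base64
import json
import subprocess

# Encode a board photo as a single base64 line, matching the
# sys.stdin.readline() framing adapter.py expects after this patch.
# "board.png" is a placeholder path for whatever the camera captured.
with open("board.png", "rb") as f:
    payload = base64.b64encode(f.read()) + b"\n"

# Run the adapter and feed it the encoded image on stdin.
completed = subprocess.run(
    ["python3", "adapter.py"],
    input=payload,
    stdout=subprocess.PIPE,
    check=True,
)

# adapter.py prints one JSON object: matched keypoint coordinates in the
# camera image (src_points) and in the baseline board image (dst_points).
result = json.loads(completed.stdout)
print(len(result["src_points"]), "keypoint matches")
print(result["src_points"][:3])
print(result["dst_points"][:3])

From a shell, "base64 -w0 board.png | python3 adapter.py" (GNU coreutils base64) produces the same single-line input.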