Adapter works. Btw, when you look at the commit graph to see how much Alexander did, remember that this was Casper's commit.
commit 7d03937526
parent d3d2d362a6
adapter.py | 19
adapter.py
@@ -1,25 +1,28 @@
+import base64
 import json
 
 import cv2
+import sys
 import numpy as np
 
 from runner import warp_board
 
-# Run this script as: 'python3 adapter.py < ~/Pictures/board.png' or pipe a binary image to stdin
 
-# Load binary image from stdin
-with open(0, "rb") as stdin:
-    array = np.frombuffer(stdin.read(), dtype='uint8')
-camera_img = cv2.imdecode(array, flags=cv2.IMREAD_COLOR)
+# Load base64 encoded image from stdin
+stdin = sys.stdin.readline()
+stdin_decoded = base64.b64decode(stdin)
+img_array = np.frombuffer(stdin_decoded, dtype=np.uint8)
+camera_img = cv2.imdecode(img_array, flags=cv2.IMREAD_COLOR)
 
 # Warp board, saving the homography points as well
-points = []
-warped = warp_board(camera_img, dst_points=points)
+src_points = dst_points = []
+warped = warp_board(camera_img, src_points=src_points, dst_points=dst_points)
 
 
 # Finally, output to stdout for unity to read
 result = {
-    "points": points
+    "src_points": [p.tolist() for p in src_points],
+    "dst_points": [p.tolist() for p in dst_points],
 }
 
 print(json.dumps(result))
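Since the "Run this script as ..." comment was removed, a minimal sketch of exercising the new stdin/stdout contract from Python follows. It is illustrative only and not part of the commit: the driver script, the file name board.png, and the printed summary are assumptions, and it presumes adapter.py is run from a directory where warp_board can find its baseline image.

# Illustrative driver, not in the repository: encode an image as a single
# base64 line, feed it to adapter.py on stdin, and parse the JSON it prints.
import base64
import json
import subprocess

with open("board.png", "rb") as f:           # assumed camera capture of the board
    encoded = base64.b64encode(f.read())     # one line of base64, no wrapping

proc = subprocess.run(
    ["python3", "adapter.py"],
    input=encoded + b"\n",                   # adapter.py reads exactly one line
    capture_output=True,
    check=True,
)

result = json.loads(proc.stdout)
print(len(result["src_points"]), "matched keypoints")
print(result["dst_points"][:3])              # first few baseline-board coordinates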
runner.py
@@ -131,7 +131,7 @@ def train_pieces_svm_canny() -> None:
         joblib.dump(classifier, f"classifiers/classifier_empty/white_piece_on_{square_color}_square.pkl")
 
 
-def warp_board(camera_image, debug_image=None, dst_points: list = None) -> np.ndarray:
+def warp_board(camera_image, debug_image=None, src_points: list = None, dst_points: list = None) -> np.ndarray:
     baseline = cv2.imread("new_baseline_board.png")
 
     camera_image_gray = cv2.cvtColor(camera_image, cv2.COLOR_BGR2GRAY)
@@ -188,8 +188,10 @@ def warp_board(camera_image, debug_image=None, dst_points: list = None) -> np.nd
         points1[i, :] = camera_image_keypoints[m.queryIdx].pt
         points2[i, :] = baseline_keypoints[m.trainIdx].pt
 
+    if src_points is not None:
+        src_points.extend(points1)
     if dst_points is not None:
-        dst_points.extend(zip(points1, points2))
+        dst_points.extend(points2)
 
     h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
 
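For how the new src_points/dst_points keyword arguments of warp_board (imported into adapter.py from runner) are meant to be consumed: after the call, src_points[i] (camera image) and dst_points[i] (baseline board) form the i-th matched keypoint pair, so a caller can rebuild the same homography outside the function. The sketch below is illustrative and not part of the commit; capture.png is an assumption, and it must run where warp_board can find new_baseline_board.png. Note that it passes two distinct lists, whereas adapter.py's src_points = dst_points = [] binds both names to one shared list object, so both extend calls in warp_board append to the same list.

# Illustrative consumer, not in the repository: collect the matched point pairs
# exposed by the new keyword arguments and rebuild the homography they imply.
import cv2
import numpy as np
from runner import warp_board

camera_img = cv2.imread("capture.png")       # assumed camera image of the board
src_points = []                              # filled with camera-image keypoints
dst_points = []                              # filled with baseline-board keypoints
warped = warp_board(camera_img, src_points=src_points, dst_points=dst_points)

src = np.asarray(src_points, dtype=np.float32)
dst = np.asarray(dst_points, dtype=np.float32)
h, mask = cv2.findHomography(src, dst, cv2.RANSAC)   # same call warp_board makes

# Map a single camera pixel into baseline-board coordinates with the recovered h.
pixel = np.array([[[320.0, 240.0]]], dtype=np.float32)   # shape (1, 1, 2)
print(cv2.perspectiveTransform(pixel, h))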