diff --git a/README.md b/README.md index b06a6a6..e34d008 100644 --- a/README.md +++ b/README.md @@ -1,58 +1,96 @@ +# Swarm Robotics Localization + [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -# FYP-localization-robots +Python tooling for ArUco-based localization, marker generation, and camera calibration. -Python repository for address the Localization problem of Swarm Robotics +## Project structure -### Requirements +- markers/ - Generate ArUco marker PNGs and a PDF. +- scripts/ - Main localization pipeline (OpenCV + MQTT) and supporting tools. +- scripts/board/ - Camera calibration workflow. +- scripts/previousScripts/ - Legacy demos and prototypes. +- docs/ - External documentation entry point. -Please install following pip packages if they aren't pre-installed +## Shared virtual environment -``` -pip install numpy +This repo uses a shared root .venv and a single global requirements.txt. -pip install aruco -pip install paho-mqtt -pip3 install opencv-python -pip3 install opencv-contrib-python -pip3 install pyyaml +Windows PowerShell: + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +pip install -r requirements.txt ``` -or use following command +macOS/Linux: -``` -pip install -r requirement.txt +```bash +python3 -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt ``` -You need to copy and rename the following files in _scripts_ directory and fill the necessary configuration details before run the scripts. +## Configuration -- config-mapping_sample.yaml INTO config-mapping.yaml -- config-mqtt_sample.yaml INTO config-mqtt.yaml +If you run `./run.sh` (Unix-like shells), missing configs are auto-created from samples on first run. 
+If you run `python scripts/script.py` directly, copy the sample configs in scripts/ first: -### Run the scripts +- scripts/config-mapping_sample.yaml -> scripts/config-mapping.yaml +- scripts/config-mqtt_sample.yaml -> scripts/config-mqtt.yaml -You can try scripts on the ./scripts/ directory +## Entry points -### Build a executable file (not working so far) - -Install PyInstaller from PyPI: +Markers: +```powershell +cd markers +python marker_generator.py ``` -pip install pyinstaller + +Localization pipeline: + +```powershell +cd scripts +python script.py ``` -Go to your program’s directory and run: +Calibration: +```powershell +cd scripts/board +python calibrate.py ``` -pyinstaller --onefile script.py + +Legacy demos: + +```powershell +cd scripts/previousScripts +python script2.py ``` -After, please make sure to copy the './board' folder into the directory which executes the exe file. +## Outputs + +- markers/generated/ - PNG marker images +- markers/aruco_markers.pdf - Generated marker PDF +- scripts/board/calibration_data.txt - Saved calibration data + +## Troubleshooting + +- cv2.aruco missing: install opencv-contrib-python (not opencv-python only). +- MyPy import-untyped: reportlab has no type hints; ignore or add a stub package. +- Camera not opening: try a different camera index in scripts/script.py. +- MQTT connection errors: verify scripts/config-mqtt.yaml values and broker reachability. + +## Notes + +- run.sh is a convenience script for Unix-like shells. On Windows, use the commands above. +- Raspberry Pi camera script is documented in scripts/README.md. 
-### Read More +## Read more -- [ar-markers 0.5.0](https://pypi.org/project/ar-markers/) -- [Augmented Reality using ArUco Markers in OpenCV (C++ / Python)](https://www.learnopencv.com/augmented-reality-using-aruco-markers-in-opencv-c-python/) -- [OpenCV: Detection of ArUco Markers](https://docs.opencv.org/trunk/d5/dae/tutorial_aruco_detection.html) -- [OpenCV: Detection of ArUco Boards](https://docs.opencv.org/master/db/da9/tutorial_aruco_board_detection.html) -- [Calibrating the board](https://mecaruco2.readthedocs.io/en/latest/notebooks_rst/Aruco/sandbox/ludovic/aruco_calibration_rotation.html) +- [ar-markers](https://pypi.org/project/ar-markers/) +- [Augmented Reality using ArUco Markers in OpenCV](https://www.learnopencv.com/augmented-reality-using-aruco-markers-in-opencv-c-python/) +- [OpenCV ArUco detection](https://docs.opencv.org/master/d5/dae/tutorial_aruco_detection.html) +- [OpenCV ArUco board detection](https://docs.opencv.org/master/db/da9/tutorial_aruco_board_detection.html) diff --git a/markers/README.md b/markers/README.md new file mode 100644 index 0000000..4ec0ebc --- /dev/null +++ b/markers/README.md @@ -0,0 +1,29 @@ +# Markers + +Generate ArUco marker PNGs and a PDF sheet. + +## Setup + +Use the shared root .venv and the global requirements.txt: + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +pip install -r ..\requirements.txt +``` + +## Run + +```powershell +python marker_generator.py +``` + +## Outputs + +- generated/ - Marker PNG files +- aruco_markers.pdf - PDF sheet + +## Troubleshooting + +- cv2.aruco missing: install opencv-contrib-python. +- reportlab import warnings: ignore MyPy import-untyped warnings or add stubs. 
diff --git a/markers/aruco_markers.pdf b/markers/aruco_markers.pdf new file mode 100644 index 0000000..fd16722 Binary files /dev/null and b/markers/aruco_markers.pdf differ diff --git a/markers/marker_generator.py b/markers/marker_generator.py index b61de42..5ffd260 100644 --- a/markers/marker_generator.py +++ b/markers/marker_generator.py @@ -1,17 +1,69 @@ +# This is the updated version of the aruco marker generater. +# Using this you can generate pdf at once + import cv2 as cv -import numpy as np +import os +from reportlab.platypus import SimpleDocTemplate, Image, Table # type: ignore +from reportlab.lib import colors # type: ignore +from reportlab.lib.pagesizes import A4 # type: ignore +from reportlab.lib.units import mm # type: ignore + +# Output PDF file +pdf_path = "aruco_markers.pdf" -# Load the predefined dictionary -dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250) +# Create temporary folder for images +temp_dir = "./generated" +os.makedirs(temp_dir, exist_ok=True) -# Generate the marker +# Load predefined dictionary +dictionary = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_6X6_250) -markers = [int(x) for x in range(0, 10)] +# Marker settings +markers = list(range(0, 10)) image_size = 600 -for m in markers: - markerImage = np.zeros((image_size, image_size), dtype=np.uint8) +# Generate marker images +image_paths = [] +for marker_id in markers: + marker_image = cv.aruco.generateImageMarker( + dictionary, + marker_id, + image_size + ) + + path = os.path.join(temp_dir, f"marker{marker_id}.png") + cv.imwrite(path, marker_image) + image_paths.append(path) + +print("Markers generated.") + +# Create PDF +doc = SimpleDocTemplate(pdf_path, pagesize=A4) +elements = [] + +# Prepare images in grid (2 columns) +table_data = [] +row = [] + +for i, img_path in enumerate(image_paths): + img = Image(img_path, width=70*mm, height=70*mm) + row.append(img) + + if len(row) == 2: + table_data.append(row) + row = [] + +# Add remaining images if odd count +if row: + 
table_data.append(row) + +table = Table(table_data) +table.setStyle([ + ('GRID', (0, 0), (-1, -1), 0.5, colors.grey), + ('ALIGN', (0, 0), (-1, -1), 'CENTER') +]) + +elements.append(table) +doc.build(elements) - # drawMarkers(dict, marker_id, pixel_size, output_image, border_width) - markerImage = cv.aruco.drawMarker(dictionary, m, image_size, markerImage, 1); - cv.imwrite("./generated/marker" + str(m) + ".png", markerImage); +print(f"PDF created successfully: {pdf_path}") diff --git a/markers/pdf/ar_markers_0-5.pdf b/markers/pdf/ar_markers_0-5.pdf deleted file mode 100644 index acef79a..0000000 Binary files a/markers/pdf/ar_markers_0-5.pdf and /dev/null differ diff --git a/markers/pdf/ar_markers_5-9.pdf b/markers/pdf/ar_markers_5-9.pdf deleted file mode 100644 index 72a5c69..0000000 Binary files a/markers/pdf/ar_markers_5-9.pdf and /dev/null differ diff --git a/requirements.txt b/requirements.txt index 1ca3237..d14f563 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ -opencv-python==4.3.0.36 -numpy==1.22.0 -ar-markers==0.5.0 -paho-mqtt==1.5.0 -opencv-contrib-python==4.5.1.48 -pyyaml==5.4 +numpy +opencv-contrib-python +paho-mqtt +pyyaml +reportlab diff --git a/run.sh b/run.sh index d126eb2..6ea2b71 100644 --- a/run.sh +++ b/run.sh @@ -1,4 +1,5 @@ #!/bin/sh cd ./scripts + python3 ./script.py diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..92cdb49 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,49 @@ +# Localization Scripts + +Main OpenCV + MQTT localization pipeline and support scripts. + +## Setup + +Use the shared root .venv and the global requirements.txt: + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +pip install -r ..\requirements.txt +``` + +## Configuration + +If you run `../run.sh`, missing configs are auto-created from samples on first run. 
+If you run `python script.py` directly, copy the sample configs before running: + +- config-mapping_sample.yaml -> config-mapping.yaml +- config-mqtt_sample.yaml -> config-mqtt.yaml + +## Run + +Main localization pipeline: + +```powershell +python script.py +``` + +Raspberry Pi camera demo (Raspberry Pi OS only): + +```powershell +python pycamera.py +``` + +## Outputs + +- No files are written by default. Results are shown in the OpenCV window and published to MQTT topics. + +## OS notes + +- pycamera.py requires the Raspberry Pi camera stack and the picamera package. + +## Troubleshooting + +- cv2.aruco missing: install opencv-contrib-python. +- Camera not opening: change camera index in script.py. +- MQTT errors: verify broker settings in config-mqtt.yaml. diff --git a/scripts/board/README.md b/scripts/board/README.md new file mode 100644 index 0000000..8832132 --- /dev/null +++ b/scripts/board/README.md @@ -0,0 +1,32 @@ +# Camera Calibration + +Calibrate the camera from chessboard images and write calibration_data.txt. + +## Setup + +Use the shared root .venv and the global requirements.txt: + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +pip install -r ..\..\requirements.txt +``` + +## Inputs + +- sample/\*.png - Chessboard images (7x6 inner corners) for calibration. + +## Run + +```powershell +python calibrate.py +``` + +## Outputs + +- calibration_data.txt - Camera matrix and distortion coefficients. + +## Troubleshooting + +- No corners detected: ensure the chessboard has 7x6 inner corners and good lighting. +- cv2 missing: install opencv-contrib-python. diff --git a/scripts/board/calibrate.py b/scripts/board/calibrate.py index a9cb501..ffb2579 100644 --- a/scripts/board/calibrate.py +++ b/scripts/board/calibrate.py @@ -14,7 +14,8 @@ def save_coefficients(mtx, dist, path): - # Save the camera matrix and the distortion coefficients to given path/file. + # Save the camera matrix and the distortion coefficients to given + # path/file. 
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE) cv_file.write("K", mtx) cv_file.write("D", dist) @@ -44,21 +45,27 @@ def load_coefficients(path): # Arrays to store object points and image points from all the images. objpoints = [] # 3d point in real world space imgpoints = [] # 2d points in image plane. +image_size = None -images = glob.glob('./sample/*.jpg') +images = glob.glob('./sample/*.png') for fname in images: img = cv2.imread(fname) + if img is None: + continue gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + if image_size is None: + image_size = gray.shape[::-1] # Find the chess board corners ret, corners = cv2.findChessboardCorners(gray, (7, 6), None) # If found, add object points, image points (after refining them) - if ret == True: + if ret: objpoints.append(objp) - corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria) + corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), + criteria) imgpoints.append(corners2) # Draw and display the corners @@ -68,6 +75,12 @@ def load_coefficients(path): cv2.destroyAllWindows() +if not objpoints: + raise RuntimeError("No valid chessboard detections found in ./sample/*.jpg") + +if image_size is None: + raise RuntimeError("No readable images found in ./sample/*.jpg") + print('objpoints') print(objpoints) @@ -76,7 +89,13 @@ def load_coefficients(path): # Camera Calibration ---------------------------------------------------------- -ret, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None) +ret, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera( + objpoints, + imgpoints, + image_size, + None, + None, +) # print(cameraMatrix) # print(distCoeffs) @@ -88,12 +107,13 @@ def load_coefficients(path): # Error estimations ---------------------------------------------------------- -mean_error = 0 -tot_error = 0 +tot_error = 0.0 for i in range(len(objpoints)): - imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], 
cameraMatrix, distCoeffs) + imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], + cameraMatrix, distCoeffs) error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2) tot_error += error -print(["total error: ", mean_error / len(objpoints)]) +mean_error = tot_error / len(objpoints) +print(["total error: ", mean_error]) diff --git a/scripts/board/chessboard.py b/scripts/board/chessboard.py new file mode 100644 index 0000000..2c31e90 --- /dev/null +++ b/scripts/board/chessboard.py @@ -0,0 +1,17 @@ +import numpy as np +import cv2 + +# Chessboard size (number of squares) +rows = 7 +cols = 8 +square_size = 100 # pixels per square + +board = np.zeros((rows * square_size, cols * square_size), dtype=np.uint8) + +for i in range(rows): + for j in range(cols): + if (i + j) % 2 == 0: + board[i*square_size:(i+1)*square_size, + j*square_size:(j+1)*square_size] = 255 + +cv2.imwrite("chessboard_8x7.png", board) diff --git a/scripts/board/sample/chessboard_8x7.png b/scripts/board/sample/chessboard_8x7.png new file mode 100644 index 0000000..603fdd1 Binary files /dev/null and b/scripts/board/sample/chessboard_8x7.png differ diff --git a/scripts/config-mqtt_sample.yaml b/scripts/config-mqtt_sample.yaml index 0154cfb..6f43677 100644 --- a/scripts/config-mqtt_sample.yaml +++ b/scripts/config-mqtt_sample.yaml @@ -3,3 +3,5 @@ mqtt_server: "127.0.0.1" mqtt_port: 1883 mqtt_keepalive: 600 +mqtt_username: "" +mqtt_password: "" diff --git a/scripts/previousScripts/README.md b/scripts/previousScripts/README.md new file mode 100644 index 0000000..fc13035 --- /dev/null +++ b/scripts/previousScripts/README.md @@ -0,0 +1,32 @@ +# Legacy Scripts + +Historical demos and prototypes. These are kept for reference but still runnable. 
+ +## Setup + +Use the shared root .venv and the global requirements.txt: + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +pip install -r ..\..\requirements.txt +``` + +## Run + +```powershell +python script1.py +python script2.py +python script3.py +python script4.py +``` + +## Notes + +- script1.py uses the ar-markers package and a webcam. +- script3.py expects calibration data at ../board/calibration_data.txt. + +## Troubleshooting + +- cv2.aruco missing: install opencv-contrib-python. +- ar_markers import errors: install ar-markers. diff --git a/scripts/previousScripts/script2.py b/scripts/previousScripts/script2.py index cc2d8ee..10792ae 100644 --- a/scripts/previousScripts/script2.py +++ b/scripts/previousScripts/script2.py @@ -1,21 +1,29 @@ - - # ------------------------------------------------------------ # Example using openCV aruco library # ------------------------------------------------------------ - - import cv2 as cv -import numpy as np +from typing import Optional # Load the predefined dictionary -dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250) - -# Load the dictionary that was used to generate the markers. 
-dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250) +dictionary = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_6X6_250) # Initialize the detector parameters using default values -parameters = cv.aruco.DetectorParameters_create() +parameters = cv.aruco.DetectorParameters() + +if hasattr(cv.aruco, "ArucoDetector"): + detector: Optional[cv.aruco.ArucoDetector] = cv.aruco.ArucoDetector( + dictionary, + parameters, + ) +else: + detector = None + + +def detect_markers(frame): + if detector is not None: + return detector.detectMarkers(frame) + return cv.aruco.detectMarkers(frame, dictionary, parameters=parameters) + if __name__ == '__main__': print('Press "q" to quit') @@ -30,13 +38,14 @@ # Detect the markers in the image # markers = detect_markers(frame) - # detectMarkers(inputImage, dictionary, markerCorners, markerIds, parameters, rejectedCandidates); - markerCorners, markerIds, rejectedCandidates = cv.aruco.detectMarkers(frame, dictionary, parameters=parameters) + # detectMarkers(inputImage, dictionary, markerCorners, + # markerIds, parameters, rejectedCandidates) + markerCorners, markerIds, rejectedCandidates = detect_markers(frame) - if(markerIds!= None): + if markerIds is not None: # drawDetectedMarkers(outputImage, markerCorners, markerIds) - cv.aruco.drawDetectedMarkers(frame, markerCorners, markerIds); + cv.aruco.drawDetectedMarkers(frame, markerCorners, markerIds) for marker in markerIds: print(marker) @@ -52,6 +61,7 @@ cv.destroyAllWindows() +# estimatePoseSingleMarkers(markerCorners, size_of_marker_in_real, +# cameraMatrix, distCoeffs, rvecs, tvecs) -# estimatePoseSingleMarkers(markerCorners, size_of_marker_in_real, cameraMatrix, distCoeffs, rvecs, tvecs); diff --git a/scripts/previousScripts/script3.py b/scripts/previousScripts/script3.py index bef5f46..aea12ff 100644 --- a/scripts/previousScripts/script3.py +++ b/scripts/previousScripts/script3.py @@ -1,16 +1,37 @@ import cv2 as cv -import numpy as np +from typing import Optional # Load the 
predefined dictionary -dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250) - -# Load the dictionary that was used to generate the markers. -dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250) +dictionary = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_6X6_250) # Initialize the detector parameters using default values -parameters = cv.aruco.DetectorParameters_create() +parameters = cv.aruco.DetectorParameters() + +if hasattr(cv.aruco, "ArucoDetector"): + detector: Optional[cv.aruco.ArucoDetector] = cv.aruco.ArucoDetector( + dictionary, + parameters, + ) +else: + detector = None + + +def detect_markers(frame): + if detector is not None: + return detector.detectMarkers(frame) + return cv.aruco.detectMarkers(frame, dictionary, parameters=parameters) + + +def draw_axes(frame, camera_matrix, dist_coeffs, rvec, tvec, length) -> None: + if hasattr(cv, "drawFrameAxes"): + cv.drawFrameAxes(frame, camera_matrix, dist_coeffs, rvec, tvec, length) + else: + draw_axis = getattr(cv.aruco, "drawAxis", None) + if draw_axis is not None: + draw_axis(frame, camera_matrix, dist_coeffs, rvec, tvec, length) + -# Load camera calibrations --------------------------------------------------- +# Load camera calibrations -------------------------------------------------- cv_file = cv.FileStorage('../board/calibration_data.txt', cv.FILE_STORAGE_READ) cameraMatrix = cv_file.getNode("K").mat() distCoeffs = cv_file.getNode("D").mat() @@ -32,25 +53,39 @@ while frame_captured: - # detectMarkers(inputImage, dictionary, markerCorners, markerIds, parameters, rejectedCandidates); - markerCorners, markerIds, rejectedCandidates = cv.aruco.detectMarkers(frame, dictionary, parameters=parameters) + # detectMarkers(inputImage, dictionary, markerCorners, + # markerIds, parameters, rejectedCandidates) + markerCorners, markerIds, rejectedCandidates = detect_markers(frame) - #print(type(markerIds)) + # print(type(markerIds)) # Non-empty array of markers - if (type(markerIds) != type(None)): + if 
markerIds is not None: # drawDetectedMarkers(outputImage, markerCorners, markerIds) - cv.aruco.drawDetectedMarkers(frame, markerCorners, markerIds); + cv.aruco.drawDetectedMarkers(frame, markerCorners, markerIds) - # estimatePoseSingleMarkers(markerCorners, size_of_marker_in_real, cameraMatrix, distCoeffs, rvecs, tvecs); - rvecs, tvecs, _objPoints = cv.aruco.estimatePoseSingleMarkers(markerCorners, 50, cameraMatrix, distCoeffs) + # estimatePoseSingleMarkers(markerCorners, size_of_marker_in_real, + # cameraMatrix, distCoeffs, rvecs, tvecs) + rvecs, tvecs, _objPoints = cv.aruco.estimatePoseSingleMarkers( + markerCorners, + 50, + cameraMatrix, + distCoeffs, + ) for i in range(len(markerIds)): - print(markerIds[i], tvecs[i][0] ) # rvecs[i], + print(markerIds[i], tvecs[i][0]) # rvecs[i], # Display marker coordinates with x,y,z axies - cv.aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 100); + draw_axes( + frame, + cameraMatrix, + distCoeffs, + rvecs[i], + tvecs[i], + 100, + ) cv.imshow('Marker Detector', frame) diff --git a/scripts/previousScripts/script4.py b/scripts/previousScripts/script4.py index 02d64b8..4334d48 100644 --- a/scripts/previousScripts/script4.py +++ b/scripts/previousScripts/script4.py @@ -1,35 +1,45 @@ # # https://pypi.org/project/paho-mqtt/ # -import numpy as np -import paho.mqtt.client as paho +import importlib import json import math -import time +from typing import Any, Dict -mqtt_server="68.183.188.135" -mqtt_port=1883 -mqtt_keepalive=60 -sub_topic_update="v1/localization/update" -sub_topic_publish="v1/localization/info" +try: + paho = importlib.import_module("paho.mqtt.client") +except ImportError as exc: + raise RuntimeError( + "Missing dependency 'paho-mqtt'. 
Install with: pip install paho-mqtt" + ) from exc + +mqtt_server = "68.183.188.135" +mqtt_port = 1883 +mqtt_keepalive = 60 +sub_topic_update = "v1/localization/update" +sub_topic_publish = "v1/localization/info" # temp topics, for debug purposes -sub_topic_update_robot="v1/localization/update/robot" -sub_topic_create="v1/gui/create" +sub_topic_update_robot = "v1/localization/update/robot" +sub_topic_create = "v1/gui/create" + +robots: Dict[int, Dict[str, Any]] = {} +update_xy_threshold = 0.5 # units +update_heading_threshold = 1 # degrees -robots = {} -update_xy_threshold = 0.5 # units -update_heading_threshold = 1 # degrees def update_robot(id, x, y, heading): - #print('Robot coordinate update received') - #print([id, x, y, heading]) + # print('Robot coordinate update received') + # print([id, x, y, heading]) if id in robots: - old = robots[id]; + old = robots[id] + distance = math.sqrt(abs(pow(x - old['x'], 2) + pow(y - old['y'], 2))) + heading_delta = abs(old['heading'] - heading) - if( (math.sqrt(abs( pow(x-old['x'], 2) + pow(y-old['y'], 2))) >= update_xy_threshold) or - (abs(old['heading'] - heading) >= update_heading_threshold) ): + if (distance >= update_xy_threshold) or ( + heading_delta >= update_heading_threshold + ): # update the server about new coordinates robots[id]['x'] = x @@ -38,39 +48,41 @@ def update_robot(id, x, y, heading): client.publish(sub_topic_publish, json.dumps(robots[id]), qos=1) else: # create and publish - robots[id] = {'id':id, 'x':x, 'y':y, 'head':heading} + robots[id] = {'id': id, 'x': x, 'y': y, 'head': heading} client.publish(sub_topic_publish, json.dumps(robots[id]), qos=1) def on_connect(client, userdata, flags, rc): - print("Connected with result code "+str(rc)) + print("Connected with result code " + str(rc)) client.subscribe(sub_topic_update) client.subscribe(sub_topic_create) client.subscribe(sub_topic_update_robot) # Adding a robot into the data structure - only for debug - robots[0] = {'id':0.0, 'x':0.0, 'y':0.0, 
'head':0.0} + robots[0] = {'id': 0.0, 'x': 0.0, 'y': 0.0, 'head': 0.0} + def on_message(client, userdata, msg): - print(msg.topic+" > "+str(msg.payload, 'utf-8')) + print(msg.topic + " > " + str(msg.payload, 'utf-8')) topic = msg.topic body = str(msg.payload, 'utf-8') - if (topic==sub_topic_update): + if topic == sub_topic_update: client.publish(sub_topic_publish, json.dumps(robots), qos=1) - elif (topic== sub_topic_create): # only for testings purposes - d=json.loads(body) + elif topic == sub_topic_create: # only for testing purposes + d = json.loads(body) robots[d['id']] = d print(robots) - elif (topic == sub_topic_update_robot): - d=json.loads(body) + elif topic == sub_topic_update_robot: + d = json.loads(body) print(d) update_robot(d['id'], d['x'], d['y'], d['heading']) -# -- MQTT Client --------------------------------------------------------------- + +# -- MQTT Client ------------------------------------------------------------- client = paho.Client() client.on_connect = on_connect client.on_message = on_message @@ -81,9 +93,6 @@ def on_message(client, userdata, msg): # time.sleep(1) - - - ''' def on_publish(mqttc, obj, mid): print("mid: " + str(mid)) diff --git a/scripts/pycamera.py b/scripts/pycamera.py index 7b77664..f994be6 100644 --- a/scripts/pycamera.py +++ b/scripts/pycamera.py @@ -1,31 +1,52 @@ -import picamera -from picamera.array import PiRGBAnalysis -from picamera.color import Color -import cv2 import time -pi_camera = picamera.PiCamera(resolution='1024x768', framerate=25) +import cv2 + +try: + import picamera + from picamera.array import PiRGBAnalysis +except ImportError as exc: + raise RuntimeError( + "Missing dependency 'picamera'. Install on Raspberry Pi and run with a Pi camera enabled." 
+ ) from exc + + +class PreviewAnalyzer(PiRGBAnalysis): + def __init__(self, camera): + super().__init__(camera) + self.should_stop = False -with pi_camera as camera: - # fix the camera's white-balance gains - #camera.awb_mode = 'off' - #camera.awb_gains = (1.4, 1.5) + def analyze(self, frame): + cv2.imshow("PiCamera Preview", frame) + if (cv2.waitKey(1) & 0xFF) == ord('q'): + self.should_stop = True + +def configure_camera(camera): camera.shutter_speed = 35000 camera.iso = 400 - # allow the camera to warmup time.sleep(2) - camera.exposure_mode = 'off' # lock gains and disable auto exposure + camera.exposure_mode = 'off' - # allow the camera to warmup time.sleep(2) - # construct the analysis output and start recording data to it - camera.start_recording(detector, 'rgb') - try: - while True: - camera.wait_recording(1) - finally: - camera.stop_recording() + +def main(): + with picamera.PiCamera(resolution=(1024, 768), framerate=25) as camera: + configure_camera(camera) + + detector = PreviewAnalyzer(camera) + camera.start_recording(detector, format='rgb') + try: + while not detector.should_stop: + camera.wait_recording(0.1) + finally: + camera.stop_recording() + detector.close() + cv2.destroyAllWindows() + + +if __name__ == '__main__': + main() diff --git a/scripts/script.py b/scripts/script.py index 0b176dd..6f47647 100644 --- a/scripts/script.py +++ b/scripts/script.py @@ -1,16 +1,49 @@ import cv2 as cv import numpy as np -import paho.mqtt.client as paho +import importlib import json import math import time -import yaml +from pathlib import Path +from typing import Any, Dict, Optional, Tuple from time import sleep # import threading -CONFIG_MQTT = 'config-mqtt.yaml' -CONFIG_MAPPING = 'config-mapping.yaml' -CONFIG_CAMERA = 'board/calibration_data.txt' +try: + paho = importlib.import_module("paho.mqtt.client") +except ImportError as exc: + raise RuntimeError( + "Missing dependency 'paho-mqtt'. 
Install with: pip install paho-mqtt" + ) from exc + +try: + yaml = importlib.import_module("yaml") +except ImportError as exc: + raise RuntimeError( + "Missing dependency 'pyyaml'. Install with: pip install pyyaml" + ) from exc + +SCRIPT_DIR = Path(__file__).resolve().parent + +CONFIG_MQTT = SCRIPT_DIR / 'config-mqtt.yaml' +CONFIG_MAPPING = SCRIPT_DIR / 'config-mapping.yaml' +CONFIG_CAMERA = SCRIPT_DIR / 'board' / 'calibration_data.txt' + + +def ensure_required_file( + path: Path, + label: str, + sample_name: Optional[str] = None, +) -> None: + if path.is_file(): + return + + message = f"Missing {label} file: {path}" + if sample_name is not None: + sample_path = SCRIPT_DIR / sample_name + message += f". Copy sample file {sample_path} to {path}" + raise FileNotFoundError(message) + camera_id = 0 @@ -21,12 +54,17 @@ print('fps: ', fps) -# -- Coordinate system variables ----------------------------------------------- -robots = {} +# -- Coordinate system variables --------------------------------------------- +robots: Dict[int, Dict[str, Any]] = {} update_xy_threshold = 10 # units update_heading_threshold = 2 # degrees -with open(CONFIG_MAPPING, 'r') as file: +ensure_required_file( + CONFIG_MAPPING, + 'mapping config', + 'config-mapping.yaml', +) +with open(CONFIG_MAPPING, 'r', encoding='utf-8') as file: mapping_data = yaml.load(file, Loader=yaml.Loader) # print(mapping_data) REFERENCE_POINTS = mapping_data['reference_points'] @@ -34,18 +72,21 @@ # -- MQTT variables ----------------------------------------------------------- -sub_topic_update = "v1/localization/update/?" -sub_topic_publish = "v1/localization/update" -sub_topic_create = "v1/robot/create" +sub_topic_update = "v10/localization/update/?" 
+sub_topic_publish = "v10/localization/update" +sub_topic_create = "v10/robot/create" # temp topics, for debug purposes # sub_topic_update_robot="v1/localization/update/robot" -with open(CONFIG_MQTT, 'r') as file: +ensure_required_file(CONFIG_MQTT, 'MQTT config', 'config-mqtt_sample.yaml') +with open(CONFIG_MQTT, 'r', encoding='utf-8') as file: mqtt_data = yaml.load(file, Loader=yaml.Loader) print(mqtt_data) mqtt_server = mqtt_data['mqtt_server'] mqtt_port = mqtt_data['mqtt_port'] mqtt_keepalive = mqtt_data['mqtt_keepalive'] + mqtt_username = mqtt_data.get('mqtt_username') + mqtt_password = mqtt_data.get('mqtt_password') def transXY(camX, camY): @@ -61,7 +102,7 @@ def transXY(camX, camY): return projected -# -- MQTT loop thread function - NOT WORKING FOR NOW --------------------------- +# -- MQTT loop thread function - NOT WORKING FOR NOW ------------------------- def mqtt_loop(client): client.loop() @@ -70,9 +111,9 @@ def mqtt_setup(): client = paho.Client() client.on_connect = on_connect client.on_message = on_message - - # TODO: Config username and password - # client.username_pw_set("username", "password") + + if mqtt_username and mqtt_password: + client.username_pw_set(mqtt_username, mqtt_password) client.connect(mqtt_server, mqtt_port, mqtt_keepalive) time.sleep(2) @@ -88,9 +129,14 @@ def update_robot(id, x, y, heading): if id in robots: old = robots[id] update_queue = [] - if ((math.sqrt(abs(pow(x - old['x'], 2) + pow(y - old['y'], 2))) >= update_xy_threshold)) or (abs(old['heading'] - heading) >= update_heading_threshold): - - # update the server about new coordinates, if there is any significant difference + distance = math.sqrt(abs(pow(x - old['x'], 2) + pow(y - old['y'], 2))) + heading_delta = abs(old['heading'] - heading) + if ( + (distance >= update_xy_threshold) + or (heading_delta >= update_heading_threshold) + ): + + # update the server if there is any significant difference robots[id]['id'] = id robots[id]['x'] = -1*x robots[id]['y'] = y @@ -128,8 
+174,6 @@ def on_connect(client, userdata, flags, rc): def on_message(client, userdata, msg): # print(msg.topic+" > "+str(msg.payload, 'utf-8')) topic = msg.topic - body = str(msg.payload, 'utf-8') - if (topic == sub_topic_update): # Update the coordinates of all active robots client.publish(sub_topic_publish, json.dumps( @@ -141,18 +185,43 @@ def on_message(client, userdata, msg): # update_robot(d['id'], d['x'], d['y'], d['heading']) -# -- OpenCV Image processing --------------------------------------------------- +# -- OpenCV Image processing ------------------------------------------------- # Load the predefined dictionary -# dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250) dictionary = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_6X6_250) -# parameters = cv.aruco.DetectorParameters_create() parameters = cv.aruco.DetectorParameters() -# -- Load camera calibrations -------------------------------------------------- +if hasattr(cv.aruco, "ArucoDetector"): + detector: Optional[cv.aruco.ArucoDetector] = cv.aruco.ArucoDetector( + dictionary, + parameters, + ) +else: + detector = None + + +def detect_markers( + frame, +) -> Tuple[Any, Optional[np.ndarray], Any]: + if detector is not None: + return detector.detectMarkers(frame) + return cv.aruco.detectMarkers(frame, dictionary, parameters=parameters) + + +def draw_axes(frame, camera_matrix, dist_coeffs, rvec, tvec, length) -> None: + if hasattr(cv, "drawFrameAxes"): + cv.drawFrameAxes(frame, camera_matrix, dist_coeffs, rvec, tvec, length) + else: + draw_axis = getattr(cv.aruco, "drawAxis", None) + if draw_axis is not None: + draw_axis(frame, camera_matrix, dist_coeffs, rvec, tvec, length) + + +# -- Load camera calibrations ------------------------------------------------ -cv_file = cv.FileStorage(CONFIG_CAMERA, cv.FILE_STORAGE_READ) +ensure_required_file(CONFIG_CAMERA, 'camera calibration') +cv_file = cv.FileStorage(str(CONFIG_CAMERA), cv.FILE_STORAGE_READ) cameraMatrix = cv_file.getNode("K").mat() 
distCoeffs = cv_file.getNode("D").mat() cv_file.release() @@ -202,14 +271,16 @@ def on_message(client, userdata, msg): # Reset the counter and process the frame i = 0 - markerCorners, markerIds, rejectedCandidates = cv.aruco.detectMarkers( - frame, dictionary, parameters=parameters) + markerCorners, markerIds, rejectedCandidates = detect_markers(frame) # Non-empty array of markers - if (type(markerIds) != type(None)): + if markerIds is not None: cv.aruco.drawDetectedMarkers(frame, markerCorners, markerIds) - # estimatePoseSingleMarkers(markerCorners, size_of_marker_in_real, cameraMatrix, distCoeffs, rvecs, tvecs) + # estimatePoseSingleMarkers( + # markerCorners, size_of_marker_in_real, cameraMatrix, + # distCoeffs, rvecs, tvecs + # ) rvecs, tvecs, _objPoints = cv.aruco.estimatePoseSingleMarkers( markerCorners, 50, cameraMatrix, distCoeffs) @@ -229,8 +300,8 @@ def on_message(client, userdata, msg): update_robot(id, res[0], res[1], heading) # Display marker coordinates with x,y,z axies - cv.aruco.drawAxis(frame, cameraMatrix, - distCoeffs, rvecs[i], tvecs[i], 100) + draw_axes(frame, cameraMatrix, + distCoeffs, rvecs[i], tvecs[i], 100) cv.imshow('Marker Detector', frame)