from typing import Optional, Tuple

import cv2
import numpy as np
import rclpy
import rclpy.logging
from ament_index_python.packages import get_package_share_directory
from cv_bridge import CvBridge
from rclpy.node import Node
from sensor_msgs.msg import Image
from soccer_vision_2d_msgs.msg import Robot, RobotArray

from bitbots_technical_challenge_vision.bitbots_technical_challenge_vision_params import (
    bitbots_technical_challenge_vision,
)

class TechnicalChallengeVision(Node):
    def __init__(self):
        super().__init__("bitbots_technical_challenge_vision")

        self._package_path = get_package_share_directory("bitbots_technical_challenge_vision")
        self._cv_bridge = CvBridge()
        self._annotations_pub = self.create_publisher(RobotArray, "/robots_in_image", 10)
        self._debug_img_pub = self.create_publisher(Image, "/bitbots_technical_challenge_vision_debug_img", 10)
        self._debug_clrmp_pub_blue = self.create_publisher(
            Image, "/bitbots_technical_challenge_vision_debug_clrmp_blue", 10
        )
        self._debug_clrmp_pub_red = self.create_publisher(
            Image, "/bitbots_technical_challenge_vision_debug_clrmp_red", 10
        )
        self._img_sub = self.create_subscription(Image, "/camera/image_proc", self.image_callback, 10)
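        # listener and struct for this package's dynamic parameters
        # (presumably generated by generate_parameter_library from the package's parameter file)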
        self._param_listener = bitbots_technical_challenge_vision.ParamListener(self)
        self._params = self._param_listener.get_params()

    def create_robot_msg(self, x: int, y: int, w: int, h: int, t: int) -> Robot:
        """
        Creates a Robot message from a robot's bounding box data and its team color.

        :param x: x coordinate of the bounding box's top-left corner
        :param y: y coordinate of the bounding box's top-left corner
        :param w: bounding box width
        :param h: bounding box height
        :param t: team the robot belongs to
        :return: Robot message for that robot
        """
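        # the bounding box center is the midpoint of the rectangle in pixel coordinates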
        robot = Robot()

        robot.bb.center.position.x = float(x + (w / 2))
        robot.bb.center.position.y = float(y + (h / 2))
        robot.bb.size_x = float(w)
        robot.bb.size_y = float(h)
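        # t is passed straight through to the team attribute; it is assumed to match the
        # TEAM_* constants defined in soccer_vision_attribute_msgs (1 for blue, 2 for red here)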
        robot.attributes.team = t

        return robot

    def process_image(
        self, img: np.ndarray, debug_img: Optional[np.ndarray], arg
    ) -> Tuple[list[Robot], list[Robot], np.ndarray, np.ndarray, Optional[np.ndarray]]:
        """
        Extracts robot annotations from the camera image.

        :param img: ndarray containing the camera image
        :param debug_img: copy of img on which debug annotations are drawn, or None if debug mode is off
        :param arg: parameter object containing the dynamic parameters
        :return: (blue_robots, red_robots, clrmp_blue, clrmp_red, debug_img)
        """
        # convert img to HSV to isolate colors better
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
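        # OpenCV stores 8-bit HSV with H in [0, 179] and S, V in [0, 255],
        # so the configured thresholds have to use these ranges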

        # get color maps
        blue_map = cv2.inRange(
            img,
            (arg.blue_lower_h, arg.blue_lower_s, arg.blue_lower_v),
            (arg.blue_upper_h, arg.blue_upper_s, arg.blue_upper_v),
        )

        red_map = cv2.inRange(
            img,
            (arg.red_lower_h, arg.red_lower_s, arg.red_lower_v),
            (arg.red_upper_h, arg.red_upper_s, arg.red_upper_v),
        )
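        # note: red hue wraps around the 0/180 boundary in OpenCV's HSV space, so a single
        # range cannot cover both ends; if the thresholds straddle the wrap-around, two
        # inRange masks combined with cv2.bitwise_or would be needed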

        # get contours in color maps
        blue_contours, _ = cv2.findContours(blue_map, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        red_contours, _ = cv2.findContours(red_map, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
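        # RETR_EXTERNAL retrieves only the outermost contours, CHAIN_APPROX_NONE keeps all contour points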

        # get lists of bounding boxes
        blue_robots = []
        red_robots = []

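        # helper that draws a bounding box onto the debug image (cv2.rectangle draws in place)
        # and is a no-op when debug mode is disabled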
        def annotate(x, y, w, h, c) -> Optional[np.ndarray]:
            if not arg.debug_mode:
                return None
            return cv2.rectangle(
                debug_img,
                (x, y),
                (x + w, y + h),
                c,
                2,
            )

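        # keep only contours whose bounding boxes fall between min_size and max_size to filter
        # out small noise blobs and implausibly large regions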
        for cnt in blue_contours:
            x, y, w, h = cv2.boundingRect(cnt)
            if min(w, h) >= arg.min_size and max(h, w) <= arg.max_size:
                # draw bb on debug img
                annotate(x, y, w, h, (255, 0, 0))

                # TODO I think 1 is for the blue team?
                blue_robots.append(self.create_robot_msg(x, y, w, h, 1))

        for cnt in red_contours:
            x, y, w, h = cv2.boundingRect(cnt)
            if min(w, h) >= arg.min_size and max(h, w) <= arg.max_size:
                # draw bb on debug img
                annotate(x, y, w, h, (0, 0, 255))

                red_robots.append(self.create_robot_msg(x, y, w, h, 2))

        return blue_robots, red_robots, blue_map, red_map, debug_img

    def image_callback(self, msg: Image):
        # get dynamic parameters
        if self._param_listener.is_old(self._params):
            self._param_listener.refresh_dynamic_parameters()
            self._params = self._param_listener.get_params()

        arg = self._params

        # convert the image message to an OpenCV image and keep its header for the output messages
        img = self._cv_bridge.imgmsg_to_cv2(img_msg=msg, desired_encoding="bgr8")
        header = msg.header

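        # only allocate a debug image when debug mode is active; process_image skips all
        # drawing when debug_mode is off, so passing None is safe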
        if arg.debug_mode:
            debug_img = np.copy(img)
        else:
            debug_img = None

        # get annotations
        blue_robots, red_robots, blue_map, red_map, debug_img = self.process_image(img, debug_img, arg)
        robots = []
        robots.extend(blue_robots)
        robots.extend(red_robots)

        # make output message
        robot_array_message = RobotArray()
        robot_array_message.header = header
        robot_array_message.robots = robots

        # publish output message
        self._annotations_pub.publish(robot_array_message)

        if arg.debug_mode:
            # make debug image message
            debug_img_msg = self._cv_bridge.cv2_to_imgmsg(cvim=debug_img, encoding="bgr8", header=header)

            # publish debug image
            self._debug_img_pub.publish(debug_img_msg)

            # make color map messages
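            # the masks from cv2.inRange are single-channel, so convert them to BGR before
            # encoding the debug color maps as bgr8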
            clrmp_blue_img = cv2.cvtColor(blue_map, cv2.COLOR_GRAY2BGR)
            clrmp_blue_msg = self._cv_bridge.cv2_to_imgmsg(cvim=clrmp_blue_img, encoding="bgr8", header=header)

            clrmp_red_img = cv2.cvtColor(red_map, cv2.COLOR_GRAY2BGR)
            clrmp_red_msg = self._cv_bridge.cv2_to_imgmsg(cvim=clrmp_red_img, encoding="bgr8", header=header)

            # publish color map messages
            self._debug_clrmp_pub_blue.publish(clrmp_blue_msg)
            self._debug_clrmp_pub_red.publish(clrmp_red_msg)


def main(args=None):
    rclpy.init(args=args)
    node = TechnicalChallengeVision()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass
    node.destroy_node()


if __name__ == "__main__":
    main()