From 136b0abbf0afda8d1359093ab28dd5067f823ed3 Mon Sep 17 00:00:00 2001
From: minibot-1 <paddy-hofmann@web.de>
Date: Fri, 14 Apr 2023 15:22:46 +0000
Subject: [PATCH] Add params to further downscale images that are streamed
 over WiFi

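Sign classification is now started via a new sign_classifier.launch,
which runs SignDetector.py and SegmentSign.py and exposes two arguments:
classification_fps (rate of the detection loop) and
visualization_compression_factor (downscale factor applied to the
visualization image before it is published as a compressed image, to
reduce bandwidth over WiFi). The deprecated capture_imgs.launch,
crop_sign.launch and crop_sign_rosparams.yaml are removed,
traffic_sign_workshop.launch includes the new launch file instead of
starting the nodes directly, and the Canny thresholds in
sign_detector.yaml are retuned.

Example invocation (a minimal sketch; the argument values shown are the
defaults declared in sign_classifier.launch, and in the workshop setup
the file is included under the $(env ROBOT) namespace):

    roslaunch minibot_vision sign_classifier.launch classification_fps:=3 visualization_compression_factor:=0.5
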
---
 minibot_vision/config/sign_detector.yaml       |  5 ++---
 minibot_vision/launch/capture_imgs.launch      | 10 ----------
 minibot_vision/launch/crop_sign.launch         | 14 --------------
 minibot_vision/launch/crop_sign_rosparams.yaml |  6 ------
 minibot_vision/launch/sign_classifier.launch   | 18 ++++++++++++++++++
 minibot_vision/scripts/SignDetector.py         | 14 +++++++++++---
 .../launch/traffic_sign_workshop.launch        | 12 ++++--------
 7 files changed, 35 insertions(+), 44 deletions(-)
 delete mode 100644 minibot_vision/launch/capture_imgs.launch
 delete mode 100644 minibot_vision/launch/crop_sign.launch
 delete mode 100644 minibot_vision/launch/crop_sign_rosparams.yaml
 create mode 100644 minibot_vision/launch/sign_classifier.launch

diff --git a/minibot_vision/config/sign_detector.yaml b/minibot_vision/config/sign_detector.yaml
index 4387e4b..7788bc6 100644
--- a/minibot_vision/config/sign_detector.yaml
+++ b/minibot_vision/config/sign_detector.yaml
@@ -1,3 +1,2 @@
-sign_detector: {canny_param1: 99, canny_param2: 50,
-  max_depth: 1.0, max_radius: 128, min_depth: 0.2, min_radius: 15,
-  zoom_threshold: 1.15}
+sign_detector: {canny_param1: 107, canny_param2: 100, max_depth: 1.0, max_radius: 128,
+  min_depth: 0.2, min_radius: 15, zoom_threshold: 1.15}
diff --git a/minibot_vision/launch/capture_imgs.launch b/minibot_vision/launch/capture_imgs.launch
deleted file mode 100644
index 9d0ee92..0000000
--- a/minibot_vision/launch/capture_imgs.launch
+++ /dev/null
@@ -1,10 +0,0 @@
-<?xml version='1.0' ?>
-<launch>
-    <arg name="remote_node" default="True" />
-    <arg name="save_dir" default="/resources/training_imgs/" />      <!-- save dir relative to the minibot_vision package -->
-
-    <node name="capture_imgs" pkg="minibot_vision" type="Capture_Images.py" output="screen" ns="$(env ROBOT)" >
-        <param name="remote_node" value="$(arg remote_node)" />
-        <param name="save_dir" value="$(arg save_dir)" />
-    </node>
-</launch>
diff --git a/minibot_vision/launch/crop_sign.launch b/minibot_vision/launch/crop_sign.launch
deleted file mode 100644
index 26021d2..0000000
--- a/minibot_vision/launch/crop_sign.launch
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version='1.0' ?>
-<launch>
-    <!-- This is deprecated. The new sign segmentation is directly integrated to SignDetector -->
-    <arg name="visualize" default="False" />
-    <arg name="load_hyper_params" default="True" />
-    <arg name="start_node" default="True" />
-    <arg name="ns" default="/" />
-
-    <!-- load hyperparams to server -->
-    <rosparam file="$(find minibot_vision)/launch/crop_sign_rosparams.yaml" if="$(arg load_hyper_params)" />
-
-    <node name="crop_sign" pkg="minibot_vision" type="Crop_Sign.py" output="screen" args="$(arg visualize)" if="$(arg start_node)" ns="$(arg ns)" />
-
-</launch>
\ No newline at end of file
diff --git a/minibot_vision/launch/crop_sign_rosparams.yaml b/minibot_vision/launch/crop_sign_rosparams.yaml
deleted file mode 100644
index 0538839..0000000
--- a/minibot_vision/launch/crop_sign_rosparams.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-crop_sign:
-  circularity: 0.77
-  thickness: 22
-  circle_filter: 5
-  canny1: 60
-  canny2: 50
diff --git a/minibot_vision/launch/sign_classifier.launch b/minibot_vision/launch/sign_classifier.launch
new file mode 100644
index 0000000..d932c59
--- /dev/null
+++ b/minibot_vision/launch/sign_classifier.launch
@@ -0,0 +1,18 @@
+<?xml version="1.0"?>
+<launch>
+    <arg name="classification_fps" default="3" />
+    <arg name="visualization_compression_factor" default="0.5" />
+
+    <rosparam file="$(find minibot_vision)/config/sign_detector.yaml" />
+
+    <!-- Node to handle the actual classification -->
+    <node name="sign_detector" pkg="minibot_vision" type="SignDetector.py" output="screen" >
+        <!-- The actual framerate is unlikely to exceed 3 FPS, so don't set this much higher -->
+        <param name="fps" value="$(arg classification_fps)" />
+        <!-- The image streamed for visualization will be downscaled by this factor (range [0, 1]). Note that the streamed sign patch is not further downscaled -->
+        <param name="visualization_compression_factor" value="$(arg visualization_compression_factor)" />
+    </node>
+
+    <!-- Node to stream cropped signs -->
+    <node name="hough_transform" pkg="minibot_vision" type="SegmentSign.py" output="screen" />
+</launch>
\ No newline at end of file
diff --git a/minibot_vision/scripts/SignDetector.py b/minibot_vision/scripts/SignDetector.py
index af27b8b..1f33e22 100755
--- a/minibot_vision/scripts/SignDetector.py
+++ b/minibot_vision/scripts/SignDetector.py
@@ -25,6 +25,7 @@ IMG_RES = SegmentSign.IMG_RES
 TF_RES = SegmentSign.TENSOR_RES
 
 # *** GLOBALS ***
+visualization_compression_factor = None
 sign_classifier = TmClassification()
 bridge = CvBridge()
 img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8)
@@ -118,7 +119,7 @@ def hough_pipeline(img_color, img_depth):
 
 
 def detect_sign(img_rgb_stream, image_timestamp):
-    global img_depth_stream, pub_result_img, toggle_hough_detection, enable_sign_detection
+    global img_depth_stream, pub_result_img, toggle_hough_detection, enable_sign_detection, visualization_compression_factor
 
     img_orig = np.copy(img_rgb_stream)
     # TODO the ratio between img_depth_stream and img_rgb_stream might be different!
@@ -146,7 +147,7 @@ def detect_sign(img_rgb_stream, image_timestamp):
         if enable_sign_detection:
             label, precision = sign_classifier.predictImage(p)  # returns tupel (label, precision), if no model / error is set up label= -1
 
-        # TODO get most recent depth data
+        # get most recent depth data
         new_keypoints = hough_pipeline(img_rgb_stream, img_depth_stream)
         if len(new_keypoints) >= 1:
             d = new_keypoints[0]["depth"]
@@ -159,6 +160,7 @@ def detect_sign(img_rgb_stream, image_timestamp):
         if len(text) > 0:
             SegmentSign.visualize_patches(keypoints, patches, text, img_orig)
         # compress and publish
+        img_orig = cv2.resize(img_orig, (0, 0), fx=visualization_compression_factor, fy=visualization_compression_factor)       # downscale to further decrease bandwidth usage
         cmprsmsg = bridge.cv2_to_compressed_imgmsg(img_orig)
         pub_result_img.publish(cmprsmsg)
 
@@ -213,6 +215,10 @@ def update_rosparams_callback(req):
 if __name__ == "__main__":
     rospy.init_node("sign_detector")
 
+    # capture rosparams
+    fps = float(rospy.get_param("~fps", "3"))
+    visualization_compression_factor = float(rospy.get_param("~visualization_compression_factor", "1"))
+
     img_color_topic = "{}camera/color/image_raw".format(rospy.get_namespace())
     img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(rospy.get_namespace())
 
@@ -233,7 +239,9 @@ if __name__ == "__main__":
 
     SegmentSign.update_camera_params()
 
-    rate = rospy.Rate(30)       # currently this is impossible, but then the rate is defined by the detect_sign evaluation time
+    rospy.loginfo("({}) Sign classification running with params fps: {}, visualization_compression_factor: {}".format(rospy.get_name(), fps, visualization_compression_factor))
+
+    rate = rospy.Rate(fps)
     while not rospy.is_shutdown():
         detect_sign(img_rgb_stream, img_rgb_timestamp)
         rate.sleep()
diff --git a/minibot_workshops/launch/traffic_sign_workshop.launch b/minibot_workshops/launch/traffic_sign_workshop.launch
index 7a2069d..3cc1968 100644
--- a/minibot_workshops/launch/traffic_sign_workshop.launch
+++ b/minibot_workshops/launch/traffic_sign_workshop.launch
@@ -1,13 +1,9 @@
 <?xml version="1.0"?>
 <launch>
-    <!--include file="$(find minibot_vision)/launch/capture_imgs.launch" >
-        <arg name="remote_node" value="true" />
-    </include-->
-
     <group ns="$(env ROBOT)" >
-        <node name="sign_detector" pkg="minibot_vision" type="SignDetector.py" output="screen" />
-        <rosparam file="$(find minibot_vision)/config/sign_detector.yaml" />
-
-        <node name="hough_transform" pkg="minibot_vision" type="SegmentSign.py" output="screen" />
+        <include file="$(find minibot_vision)/launch/sign_classifier.launch" >
+            <arg name="classification_fps" value="3" />
+            <arg name="visualization_compression_factor" value="0.5" />
+        </include>
     </group>
 </launch>
-- 
GitLab