Skip to content
Snippets Groups Projects
Commit 136b0abb authored by User expired's avatar User expired :spy_tone1:
Browse files

added params to further downscaled images that are over the wifi

parent 40bf2048
No related branches found
No related tags found
No related merge requests found
sign_detector: {canny_param1: 107, canny_param2: 100, max_depth: 1.0, max_radius: 128,
  min_depth: 0.2, min_radius: 15, zoom_threshold: 1.15}
<?xml version='1.0' ?>
<!-- Launch file for the image capture node (collects training images for the sign classifier). -->
<launch>
<!-- If True the node is expected to run remotely; forwarded unchanged to the node. -->
<arg name="remote_node" default="True" />
<arg name="save_dir" default="/resources/training_imgs/" /> <!-- save dir relative to the minibot_vision package -->
<!-- Runs Capture_Images.py under the robot's namespace taken from the ROBOT environment variable. -->
<node name="capture_imgs" pkg="minibot_vision" type="Capture_Images.py" output="screen" ns="$(env ROBOT)" >
<param name="remote_node" value="$(arg remote_node)" />
<param name="save_dir" value="$(arg save_dir)" />
</node>
</launch>
<?xml version='1.0' ?>
<!-- Launch file for the stand-alone sign cropping node. -->
<launch>
<!-- This is deprecated. The new sign segmentation is directly integrated to SignDetector -->
<arg name="visualize" default="False" />     <!-- passed as a command-line arg to Crop_Sign.py -->
<arg name="load_hyper_params" default="True" />
<arg name="start_node" default="True" />     <!-- allows loading only the params without starting the node -->
<arg name="ns" default="/" />
<!-- load hyperparams to server -->
<rosparam file="$(find minibot_vision)/launch/crop_sign_rosparams.yaml" if="$(arg load_hyper_params)" />
<node name="crop_sign" pkg="minibot_vision" type="Crop_Sign.py" output="screen" args="$(arg visualize)" if="$(arg start_node)" ns="$(arg ns)" />
</launch>
\ No newline at end of file
# Hyperparameters for the (deprecated) crop_sign node; loaded onto the ROS
# parameter server by crop_sign.launch when load_hyper_params is True.
crop_sign:
  circularity: 0.77    # NOTE(review): presumably a [0,1] roundness threshold for sign candidates -- confirm in Crop_Sign.py
  thickness: 22        # assumed drawing/contour thickness in pixels -- TODO confirm
  circle_filter: 5     # assumed filter kernel/window size for circle detection -- TODO confirm
  canny1: 60           # presumably lower threshold of the Canny edge detector -- confirm
  canny2: 50           # presumably upper threshold of the Canny edge detector -- confirm
<?xml version="1.0"?>
<!-- Launch file for sign classification: the SignDetector node plus the hough/segmentation stream node. -->
<launch>
<arg name="classification_fps" default="3" />
<!-- Downscale factor [0, 1] applied to the published visualization image to save bandwidth. -->
<arg name="visualization_compression_factor" default="0.5" />
<!-- Load the sign detector hyperparameters onto the parameter server. -->
<rosparam file="$(find minibot_vision)/config/sign_detector.yaml" />
<!-- Node to handle the actual classification -->
<node name="sign_detector" pkg="minibot_vision" type="SignDetector.py" output="screen" >
<!-- The actual framerate might not exceed 3FPS so don't go too wild -->
<param name="fps" value="$(arg classification_fps)" />
<!-- The image streamed to visualize will be downscaled by this factor [0, 1]. Note that the streamed sign patch is not further downscaled -->
<param name="visualization_compression_factor" value="$(arg visualization_compression_factor)" />
</node>
<!-- Node to stream cropped signs -->
<node name="hough_transform" pkg="minibot_vision" type="SegmentSign.py" output="screen" />
</launch>
\ No newline at end of file
...@@ -25,6 +25,7 @@ IMG_RES = SegmentSign.IMG_RES ...@@ -25,6 +25,7 @@ IMG_RES = SegmentSign.IMG_RES
TF_RES = SegmentSign.TENSOR_RES TF_RES = SegmentSign.TENSOR_RES
# *** GLOBALS *** # *** GLOBALS ***
visualization_compression_factor = None
sign_classifier = TmClassification() sign_classifier = TmClassification()
bridge = CvBridge() bridge = CvBridge()
img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8) img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8)
...@@ -118,7 +119,7 @@ def hough_pipeline(img_color, img_depth): ...@@ -118,7 +119,7 @@ def hough_pipeline(img_color, img_depth):
def detect_sign(img_rgb_stream, image_timestamp): def detect_sign(img_rgb_stream, image_timestamp):
global img_depth_stream, pub_result_img, toggle_hough_detection, enable_sign_detection global img_depth_stream, pub_result_img, toggle_hough_detection, enable_sign_detection, visualization_compression_factor
img_orig = np.copy(img_rgb_stream) img_orig = np.copy(img_rgb_stream)
# TODO the ratio between img_depth_stream and img_rgb_stream might be different! # TODO the ratio between img_depth_stream and img_rgb_stream might be different!
...@@ -146,7 +147,7 @@ def detect_sign(img_rgb_stream, image_timestamp): ...@@ -146,7 +147,7 @@ def detect_sign(img_rgb_stream, image_timestamp):
if enable_sign_detection: if enable_sign_detection:
label, precision = sign_classifier.predictImage(p) # returns tupel (label, precision), if no model / error is set up label= -1 label, precision = sign_classifier.predictImage(p) # returns tupel (label, precision), if no model / error is set up label= -1
# TODO get most recent depth data # get most recent depth data
new_keypoints = hough_pipeline(img_rgb_stream, img_depth_stream) new_keypoints = hough_pipeline(img_rgb_stream, img_depth_stream)
if len(new_keypoints) >= 1: if len(new_keypoints) >= 1:
d = new_keypoints[0]["depth"] d = new_keypoints[0]["depth"]
...@@ -159,6 +160,7 @@ def detect_sign(img_rgb_stream, image_timestamp): ...@@ -159,6 +160,7 @@ def detect_sign(img_rgb_stream, image_timestamp):
if len(text) > 0: if len(text) > 0:
SegmentSign.visualize_patches(keypoints, patches, text, img_orig) SegmentSign.visualize_patches(keypoints, patches, text, img_orig)
# compress and publish # compress and publish
img_orig = cv2.resize(img_orig, (0, 0), fx=visualization_compression_factor, fy=visualization_compression_factor) # downscale to further decrease bandwidth usage
cmprsmsg = bridge.cv2_to_compressed_imgmsg(img_orig) cmprsmsg = bridge.cv2_to_compressed_imgmsg(img_orig)
pub_result_img.publish(cmprsmsg) pub_result_img.publish(cmprsmsg)
...@@ -213,6 +215,10 @@ def update_rosparams_callback(req): ...@@ -213,6 +215,10 @@ def update_rosparams_callback(req):
if __name__ == "__main__": if __name__ == "__main__":
rospy.init_node("sign_detector") rospy.init_node("sign_detector")
# caputre rosparams
fps = float(rospy.get_param("~fps", "3"))
visualization_compression_factor = float(rospy.get_param("~visualization_compression_factor", "1"))
img_color_topic = "{}camera/color/image_raw".format(rospy.get_namespace()) img_color_topic = "{}camera/color/image_raw".format(rospy.get_namespace())
img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(rospy.get_namespace()) img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(rospy.get_namespace())
...@@ -233,7 +239,9 @@ if __name__ == "__main__": ...@@ -233,7 +239,9 @@ if __name__ == "__main__":
SegmentSign.update_camera_params() SegmentSign.update_camera_params()
rate = rospy.Rate(30) # currently this is impossible, but then the rate is defined by the detect_sign evaluation time rospy.loginfo("({}) Sign classification running with params fps: {}, visualization_compression_factor: {}".format(rospy.get_name(), fps, visualization_compression_factor))
rate = rospy.Rate(fps)
while not rospy.is_shutdown(): while not rospy.is_shutdown():
detect_sign(img_rgb_stream, img_rgb_timestamp) detect_sign(img_rgb_stream, img_rgb_timestamp)
rate.sleep() rate.sleep()
<?xml version="1.0"?>
<launch>
<!--include file="$(find minibot_vision)/launch/capture_imgs.launch" >
<arg name="remote_node" value="true" />
</include-->
<group ns="$(env ROBOT)" >
<include file="$(find minibot_vision)/launch/sign_classifier.launch" >
<arg name="classification_fps" value="3" />
<arg name="visualization_compression_factor" value="0.5" />
</include>
</group>
</launch>
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment