diff --git a/minibot_msgs/CMakeLists.txt b/minibot_msgs/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d7b8e0453fe5abe4a2e559aa471d5f85c49c9b8 --- /dev/null +++ b/minibot_msgs/CMakeLists.txt @@ -0,0 +1,205 @@ +cmake_minimum_required(VERSION 3.0.2) +project(minibot_msgs) + +## Compile as C++11, supported in ROS Kinetic and newer +# add_compile_options(-std=c++11) + +## Find catkin macros and libraries +## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) +## is used, also find other catkin packages +find_package(catkin REQUIRED COMPONENTS + std_msgs + geometry_msgs + message_generation + message_runtime +) + +## System dependencies are found with CMake's conventions +# find_package(Boost REQUIRED COMPONENTS system) + + +## Uncomment this if the package has a setup.py. This macro ensures +## modules and global scripts declared therein get installed +## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html +# catkin_python_setup() + +################################################ +## Declare ROS messages, services and actions ## +################################################ + +## To declare and build messages, services or actions from within this +## package, follow these steps: +## * Let MSG_DEP_SET be the set of packages whose message types you use in +## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). +## * In the file package.xml: +## * add a build_depend tag for "message_generation" +## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET +## * If MSG_DEP_SET isn't empty the following dependency has been pulled in +## but can be declared for certainty nonetheless: +## * add a exec_depend tag for "message_runtime" +## * In this file (CMakeLists.txt): +## * add "message_generation" and every package in MSG_DEP_SET to +## find_package(catkin REQUIRED COMPONENTS ...) +## * add "message_runtime" and every package in MSG_DEP_SET to +## catkin_package(CATKIN_DEPENDS ...) +## * uncomment the add_*_files sections below as needed +## and list every .msg/.srv/.action file to be processed +## * uncomment the generate_messages entry below +## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) + +## Generate messages in the 'msg' folder +#add_message_files( +# FILES +#) + +## Generate services in the 'srv' folder +add_service_files( + FILES + set_url.srv +) + +## Generate actions in the 'action' folder +# add_action_files( +# FILES +# Action1.action +# Action2.action +# ) + +## Generate added messages and services with any dependencies listed here +generate_messages( + DEPENDENCIES + std_msgs + geometry_msgs +) + +################################################ +## Declare ROS dynamic reconfigure parameters ## +################################################ + +## To declare and build dynamic reconfigure parameters within this +## package, follow these steps: +## * In the file package.xml: +## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" +## * In this file (CMakeLists.txt): +## * add "dynamic_reconfigure" to +## find_package(catkin REQUIRED COMPONENTS ...) 
+## * uncomment the "generate_dynamic_reconfigure_options" section below +## and list every .cfg file to be processed + +## Generate dynamic reconfigure parameters in the 'cfg' folder +# generate_dynamic_reconfigure_options( +# cfg/DynReconf1.cfg +# cfg/DynReconf2.cfg +# ) + +################################### +## catkin specific configuration ## +################################### +## The catkin_package macro generates cmake config files for your package +## Declare things to be passed to dependent projects +## INCLUDE_DIRS: uncomment this if your package contains header files +## LIBRARIES: libraries you create in this project that dependent projects also need +## CATKIN_DEPENDS: catkin_packages dependent projects also need +## DEPENDS: system dependencies of this project that dependent projects also need +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES minibot_msgs + CATKIN_DEPENDS message_generation message_runtime +# DEPENDS system_lib +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( +# include + ${catkin_INCLUDE_DIRS} +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/minibot_msgs.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +# add_executable(${PROJECT_NAME}_node src/minibot_msgs_node.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +# target_link_libraries(${PROJECT_NAME}_node +# ${catkin_LIBRARIES} +# ) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) 
for installation +## in contrast to setup.py, you can choose the destination +# catkin_install_python(PROGRAMS +# scripts/my_python_script +# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark executables for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html +# install(TARGETS ${PROJECT_NAME}_node +# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark libraries for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html +# install(TARGETS ${PROJECT_NAME} +# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} +# ) + +## Mark cpp header files for installation +# install(DIRECTORY include/${PROJECT_NAME}/ +# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +# FILES_MATCHING PATTERN "*.h" +# PATTERN ".svn" EXCLUDE +# ) + +## Mark other files for installation (e.g. launch and bag files, etc.) +# install(FILES +# # myfile1 +# # myfile2 +# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# ) + +############# +## Testing ## +############# + +## Add gtest based cpp test target and link libraries +# catkin_add_gtest(${PROJECT_NAME}-test test/test_minibot_msgs.cpp) +# if(TARGET ${PROJECT_NAME}-test) +# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) +# endif() + +## Add folders to be run by python nosetests +# catkin_add_nosetests(test) diff --git a/minibot_msgs/package.xml b/minibot_msgs/package.xml new file mode 100644 index 0000000000000000000000000000000000000000..89080f3a4511e53241446bcce88ae4c96b589b44 --- /dev/null +++ b/minibot_msgs/package.xml @@ -0,0 +1,63 @@ +<?xml version="1.0"?> +<package format="2"> + <name>minibot_msgs</name> + <version>0.0.0</version> + <description>The minibot_msgs package</description> + + <!-- One maintainer tag required, multiple allowed, one person per tag --> + <!-- Example: --> + <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> --> + <maintainer email="paddy-hofmann@web.de">gmar</maintainer> + + + <!-- One license tag required, multiple allowed, one license per tag --> + <!-- Commonly used license strings: --> + <!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 --> + <license>BSD</license> + + + <!-- Url tags are optional, but multiple are allowed, one per tag --> + <!-- Optional attribute type can be: website, bugtracker, or repository --> + <!-- Example: --> + <!-- <url type="website">http://wiki.ros.org/minibot_msgs</url> --> + + + <!-- Author tags are optional, multiple are allowed, one per tag --> + <!-- Authors do not have to be maintainers, but could be --> + <!-- Example: --> + <!-- <author email="jane.doe@example.com">Jane Doe</author> --> + + + <!-- The *depend tags are used to specify dependencies --> + <!-- Dependencies can be catkin packages or system dependencies --> + <!-- Examples: --> + <!-- Use depend as a shortcut for packages that are both build and exec dependencies --> + <!-- <depend>roscpp</depend> --> + <!-- Note that this is equivalent to the following: --> + <!-- <build_depend>roscpp</build_depend> --> + <!-- <exec_depend>roscpp</exec_depend> --> + <!-- Use build_depend for packages you need at compile time: --> + <!-- <build_depend>message_generation</build_depend> --> + <!-- Use build_export_depend for packages you need in order to build against this package: --> + <!-- <build_export_depend>message_generation</build_export_depend> --> + <!-- 
Use buildtool_depend for build tool packages: --> + <!-- <buildtool_depend>catkin</buildtool_depend> --> + <!-- Use exec_depend for packages you need at runtime: --> + <!-- <exec_depend>message_runtime</exec_depend> --> + <!-- Use test_depend for packages you need only for testing: --> + <!-- <test_depend>gtest</test_depend> --> + <!-- Use doc_depend for packages you need only for building documentation: --> + <!-- <doc_depend>doxygen</doc_depend> --> + <buildtool_depend>catkin</buildtool_depend> + <depend>message_generation</depend> + <depend>message_runtime</depend> + <depend>std_msgs</depend> + <depend>geometry_msgs</depend> + + + <!-- The export tag contains other, unspecified, tags --> + <export> + <!-- Other tools can request additional information be placed here --> + + </export> +</package> diff --git a/minibot_msgs/srv/set_url.srv b/minibot_msgs/srv/set_url.srv new file mode 100644 index 0000000000000000000000000000000000000000..b54ac44ead67902352f843112a5f854fcf05c4a3 --- /dev/null +++ b/minibot_msgs/srv/set_url.srv @@ -0,0 +1,3 @@ +string url +--- +bool success \ No newline at end of file diff --git a/minibot_vision/.gitignore b/minibot_vision/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e414f034b9909ea8cf7a76d5def2cf988193fc95 --- /dev/null +++ b/minibot_vision/.gitignore @@ -0,0 +1,4 @@ +/resources/h5_model/ +/resources/tf_lite_model/ +/resources/tfjs_model/ +/resources/training_imgs* diff --git a/minibot_vision/CMakeLists.txt b/minibot_vision/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a11db6b97f7cedda962db17f52e1533d23fe398 --- /dev/null +++ b/minibot_vision/CMakeLists.txt @@ -0,0 +1,206 @@ +cmake_minimum_required(VERSION 3.0.2) +project(minibot_vision) + +## Compile as C++11, supported in ROS Kinetic and newer +# add_compile_options(-std=c++11) + +## Find catkin macros and libraries +## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) +## is used, also find other catkin packages +find_package(catkin REQUIRED COMPONENTS + roscpp + rospy + std_msgs +) + +## System dependencies are found with CMake's conventions +# find_package(Boost REQUIRED COMPONENTS system) + + +## Uncomment this if the package has a setup.py. This macro ensures +## modules and global scripts declared therein get installed +## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html +# catkin_python_setup() + +################################################ +## Declare ROS messages, services and actions ## +################################################ + +## To declare and build messages, services or actions from within this +## package, follow these steps: +## * Let MSG_DEP_SET be the set of packages whose message types you use in +## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). +## * In the file package.xml: +## * add a build_depend tag for "message_generation" +## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET +## * If MSG_DEP_SET isn't empty the following dependency has been pulled in +## but can be declared for certainty nonetheless: +## * add a exec_depend tag for "message_runtime" +## * In this file (CMakeLists.txt): +## * add "message_generation" and every package in MSG_DEP_SET to +## find_package(catkin REQUIRED COMPONENTS ...) +## * add "message_runtime" and every package in MSG_DEP_SET to +## catkin_package(CATKIN_DEPENDS ...) 
+## * uncomment the add_*_files sections below as needed +## and list every .msg/.srv/.action file to be processed +## * uncomment the generate_messages entry below +## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) + +## Generate messages in the 'msg' folder +# add_message_files( +# FILES +# Message1.msg +# Message2.msg +# ) + +## Generate services in the 'srv' folder +# add_service_files( +# FILES +# Service1.srv +# Service2.srv +# ) + +## Generate actions in the 'action' folder +# add_action_files( +# FILES +# Action1.action +# Action2.action +# ) + +## Generate added messages and services with any dependencies listed here +# generate_messages( +# DEPENDENCIES +# std_msgs +# ) + +################################################ +## Declare ROS dynamic reconfigure parameters ## +################################################ + +## To declare and build dynamic reconfigure parameters within this +## package, follow these steps: +## * In the file package.xml: +## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" +## * In this file (CMakeLists.txt): +## * add "dynamic_reconfigure" to +## find_package(catkin REQUIRED COMPONENTS ...) +## * uncomment the "generate_dynamic_reconfigure_options" section below +## and list every .cfg file to be processed + +## Generate dynamic reconfigure parameters in the 'cfg' folder +# generate_dynamic_reconfigure_options( +# cfg/DynReconf1.cfg +# cfg/DynReconf2.cfg +# ) + +################################### +## catkin specific configuration ## +################################### +## The catkin_package macro generates cmake config files for your package +## Declare things to be passed to dependent projects +## INCLUDE_DIRS: uncomment this if your package contains header files +## LIBRARIES: libraries you create in this project that dependent projects also need +## CATKIN_DEPENDS: catkin_packages dependent projects also need +## DEPENDS: system dependencies of this project that dependent projects also need +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES minibot_vision +# CATKIN_DEPENDS roscpp rospy std_msgs +# DEPENDS system_lib +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( +# include + ${catkin_INCLUDE_DIRS} +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/minibot_vision.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +# add_executable(${PROJECT_NAME}_node src/minibot_vision_node.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +# target_link_libraries(${PROJECT_NAME}_node +# ${catkin_LIBRARIES} +# ) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) for installation +## in contrast to setup.py, you can choose the destination +# catkin_install_python(PROGRAMS +# scripts/my_python_script +# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark executables for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html +# install(TARGETS ${PROJECT_NAME}_node +# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark libraries for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html +# install(TARGETS ${PROJECT_NAME} +# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} +# ) + +## Mark cpp header files for installation +# install(DIRECTORY include/${PROJECT_NAME}/ +# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +# FILES_MATCHING PATTERN "*.h" +# PATTERN ".svn" EXCLUDE +# ) + +## Mark other files for installation (e.g. launch and bag files, etc.) +# install(FILES +# # myfile1 +# # myfile2 +# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# ) + +############# +## Testing ## +############# + +## Add gtest based cpp test target and link libraries +# catkin_add_gtest(${PROJECT_NAME}-test test/test_minibot_vision.cpp) +# if(TARGET ${PROJECT_NAME}-test) +# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) +# endif() + +## Add folders to be run by python nosetests +# catkin_add_nosetests(test) diff --git a/minibot_vision/README.md b/minibot_vision/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6a9f33174909c1ba515961570dcc8a2ce8c48ce4 --- /dev/null +++ b/minibot_vision/README.md @@ -0,0 +1,58 @@ +# Minibot Vision +This package contains all the functionality related to visual tasks of the minibot. + +## Startup +All of the following nodes require a running realsense camera. +Start the realsense camera with align_depth as true eg. ``roslaunch realsense2_camera rs_camera.launch align_depth:=True``. + +## Segment Sign +This node detects round objects (like traffic signs) in the rgb image of the realsense camera. +The z value of the pose message is then the detected depth in the camera frame (x and y are not updated). +The detected signs are cropped to patches and zoomed so that the patch width/height matches our tensorflow requirements. +This segmentation depends on a some hyper parameters. +Since it seems to be very robust with the current configuration these are set static in the python script. +If you need to adjust them there are two different visualization modes for debugging implemented. + +## Sign Detector +This node starts the SegmentSign node together with a tensorflow classification. 
+
+### Services
+- **sign_detector/set_model**: With this service call, a model trained with and uploaded to `teachablemachine.withgoogle.com` is downloaded from the given URL.
+The new model overwrites the existing one, and its labels are updated on the param server.
+
+- **sign_detector/set_visualize**: A service to set the `visualize` flag (default: true). This flag controls the image stream on the topic `sign_detector/result_image/compressed`.
+Check the Topics section for more details.
+
+### Topics
+- **sign_detector/keypoints**: A `vision_msgs.msg.Detection2D` message is published here.
+The relevant data is stored in its results as `vision_msgs.msg.ObjectHypothesisWithPose`.
+For the position, only the depth is set so far.
+The confidence of the best prediction is set as the score and the corresponding label as the id.
+Note that the label is an integer value.
+The corresponding string is stored on the parameter server under `sign_classification/class_labels/`.
+The header timestamp is taken from the image used for classification.
+This is especially important when TensorFlow is running slowly, as it allows filtering out results that are based on outdated images.
+
+- **sign_detector/result_image/compressed**: If the `visualize` flag is set to true, a `sensor_msgs/CompressedImage` with the detected signs marked and labeled in the image is published on this topic.
+The ROS tool image_view can be used for visualization: `rosrun image_view image_view image:=/<namespace>/sign_detector/result_image _image_transport:=compressed` (note that the given topic omits the `/compressed` suffix).
+This is intended for debugging the image classification neural network.
+
+## Capture Images
+This is a tool to capture images as training data for the sign classification network.
+It processes the output of the RealSense camera with SignSegmentation.
+We have observed that training the network with a segmented version of the signs leads to a more robust classification.
+
+You can run it by launching `roslaunch minibot_vision capture_imgs.launch`.
+There are two arguments:
+- `remote_node`: (default: true)
+  - If true, the node runs on the minibot and the resulting images are published to `capture_images/result_image` as raw and compressed.
+  - Otherwise, the node runs in local mode and the script asks you to type the filename of the resulting images.
+The images are then visualized, can be sampled by holding `r`, and are saved under the previously specified filename.
+- `save_dir`: (default: "/resources/training_imgs/") Only used in local mode.
+The save directory for all images, relative to the minibot_vision package (images are stored in sub-folders named after their filename).
+
+If the node is in remote mode, you need to call the service `capture_images/enable` to activate it.
+
+## TODO
+- [ ] Crop sign: also publish the x and y position in the camera frame.
+- [ ] SignDetector: Publish multiple keypoints (currently only one is published) \ No newline at end of file diff --git a/minibot_vision/config/sign_detector.yaml b/minibot_vision/config/sign_detector.yaml new file mode 100644 index 0000000000000000000000000000000000000000..902315573c31deeb4dfde74677eae02f9930c747 --- /dev/null +++ b/minibot_vision/config/sign_detector.yaml @@ -0,0 +1,11 @@ +sign_detector: + img_height: 480 + img_width: 640 + canny_param1: 100 + canny_param2: 100 # good light: 40; bad light: 100 + min_depth: 0.2 + max_depth: 1.0 + visualize: False + zoom_threshold: 1.15 + min_radius: 15 + max_radius: 128 \ No newline at end of file diff --git a/minibot_vision/config/sign_detector_gazebo.yaml b/minibot_vision/config/sign_detector_gazebo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..169f19097604677dafa6e8401705d81d4e7a394e --- /dev/null +++ b/minibot_vision/config/sign_detector_gazebo.yaml @@ -0,0 +1,11 @@ +sign_detector: + img_height: 1080 + img_width: 1920 + canny_param1: 70 + canny_param2: 30 + min_depth: 0.2 + max_depth: 1.0 + visualize: False + zoom_threshold: 1.15 + min_radius: 15 + max_radius: 128 \ No newline at end of file diff --git a/minibot_vision/config/sign_detector_rviz.yaml b/minibot_vision/config/sign_detector_rviz.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7f54a399215dd8370fbb4ef5bde9348c227f4241 --- /dev/null +++ b/minibot_vision/config/sign_detector_rviz.yaml @@ -0,0 +1,11 @@ +sign_detector: + img_height: 720 + img_width: 1280 + canny_param1: 70 + canny_param2: 20 + min_depth: 10 + max_depth: 90 + visualize: False + zoom_threshold: 1.15 + min_radius: 50 + max_radius: 128 \ No newline at end of file diff --git a/minibot_vision/launch/capture_imgs.launch b/minibot_vision/launch/capture_imgs.launch new file mode 100644 index 0000000000000000000000000000000000000000..9d0ee920ff41baf99f68e1f2eaff0ab4fd0e98ee --- /dev/null +++ b/minibot_vision/launch/capture_imgs.launch @@ -0,0 +1,10 @@ +<?xml version='1.0' ?> +<launch> + <arg name="remote_node" default="True" /> + <arg name="save_dir" default="/resources/training_imgs/" /> <!-- save dir relative to the minibot_vision package --> + + <node name="capture_imgs" pkg="minibot_vision" type="Capture_Images.py" output="screen" ns="$(env ROBOT)" > + <param name="remote_node" value="$(arg remote_node)" /> + <param name="save_dir" value="$(arg save_dir)" /> + </node> +</launch> diff --git a/minibot_vision/launch/crop_sign.launch b/minibot_vision/launch/crop_sign.launch new file mode 100644 index 0000000000000000000000000000000000000000..26021d2925800b5ec489caa4d6e4004b8be9bb64 --- /dev/null +++ b/minibot_vision/launch/crop_sign.launch @@ -0,0 +1,14 @@ +<?xml version='1.0' ?> +<launch> + <!-- This is deprecated. 
The new sign segmentation is directly integrated into SignDetector. -->
+    <arg name="visualize" default="False" />
+    <arg name="load_hyper_params" default="True" />
+    <arg name="start_node" default="True" />
+    <arg name="ns" default="/" />
+
+    <!-- load hyperparams to server -->
+    <rosparam file="$(find minibot_vision)/launch/crop_sign_rosparams.yaml" if="$(arg load_hyper_params)" />
+
+    <node name="crop_sign" pkg="minibot_vision" type="Crop_Sign.py" output="screen" args="$(arg visualize)" if="$(arg start_node)" ns="$(arg ns)" />
+
+</launch>
\ No newline at end of file
diff --git a/minibot_vision/launch/crop_sign_rosparams.yaml b/minibot_vision/launch/crop_sign_rosparams.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0538839a9152260263e10b25ad268e4a5925d17f
--- /dev/null
+++ b/minibot_vision/launch/crop_sign_rosparams.yaml
@@ -0,0 +1,6 @@
+crop_sign:
+  circularity: 0.77
+  thickness: 22
+  circle_filter: 5
+  canny1: 60
+  canny2: 50
diff --git a/minibot_vision/package.xml b/minibot_vision/package.xml
new file mode 100644
index 0000000000000000000000000000000000000000..8eb62f1c90f3001f6887dea01114aefc3ec15683
--- /dev/null
+++ b/minibot_vision/package.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>minibot_vision</name>
+  <version>0.0.0</version>
+  <description>The minibot_vision package</description>
+
+  <!-- One maintainer tag required, multiple allowed, one person per tag -->
+  <!-- Example: -->
+  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
+  <maintainer email="paddy-hofmann@web.de">paddy</maintainer>
+
+
+  <!-- One license tag required, multiple allowed, one license per tag -->
+  <!-- Commonly used license strings: -->
+  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
+  <license>BSD</license>
+
+
+  <!-- Url tags are optional, but multiple are allowed, one per tag -->
+  <!-- Optional attribute type can be: website, bugtracker, or repository -->
+  <!-- Example: -->
+  <!-- <url type="website">http://wiki.ros.org/minibot_vision</url> -->
+
+
+  <!-- Author tags are optional, multiple are allowed, one per tag -->
+  <!-- Authors do not have to be maintainers, but could be -->
+  <!-- Example: -->
+  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
+
+
+  <!-- The *depend tags are used to specify dependencies -->
+  <!-- Dependencies can be catkin packages or system dependencies -->
+  <!-- Examples: -->
+  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
+  <!--   <depend>roscpp</depend> -->
+  <!--   Note that this is equivalent to the following: -->
+  <!--     <build_depend>roscpp</build_depend> -->
+  <!--     <exec_depend>roscpp</exec_depend> -->
+  <!-- Use build_depend for packages you need at compile time: -->
+  <!--   <build_depend>message_generation</build_depend> -->
+  <!-- Use build_export_depend for packages you need in order to build against this package: -->
+  <!--   <build_export_depend>message_generation</build_export_depend> -->
+  <!-- Use buildtool_depend for build tool packages: -->
+  <!--   <buildtool_depend>catkin</buildtool_depend> -->
+  <!-- Use exec_depend for packages you need at runtime: -->
+  <!--   <exec_depend>message_runtime</exec_depend> -->
+  <!-- Use test_depend for packages you need only for testing: -->
+  <!--   <test_depend>gtest</test_depend> -->
+  <!-- Use doc_depend for packages you need only for building documentation: -->
+  <!--   <doc_depend>doxygen</doc_depend> -->
+  <buildtool_depend>catkin</buildtool_depend>
+  
<build_depend>roscpp</build_depend> + <build_depend>rospy</build_depend> + <build_depend>std_msgs</build_depend> + <build_export_depend>roscpp</build_export_depend> + <build_export_depend>rospy</build_export_depend> + <build_export_depend>std_msgs</build_export_depend> + <exec_depend>roscpp</exec_depend> + <exec_depend>rospy</exec_depend> + <exec_depend>std_msgs</exec_depend> + <depend>cv_bridge</depend> + <depend>vision_msgs</depend> + <depend>std_srvs</depend> + <exec_depend>rosbridge_server</exec_depend> + + + <!-- The export tag contains other, unspecified, tags --> + <export> + <!-- Other tools can request additional information be placed here --> + + </export> +</package> diff --git a/minibot_vision/resources/example_images/stop_0.jpg b/minibot_vision/resources/example_images/stop_0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d73ea30938f537bfe612f9d0f258b7879a98bb2d Binary files /dev/null and b/minibot_vision/resources/example_images/stop_0.jpg differ diff --git a/minibot_vision/resources/example_images/stop_1.jpg b/minibot_vision/resources/example_images/stop_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ff35255bb47e6f4253c847ab396c7bbd8d11c59 Binary files /dev/null and b/minibot_vision/resources/example_images/stop_1.jpg differ diff --git a/minibot_vision/resources/example_images/stop_2.jpg b/minibot_vision/resources/example_images/stop_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..792bb958446ecbec9a10b9929860a8ef6dd20035 Binary files /dev/null and b/minibot_vision/resources/example_images/stop_2.jpg differ diff --git a/minibot_vision/resources/example_images/stop_3.jpg b/minibot_vision/resources/example_images/stop_3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b6325e82b978fd9fb8d8a7d6f2a49bed4eb82f1 Binary files /dev/null and b/minibot_vision/resources/example_images/stop_3.jpg differ diff --git a/minibot_vision/resources/example_images/stop_4.jpg b/minibot_vision/resources/example_images/stop_4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8ceba1f3ad6f5b3ae2dae30e587289bdae97881 Binary files /dev/null and b/minibot_vision/resources/example_images/stop_4.jpg differ diff --git a/minibot_vision/resources/example_images/up_0.jpg b/minibot_vision/resources/example_images/up_0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..07c6b12a183f7fe06c5b13c43f740df98136727b Binary files /dev/null and b/minibot_vision/resources/example_images/up_0.jpg differ diff --git a/minibot_vision/resources/example_images/up_1.jpg b/minibot_vision/resources/example_images/up_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd7c40e80fbaf58ac1590c37799371673acb1589 Binary files /dev/null and b/minibot_vision/resources/example_images/up_1.jpg differ diff --git a/minibot_vision/resources/example_images/up_2.png b/minibot_vision/resources/example_images/up_2.png new file mode 100644 index 0000000000000000000000000000000000000000..e141506ee09ab27fee3f5ac69eed82cb2d5c58dd Binary files /dev/null and b/minibot_vision/resources/example_images/up_2.png differ diff --git a/minibot_vision/resources/h5_model_bk/model.h5 b/minibot_vision/resources/h5_model_bk/model.h5 new file mode 100644 index 0000000000000000000000000000000000000000..ab9908b9605a13174d1cef4479adbd1527caef3c Binary files /dev/null and b/minibot_vision/resources/h5_model_bk/model.h5 differ diff --git a/minibot_vision/resources/tfjs_model_bk/metadata.json 
b/minibot_vision/resources/tfjs_model_bk/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..589c52456a79fb0665956655d9b958831f067d06 --- /dev/null +++ b/minibot_vision/resources/tfjs_model_bk/metadata.json @@ -0,0 +1 @@ +{"tfjsVersion":"1.3.1","tmVersion":"2.4.4","packageVersion":"0.8.4-alpha2","packageName":"@teachablemachine/image","timeStamp":"2022-02-22T12:27:24.584Z","userMetadata":{},"modelName":"tm-my-image-model","labels":["up","left","right","stop","none"],"imageSize":224} \ No newline at end of file diff --git a/minibot_vision/resources/tfjs_model_bk/model.json b/minibot_vision/resources/tfjs_model_bk/model.json new file mode 100644 index 0000000000000000000000000000000000000000..5e6e5779e5bd048651b9e7f019b17769474d5aee --- /dev/null +++ b/minibot_vision/resources/tfjs_model_bk/model.json @@ -0,0 +1 @@ +{"modelTopology":{"class_name":"Sequential","config":{"name":"sequential_4","layers":[{"class_name":"Sequential","config":{"name":"sequential_1","layers":[{"class_name":"Model","config":{"name":"model1","layers":[{"name":"input_1","class_name":"InputLayer","config":{"batch_input_shape":[null,224,224,3],"dtype":"float32","sparse":false,"name":"input_1"},"inbound_nodes":[]},{"name":"Conv1_pad","class_name":"ZeroPadding2D","config":{"padding":[[0,1],[0,1]],"data_format":"channels_last","name":"Conv1_pad","trainable":true},"inbound_nodes":[[["input_1",0,0,{}]]]},{"name":"Conv1","class_name":"Conv2D","config":{"filters":16,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[3,3],"strides":[2,2],"padding":"valid","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"Conv1","trainable":true},"inbound_nodes":[[["Conv1_pad",0,0,{}]]]},{"name":"bn_Conv1","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"bn_Conv1","trainable":true},"inbound_nodes":[[["Conv1",0,0,{}]]]},{"name":"Conv1_relu","class_name":"ReLU","config":{"max_value":6,"name":"Conv1_relu","trainable":true},"inbound_nodes":[[["bn_Conv1",0,0,{}]]]},{"name":"expanded_conv_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"expanded_conv_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["Conv1_relu",0,0,{}]]]},{"name":"expanded_conv_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma
_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"expanded_conv_depthwise_BN","trainable":true},"inbound_nodes":[[["expanded_conv_depthwise",0,0,{}]]]},{"name":"expanded_conv_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"expanded_conv_depthwise_relu","trainable":true},"inbound_nodes":[[["expanded_conv_depthwise_BN",0,0,{}]]]},{"name":"expanded_conv_project","class_name":"Conv2D","config":{"filters":8,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"expanded_conv_project","trainable":true},"inbound_nodes":[[["expanded_conv_depthwise_relu",0,0,{}]]]},{"name":"expanded_conv_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"expanded_conv_project_BN","trainable":true},"inbound_nodes":[[["expanded_conv_project",0,0,{}]]]},{"name":"block_1_expand","class_name":"Conv2D","config":{"filters":48,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_1_expand","trainable":true},"inbound_nodes":[[["expanded_conv_project_BN",0,0,{}]]]},{"name":"block_1_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_1_expand_BN","trainable":true},"inbound_nodes":[[["block_1_expand",0,0,{}]]]},{"name":"block_1_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_1_expand_relu","trainable":true},"inbound_nodes":[[["block_1_expand_BN",0,0,{}]]]},{"name":"block_1_pad","class_name":"ZeroPadding2D","config":{"padding":[[0,1],[0,1]],"data_format":"channels_last","name":"block_1_pad","trainable":true},"inbound_nodes":[[["block_1_expand_relu",0,0,{}]]]},{"name":"block_1_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[2,2],"padding":"valid","data_format":"channels_last","dilation_rate":[1,1],"activation":"line
ar","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_1_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_1_pad",0,0,{}]]]},{"name":"block_1_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_1_depthwise_BN","trainable":true},"inbound_nodes":[[["block_1_depthwise",0,0,{}]]]},{"name":"block_1_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_1_depthwise_relu","trainable":true},"inbound_nodes":[[["block_1_depthwise_BN",0,0,{}]]]},{"name":"block_1_project","class_name":"Conv2D","config":{"filters":8,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_1_project","trainable":true},"inbound_nodes":[[["block_1_depthwise_relu",0,0,{}]]]},{"name":"block_1_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_1_project_BN","trainable":true},"inbound_nodes":[[["block_1_project",0,0,{}]]]},{"name":"block_2_expand","class_name":"Conv2D","config":{"filters":48,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_2_expand","trainable":true},"inbound_nodes":[[["block_1_project_BN",0,0,{}]]]},{"name":"block_2_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_2_expand_BN","trainable":true},"inbound_nodes":[[["block_2_expand"
,0,0,{}]]]},{"name":"block_2_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_2_expand_relu","trainable":true},"inbound_nodes":[[["block_2_expand_BN",0,0,{}]]]},{"name":"block_2_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_2_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_2_expand_relu",0,0,{}]]]},{"name":"block_2_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_2_depthwise_BN","trainable":true},"inbound_nodes":[[["block_2_depthwise",0,0,{}]]]},{"name":"block_2_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_2_depthwise_relu","trainable":true},"inbound_nodes":[[["block_2_depthwise_BN",0,0,{}]]]},{"name":"block_2_project","class_name":"Conv2D","config":{"filters":8,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_2_project","trainable":true},"inbound_nodes":[[["block_2_depthwise_relu",0,0,{}]]]},{"name":"block_2_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_2_project_BN","trainable":true},"inbound_nodes":[[["block_2_project",0,0,{}]]]},{"name":"block_2_add","class_name":"Add","config":{"name":"block_2_add","trainable":true},"inbound_nodes":[[["block_1_project_BN",0,0,{}],["block_2_project_BN",0,0,{}]]]},{"name":"block_3_expand","class_name":"Conv2D","config":{"filters":48,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_3_expand","trainable":true},"inbound_nodes":[[["block_2_add",0,0,{}]]]},{"na
me":"block_3_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_3_expand_BN","trainable":true},"inbound_nodes":[[["block_3_expand",0,0,{}]]]},{"name":"block_3_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_3_expand_relu","trainable":true},"inbound_nodes":[[["block_3_expand_BN",0,0,{}]]]},{"name":"block_3_pad","class_name":"ZeroPadding2D","config":{"padding":[[0,1],[0,1]],"data_format":"channels_last","name":"block_3_pad","trainable":true},"inbound_nodes":[[["block_3_expand_relu",0,0,{}]]]},{"name":"block_3_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[2,2],"padding":"valid","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_3_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_3_pad",0,0,{}]]]},{"name":"block_3_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_3_depthwise_BN","trainable":true},"inbound_nodes":[[["block_3_depthwise",0,0,{}]]]},{"name":"block_3_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_3_depthwise_relu","trainable":true},"inbound_nodes":[[["block_3_depthwise_BN",0,0,{}]]]},{"name":"block_3_project","class_name":"Conv2D","config":{"filters":16,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_3_project","trainable":true},"inbound_nodes":[[["block_3_depthwise_relu",0,0,{}]]]},{"name":"block_3_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_3_project_BN","trainable":true},"inbound_nodes":[[["block_3_project",0,0,{}]]]},{"name":"block_4_expand","class_name"
:"Conv2D","config":{"filters":96,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_4_expand","trainable":true},"inbound_nodes":[[["block_3_project_BN",0,0,{}]]]},{"name":"block_4_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_4_expand_BN","trainable":true},"inbound_nodes":[[["block_4_expand",0,0,{}]]]},{"name":"block_4_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_4_expand_relu","trainable":true},"inbound_nodes":[[["block_4_expand_BN",0,0,{}]]]},{"name":"block_4_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_4_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_4_expand_relu",0,0,{}]]]},{"name":"block_4_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_4_depthwise_BN","trainable":true},"inbound_nodes":[[["block_4_depthwise",0,0,{}]]]},{"name":"block_4_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_4_depthwise_relu","trainable":true},"inbound_nodes":[[["block_4_depthwise_BN",0,0,{}]]]},{"name":"block_4_project","class_name":"Conv2D","config":{"filters":16,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_4_project","trainable":true},"inbound_nodes":[[["block_4_depthwise_relu",0,0,{}]]]},{"name":"block_4_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"
class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_4_project_BN","trainable":true},"inbound_nodes":[[["block_4_project",0,0,{}]]]},{"name":"block_4_add","class_name":"Add","config":{"name":"block_4_add","trainable":true},"inbound_nodes":[[["block_3_project_BN",0,0,{}],["block_4_project_BN",0,0,{}]]]},{"name":"block_5_expand","class_name":"Conv2D","config":{"filters":96,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_5_expand","trainable":true},"inbound_nodes":[[["block_4_add",0,0,{}]]]},{"name":"block_5_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_5_expand_BN","trainable":true},"inbound_nodes":[[["block_5_expand",0,0,{}]]]},{"name":"block_5_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_5_expand_relu","trainable":true},"inbound_nodes":[[["block_5_expand_BN",0,0,{}]]]},{"name":"block_5_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_5_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_5_expand_relu",0,0,{}]]]},{"name":"block_5_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_5_depthwise_BN","trainable":true},"inbound_nodes":[[["block_5_depthwise",0,0,{}]]]},{"name":"block_5_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_5_depthwise_relu","trainable":true},"inbound_nodes":[[["block_5_depthwise_BN",0,0,{}]]]},{"name":"block_5_project","class_name":"Conv2D","config":{"filters":16,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"paddi
ng":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_5_project","trainable":true},"inbound_nodes":[[["block_5_depthwise_relu",0,0,{}]]]},{"name":"block_5_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_5_project_BN","trainable":true},"inbound_nodes":[[["block_5_project",0,0,{}]]]},{"name":"block_5_add","class_name":"Add","config":{"name":"block_5_add","trainable":true},"inbound_nodes":[[["block_4_add",0,0,{}],["block_5_project_BN",0,0,{}]]]},{"name":"block_6_expand","class_name":"Conv2D","config":{"filters":96,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_6_expand","trainable":true},"inbound_nodes":[[["block_5_add",0,0,{}]]]},{"name":"block_6_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_6_expand_BN","trainable":true},"inbound_nodes":[[["block_6_expand",0,0,{}]]]},{"name":"block_6_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_6_expand_relu","trainable":true},"inbound_nodes":[[["block_6_expand_BN",0,0,{}]]]},{"name":"block_6_pad","class_name":"ZeroPadding2D","config":{"padding":[[0,1],[0,1]],"data_format":"channels_last","name":"block_6_pad","trainable":true},"inbound_nodes":[[["block_6_expand_relu",0,0,{}]]]},{"name":"block_6_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[2,2],"padding":"valid","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_6_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_6_pad",0,0,{}]]]},{"name":"block_6_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","con
fig":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_6_depthwise_BN","trainable":true},"inbound_nodes":[[["block_6_depthwise",0,0,{}]]]},{"name":"block_6_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_6_depthwise_relu","trainable":true},"inbound_nodes":[[["block_6_depthwise_BN",0,0,{}]]]},{"name":"block_6_project","class_name":"Conv2D","config":{"filters":24,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_6_project","trainable":true},"inbound_nodes":[[["block_6_depthwise_relu",0,0,{}]]]},{"name":"block_6_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_6_project_BN","trainable":true},"inbound_nodes":[[["block_6_project",0,0,{}]]]},{"name":"block_7_expand","class_name":"Conv2D","config":{"filters":144,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_7_expand","trainable":true},"inbound_nodes":[[["block_6_project_BN",0,0,{}]]]},{"name":"block_7_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_7_expand_BN","trainable":true},"inbound_nodes":[[["block_7_expand",0,0,{}]]]},{"name":"block_7_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_7_expand_relu","trainable":true},"inbound_nodes":[[["block_7_expand_BN",0,0,{}]]]},{"name":"block_7_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_7_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"
depthwise_constraint":null},"inbound_nodes":[[["block_7_expand_relu",0,0,{}]]]},{"name":"block_7_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_7_depthwise_BN","trainable":true},"inbound_nodes":[[["block_7_depthwise",0,0,{}]]]},{"name":"block_7_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_7_depthwise_relu","trainable":true},"inbound_nodes":[[["block_7_depthwise_BN",0,0,{}]]]},{"name":"block_7_project","class_name":"Conv2D","config":{"filters":24,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_7_project","trainable":true},"inbound_nodes":[[["block_7_depthwise_relu",0,0,{}]]]},{"name":"block_7_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_7_project_BN","trainable":true},"inbound_nodes":[[["block_7_project",0,0,{}]]]},{"name":"block_7_add","class_name":"Add","config":{"name":"block_7_add","trainable":true},"inbound_nodes":[[["block_6_project_BN",0,0,{}],["block_7_project_BN",0,0,{}]]]},{"name":"block_8_expand","class_name":"Conv2D","config":{"filters":144,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_8_expand","trainable":true},"inbound_nodes":[[["block_7_add",0,0,{}]]]},{"name":"block_8_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_8_expand_BN","trainable":true},"inbound_nodes":[[["block_8_expand",0,0,{}]]]},{"name":"block_8_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_8_expand_relu","trainable":true},"inbound_nodes":[[["block_8_expand_BN",0,0,{}]]]},{"name":"block_8_dep
thwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_8_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_8_expand_relu",0,0,{}]]]},{"name":"block_8_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_8_depthwise_BN","trainable":true},"inbound_nodes":[[["block_8_depthwise",0,0,{}]]]},{"name":"block_8_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_8_depthwise_relu","trainable":true},"inbound_nodes":[[["block_8_depthwise_BN",0,0,{}]]]},{"name":"block_8_project","class_name":"Conv2D","config":{"filters":24,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_8_project","trainable":true},"inbound_nodes":[[["block_8_depthwise_relu",0,0,{}]]]},{"name":"block_8_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_8_project_BN","trainable":true},"inbound_nodes":[[["block_8_project",0,0,{}]]]},{"name":"block_8_add","class_name":"Add","config":{"name":"block_8_add","trainable":true},"inbound_nodes":[[["block_7_add",0,0,{}],["block_8_project_BN",0,0,{}]]]},{"name":"block_9_expand","class_name":"Conv2D","config":{"filters":144,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_9_expand","trainable":true},"inbound_nodes":[[["block_8_add",0,0,{}]]]},{"name":"block_9_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializ
er":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_9_expand_BN","trainable":true},"inbound_nodes":[[["block_9_expand",0,0,{}]]]},{"name":"block_9_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_9_expand_relu","trainable":true},"inbound_nodes":[[["block_9_expand_BN",0,0,{}]]]},{"name":"block_9_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_9_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_9_expand_relu",0,0,{}]]]},{"name":"block_9_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_9_depthwise_BN","trainable":true},"inbound_nodes":[[["block_9_depthwise",0,0,{}]]]},{"name":"block_9_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_9_depthwise_relu","trainable":true},"inbound_nodes":[[["block_9_depthwise_BN",0,0,{}]]]},{"name":"block_9_project","class_name":"Conv2D","config":{"filters":24,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_9_project","trainable":true},"inbound_nodes":[[["block_9_depthwise_relu",0,0,{}]]]},{"name":"block_9_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_9_project_BN","trainable":true},"inbound_nodes":[[["block_9_project",0,0,{}]]]},{"name":"block_9_add","class_name":"Add","config":{"name":"block_9_add","trainable":true},"inbound_nodes":[[["block_8_add",0,0,{}],["block_9_project_BN",0,0,{}]]]},{"name":"block_10_expand","class_name":"Conv2D","config":{"filters":144,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides
":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_10_expand","trainable":true},"inbound_nodes":[[["block_9_add",0,0,{}]]]},{"name":"block_10_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_10_expand_BN","trainable":true},"inbound_nodes":[[["block_10_expand",0,0,{}]]]},{"name":"block_10_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_10_expand_relu","trainable":true},"inbound_nodes":[[["block_10_expand_BN",0,0,{}]]]},{"name":"block_10_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_10_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_10_expand_relu",0,0,{}]]]},{"name":"block_10_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_10_depthwise_BN","trainable":true},"inbound_nodes":[[["block_10_depthwise",0,0,{}]]]},{"name":"block_10_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_10_depthwise_relu","trainable":true},"inbound_nodes":[[["block_10_depthwise_BN",0,0,{}]]]},{"name":"block_10_project","class_name":"Conv2D","config":{"filters":32,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_10_project","trainable":true},"inbound_nodes":[[["block_10_depthwise_relu",0,0,{}]]]},{"name":"block_10_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null
,"gamma_constraint":null,"name":"block_10_project_BN","trainable":true},"inbound_nodes":[[["block_10_project",0,0,{}]]]},{"name":"block_11_expand","class_name":"Conv2D","config":{"filters":192,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_11_expand","trainable":true},"inbound_nodes":[[["block_10_project_BN",0,0,{}]]]},{"name":"block_11_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_11_expand_BN","trainable":true},"inbound_nodes":[[["block_11_expand",0,0,{}]]]},{"name":"block_11_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_11_expand_relu","trainable":true},"inbound_nodes":[[["block_11_expand_BN",0,0,{}]]]},{"name":"block_11_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_11_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_11_expand_relu",0,0,{}]]]},{"name":"block_11_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_11_depthwise_BN","trainable":true},"inbound_nodes":[[["block_11_depthwise",0,0,{}]]]},{"name":"block_11_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_11_depthwise_relu","trainable":true},"inbound_nodes":[[["block_11_depthwise_BN",0,0,{}]]]},{"name":"block_11_project","class_name":"Conv2D","config":{"filters":32,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_11_project","trainable":true},"inbound_nodes":[[["block_11_depthwise_relu",0,0,{}]]]},{"name":"block_11_project_BN","class_na
me":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_11_project_BN","trainable":true},"inbound_nodes":[[["block_11_project",0,0,{}]]]},{"name":"block_11_add","class_name":"Add","config":{"name":"block_11_add","trainable":true},"inbound_nodes":[[["block_10_project_BN",0,0,{}],["block_11_project_BN",0,0,{}]]]},{"name":"block_12_expand","class_name":"Conv2D","config":{"filters":192,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_12_expand","trainable":true},"inbound_nodes":[[["block_11_add",0,0,{}]]]},{"name":"block_12_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_12_expand_BN","trainable":true},"inbound_nodes":[[["block_12_expand",0,0,{}]]]},{"name":"block_12_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_12_expand_relu","trainable":true},"inbound_nodes":[[["block_12_expand_BN",0,0,{}]]]},{"name":"block_12_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_12_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_12_expand_relu",0,0,{}]]]},{"name":"block_12_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_12_depthwise_BN","trainable":true},"inbound_nodes":[[["block_12_depthwise",0,0,{}]]]},{"name":"block_12_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_12_depthwise_relu","trainable":true},"inbound_nodes":[[["block_12_depthwise_BN",0,0,{}]]]},{"name":"block_12_project","class_name":"Conv2D","config":{"filters":32,"kernel_initial
izer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_12_project","trainable":true},"inbound_nodes":[[["block_12_depthwise_relu",0,0,{}]]]},{"name":"block_12_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_12_project_BN","trainable":true},"inbound_nodes":[[["block_12_project",0,0,{}]]]},{"name":"block_12_add","class_name":"Add","config":{"name":"block_12_add","trainable":true},"inbound_nodes":[[["block_11_add",0,0,{}],["block_12_project_BN",0,0,{}]]]},{"name":"block_13_expand","class_name":"Conv2D","config":{"filters":192,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_13_expand","trainable":true},"inbound_nodes":[[["block_12_add",0,0,{}]]]},{"name":"block_13_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_13_expand_BN","trainable":true},"inbound_nodes":[[["block_13_expand",0,0,{}]]]},{"name":"block_13_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_13_expand_relu","trainable":true},"inbound_nodes":[[["block_13_expand_BN",0,0,{}]]]},{"name":"block_13_pad","class_name":"ZeroPadding2D","config":{"padding":[[0,1],[0,1]],"data_format":"channels_last","name":"block_13_pad","trainable":true},"inbound_nodes":[[["block_13_expand_relu",0,0,{}]]]},{"name":"block_13_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[2,2],"padding":"valid","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_13_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_13_pad",0,0,{}]]]},{"name":"block_13_depthwise_BN","class_name":"BatchNormalization","config
":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_13_depthwise_BN","trainable":true},"inbound_nodes":[[["block_13_depthwise",0,0,{}]]]},{"name":"block_13_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_13_depthwise_relu","trainable":true},"inbound_nodes":[[["block_13_depthwise_BN",0,0,{}]]]},{"name":"block_13_project","class_name":"Conv2D","config":{"filters":56,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_13_project","trainable":true},"inbound_nodes":[[["block_13_depthwise_relu",0,0,{}]]]},{"name":"block_13_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_13_project_BN","trainable":true},"inbound_nodes":[[["block_13_project",0,0,{}]]]},{"name":"block_14_expand","class_name":"Conv2D","config":{"filters":336,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_14_expand","trainable":true},"inbound_nodes":[[["block_13_project_BN",0,0,{}]]]},{"name":"block_14_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_14_expand_BN","trainable":true},"inbound_nodes":[[["block_14_expand",0,0,{}]]]},{"name":"block_14_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_14_expand_relu","trainable":true},"inbound_nodes":[[["block_14_expand_BN",0,0,{}]]]},{"name":"block_14_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":n
ull,"bias_constraint":null,"name":"block_14_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_14_expand_relu",0,0,{}]]]},{"name":"block_14_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_14_depthwise_BN","trainable":true},"inbound_nodes":[[["block_14_depthwise",0,0,{}]]]},{"name":"block_14_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_14_depthwise_relu","trainable":true},"inbound_nodes":[[["block_14_depthwise_BN",0,0,{}]]]},{"name":"block_14_project","class_name":"Conv2D","config":{"filters":56,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_14_project","trainable":true},"inbound_nodes":[[["block_14_depthwise_relu",0,0,{}]]]},{"name":"block_14_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_14_project_BN","trainable":true},"inbound_nodes":[[["block_14_project",0,0,{}]]]},{"name":"block_14_add","class_name":"Add","config":{"name":"block_14_add","trainable":true},"inbound_nodes":[[["block_13_project_BN",0,0,{}],["block_14_project_BN",0,0,{}]]]},{"name":"block_15_expand","class_name":"Conv2D","config":{"filters":336,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_15_expand","trainable":true},"inbound_nodes":[[["block_14_add",0,0,{}]]]},{"name":"block_15_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":
"block_15_expand_BN","trainable":true},"inbound_nodes":[[["block_15_expand",0,0,{}]]]},{"name":"block_15_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_15_expand_relu","trainable":true},"inbound_nodes":[[["block_15_expand_BN",0,0,{}]]]},{"name":"block_15_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_15_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_15_expand_relu",0,0,{}]]]},{"name":"block_15_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_15_depthwise_BN","trainable":true},"inbound_nodes":[[["block_15_depthwise",0,0,{}]]]},{"name":"block_15_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_15_depthwise_relu","trainable":true},"inbound_nodes":[[["block_15_depthwise_BN",0,0,{}]]]},{"name":"block_15_project","class_name":"Conv2D","config":{"filters":56,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_15_project","trainable":true},"inbound_nodes":[[["block_15_depthwise_relu",0,0,{}]]]},{"name":"block_15_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_15_project_BN","trainable":true},"inbound_nodes":[[["block_15_project",0,0,{}]]]},{"name":"block_15_add","class_name":"Add","config":{"name":"block_15_add","trainable":true},"inbound_nodes":[[["block_14_add",0,0,{}],["block_15_project_BN",0,0,{}]]]},{"name":"block_16_expand","class_name":"Conv2D","config":{"filters":336,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":nu
ll,"name":"block_16_expand","trainable":true},"inbound_nodes":[[["block_15_add",0,0,{}]]]},{"name":"block_16_expand_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_16_expand_BN","trainable":true},"inbound_nodes":[[["block_16_expand",0,0,{}]]]},{"name":"block_16_expand_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_16_expand_relu","trainable":true},"inbound_nodes":[[["block_16_expand_BN",0,0,{}]]]},{"name":"block_16_depthwise","class_name":"DepthwiseConv2D","config":{"kernel_size":[3,3],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_16_depthwise","trainable":true,"depth_multiplier":1,"depthwise_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"depthwise_regularizer":null,"depthwise_constraint":null},"inbound_nodes":[[["block_16_expand_relu",0,0,{}]]]},{"name":"block_16_depthwise_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_16_depthwise_BN","trainable":true},"inbound_nodes":[[["block_16_depthwise",0,0,{}]]]},{"name":"block_16_depthwise_relu","class_name":"ReLU","config":{"max_value":6,"name":"block_16_depthwise_relu","trainable":true},"inbound_nodes":[[["block_16_depthwise_BN",0,0,{}]]]},{"name":"block_16_project","class_name":"Conv2D","config":{"filters":112,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"same","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"block_16_project","trainable":true},"inbound_nodes":[[["block_16_depthwise_relu",0,0,{}]]]},{"name":"block_16_project_BN","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"block_16_project_BN","trainable":true},"inbound_nodes":[[["block_16_project",0,0,{}]]]},{"name":"Conv_1","class_name":"Conv2D","config":{"filters":1280,"kernel_initializer":{"class_name":"VarianceScaling","c
onfig":{"scale":1,"mode":"fan_avg","distribution":"uniform","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[1,1],"strides":[1,1],"padding":"valid","data_format":"channels_last","dilation_rate":[1,1],"activation":"linear","use_bias":false,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"Conv_1","trainable":true},"inbound_nodes":[[["block_16_project_BN",0,0,{}]]]},{"name":"Conv_1_bn","class_name":"BatchNormalization","config":{"axis":-1,"momentum":0.999,"epsilon":0.001,"center":true,"scale":true,"beta_initializer":{"class_name":"Zeros","config":{}},"gamma_initializer":{"class_name":"Ones","config":{}},"moving_mean_initializer":{"class_name":"Zeros","config":{}},"moving_variance_initializer":{"class_name":"Ones","config":{}},"beta_regularizer":null,"gamma_regularizer":null,"beta_constraint":null,"gamma_constraint":null,"name":"Conv_1_bn","trainable":true},"inbound_nodes":[[["Conv_1",0,0,{}]]]},{"name":"out_relu","class_name":"ReLU","config":{"max_value":6,"name":"out_relu","trainable":true},"inbound_nodes":[[["Conv_1_bn",0,0,{}]]]}],"input_layers":[["input_1",0,0]],"output_layers":[["out_relu",0,0]]}},{"class_name":"GlobalAveragePooling2D","config":{"data_format":"channels_last","name":"global_average_pooling2d_GlobalAveragePooling2D1","trainable":true}}]}},{"class_name":"Sequential","config":{"name":"sequential_3","layers":[{"class_name":"Dense","config":{"units":100,"activation":"relu","use_bias":true,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_in","distribution":"normal","seed":null}},"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_regularizer":null,"bias_regularizer":null,"activity_regularizer":null,"kernel_constraint":null,"bias_constraint":null,"name":"dense_Dense1","trainable":true,"batch_input_shape":[null,1280],"dtype":"float32"}},{"class_name":"Dense","config":{"units":5,"activation":"softmax","use_bias":false,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_in","distribution":"normal","seed":null}},"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_regularizer":null,"bias_regularizer":null,"activity_regularizer":null,"kernel_constraint":null,"bias_constraint":null,"name":"dense_Dense2","trainable":true}}]}}]},"keras_version":"tfjs-layers 1.3.1","backend":"tensor_flow.js"},"format":"layers-model","generatedBy":"TensorFlow.js tfjs-layers 
v1.3.1","convertedBy":null,"weightsManifest":[{"paths":["./model.weights.bin"],"weights":[{"name":"Conv1/kernel","shape":[3,3,3,16],"dtype":"float32"},{"name":"bn_Conv1/gamma","shape":[16],"dtype":"float32"},{"name":"bn_Conv1/beta","shape":[16],"dtype":"float32"},{"name":"expanded_conv_depthwise/depthwise_kernel","shape":[3,3,16,1],"dtype":"float32"},{"name":"expanded_conv_depthwise_BN/gamma","shape":[16],"dtype":"float32"},{"name":"expanded_conv_depthwise_BN/beta","shape":[16],"dtype":"float32"},{"name":"expanded_conv_project/kernel","shape":[1,1,16,8],"dtype":"float32"},{"name":"expanded_conv_project_BN/gamma","shape":[8],"dtype":"float32"},{"name":"expanded_conv_project_BN/beta","shape":[8],"dtype":"float32"},{"name":"block_1_expand/kernel","shape":[1,1,8,48],"dtype":"float32"},{"name":"block_1_expand_BN/gamma","shape":[48],"dtype":"float32"},{"name":"block_1_expand_BN/beta","shape":[48],"dtype":"float32"},{"name":"block_1_depthwise/depthwise_kernel","shape":[3,3,48,1],"dtype":"float32"},{"name":"block_1_depthwise_BN/gamma","shape":[48],"dtype":"float32"},{"name":"block_1_depthwise_BN/beta","shape":[48],"dtype":"float32"},{"name":"block_1_project/kernel","shape":[1,1,48,8],"dtype":"float32"},{"name":"block_1_project_BN/gamma","shape":[8],"dtype":"float32"},{"name":"block_1_project_BN/beta","shape":[8],"dtype":"float32"},{"name":"block_2_expand/kernel","shape":[1,1,8,48],"dtype":"float32"},{"name":"block_2_expand_BN/gamma","shape":[48],"dtype":"float32"},{"name":"block_2_expand_BN/beta","shape":[48],"dtype":"float32"},{"name":"block_2_depthwise/depthwise_kernel","shape":[3,3,48,1],"dtype":"float32"},{"name":"block_2_depthwise_BN/gamma","shape":[48],"dtype":"float32"},{"name":"block_2_depthwise_BN/beta","shape":[48],"dtype":"float32"},{"name":"block_2_project/kernel","shape":[1,1,48,8],"dtype":"float32"},{"name":"block_2_project_BN/gamma","shape":[8],"dtype":"float32"},{"name":"block_2_project_BN/beta","shape":[8],"dtype":"float32"},{"name":"block_3_expand/kernel","shape":[1,1,8,48],"dtype":"float32"},{"name":"block_3_expand_BN/gamma","shape":[48],"dtype":"float32"},{"name":"block_3_expand_BN/beta","shape":[48],"dtype":"float32"},{"name":"block_3_depthwise/depthwise_kernel","shape":[3,3,48,1],"dtype":"float32"},{"name":"block_3_depthwise_BN/gamma","shape":[48],"dtype":"float32"},{"name":"block_3_depthwise_BN/beta","shape":[48],"dtype":"float32"},{"name":"block_3_project/kernel","shape":[1,1,48,16],"dtype":"float32"},{"name":"block_3_project_BN/gamma","shape":[16],"dtype":"float32"},{"name":"block_3_project_BN/beta","shape":[16],"dtype":"float32"},{"name":"block_4_expand/kernel","shape":[1,1,16,96],"dtype":"float32"},{"name":"block_4_expand_BN/gamma","shape":[96],"dtype":"float32"},{"name":"block_4_expand_BN/beta","shape":[96],"dtype":"float32"},{"name":"block_4_depthwise/depthwise_kernel","shape":[3,3,96,1],"dtype":"float32"},{"name":"block_4_depthwise_BN/gamma","shape":[96],"dtype":"float32"},{"name":"block_4_depthwise_BN/beta","shape":[96],"dtype":"float32"},{"name":"block_4_project/kernel","shape":[1,1,96,16],"dtype":"float32"},{"name":"block_4_project_BN/gamma","shape":[16],"dtype":"float32"},{"name":"block_4_project_BN/beta","shape":[16],"dtype":"float32"},{"name":"block_5_expand/kernel","shape":[1,1,16,96],"dtype":"float32"},{"name":"block_5_expand_BN/gamma","shape":[96],"dtype":"float32"},{"name":"block_5_expand_BN/beta","shape":[96],"dtype":"float32"},{"name":"block_5_depthwise/depthwise_kernel","shape":[3,3,96,1],"dtype":"float32"},{"name":"block_5_depthwise_BN/gamma","shape":[96]
,"dtype":"float32"},{"name":"block_5_depthwise_BN/beta","shape":[96],"dtype":"float32"},{"name":"block_5_project/kernel","shape":[1,1,96,16],"dtype":"float32"},{"name":"block_5_project_BN/gamma","shape":[16],"dtype":"float32"},{"name":"block_5_project_BN/beta","shape":[16],"dtype":"float32"},{"name":"block_6_expand/kernel","shape":[1,1,16,96],"dtype":"float32"},{"name":"block_6_expand_BN/gamma","shape":[96],"dtype":"float32"},{"name":"block_6_expand_BN/beta","shape":[96],"dtype":"float32"},{"name":"block_6_depthwise/depthwise_kernel","shape":[3,3,96,1],"dtype":"float32"},{"name":"block_6_depthwise_BN/gamma","shape":[96],"dtype":"float32"},{"name":"block_6_depthwise_BN/beta","shape":[96],"dtype":"float32"},{"name":"block_6_project/kernel","shape":[1,1,96,24],"dtype":"float32"},{"name":"block_6_project_BN/gamma","shape":[24],"dtype":"float32"},{"name":"block_6_project_BN/beta","shape":[24],"dtype":"float32"},{"name":"block_7_expand/kernel","shape":[1,1,24,144],"dtype":"float32"},{"name":"block_7_expand_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_7_expand_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_7_depthwise/depthwise_kernel","shape":[3,3,144,1],"dtype":"float32"},{"name":"block_7_depthwise_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_7_depthwise_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_7_project/kernel","shape":[1,1,144,24],"dtype":"float32"},{"name":"block_7_project_BN/gamma","shape":[24],"dtype":"float32"},{"name":"block_7_project_BN/beta","shape":[24],"dtype":"float32"},{"name":"block_8_expand/kernel","shape":[1,1,24,144],"dtype":"float32"},{"name":"block_8_expand_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_8_expand_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_8_depthwise/depthwise_kernel","shape":[3,3,144,1],"dtype":"float32"},{"name":"block_8_depthwise_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_8_depthwise_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_8_project/kernel","shape":[1,1,144,24],"dtype":"float32"},{"name":"block_8_project_BN/gamma","shape":[24],"dtype":"float32"},{"name":"block_8_project_BN/beta","shape":[24],"dtype":"float32"},{"name":"block_9_expand/kernel","shape":[1,1,24,144],"dtype":"float32"},{"name":"block_9_expand_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_9_expand_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_9_depthwise/depthwise_kernel","shape":[3,3,144,1],"dtype":"float32"},{"name":"block_9_depthwise_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_9_depthwise_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_9_project/kernel","shape":[1,1,144,24],"dtype":"float32"},{"name":"block_9_project_BN/gamma","shape":[24],"dtype":"float32"},{"name":"block_9_project_BN/beta","shape":[24],"dtype":"float32"},{"name":"block_10_expand/kernel","shape":[1,1,24,144],"dtype":"float32"},{"name":"block_10_expand_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_10_expand_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_10_depthwise/depthwise_kernel","shape":[3,3,144,1],"dtype":"float32"},{"name":"block_10_depthwise_BN/gamma","shape":[144],"dtype":"float32"},{"name":"block_10_depthwise_BN/beta","shape":[144],"dtype":"float32"},{"name":"block_10_project/kernel","shape":[1,1,144,32],"dtype":"float32"},{"name":"block_10_project_BN/gamma","shape":[32],"dtype":"float32"},{"name":"block_10_project_BN/beta","shape":[32],"dtype":"float32"},{"name":"block_11_expand/kernel","shape":[1,1,32,192],"dtype":"float32"},{"name":"block_11
_expand_BN/gamma","shape":[192],"dtype":"float32"},{"name":"block_11_expand_BN/beta","shape":[192],"dtype":"float32"},{"name":"block_11_depthwise/depthwise_kernel","shape":[3,3,192,1],"dtype":"float32"},{"name":"block_11_depthwise_BN/gamma","shape":[192],"dtype":"float32"},{"name":"block_11_depthwise_BN/beta","shape":[192],"dtype":"float32"},{"name":"block_11_project/kernel","shape":[1,1,192,32],"dtype":"float32"},{"name":"block_11_project_BN/gamma","shape":[32],"dtype":"float32"},{"name":"block_11_project_BN/beta","shape":[32],"dtype":"float32"},{"name":"block_12_expand/kernel","shape":[1,1,32,192],"dtype":"float32"},{"name":"block_12_expand_BN/gamma","shape":[192],"dtype":"float32"},{"name":"block_12_expand_BN/beta","shape":[192],"dtype":"float32"},{"name":"block_12_depthwise/depthwise_kernel","shape":[3,3,192,1],"dtype":"float32"},{"name":"block_12_depthwise_BN/gamma","shape":[192],"dtype":"float32"},{"name":"block_12_depthwise_BN/beta","shape":[192],"dtype":"float32"},{"name":"block_12_project/kernel","shape":[1,1,192,32],"dtype":"float32"},{"name":"block_12_project_BN/gamma","shape":[32],"dtype":"float32"},{"name":"block_12_project_BN/beta","shape":[32],"dtype":"float32"},{"name":"block_13_expand/kernel","shape":[1,1,32,192],"dtype":"float32"},{"name":"block_13_expand_BN/gamma","shape":[192],"dtype":"float32"},{"name":"block_13_expand_BN/beta","shape":[192],"dtype":"float32"},{"name":"block_13_depthwise/depthwise_kernel","shape":[3,3,192,1],"dtype":"float32"},{"name":"block_13_depthwise_BN/gamma","shape":[192],"dtype":"float32"},{"name":"block_13_depthwise_BN/beta","shape":[192],"dtype":"float32"},{"name":"block_13_project/kernel","shape":[1,1,192,56],"dtype":"float32"},{"name":"block_13_project_BN/gamma","shape":[56],"dtype":"float32"},{"name":"block_13_project_BN/beta","shape":[56],"dtype":"float32"},{"name":"block_14_expand/kernel","shape":[1,1,56,336],"dtype":"float32"},{"name":"block_14_expand_BN/gamma","shape":[336],"dtype":"float32"},{"name":"block_14_expand_BN/beta","shape":[336],"dtype":"float32"},{"name":"block_14_depthwise/depthwise_kernel","shape":[3,3,336,1],"dtype":"float32"},{"name":"block_14_depthwise_BN/gamma","shape":[336],"dtype":"float32"},{"name":"block_14_depthwise_BN/beta","shape":[336],"dtype":"float32"},{"name":"block_14_project/kernel","shape":[1,1,336,56],"dtype":"float32"},{"name":"block_14_project_BN/gamma","shape":[56],"dtype":"float32"},{"name":"block_14_project_BN/beta","shape":[56],"dtype":"float32"},{"name":"block_15_expand/kernel","shape":[1,1,56,336],"dtype":"float32"},{"name":"block_15_expand_BN/gamma","shape":[336],"dtype":"float32"},{"name":"block_15_expand_BN/beta","shape":[336],"dtype":"float32"},{"name":"block_15_depthwise/depthwise_kernel","shape":[3,3,336,1],"dtype":"float32"},{"name":"block_15_depthwise_BN/gamma","shape":[336],"dtype":"float32"},{"name":"block_15_depthwise_BN/beta","shape":[336],"dtype":"float32"},{"name":"block_15_project/kernel","shape":[1,1,336,56],"dtype":"float32"},{"name":"block_15_project_BN/gamma","shape":[56],"dtype":"float32"},{"name":"block_15_project_BN/beta","shape":[56],"dtype":"float32"},{"name":"block_16_expand/kernel","shape":[1,1,56,336],"dtype":"float32"},{"name":"block_16_expand_BN/gamma","shape":[336],"dtype":"float32"},{"name":"block_16_expand_BN/beta","shape":[336],"dtype":"float32"},{"name":"block_16_depthwise/depthwise_kernel","shape":[3,3,336,1],"dtype":"float32"},{"name":"block_16_depthwise_BN/gamma","shape":[336],"dtype":"float32"},{"name":"block_16_depthwise_BN/beta","shape":[336],"dtype":"float3
2"},{"name":"block_16_project/kernel","shape":[1,1,336,112],"dtype":"float32"},{"name":"block_16_project_BN/gamma","shape":[112],"dtype":"float32"},{"name":"block_16_project_BN/beta","shape":[112],"dtype":"float32"},{"name":"Conv_1/kernel","shape":[1,1,112,1280],"dtype":"float32"},{"name":"Conv_1_bn/gamma","shape":[1280],"dtype":"float32"},{"name":"Conv_1_bn/beta","shape":[1280],"dtype":"float32"},{"name":"dense_Dense1/kernel","shape":[1280,100],"dtype":"float32"},{"name":"dense_Dense1/bias","shape":[100],"dtype":"float32"},{"name":"dense_Dense2/kernel","shape":[100,5],"dtype":"float32"},{"name":"bn_Conv1/moving_mean","shape":[16],"dtype":"float32"},{"name":"bn_Conv1/moving_variance","shape":[16],"dtype":"float32"},{"name":"expanded_conv_depthwise_BN/moving_mean","shape":[16],"dtype":"float32"},{"name":"expanded_conv_depthwise_BN/moving_variance","shape":[16],"dtype":"float32"},{"name":"expanded_conv_project_BN/moving_mean","shape":[8],"dtype":"float32"},{"name":"expanded_conv_project_BN/moving_variance","shape":[8],"dtype":"float32"},{"name":"block_1_expand_BN/moving_mean","shape":[48],"dtype":"float32"},{"name":"block_1_expand_BN/moving_variance","shape":[48],"dtype":"float32"},{"name":"block_1_depthwise_BN/moving_mean","shape":[48],"dtype":"float32"},{"name":"block_1_depthwise_BN/moving_variance","shape":[48],"dtype":"float32"},{"name":"block_1_project_BN/moving_mean","shape":[8],"dtype":"float32"},{"name":"block_1_project_BN/moving_variance","shape":[8],"dtype":"float32"},{"name":"block_2_expand_BN/moving_mean","shape":[48],"dtype":"float32"},{"name":"block_2_expand_BN/moving_variance","shape":[48],"dtype":"float32"},{"name":"block_2_depthwise_BN/moving_mean","shape":[48],"dtype":"float32"},{"name":"block_2_depthwise_BN/moving_variance","shape":[48],"dtype":"float32"},{"name":"block_2_project_BN/moving_mean","shape":[8],"dtype":"float32"},{"name":"block_2_project_BN/moving_variance","shape":[8],"dtype":"float32"},{"name":"block_3_expand_BN/moving_mean","shape":[48],"dtype":"float32"},{"name":"block_3_expand_BN/moving_variance","shape":[48],"dtype":"float32"},{"name":"block_3_depthwise_BN/moving_mean","shape":[48],"dtype":"float32"},{"name":"block_3_depthwise_BN/moving_variance","shape":[48],"dtype":"float32"},{"name":"block_3_project_BN/moving_mean","shape":[16],"dtype":"float32"},{"name":"block_3_project_BN/moving_variance","shape":[16],"dtype":"float32"},{"name":"block_4_expand_BN/moving_mean","shape":[96],"dtype":"float32"},{"name":"block_4_expand_BN/moving_variance","shape":[96],"dtype":"float32"},{"name":"block_4_depthwise_BN/moving_mean","shape":[96],"dtype":"float32"},{"name":"block_4_depthwise_BN/moving_variance","shape":[96],"dtype":"float32"},{"name":"block_4_project_BN/moving_mean","shape":[16],"dtype":"float32"},{"name":"block_4_project_BN/moving_variance","shape":[16],"dtype":"float32"},{"name":"block_5_expand_BN/moving_mean","shape":[96],"dtype":"float32"},{"name":"block_5_expand_BN/moving_variance","shape":[96],"dtype":"float32"},{"name":"block_5_depthwise_BN/moving_mean","shape":[96],"dtype":"float32"},{"name":"block_5_depthwise_BN/moving_variance","shape":[96],"dtype":"float32"},{"name":"block_5_project_BN/moving_mean","shape":[16],"dtype":"float32"},{"name":"block_5_project_BN/moving_variance","shape":[16],"dtype":"float32"},{"name":"block_6_expand_BN/moving_mean","shape":[96],"dtype":"float32"},{"name":"block_6_expand_BN/moving_variance","shape":[96],"dtype":"float32"},{"name":"block_6_depthwise_BN/moving_mean","shape":[96],"dtype":"float32"},{"name":"block_6_depthwis
e_BN/moving_variance","shape":[96],"dtype":"float32"},{"name":"block_6_project_BN/moving_mean","shape":[24],"dtype":"float32"},{"name":"block_6_project_BN/moving_variance","shape":[24],"dtype":"float32"},{"name":"block_7_expand_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_7_expand_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_7_depthwise_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_7_depthwise_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_7_project_BN/moving_mean","shape":[24],"dtype":"float32"},{"name":"block_7_project_BN/moving_variance","shape":[24],"dtype":"float32"},{"name":"block_8_expand_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_8_expand_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_8_depthwise_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_8_depthwise_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_8_project_BN/moving_mean","shape":[24],"dtype":"float32"},{"name":"block_8_project_BN/moving_variance","shape":[24],"dtype":"float32"},{"name":"block_9_expand_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_9_expand_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_9_depthwise_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_9_depthwise_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_9_project_BN/moving_mean","shape":[24],"dtype":"float32"},{"name":"block_9_project_BN/moving_variance","shape":[24],"dtype":"float32"},{"name":"block_10_expand_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_10_expand_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_10_depthwise_BN/moving_mean","shape":[144],"dtype":"float32"},{"name":"block_10_depthwise_BN/moving_variance","shape":[144],"dtype":"float32"},{"name":"block_10_project_BN/moving_mean","shape":[32],"dtype":"float32"},{"name":"block_10_project_BN/moving_variance","shape":[32],"dtype":"float32"},{"name":"block_11_expand_BN/moving_mean","shape":[192],"dtype":"float32"},{"name":"block_11_expand_BN/moving_variance","shape":[192],"dtype":"float32"},{"name":"block_11_depthwise_BN/moving_mean","shape":[192],"dtype":"float32"},{"name":"block_11_depthwise_BN/moving_variance","shape":[192],"dtype":"float32"},{"name":"block_11_project_BN/moving_mean","shape":[32],"dtype":"float32"},{"name":"block_11_project_BN/moving_variance","shape":[32],"dtype":"float32"},{"name":"block_12_expand_BN/moving_mean","shape":[192],"dtype":"float32"},{"name":"block_12_expand_BN/moving_variance","shape":[192],"dtype":"float32"},{"name":"block_12_depthwise_BN/moving_mean","shape":[192],"dtype":"float32"},{"name":"block_12_depthwise_BN/moving_variance","shape":[192],"dtype":"float32"},{"name":"block_12_project_BN/moving_mean","shape":[32],"dtype":"float32"},{"name":"block_12_project_BN/moving_variance","shape":[32],"dtype":"float32"},{"name":"block_13_expand_BN/moving_mean","shape":[192],"dtype":"float32"},{"name":"block_13_expand_BN/moving_variance","shape":[192],"dtype":"float32"},{"name":"block_13_depthwise_BN/moving_mean","shape":[192],"dtype":"float32"},{"name":"block_13_depthwise_BN/moving_variance","shape":[192],"dtype":"float32"},{"name":"block_13_project_BN/moving_mean","shape":[56],"dtype":"float32"},{"name":"block_13_project_BN/moving_variance","shape":[56],"dtype":"float32"},{"name":"block_14_expand_BN/moving_mean","shape":[336],"dtype":"float32"},{"name":"block_14_expand_BN/moving_variance","shape":[336],"d
type":"float32"},{"name":"block_14_depthwise_BN/moving_mean","shape":[336],"dtype":"float32"},{"name":"block_14_depthwise_BN/moving_variance","shape":[336],"dtype":"float32"},{"name":"block_14_project_BN/moving_mean","shape":[56],"dtype":"float32"},{"name":"block_14_project_BN/moving_variance","shape":[56],"dtype":"float32"},{"name":"block_15_expand_BN/moving_mean","shape":[336],"dtype":"float32"},{"name":"block_15_expand_BN/moving_variance","shape":[336],"dtype":"float32"},{"name":"block_15_depthwise_BN/moving_mean","shape":[336],"dtype":"float32"},{"name":"block_15_depthwise_BN/moving_variance","shape":[336],"dtype":"float32"},{"name":"block_15_project_BN/moving_mean","shape":[56],"dtype":"float32"},{"name":"block_15_project_BN/moving_variance","shape":[56],"dtype":"float32"},{"name":"block_16_expand_BN/moving_mean","shape":[336],"dtype":"float32"},{"name":"block_16_expand_BN/moving_variance","shape":[336],"dtype":"float32"},{"name":"block_16_depthwise_BN/moving_mean","shape":[336],"dtype":"float32"},{"name":"block_16_depthwise_BN/moving_variance","shape":[336],"dtype":"float32"},{"name":"block_16_project_BN/moving_mean","shape":[112],"dtype":"float32"},{"name":"block_16_project_BN/moving_variance","shape":[112],"dtype":"float32"},{"name":"Conv_1_bn/moving_mean","shape":[1280],"dtype":"float32"},{"name":"Conv_1_bn/moving_variance","shape":[1280],"dtype":"float32"}]}]} \ No newline at end of file diff --git a/minibot_vision/resources/tfjs_model_bk/model.weights.bin b/minibot_vision/resources/tfjs_model_bk/model.weights.bin new file mode 100644 index 0000000000000000000000000000000000000000..8cb87284773bebb12116de2cd25373d3dcbe3765 Binary files /dev/null and b/minibot_vision/resources/tfjs_model_bk/model.weights.bin differ diff --git a/minibot_vision/scripts/Capture_Images.py b/minibot_vision/scripts/Capture_Images.py new file mode 100644 index 0000000000000000000000000000000000000000..b20bf45b28c7e3496d50ed81bd56d9a9025e8df9 --- /dev/null +++ b/minibot_vision/scripts/Capture_Images.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 + +import cv2 +import numpy as np +import rospy +from sensor_msgs.msg import Image, CompressedImage +from cv_bridge import CvBridge, CvBridgeError +import time +import rospkg +from pathlib import Path +from copy import copy +import SegmentSign +import ShapeDetector +from std_srvs.srv import SetBool, SetBoolResponse + +# *** hyper-params *** +IMG_RES = (480, 640) +TF_RES = (224, 224) # our tensorflow is using a reduced image size +MAX_DURATION = 0.1 # max duration between screenshots in [sec] +IMAGE_NAME = "" +REMOTE_NODE = False +SAVE_DIR = "/resources/training_imgs/" + +# *** GLOBALS *** +bridge = CvBridge() +img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8) +img_depth_stream = np.zeros((IMG_RES[0], IMG_RES[1], 1), np.uint8) +pub_raw_img = None +pub_cmpr_img = None +enable = True +rate = None + +# subscribe to RGB img +def image_color_callback(data): + global bridge, img_rgb_stream + + try: + img_rgb_stream = bridge.imgmsg_to_cv2(data, "bgr8") + except CvBridgeError as e: + print(e) + + +def image_depth_callback(data): + global img_depth_stream, bridge + + try: + img_depth_stream = bridge.imgmsg_to_cv2(data, "16UC1") + except CvBridgeError as e: + print(e) + + +def sign_from_segmentation(): + global img_rgb_stream, img_depth_stream + + img_orig = copy(img_rgb_stream) + + # get sign location in img + keypoints = SegmentSign.do_hough_circle_detection(copy(img_orig), copy(img_depth_stream)) + keypoints += ShapeDetector.do_shape_detection(copy(img_orig), 
copy(img_depth_stream)) + keypoints = SegmentSign.filter_duplicate_keypoints(keypoints) + patches = SegmentSign.get_tensor_patches(copy(img_orig), keypoints) + if len(patches) > 0: + return patches[0] + else: + return None + + +def publish_img_patch(img_patch): + global bridge, pub_raw_img, pub_cmpr_img + + # use same timestamp for synchronisation + timestamp = rospy.Time.now() + + # publish non compressed image for saving + rawmsg = bridge.cv2_to_imgmsg(img_patch, "bgr8") + rawmsg.header.stamp = timestamp + pub_raw_img.publish(rawmsg) + # publish compressed img for website visualization + cmprsmsg = bridge.cv2_to_compressed_imgmsg(img_patch) + cmprsmsg.header.stamp = timestamp + pub_cmpr_img.publish(cmprsmsg) + + +def enable_callback(req): + global enable, rate + + enable = req.data + rospy.loginfo("({}) set enable to {}".format(rospy.get_name(), enable)) + # go in low power mode if the node is doing nothing + if enable: + rate = rospy.Rate(30) + else: + rate = rospy.Rate(5) + + return True, "" + + +if __name__ == "__main__": + rospy.init_node("capture_images") + + if rospy.has_param("~remote_node"): + REMOTE_NODE = rospy.get_param("~remote_node") + if rospy.has_param("~save_dir"): + SAVE_DIR = rospy.get_param("~save_dir") + + img_depth_topic = "camera/aligned_depth_to_color/image_raw" + img_color_topic = "camera/color/image_raw" + camera_frame = "camera_aligned_depth_to_color_frame" + # get img stream + rospy.Subscriber(img_color_topic, Image, image_color_callback) + rospy.Subscriber(img_depth_topic, Image, image_depth_callback) + + if REMOTE_NODE: + enable = False # set this by calling the corresponding service + rate = rospy.Rate(5) # go in low power mode if the node is doing nothing + # init publisher + pub_raw_img = rospy.Publisher("~result_image", Image, queue_size=10) + pub_cmpr_img = rospy.Publisher("~result_image/compressed", CompressedImage, queue_size=10) + rospy.Service("~enable", SetBool, enable_callback) + + rospy.loginfo("{} is up in remote mode.".format(rospy.get_name())) + else: + enable = True + rate = rospy.Rate(30) + # init save dirs + print("input image name: ") + IMAGE_NAME = input() + rospack = rospkg.RosPack() + SAVE_DIR = "{}{}{}/".format(rospack.get_path("minibot_vision"), SAVE_DIR, IMAGE_NAME) + Path(SAVE_DIR).mkdir(parents=True, exist_ok=True) + + rospy.loginfo("{} is up in local mode. Hold r to save streamed imgs with name {} to {}".format(rospy.get_name(), IMAGE_NAME, SAVE_DIR)) + + start_time = time.time() + duration = 0. + counter = 0 + while not rospy.is_shutdown(): + if enable: + # from blob detector and zoomed + img_patch = sign_from_segmentation() + if img_patch is None: + continue + + if REMOTE_NODE: + publish_img_patch(img_patch) + else: + cv2.imshow("IMG_Color", img_patch) + k = cv2.waitKey(1) + + if k == ord('r'): + duration = time.time() - start_time + if duration >= MAX_DURATION: + # reset timer + start_time = time.time() + duration = 0. + + # save screenshot + img_name = "{}_{}".format(IMAGE_NAME, counter) + rospy.loginfo("Save img {} at {}".format(img_name, SAVE_DIR)) + cv2.imwrite("{}{}.jpg".format(SAVE_DIR, img_name), img_patch) + counter += 1 + + rate.sleep() + + rospy.loginfo("Node is shutting down. 
Closing all cv2 windows (if there are some)...") + cv2.destroyAllWindows() diff --git a/minibot_vision/scripts/Crop_Sign_Depth.py b/minibot_vision/scripts/Crop_Sign_Depth.py new file mode 100644 index 0000000000000000000000000000000000000000..c3404f4a351ee269a4f8ceb722345f20987f60f1 --- /dev/null +++ b/minibot_vision/scripts/Crop_Sign_Depth.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 + +# Standard imports +import cv2 +import numpy as np +from cv_bridge import CvBridge, CvBridgeError +from sensor_msgs.msg import Image +import rospy +import sys +from vision_msgs.msg import Detection2D, ObjectHypothesisWithPose +import distutils.util +import copy + +class CropSign: + def __init__(self, depth_topic, rgb_topic, camera_frame, visualize=False, publish=False): + # *** params *** + # get these from param server + self.circularity = rospy.get_param("/crop_sign/circularity") # 0.65 + self.thickness = rospy.get_param("/crop_sign/thickness") # 7 + self.circle_filter = rospy.get_param("/crop_sign/circle_filter") # 6 + self.canny1 = rospy.get_param("/crop_sign/canny1") # 26 + self.canny2 = rospy.get_param("/crop_sign/canny2") # 27 + + # *** ROS topics *** + self.camera_frame = camera_frame + self.visualize = visualize + self.publish = publish + self.bridge = CvBridge() + image_depth_sub = rospy.Subscriber(depth_topic, Image, self.image_depth_callback) + self.img_depth_buf = np.zeros((480, 640, 1), np.uint8) + + if self.publish: + self.pub_keypoint = rospy.Publisher('sign_keypoints', Detection2D, queue_size=10) + + if self.visualize: + image_color_sub = rospy.Subscriber(rgb_topic, Image, self.image_color_callback) + + cv2.namedWindow("Parameters") + cv2.resizeWindow("Parameters", 800, 600) + cv2.createTrackbar("Circularity", "Parameters", int(self.circularity * 100), 100, self.empty) + cv2.createTrackbar("Thickness", "Parameters", self.thickness, 30, self.empty) + cv2.createTrackbar("CircleFilter", "Parameters", self.circle_filter, 30, self.empty) + cv2.createTrackbar("Canny1", "Parameters", self.canny1, 255, self.empty) + cv2.createTrackbar("Canny2", "Parameters", self.canny2, 255, self.empty) + + self.img_contours = np.zeros((480, 640, 3), np.uint8) + self.img_rgb = np.zeros((480, 640, 3), np.uint8) + self.ros_img_rgb = np.zeros((480, 640, 3), np.uint8) + self.img_depth = np.zeros((480, 640, 1), np.uint8) + cv2.namedWindow("RGB") + cv2.namedWindow("Depth") + + def spin(self): + rate = rospy.Rate(10) + try: + if visualize: + while not rospy.is_shutdown(): + # *** update parameters *** + self.circularity = cv2.getTrackbarPos("Circularity", "Parameters") / 100 + self.thickness = cv2.getTrackbarPos("Thickness", "Parameters") + self.circle_filter = cv2.getTrackbarPos("CircleFilter", "Parameters") + self.canny1 = cv2.getTrackbarPos("Canny1", "Parameters") + self.canny2 = cv2.getTrackbarPos("Canny2", "Parameters") + + # *** show image stream to update parameters *** + cv2.imshow('Parameters', self.img_contours) + cv2.imshow('RGB', self.img_rgb) + cv2.imshow('Depth', self.img_depth) + cv2.waitKey(1) + rate.sleep() + else: + rospy.spin() + except KeyboardInterrupt: + print("Shutting down") + + cv2.destroyAllWindows() + + + def empty(self, d): + pass + + def blob_detector(self, im): + # Set up the detector with default parameters. + # Setup SimpleBlobDetector parameters. 
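+        # (added note) by default, SimpleBlobDetector searches for dark blobs (params.blobColor = 0),
+        # which is why detect_signs_depth_img below speaks of "black blobs on white backgrounds"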
+        # find a detailed description in the official documentation: https://docs.opencv.org/4.x/d0/d7a/classcv_1_1SimpleBlobDetector.html
+        params = cv2.SimpleBlobDetector_Params()
+
+        # Change thresholds
+        # params.minThreshold = 20
+        # params.thresholdStep = 5
+        # params.maxThreshold = 150
+
+        # Filter by Area.
+        params.filterByArea = True
+        params.minArea = 500
+        params.maxArea = 10000
+
+        # Filter by Circularity
+        params.filterByCircularity = True
+        params.minCircularity = self.circularity
+        params.maxCircularity = 1.0
+
+        # Filter by Convexity
+        params.filterByConvexity = False
+        params.minConvexity = 0.87
+
+        # Filter by Inertia
+        params.filterByInertia = False
+        params.minInertiaRatio = 0.01
+        detector = cv2.SimpleBlobDetector_create(params)
+
+        # Detect blobs.
+        keypoints = detector.detect(im)
+
+        return keypoints
+
+    def depth_edge_detector(self, img : np.array, canny1, canny2):
+        """
+        This function does some preprocessing on a discretized grey image [0, 255] to detect (round) edges.
+        :param canny1: First param of the Canny edge detector.
+        :param canny2: Second param of the Canny edge detector.
+        """
+        # circular kernel makes noisy blobs more round
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (self.circle_filter, self.circle_filter))
+        img_filtered = cv2.dilate(img, kernel, iterations=2)
+        img_filtered = cv2.GaussianBlur(img_filtered, (9, 9), 0)
+        img_canny = cv2.Canny(img_filtered, canny1, canny2)
+
+        return img_canny, img_filtered
+
+    def discretize_depth_img(self, img : np.array, max_range : int = 1000):
+        """
+        Converts a depth image with integer range in [mm] to values (near to far) in [0, 255].
+        :param max_range: The max range in the converted image. Values above will be clipped to 255.
+        :return: The converted img.
+        """
+        img_d = np.clip(img, 0, max_range)  # 1m is the max range we are supporting
+        img_d = np.array((img_d / max_range) * 255, dtype=np.uint8)  # convert to values [0, 255]
+
+        return img_d
+
+    def image_color_callback(self, data):
+        try:
+            self.ros_img_rgb = self.bridge.imgmsg_to_cv2(data, "bgr8")
+        except CvBridgeError as e:
+            print(e)
+
+    def image_depth_callback(self, data):
+        try:
+            self.img_depth_buf = self.bridge.imgmsg_to_cv2(data, "16UC1")
+            # only run the automated detection if this is supposed to run as standalone node
+            if self.publish or self.visualize:
+                self.detect_signs_depth_img()
+        except CvBridgeError as e:
+            print(e)
+
+    def circular_mean(self, p, r, arr : np.array):
+        """
+        Returns the mean intensity in a circle described by a middle point p and radius r of a grey image.
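+        :param p: circle center as (x, y) in pixel coordinates
+        :param r: circle radius in pixels
+        :param arr: single-channel image, indexed as arr[y, x]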
+ """ + # x_start x_end x_step y_start y_end y_step + xy = np.mgrid[int(p[0] - r) : int(p[0] + r) : 1, int(p[1] - r) : int(p[1] + r):1].reshape(2,-1).T + sum_px_values = 0 + count_px = 0 + for x, y in xy: + if (x - p[0])**2 + (y - p[1])**2 < r**2: + sum_px_values += arr[y, x] + count_px += 1 + + return sum_px_values / count_px + + def detect_signs_depth_img(self): + depth_img_raw = copy.copy(self.img_depth_buf) # to ensure that there is no raise condition + depth_img_discret = self.discretize_depth_img(depth_img_raw) + img_edge, img_depth_filtered = self.depth_edge_detector(depth_img_discret, self.canny1, self.canny2) + + #img_contour = self.contour_detector(img_edge) + img_contour = img_edge + + kernel = np.ones((self.thickness, self.thickness)) + img_contour = cv2.dilate(img_contour, kernel, iterations=1) + keypoints = self.blob_detector(img_contour) # the blob detector is detecting black blobs in white backgrounds + + # filter depth shadows out + new_keypoints = [] + for p in keypoints: + if depth_img_raw[int(p.pt[1]), int(p.pt[0])] > 0: # TODO do this by taking the most common value of the blob + new_keypoints.append(p) + + if self.visualize: + # Draw detected blobs as red circles in contour image. + # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob + img_with_keypoints = cv2.drawKeypoints(img_contour, keypoints, np.array([]), (0, 0, 255), + cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + # update images + self.img_contours = img_with_keypoints + self.img_depth = img_depth_filtered + self.img_rgb = self.ros_img_rgb.copy() # copy the image since the asynchron callback could override any changes + + # publish keypoints + depths = [] + for i, (p) in enumerate(new_keypoints): + # depth in [m] + depth = self.circular_mean(p.pt, (p.size/2) * 0.1, depth_img_raw) / 1000 # take the depth value from the original image (10% of radius) + depths.append(depth) + + if self.publish: + detection_msg = Detection2D() + detection_msg.header.stamp = rospy.Time.now() + detection_msg.header.frame_id = self.camera_frame # TODO check if this is using tf_prefix + + detection_msg.bbox.size_x = p.size + detection_msg.bbox.size_y = p.size + detection_msg.bbox.center.x = int(p.pt[0]) + detection_msg.bbox.center.y = int(p.pt[1]) + + obj_with_pose = ObjectHypothesisWithPose() + # the id might not be the same in different msgs + obj_with_pose.id = i + # TODO calc x and y in img frame + obj_with_pose.pose.pose.position.z = depth + detection_msg.results = [obj_with_pose] + + self.pub_keypoint.publish(detection_msg) + + # visualize filtered keypoints on rgb image + if self.visualize: + cv2.circle(self.img_rgb,(int(p.pt[0]),int(p.pt[1])), int(p.size/2), (0, 0, 0), thickness=2) + cv2.putText(self.img_rgb, "d:{}".format(depth), (int(p.pt[0]), int(p.pt[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), thickness=2) + + return new_keypoints, depths + +if __name__ == "__main__": + # parse args + visualize = True + for i, arg in enumerate(sys.argv): + if i == 1: + visualize = bool(distutils.util.strtobool(arg)) + + rospy.init_node("crop_sign") + + img_color_topic = "{}camera/color/image_raw".format(rospy.get_namespace()) + img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(rospy.get_namespace()) + camera_frame = "camera_aligned_depth_to_color_frame" + + sign_detector = CropSign(img_depth_topic, img_color_topic, camera_frame, visualize=visualize) + rospy.loginfo("{}: SignDetector is up with visualize={}. 
Spinning ...".format(rospy.get_name(), visualize)) + sign_detector.spin() + diff --git a/minibot_vision/scripts/SegmentSign.py b/minibot_vision/scripts/SegmentSign.py new file mode 100644 index 0000000000000000000000000000000000000000..e1fbde3c780b9dafbaaeb9018fd91f63c1ebe0d1 --- /dev/null +++ b/minibot_vision/scripts/SegmentSign.py @@ -0,0 +1,226 @@ +import cv2 +import numpy as np +from cv_bridge import CvBridge, CvBridgeError +from sensor_msgs.msg import Image +import rospy +from copy import copy + +# *** hyper params *** +#IMG_RES = (480, 640) +IMG_RES = (rospy.get_param("sign_detector/img_height", 1080), rospy.get_param("sign_detector/img_width", 1920)) #(1080,1920) +TENSOR_RES = (224, 224) +# gazebo 70,30; rviz 70, 20; real 100 40 +canny = rospy.get_param("sign_detector/canny_param1", 100) #100 +accum_thresh = rospy.get_param("sign_detector/canny_param2", 40) #30 +VISUALIZE = rospy.get_param("sign_detector/visualize", True) # Flase +ZOOM_THREASHOLD = rospy.get_param("sign_detector/zoom_threshold", 1.15) #(1.15) multiplied percentage to the detected radius + +MIN_DEPTH = rospy.get_param("sign_detector/min_depth", 0.2) # 12 +MAX_DEPTH = rospy.get_param("sign_detector/max_depth", 1.0) # 20 + +MIN_RADIUS = rospy.get_param("sign_detector/min_radius", 15) # 15 +MAX_RADIUS = rospy.get_param("sign_detector/max_radius", 128) # 128 + +# *** Globals *** +cv_bridge = CvBridge() +img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8) +img_depth_stream = np.zeros((IMG_RES[0], IMG_RES[1], 1), np.uint8) + +def empty(d): + pass + +if VISUALIZE: + cv2.namedWindow("Parameters") + cv2.resizeWindow("Parameters", 800, 600) + cv2.createTrackbar("Canny", "Parameters", canny, 255, empty) + cv2.createTrackbar("Accum", "Parameters", accum_thresh, 255, empty) + + +def image_color_callback(data): + global img_rgb_stream, cv_bridge + + try: + img_rgb_stream = cv_bridge.imgmsg_to_cv2(data, "bgr8") + except CvBridgeError as e: + print(e) + + +def image_depth_callback(data): + global img_depth_stream, cv_bridge + + try: + img_depth_stream = cv_bridge.imgmsg_to_cv2(data, "16UC1") + except CvBridgeError as e: + print(e) + + +def circular_mean(p, r, arr : np.array): + """ + returns the mean intensity in a circle described by a middle point p and radius r of a grey image. + """ + # x_start x_end x_step y_start y_end y_step + xy = np.mgrid[int(p[0] - r) : int(p[0] + r) : 1, int(p[1] - r) : int(p[1] + r):1].reshape(2,-1).T + sum_px_values = 0 + count_px = 0 + for x, y in xy: + if x >= IMG_RES[1] or y >= IMG_RES[0]: + continue + if (x - p[0])**2 + (y - p[1])**2 < r**2: + sum_px_values += arr[y, x] + count_px += 1 + + if count_px == 0: + return 0 + + return sum_px_values / count_px + + +def do_hough_circle_detection(img_rgb, img_depth): + global canny, accum_thresh + + gray = img_rgb + gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY) + #gray = cv2.medianBlur(gray, 5) # reduce noise + + circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, gray.shape[0] / 4, + param1=canny, # First method-specific parameter. In case of HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). + param2=accum_thresh, # Second method-specific parameter. In case of HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first. 
+                               minRadius=MIN_RADIUS, maxRadius=MAX_RADIUS)
+    keypoints = []
+    if circles is not None:
+        circles = np.uint16(np.around(circles))
+        for k, (i) in enumerate(circles[0, :]):
+            center = (i[0], i[1])
+            radius = i[2]
+
+            # get depth in [m] (was radius * 0.4 in real world)
+            d = circular_mean(center, radius * 0.2, copy(img_depth)) / 1000
+            # filter if the sign is too close (circle detector will struggle) or too far away (background)
+            # was 0.2 and 1.0
+            if d < MIN_DEPTH or d > MAX_DEPTH:
+                continue
+            keypoints.append({"center": center, "radius": radius, "depth": d})
+
+            # circle center
+            if VISUALIZE:
+                cv2.putText(img_rgb, "d:{:1.3f} r:{:1.0f} num:{}".format(d, radius, k), (center[0], center[1] - radius - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), thickness=1)
+                cv2.circle(img_rgb, center, 1, (0, 100, 100), 3)
+                # circle outline
+                cv2.circle(img_rgb, center, radius, (255, 0, 255), 3)
+
+    return keypoints
+
+
+def crop_to_bounds(crop_bounds, max_val):
+    if crop_bounds[0] < 0:
+        crop_bounds[1] += 0 - crop_bounds[0]
+        crop_bounds[0] = 0
+    if crop_bounds[1] > max_val:
+        crop_bounds[0] -= crop_bounds[1] - max_val
+        crop_bounds[1] = max_val
+
+    return crop_bounds
+
+
+def get_tensor_patches(img_rgb, keypoints):
+    """
+    Turns a set of keypoints into image patches from the original image.
+    Each patch has the size needed by the tensorflow model, so that the patch can be directly fed into the image classifier.
+    Each patch is zoomed such that the detected object fills the entire patch.
+    :param img_rgb: original image
+    :param keypoints: list of detected keypoints
+    :return: A set of image patches.
+    """
+    global TENSOR_RES, ZOOM_THREASHOLD
+
+    img_patches = []
+    for k in keypoints:
+        img = copy(img_rgb)
+        d = k["depth"]
+        center = k["center"]
+        center = [center[1], center[0]]
+        r = k["radius"]
+
+        # zoom into the image so that the padded sign diameter fills the tensor patch
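+        # (added note) example: with r = 56 px and ZOOM_THREASHOLD = 1.15 the padded sign diameter is
+        # 112 * 1.15 = 128.8 px, so zoom_factor = 224 / 128.8 ≈ 1.74 in both axes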
+        zoom_factor = np.array(TENSOR_RES) / (r * 2 * ZOOM_THREASHOLD)
+        zoomed_image = cv2.resize(img, dsize=None, fx=zoom_factor[0], fy=zoom_factor[1], interpolation=cv2.INTER_NEAREST)
+
+        # handle border
+        img_center_zoomed = (center * zoom_factor).astype(int)
+        y = [img_center_zoomed[0] - TENSOR_RES[0] // 2, img_center_zoomed[0] + TENSOR_RES[0] // 2]
+        y = crop_to_bounds(y, np.shape(zoomed_image)[0])
+        x = [img_center_zoomed[1] - TENSOR_RES[1] // 2, img_center_zoomed[1] + TENSOR_RES[1] // 2]
+        x = crop_to_bounds(x, np.shape(zoomed_image)[1])
+        img_patches.append(zoomed_image[y[0]:y[1], x[0]:x[1], :])
+
+    return img_patches
+
+
+def visualize_patches(keypoints, patches, text, img_rgb):
+    for i in range(len(keypoints)):
+        k = keypoints[i]
+        d = k["depth"]
+        center = k["center"]
+        center = [center[1], center[0]]
+        r = k["radius"]
+        patch = patches[i]
+
+        # we need the exact idx in the non zoomed image, so we have to recalculate the borders
+        y = [center[0] - TENSOR_RES[0] // 2, center[0] + TENSOR_RES[0] // 2]
+        y = crop_to_bounds(y, np.shape(img_rgb)[0])
+        x = [center[1] - TENSOR_RES[1] // 2, center[1] + TENSOR_RES[1] // 2]
+        x = crop_to_bounds(x, np.shape(img_rgb)[1])
+        # overlay the zoomed patch at the sign location so that the patch that is fed to TensorFlow can be seen directly
+        img_rgb[y[0]:y[1], x[0]:x[1]] = patch
+        cv2.rectangle(img_rgb, (x[0], y[0]), (x[1], y[1]), (255, 255, 255), thickness=1)
+        cv2.putText(img_rgb, text[i], (x[0], y[0] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), thickness=1)
+
+    return img_rgb
+
+
+def filter_duplicate_keypoints(keypoints):
+    required_keypoints = []
+
+    for point in keypoints:
+        result = list(filter(lambda x: abs(point['radius'] - x['radius']) < MIN_RADIUS, required_keypoints))
+        if len(result) == 0:
+            required_keypoints.append(point)
+    return required_keypoints
+
+
+if __name__ == "__main__":
+    rospy.init_node("hough_detection")
+
+    ns = "/minibot/"  # rospy.get_namespace()
+    VISUALIZE = True
+    img_color_topic = "{}camera/color/image_raw".format(ns)
+    img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(ns)
+    rospy.Subscriber(img_color_topic, Image, image_color_callback)
+    rospy.Subscriber(img_depth_topic, Image, image_depth_callback)
+
+    toggle_patch_visualization = True
+    print("Toggle patch visualisation is {}. Press p to change.".format(toggle_patch_visualization))
+    rate = rospy.Rate(30)
+    while not rospy.is_shutdown():
+        img_processed = copy(img_rgb_stream)
+        keypoints = do_hough_circle_detection(img_processed, copy(img_depth_stream))
+
+        if toggle_patch_visualization:
+            img_processed = copy(img_rgb_stream)
+            patches = get_tensor_patches(copy(img_rgb_stream), keypoints)
+            visualize_patches(keypoints, patches, ["d:{:1.3f}".format(k["depth"]) for k in keypoints], img_processed)
+
+        cv2.imshow("Parameters", img_processed)
+        canny = cv2.getTrackbarPos("Canny", "Parameters")
+        accum_thresh = cv2.getTrackbarPos("Accum", "Parameters")
+
+        k = cv2.waitKey(1) & 0xFF
+        if k == ord("p"):
+            # toggle first so that the printed state is the current one
+            toggle_patch_visualization = not toggle_patch_visualization
+            print("Toggle patch visualisation is {}".format(toggle_patch_visualization))
+
+        rate.sleep()
diff --git a/minibot_vision/scripts/ShapeDetector.py b/minibot_vision/scripts/ShapeDetector.py
new file mode 100644
index 0000000000000000000000000000000000000000..565328962c1b2f810ec269de8b28d01dc52fac6b
--- /dev/null
+++ b/minibot_vision/scripts/ShapeDetector.py
@@ -0,0 +1,145 @@
+import cv2
+import numpy as np
+from cv_bridge import CvBridge, CvBridgeError
+from sensor_msgs.msg import Image
+import rospy
+from copy import copy
+import SegmentSign
+
+VISUALIZE = False
+
+# *** hyper params ***
+#IMG_RES = (480, 640)
+IMG_RES = (rospy.get_param("sign_detector/img_height"), rospy.get_param("sign_detector/img_width"))  # (1080, 1920)
+TENSOR_RES = (224, 224)
+
+MIN_DEPTH = rospy.get_param("sign_detector/min_depth")  # 12
+MAX_DEPTH = rospy.get_param("sign_detector/max_depth")  # 20
+
+MIN_RADIUS = rospy.get_param("sign_detector/min_radius")  # 15
+MAX_RADIUS = rospy.get_param("sign_detector/max_radius")  # 128
+
+# *** Globals ***
+cv_bridge = CvBridge()
+img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8)
+img_depth_stream = np.zeros((IMG_RES[0], IMG_RES[1], 1), np.uint8)
+
+def empty(d):
+    pass
+
+
+def image_color_callback(data):
+    global img_rgb_stream, cv_bridge
+
+    try:
+        img_rgb_stream = cv_bridge.imgmsg_to_cv2(data, "bgr8")
+    except CvBridgeError as e:
+        print(e)
+
+
+def image_depth_callback(data):
+    global img_depth_stream, cv_bridge
+
+    try:
+        img_depth_stream = cv_bridge.imgmsg_to_cv2(data, "16UC1")
+    except CvBridgeError as e:
+        print(e)
+
+
+# NOTE (reconstructed): the original hunk is missing this function's header; `detect_shape(c)` is an
+# assumed name and signature, added so that the dangling classification body below is valid Python.
+def detect_shape(c):
+    # Compute perimeter of contour and perform contour approximation
+    shape = ""
+    peri = cv2.arcLength(c, True)
+    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
+
+    # Triangle
+    if len(approx) == 3:
+        shape = "triangle"
+
+    # Square or rectangle
+    elif len(approx) == 4:
+        (x, y, w, h) = cv2.boundingRect(approx)
+        ar = w / float(h)
+
+        # A square will have an aspect ratio that is approximately
+        # equal to one, otherwise the shape is a rectangle
+        shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
+
+    # Pentagon
+    elif len(approx) == 5:
+        shape = "pentagon"
+
+    # Hexagon
+    elif len(approx) == 6:
+        shape = "hexagon"
+
+    # Octagon
+    elif len(approx) == 8:
+        shape = "octagon"
+
+    # Star
+    elif len(approx) == 10:
+        shape = "star"
+
+    # Otherwise assume it is a circle or oval
+    else:
+        shape = "circle"
+
+    return shape
+
+def do_shape_detection(img_rgb, img_depth):
+    gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
+
+    ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
+    if VISUALIZE:
+        cv2.imshow('thresh img', thresh1)
+
+    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    keypoints = []
+    for cnt in contours:
+        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
+        if len(approx) == 8:
+            (x, y), radius = cv2.minEnclosingCircle(cnt)
+            center = (int(x), int(y))
+            radius = int(radius)
+
+            if radius < MIN_RADIUS or radius > MAX_RADIUS:
+                continue
+
+            # get depth in [m] (was radius * 0.4 in real world)
+            d = SegmentSign.circular_mean(center, radius * 0.2, copy(img_depth)) / 1000
+            # filter if the sign is too close (circle detector will struggle) or too far away (background)
+            # was 0.2 and 1.0
+            if d < MIN_DEPTH or d > MAX_DEPTH:
+                continue
+            keypoints.append({"center": center, "radius": radius, "depth": d})
+
+            if VISUALIZE:
+                cv2.circle(img_rgb, center, radius, (0, 255, 0), 2)
+                cv2.drawContours(img_rgb, [cnt], 0, (0, 255, 0), 6)
+                cv2.imshow('sign', img_rgb)
+                cv2.waitKey(0)
+    return keypoints
+
+
+if __name__ == "__main__":
+    rospy.init_node("shape_detection")
+    VISUALIZE = True
+    img_color_topic = "{}camera/color/image_raw".format(rospy.get_namespace())
+    img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(rospy.get_namespace())
+    rospy.Subscriber(img_color_topic, Image, image_color_callback)
+    rospy.Subscriber(img_depth_topic, Image, image_depth_callback)
+
+    rate = rospy.Rate(1)
+    while not rospy.is_shutdown():
+        img_processed = copy(img_rgb_stream)
+        keypoints = do_shape_detection(img_processed, copy(img_depth_stream))
+
+        #cv2.imshow("Shape", img_processed)
+
+        rate.sleep()
\ No newline at end of file
diff --git a/minibot_vision/scripts/SignDetector.py b/minibot_vision/scripts/SignDetector.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8434279bf080a7d747e797d2e61b779ea8135ba
--- /dev/null
+++ b/minibot_vision/scripts/SignDetector.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+
+import rospy
+import std_srvs.srv
+
+import ShapeDetector
+import SegmentSign
+from cv_bridge import CvBridge, CvBridgeError
+import numpy as np
+from sensor_msgs.msg import Image
+from sensor_msgs.msg import CompressedImage
+from TmClassification import TmClassification
+import cv2
+from copy import copy
+from vision_msgs.msg import Detection2D, ObjectHypothesisWithPose
+from minibot_msgs.srv import set_url
+
+# *** CONSTANTS ***
+visualize = True
+camera_frame = "camera_aligned_depth_to_color_frame"
+#OLD CONFIG: IMG_RES = (480, 640)
+IMG_RES = (rospy.get_param("sign_detector/img_height"), rospy.get_param("sign_detector/img_width"))
+TF_RES = (224, 224)  # tf is cropping the image
+
+# *** GLOBALS ***
+sign_classifier = TmClassification()
+bridge = CvBridge()
+img_rgb_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8)
+img_rgb_timestamp = rospy.Time(0, 0)
+img_depth_stream = np.zeros((IMG_RES[0], IMG_RES[1], 1), np.uint8)
+img_rgb = img_rgb_stream
+pub_keypoint = None
+pub_result_img = None
+
+# subscribe to RGB img
+def image_color_callback(data):
+    global bridge, img_rgb_stream, img_rgb_timestamp
+
+    try:
+        img_rgb_stream = bridge.imgmsg_to_cv2(data, "bgr8")
+        img_rgb_timestamp = rospy.Time.now()
+    except CvBridgeError as e:
+        print(e)
+
+
+def image_depth_callback(data):
+    global img_depth_stream, bridge
+
+    try:
+        img_depth_stream = bridge.imgmsg_to_cv2(data, "16UC1")
+    except CvBridgeError as e:
+        print(e)
+
+
+def publish_results(point, radius, depth, label, precision, timestamp):
+    global camera_frame
+
+    detection_msg = Detection2D()
+    # the time when the image was taken
+    detection_msg.header.stamp = timestamp
+    detection_msg.header.frame_id = camera_frame
+
+    detection_msg.bbox.size_x = radius * 2
+    detection_msg.bbox.size_y = radius * 2
+    detection_msg.bbox.center.x = point[0]
+    detection_msg.bbox.center.y = point[1]
+
+    obj_with_pose = ObjectHypothesisWithPose()
+    # the id might not be the same in different msgs
+    #obj_with_pose.id = i
+    # TODO calc x and y in img frame
+    obj_with_pose.pose.pose.position.z = depth
+    obj_with_pose.score = precision
+    obj_with_pose.id = label
+
+    detection_msg.results = [obj_with_pose]
+
+    pub_keypoint.publish(detection_msg)
+
+def detect_sign(img_rgb_stream, image_timestamp):
+    global img_depth_stream, pub_result_img
+
+    img_orig = copy(img_rgb_stream)
+
+    # get sign location in img
+    keypoints = SegmentSign.do_hough_circle_detection(copy(img_orig), copy(img_depth_stream))
+    keypoints += ShapeDetector.do_shape_detection(copy(img_orig), copy(img_depth_stream))
+    keypoints = SegmentSign.filter_duplicate_keypoints(keypoints)
+    patches = SegmentSign.get_tensor_patches(copy(img_orig), keypoints)
+
+    # cut to multiple images at keypoints
+    text = []
+    for i in range(len(keypoints)):
+        k = keypoints[i]
+        p = patches[i]
+        d = k["depth"]
+        center = [k["center"][1], k["center"][0]]
+        r = k["radius"]
+
+        # classify image patches
+        label, precision = sign_classifier.predictImage(p)  # returns a tuple (label, precision); label is -1 if no model is set up or an error occurred
+        if label >= 0:
+            # publish results
+            publish_results(center, r, d, label, precision, image_timestamp)
+            text.append("c: {} p: {:1.3f} d:{:1.3f}".format(sign_classifier.labelOfClass(label), precision, d))
+
+    if visualize:
+        if len(text) > 0:
+            SegmentSign.visualize_patches(keypoints, patches, text, img_orig)
+        # compress and publish
+        cmprsmsg = bridge.cv2_to_compressed_imgmsg(img_orig)
+        pub_result_img.publish(cmprsmsg)
+
+
+def set_model_callback(req):
+    sign_classifier.setNewModel(req.url)
+    rospy.logwarn("TODO implement url error check")
+    return False  # TODO implement url error check
+
+
+def set_visualize_callback(req):
+    global visualize
+
+    visualize = req.data
+    return True, ""
+
+
+if __name__ == "__main__":
+    rospy.init_node("sign_detector")
+
+    img_color_topic = "{}camera/color/image_raw".format(rospy.get_namespace())
+    img_depth_topic = "{}camera/aligned_depth_to_color/image_raw".format(rospy.get_namespace())
+
+    rospy.Subscriber(img_color_topic, Image, image_color_callback, queue_size=1)
+    rospy.Subscriber(img_depth_topic, Image, image_depth_callback, queue_size=1)
+    rospy.Service('sign_detector/set_model', set_url, set_model_callback)
+    rospy.Service('sign_detector/set_visualize', std_srvs.srv.SetBool, set_visualize_callback)
+    pub_keypoint = rospy.Publisher('sign_detector/keypoints', Detection2D, queue_size=10)
+    pub_result_img = rospy.Publisher("sign_detector/result_image/compressed", CompressedImage, queue_size=10)
+
+    rate = rospy.Rate(30)  # 30 Hz is currently not reachable; the effective rate is bounded by the detect_sign evaluation time
+    while not rospy.is_shutdown():
+        detect_sign(img_rgb_stream, img_rgb_timestamp)
+        rate.sleep()
diff --git a/minibot_vision/scripts/TmClassification.py b/minibot_vision/scripts/TmClassification.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1188e7ba1c9b8b0998cbf48dd1f942a90a9b88e
--- /dev/null
+++ b/minibot_vision/scripts/TmClassification.py
@@ -0,0 +1,121 @@
+import requests
+import os
+
+import rospy
+from keras.models import load_model
+import tensorflow as tf
+from PIL import Image, ImageOps
+import numpy as np
+import json
+import rospkg
+import shutil
+
+
+class TmClassification:
+    files = ['model.json', 'metadata.json', 'model.weights.bin']
+    tfjs_dir = "/resources/tfjs_model"
+    h5_dir = "/resources/h5_model"
+    h5_file = "model.h5"
+
+    def __init__(self, url=None):
+        rospack = rospkg.RosPack()
+        ros_dir = rospack.get_path("minibot_vision")
+        self.tfjs_dir = ros_dir + self.tfjs_dir
+        self.h5_dir = ros_dir + self.h5_dir
+
+        if url is not None:
+            self.setNewModel(url)
+
+        self.loadNewModel()
+        print("TF: ready, waiting for images to classify.")
+
+        if not tf.test.is_built_with_cuda():
+            print("Your tf build has no CUDA support.")
+
+    def setNewModel(self, url):
+        print("TF: Downloading model from url: {}".format(url))
+        self._prepareDirectories()
+        self._downloadFiles(url)
+        self._convertFromTfjsToKeras()
+
+        self.loadNewModel()
+
+    def loadNewModel(self):
+        # TODO if there is an existing model, else error
+        if not os.path.exists(f'{self.tfjs_dir}/{self.files[1]}') or not os.path.exists(f'{self.h5_dir}/{self.h5_file}'):
+            rospy.logwarn("({}) There is no existing tensorflow model on your machine. You can set a new model by calling the /set_model service.".format(rospy.get_name()))
+            self.model = None  # ensure that the attribute exists on the instance
+            return
+        # Load the model
+        self.model = load_model(f'{self.h5_dir}/{self.h5_file}', compile=False)
+        # Load metadata for labels
+        self.metadata = self._loadMetadata()
+        self._uploadLabelsToParamServer()
+
+    def _prepareDirectories(self):
+        if os.path.exists(self.tfjs_dir):
+            shutil.rmtree(self.tfjs_dir)
+        if os.path.exists(self.h5_dir):
+            shutil.rmtree(self.h5_dir)
+
+        os.mkdir(self.h5_dir)
+        os.mkdir(self.tfjs_dir)
+
+    def _downloadFiles(self, url):
+        for f in self.files:
+            request_url = url + f
+            storage_file = f'{self.tfjs_dir}/{f}'
+            r = requests.get(request_url, allow_redirects=True)
+            with open(storage_file, 'wb') as fp:
+                fp.write(r.content)
+
+    def _convertFromTfjsToKeras(self):
+        os.system(f'tensorflowjs_converter --input_format=tfjs_layers_model --output_format=keras {self.tfjs_dir}/{self.files[0]} {self.h5_dir}/{self.h5_file}')
+
+    def _loadMetadata(self):
+        with open(self.tfjs_dir + '/' + self.files[1]) as f:
+            return json.load(f)
+
+    def predictImage(self, image):
+        if self.model is None:
+            return -1, 1.0
+        # Create the array of the right shape to feed into the keras model
+        # The 'length' or number of images you can put into the array is
+        # determined by the first position in the shape tuple, in this case 1.
+        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+
+        # turn the image into a numpy array
+        image_array = np.asarray(image)
+        # Normalize the image to roughly [-1, 1] with the same strategy as in TM2
+        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+        # Load the image into the array
+        data[0] = normalized_image_array
+
+        # run the inference
+        prediction = self.model.predict(data)
+
+        # Generate arg maxes of predictions
+        class_nr = np.argmax(prediction, axis=1)[0]
+        return class_nr, np.max(prediction, axis=1)[0]
+
+    def _uploadLabelsToParamServer(self):
+        # delete all existing params in this namespace
+        try:
+            rospy.delete_param('sign_classification/class_labels/')
+        except KeyError:
+            pass
+
+        labels = self.metadata["labels"]
+        for i, (l) in enumerate(labels):
+            rospy.set_param("sign_classification/class_labels/{}".format(i), l)
+
+    def labelOfClass(self, class_number):
+        labels = self.metadata["labels"]
+        if class_number < 0 or class_number >= len(labels):
+            return 'unknown'
+        return labels[class_number]
+
diff --git a/minibot_vision/scripts/TmClassificationLite.py b/minibot_vision/scripts/TmClassificationLite.py
new file mode 100644
index 0000000000000000000000000000000000000000..59d28a4d96a56e3ed3c91b3f75f5a3e2dc3caa99
--- /dev/null
+++ b/minibot_vision/scripts/TmClassificationLite.py
@@ -0,0 +1,64 @@
+import tflite_runtime.interpreter as tflite
+from PIL import Image, ImageOps
+import numpy as np
+import rospkg
+import csv
+
+
+class TmClassificationLite:
+    file_names = ['model_unquant.tflite', 'labels.txt']
+    model_dir = '/resources/tf_lite_model/'
+
+    def __init__(self):
+        rospack = rospkg.RosPack()
+        self.model_dir = rospack.get_path("minibot_vision") + self.model_dir
+
+        # Load the TFLite model in the TFLite Interpreter
+        self.interpreter = tflite.Interpreter(self.model_dir + self.file_names[0])
+        self.interpreter.allocate_tensors()
+
+        self.labels = self.read_labels()
+
+    def read_labels(self):
+        # This is truly not a nice implementation... But it works!
+        reader = csv.reader(open(self.model_dir + self.file_names[1]), delimiter=' ')
+        result = []
+        for s in reader:
+            result.append(s[1])
+
+        return result
+
+
+    def preprocess_image(self, image):
+        img_pil = Image.fromarray(image)
+        # Create the array of the right shape to feed into the keras model
+        # The 'length' or number of images you can put into the array is
+        # determined by the first position in the shape tuple, in this case 1.
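+        # (added note) unlike TmClassification.predictImage, this variant does the resizing itself:
+        # ImageOps.fit below scales and center-crops the input to 224x224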
+        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+        # resize the image to 224x224 with the same strategy as in TM2:
+        # resizing the image to be at least 224x224 and then cropping from the center
+        size = (224, 224)
+        image = ImageOps.fit(img_pil, size, Image.ANTIALIAS, centering=(0.5, 0.5))
+
+        # turn the image into a numpy array
+        image_array = np.asarray(image)
+        # Normalize the image
+        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+        # Load the image into the array
+        data[0] = normalized_image_array
+
+        return data
+
+    def predict_image(self, img):
+        img_preprocessed = self.preprocess_image(img)
+
+        input_details = self.interpreter.get_input_details()
+        self.interpreter.set_tensor(input_details[0]['index'], img_preprocessed)
+        self.interpreter.invoke()
+
+        output_details = self.interpreter.get_output_details()
+        output = self.interpreter.get_tensor(output_details[0]['index'])
+
+        return output, self.labels
diff --git a/minibot_vision/scripts/__init__.py b/minibot_vision/scripts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/minibot_vision/scripts/__pycache__/Crop_Sign.cpython-38.pyc b/minibot_vision/scripts/__pycache__/Crop_Sign.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee0aa504f599a35636d7b35f7f67b8d501be7d99
Binary files /dev/null and b/minibot_vision/scripts/__pycache__/Crop_Sign.cpython-38.pyc differ
diff --git a/minibot_vision/scripts/__pycache__/SegmentSign.cpython-38.pyc b/minibot_vision/scripts/__pycache__/SegmentSign.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1769aa6ab76c9ccca3ba04008cd180456863213f
Binary files /dev/null and b/minibot_vision/scripts/__pycache__/SegmentSign.cpython-38.pyc differ
diff --git a/minibot_vision/scripts/__pycache__/ShapeDetector.cpython-38.pyc b/minibot_vision/scripts/__pycache__/ShapeDetector.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a51d05721688d61c67981b8229a17035f9fd5147
Binary files /dev/null and b/minibot_vision/scripts/__pycache__/ShapeDetector.cpython-38.pyc differ
diff --git a/minibot_vision/scripts/__pycache__/TmClassification.cpython-38.pyc b/minibot_vision/scripts/__pycache__/TmClassification.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72f414b783d89d0b17a604d7243335c6b2185358
Binary files /dev/null and b/minibot_vision/scripts/__pycache__/TmClassification.cpython-38.pyc differ
diff --git a/minibot_vision/scripts/example_sign_classification.py b/minibot_vision/scripts/example_sign_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e7aa7afe8589886a6d67463e577d5e24b044408
--- /dev/null
+++ b/minibot_vision/scripts/example_sign_classification.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+
+import cv2
+import TmClassification
+#from PIL import Image, ImageOps
+import numpy as np
+from copy import deepcopy
+
+# ROS
+import rospy
+from cv_bridge import CvBridge, CvBridgeError
+from sensor_msgs.msg import Image
+
+# *** hyper params ***
+URL = 'https://teachablemachine.withgoogle.com/models/3dHrDR9Aq/'
+IMG_RES = (480, 640)
+TF_RES = (224, 224)  # tf is cropping the image
+
+# *** globals ***
+img_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8)
+tf = TmClassification.TmClassification()
+#tf = TmClassification.TmClassification(URL)
+cv_bridge = CvBridge()
+
+
+def image_color_callback(img_ros):
+    global img_stream, cv_bridge, tf
+    # convert to cv2
+    try:
+        img_color = cv_bridge.imgmsg_to_cv2(img_ros, "bgr8")
+
+        prediction, probability = tf.predictImage(img_color)
+        cv2.putText(img_color, "c: {} p: {}".format(prediction, probability), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), thickness=1)
+        cv2.rectangle(img_color, (IMG_RES[1] // 2 - (TF_RES[1] // 2), IMG_RES[0] // 2 - (TF_RES[0] // 2)), (IMG_RES[1] // 2 + (TF_RES[1] // 2), IMG_RES[0] // 2 + (TF_RES[0] // 2)), (255, 255, 255), thickness=2)
+        img_stream = deepcopy(img_color)
+    except CvBridgeError as e:
+        print(e)
+
+if __name__ == "__main__":
+    rospy.init_node("example_sign_classification")
+
+    # get camera stream
+    img_color_topic = "/camera/color/image_raw"
+    image_color_sub = rospy.Subscriber(img_color_topic, Image, image_color_callback)
+
+    rate = rospy.Rate(10)
+    while not rospy.is_shutdown():
+        cv2.imshow("IMG_Color", img_stream)
+        cv2.waitKey(1)
+
+        rate.sleep()
+
+    rospy.loginfo("Node is shutting down. Closing all cv2 windows...")
+    cv2.destroyAllWindows()
diff --git a/minibot_vision/scripts/example_sign_classification_lite.py b/minibot_vision/scripts/example_sign_classification_lite.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b759403daa715710cd41b511471febb09ad88fb
--- /dev/null
+++ b/minibot_vision/scripts/example_sign_classification_lite.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+import TmClassificationLite
+import numpy as np
+import cv2
+from copy import deepcopy
+
+# ROS
+import rospy
+from cv_bridge import CvBridge, CvBridgeError
+from sensor_msgs.msg import Image
+
+# *** hyper params ***
+IMG_RES = (480, 640)
+TF_RES = (224, 224)  # tf is cropping the image
+
+# *** globals ***
+model = TmClassificationLite.TmClassificationLite()
+img_stream = np.zeros((IMG_RES[0], IMG_RES[1], 3), np.uint8)
+cv_bridge = CvBridge()
+
+def image_color_callback(img_ros):
+    global img_stream, cv_bridge, model
+    # convert to cv2
+    try:
+        # convert image
+        img_color = cv_bridge.imgmsg_to_cv2(img_ros, "bgr8")
+
+        # call model
+        predictions, labels = model.predict_image(img_color)
+        max_class_nr = np.argmax(predictions, axis=1)[0]
+        max_prediction = np.max(predictions, axis=1)[0]
+
+        # visualize
+        cv2.putText(img_color, "c: {} p: {}".format(labels[max_class_nr], max_prediction), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), thickness=1)
+        cv2.rectangle(img_color, (IMG_RES[1] // 2 - (TF_RES[1] // 2), IMG_RES[0] // 2 - (TF_RES[0] // 2)), (IMG_RES[1] // 2 + (TF_RES[1] // 2), IMG_RES[0] // 2 + (TF_RES[0] // 2)), (255, 255, 255), thickness=2)
+
+        img_stream = deepcopy(img_color)
+    except CvBridgeError as e:
+        print(e)
+
+
+if __name__ == "__main__":
+    rospy.init_node("example_sign_classification")
+
+    # get camera stream
+    img_color_topic = "/camera/color/image_raw"
+    image_color_sub = rospy.Subscriber(img_color_topic, Image, image_color_callback)
+
+    rate = rospy.Rate(10)
+    while not rospy.is_shutdown():
+        cv2.imshow("IMG_Color", img_stream)
+        cv2.waitKey(1)
+
+        rate.sleep()
+
+    rospy.loginfo("Node is shutting down. Closing all cv2 windows...")
+    cv2.destroyAllWindows()
diff --git a/minibot_vision/scripts/useful_code_snippets.py b/minibot_vision/scripts/useful_code_snippets.py
new file mode 100644
index 0000000000000000000000000000000000000000..f85648ba62b09e2f8640bc450217c5090ad00a7c
--- /dev/null
+++ b/minibot_vision/scripts/useful_code_snippets.py
@@ -0,0 +1,30 @@
+import cv2
+import numpy as np
+
+def to_HSV(im):
+    im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
+    H = im_hsv[:, :, 0]
+    S = im_hsv[:, :, 1]
+    V = im_hsv[:, :, 2]
+
+    # im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
+    return H, S, V
+
+def background_blur(img, depth_img):
+    # get depth mask
+    depth_mask = np.where((depth_img > 1000) | (depth_img <= 0), 0., 1.)  # 0 at every pos with depth > 1m (or invalid)
+    # make coarser edges
+    kernel = np.ones((7, 7))
+    depth_mask = cv2.dilate(depth_mask, kernel, iterations=1)
+
+    im_blurred = cv2.GaussianBlur(img, (21, 21), 15)
+    # broadcast the single-channel mask against the 3-channel image
+    im = np.where(depth_mask[..., None] == 0, im_blurred, img)
+    return im
+
+def contour_detector(im, thickness, max_depth):
+    contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    # draw contours
+    contours_img = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
+    contours_img = cv2.drawContours(contours_img, contours, -1, (255, 255, 255), thickness=thickness, hierarchy=hierarchy,
+                                    maxLevel=max_depth)
+
+    return contours_img