Skip to content

Commit 0acfe63

Browse files
committed
* initial commit
0 parents  commit 0acfe63

File tree

19 files changed

+1906
-0
lines changed

19 files changed

+1906
-0
lines changed

CMakeLists.txt

Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
cmake_minimum_required(VERSION 2.8.3)
project(renderer)

## Find catkin macros/libraries and every component package this node
## builds against (PCL bridges, ROS client libs, image transport, and
## message_generation for the .srv files below).
find_package(catkin REQUIRED COMPONENTS
  pcl_conversions
  pcl_ros
  roscpp
  rospy
  sensor_msgs
  std_msgs
  std_srvs
  cv_bridge
  image_transport
  message_generation
)

## Service definitions provided by this package (srv/ folder).
add_service_files(
  FILES
  Preprocess.srv
  Render.srv
)

## Generate service headers/modules.  The packages listed here must match
## the message types referenced inside the .srv files.
generate_messages(
  DEPENDENCIES
  std_msgs
  sensor_msgs
)

## catkin-specific configuration exported to dependent packages.
## message_runtime is required at run time by any package that uses the
## generated Preprocess/Render service types.
catkin_package(
  CATKIN_DEPENDS
    message_runtime
    pcl_conversions
    pcl_ros
    roscpp
    rospy
    sensor_msgs
    std_msgs
    std_srvs
    cv_bridge
    image_transport
)

###########
## Build ##
###########

## Require C++11 (or the pre-standard c++0x fallback).  Abort the
## configure step when neither is available instead of silently
## continuing into a build that cannot succeed.
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
if(COMPILER_SUPPORTS_CXX11)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
elseif(COMPILER_SUPPORTS_CXX0X)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
else()
  message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()

include_directories(
  include
  ${catkin_INCLUDE_DIRS}
)

add_executable(renderer_node node/renderer_node.cpp src/renderer.cpp)
## Ensure the Preprocess/Render service headers are generated before the
## node is compiled (fixes a build race on clean / parallel builds).
add_dependencies(renderer_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
target_link_libraries(renderer_node ${catkin_LIBRARIES})

LICENSE.md

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
The MIT License (MIT)
2+
3+
Copyright (c) 2018 Jingwei Zhang
4+
5+
Permission is hereby granted, free of charge, to any person obtaining a copy
6+
of this software and associated documentation files (the "Software"), to deal
7+
in the Software without restriction, including without limitation the rights
8+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9+
copies of the Software, and to permit persons to whom the Software is
10+
furnished to do so, subject to the following conditions:
11+
12+
The above copyright notice and this permission notice shall be included in all
13+
copies or substantial portions of the Software.
14+
15+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21+
SOFTWARE.

README.md

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
# **renderer**
2+
_a ROS package to turn your point clouds into a simulator for training Deep Reinforcement Learning agents_
3+
4+
(paired w/ files to interface w/ DRL agents, e.g. [pytorch-rl](https://github.com/jingweiz/pytorch-rl))
5+
*******
6+
7+
8+
## What is this?
9+
> This repo contains a ROS package that can be used to turn your point cloud (e.g. `.pcd`'s) into a simulator, where your agent can wander around. The `/render` service will render **RGB** or **depth** images from the current pose of your agent. Thus this repo can be helpful if you want to train some DRL agents (e.g. [pytorch-rl](https://github.com/jingweiz/pytorch-rl)) out of the point cloud data you collected, for example, of your office. This repo is developed w/ the help of [@onlytailei](https://github.com/onlytailei).
10+
11+
![rviz](/assets/rviz.png)
12+
13+
*******
14+
15+
16+
## Tested system configurations:
17+
- Ubuntu 16.04
18+
- [ROS Kinetic](http://wiki.ros.org/kinetic/Installation/Ubuntu)
19+
*******
20+
21+
22+
## How to run:
23+
* Compile the `renderer` package:
24+
> * clone the package into `YOUR_ROS_WORKSPACE_DIRECTORY/src/`
25+
> * `catkin_make` in `YOUR_ROS_WORKSPACE_DIRECTORY`
26+
* Launch node:
27+
> `roslaunch renderer renderer.launch`
28+
* Call service (`action_ind={3`(reset)`,0`(go straight)`,1`(turn left)`,2`(turn right)`}`):
29+
> `rosservice call /render action_ind`
30+
*******
31+
32+
33+
## How to pair w/ [pytorch-rl](https://github.com/jingweiz/pytorch-rl)?
34+
* Turn off the `rviz` visualizations in `./launch/renderer.launch` (cos we don't want to slow down the expensive DRL trainings :P):
35+
> * `line2-3`: comment out to turn off `rviz`
36+
> * `line5`: set `display` to `false`
37+
* Copy the generated interfacing files from `YOUR_ROS_WORKSPACE_DIRECTORY` into `pytorch-rl`:
38+
> ```cp -r YOUR_ROS_WORKSPACE_DIRECTORY/devel/lib/python.../dist-packages/renderer YOUR_pytorch-rl_DIRECTORY/```
39+
* Copy the provided env wrapper from `YOUR_ROS_WORKSPACE_DIRECTORY` into `pytorch-rl`:
40+
> ```cp YOUR_ROS_WORKSPACE_DIRECTORY/envs/rendererEnv.py YOUR_pytorch-rl_DIRECTORY/core/envs/```
41+
*******
42+
43+
44+
## About the point clouds:
45+
The `pcd/office.pcd` & `pcd/chair.pcd` are provided by my awesome colleagues `Tim Caselitz (@caselitz)` and `Michael Krawez`.
46+
If you want to use your own point clouds, you need to:
47+
> * put those `.pcd`'s into `./data/`
48+
> * modify the magic numbers in `line522-527` & `line549-554` in `./src/renderer.cpp` to align the clouds w/ the axis
49+
> * finally `rosservice call /preprocess` to save the aligned clouds into `./pcd/`
50+
51+
*******
52+
53+
54+
## The following paper might be interesting to take a look:)
55+
> [VR Goggles for Robots: Real-to-sim Domain Adaptation for Visual Control](https://arxiv.org/abs/1802.00265): This paper deals with the _**reality gap**_ from a novel perspective, targeting transferring Deep Reinforcement Learning (DRL) policies learned in simulated environments to the real-world domain for visual control tasks. Instead of adopting the common solutions to the problem by increasing the visual fidelity of synthetic images output from simulators during the training phase, this paper seeks to tackle the problem by translating the real-world image streams back to the synthetic domain during the deployment phase, to _**make the robot feel at home**_.
56+
We propose this as a lightweight, flexible, and efficient solution for visual control, as 1) no extra transfer steps are required during the expensive training of DRL agents in simulation; 2) the trained DRL agents will not be constrained to being deployable in only one specific real-world environment; 3) the policy training and the transfer operations are decoupled, and can be conducted in parallel.
57+
Besides this, we propose a conceptually simple yet very effective _**shift loss**_ to constrain the consistency between subsequent frames, eliminating the need for optical flow.
58+
We validate the _**shift loss**_ for _**artistic style transfer for videos**_ and _**domain adaptation**_, and validate our visual control approach in real-world robot experiments. A video of our results is available at:
59+
<https://goo.gl/b1xz1s>.
60+
.
61+
62+
```
63+
@article{zhang2018vr,
64+
title={Vr goggles for robots: Real-to-sim domain adaptation for visual control},
65+
author={Zhang, Jingwei and Tai, Lei and Xiong, Yufeng and Liu, Ming and Boedecker, Joschka and Burgard, Wolfram},
66+
journal={arXiv preprint arXiv:1802.00265},
67+
year={2018}
68+
}
69+
```

assets/rviz.png

733 KB
Loading

data/chair.pcd

1.09 MB
Binary file not shown.

data/office.pcd

28.1 MB
Binary file not shown.

envs/rendererEnv.py

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
from __future__ import absolute_import
2+
from __future__ import division
3+
4+
import numpy as np
5+
from copy import deepcopy
6+
from gym.spaces.box import Box
7+
import inspect
8+
import os, subprocess
9+
import cv2
10+
import time
11+
import timeit
12+
import rospy, rosgraph
13+
from sensor_msgs.msg import Image
14+
from cv_bridge import CvBridge
15+
from utils.helpers import preprocessAtari, rgb2gray, rgb2y, scale, center_crop_rgb, center_crop_depth
16+
from core.env import Env
17+
import math
18+
from PIL import Image
19+
import matplotlib.pyplot as plt
20+
21+
import torch
22+
import torchvision
23+
from torchvision import datasets, models, transforms
24+
25+
class RendererEnv(Env):
    """Gym-style environment wrapper around the ROS ``/render`` service.

    Each call to :meth:`step` sends an integer action index to the renderer
    node and receives the rendered image, a scalar reward and a terminal
    flag.  Action semantics of the /render service:
    0: go straight | 1: turn left | 2: turn right | 3: reset the game.
    """

    def __init__(self, args, env_ind=0):
        super(RendererEnv, self).__init__(args, env_ind)
        self.env_ind = env_ind
        self.enable_continuous = args.enable_continuous

        # TODO: use this for training in docker on remotes
        # os.environ["ROS_MASTER_URI"] = "http://rosimage"+str(self.env_ind)+":11311"
        # TODO: use this for local test
        os.environ["ROS_MASTER_URI"] = "http://localhost:11311"

        # give roscore / the renderer node time to come up before connecting
        time.sleep(10)
        self.root_dir = args.root_dir

        try:
            from renderer.srv import Render
        except ImportError as e:
            self.logger.warning("WARNING: renderer not found")

        # set up service proxy to the renderer node
        self.render_srv = rospy.ServiceProxy('/render', Render)

        self.hei_state = args.hei_state
        self.wid_state = args.wid_state
        self.preprocess_mode = args.preprocess_mode
        self.img_encoding_type = args.img_encoding_type

        self.bridge = CvBridge()

    def __del__(self):
        # BUGFIX: the proxy is stored as `render_srv` (the original closed a
        # non-existent `render_service` attribute).  getattr-guarded so a
        # partially constructed instance does not raise again here.
        srv = getattr(self, 'render_srv', None)
        if srv is not None:
            srv.close()

    @property
    def action_dim(self):
        # NOTE: in /render service,
        # NOTE: 0: go_straight | 1 : turn_left | 2: turn_right
        # NOTE: 3: reset the game
        return 3

    @property
    def state_shape(self):
        # (height, width) of the preprocessed observation
        return (self.hei_state, self.wid_state)

    def _preprocessState(self, state):  # called before _preprocessState in A3CSingleProcess
        # Convert the sensor_msgs/Image into an OpenCV array.
        img = self.bridge.imgmsg_to_cv2(state, self.img_encoding_type)
        # # visualize img
        # plt.figure(); plt.imshow(img); plt.show(); raw_input()

        if self.preprocess_mode == 4:  # for rgb images: do not convert to grayscale
            # scale to (hei, wid) then map [0, 255] -> (-1, 1)
            img = 2 * (scale(img, self.hei_state, self.wid_state) / 255. - 0.5)  # (90, 160, 3) (-1, 1)
            img = np.transpose(img, (2, 0, 1))  # (3, 90, 160) — channels first

        return img

    def step(self, exp_action):
        # # in test phase, for synchronization
        # time.sleep(0.1)
        try:
            # TODO: use this for training in docker on remotes
            # os.environ["ROS_MASTER_URI"] = "http://rosimage"+str(self.env_ind)+":11311"
            # TODO: use this for local test
            os.environ["ROS_MASTER_URI"] = "http://localhost:11311"

            render_res = self.render_srv(exp_action)
            self.exp_state1 = render_res.color
            self.exp_reward = render_res.reward
            self.exp_terminal1 = render_res.terminal
        # BUGFIX: `except Exc, e` is Python-2-only syntax; `as e` works on both.
        except rospy.ServiceException as e:
            print("Service call failed during step: %s" % e)
        return self._get_experience()

    def reset(self):
        # action index == action_dim (3) is the service's reset command
        return self.step(self.action_dim)

0 commit comments

Comments
 (0)