Pick & Place Exercise (Gazebo + Console + Rviz) with web-templates #1153

Open
wants to merge 47 commits into base: melodic
Changes from 32 commits
Commits (47)
fd66e0b - commented the rsync command to create a docker image (predator4hack, Jun 20, 2021)
56de72b - Docker Mount is working (predator4hack, Jun 20, 2021)
7d1a128 - Created an entry for pic and place in db.sqlite3 (predator4hack, Jun 20, 2021)
36a85fb - Added exercise.py and assets (predator4hack, Jun 20, 2021)
ed90aa4 - exercise.html (predator4hack, Jun 21, 2021)
06265b3 - Added teaser png (predator4hack, Jun 21, 2021)
300bf5a - Added ace-builds (predator4hack, Jun 21, 2021)
c5e1c4e - Added Code, exercise and launch files (predator4hack, Jun 21, 2021)
db624b8 - added kinematic widget in html page (predator4hack, Jun 23, 2021)
cfd4a7f - Updated HAL and created env functions (predator4hack, Jun 24, 2021)
a283024 - Added GAZEBO_RESOURCE_PATH in manager (predator4hack, Jun 24, 2021)
1981801 - Updated launcher.js (predator4hack, Jun 24, 2021)
5864bd7 - configured repo for IndustrialRobots (predator4hack, Jun 26, 2021)
f1c2137 - updated dockerfile (predator4hack, Jun 26, 2021)
cde3e34 - replaced my repo in dockerfile (predator4hack, Jun 26, 2021)
671c487 - removed rsync (predator4hack, Jun 26, 2021)
a037953 - Merge branch 'JdeRobot:master' into pick_place (predator4hack, Jun 26, 2021)
f14d614 - Made changes for new Image (predator4hack, Jun 26, 2021)
74fdb57 - Updated ROS_PACKAGE_PATH in dockerfile (predator4hack, Jun 27, 2021)
5564991 - updated dockerfile (predator4hack, Jun 27, 2021)
955d817 - Updated world path (predator4hack, Jun 27, 2021)
c4d0034 - added rospkg library (predator4hack, Jun 27, 2021)
32e77da - Added Comments (predator4hack, Jun 28, 2021)
f826ed3 - Added widget (predator4hack, Jun 30, 2021)
9c59456 - Connection Resolved (predator4hack, Jun 30, 2021)
db21771 - Connection Working (predator4hack, Jul 1, 2021)
671480d - Added child css (predator4hack, Jul 1, 2021)
f92135e - connections are working and hal and env APIs are created (predator4hack, Jul 1, 2021)
5c59cb1 - Css Working Now (predator4hack, Jul 1, 2021)
f42e733 - Fixed merge conflicts (predator4hack, Jul 1, 2021)
3769b61 - Added rviz display (predator4hack, Jul 2, 2021)
cc92c88 - fixed some bug (predator4hack, Jul 2, 2021)
35a1fd5 - Rviz is working nowpwd (predator4hack, Jul 3, 2021)
511fda4 - Merge branch 'Rviztesting' into pick_place (predator4hack, Jul 4, 2021)
4fb5a6d - HAL apis (predator4hack, Jul 4, 2021)
51d1236 - Rviz objects spawned properly (predator4hack, Jul 5, 2021)
9f003e5 - Added kinematic perspective (predator4hack, Jul 7, 2021)
6089da4 - Added start.sh for the config of workspace (predator4hack, Jul 8, 2021)
eb743a8 - Fixed simulation bugs (predator4hack, Jul 9, 2021)
840f816 - Updated the dockerfile (predator4hack, Jul 10, 2021)
22f9dc5 - Fixed issues with reset button (predator4hack, Jul 11, 2021)
05c1f54 - Fixed the templates issue with other exercises (predator4hack, Jul 11, 2021)
288f4d0 - Removed redundant exercises (predator4hack, Jul 13, 2021)
96e34ba - Added plugin path for acceleration enabled containers (predator4hack, Jul 13, 2021)
094b540 - Delete settings.json for vscode (sixer51, Jul 16, 2021)
592183a - Removed the changes made for RADI2.3 (predator4hack, Jul 17, 2021)
1fd0fbc - Updated the repo from personal fork to JdeRobot (predator4hack, Jul 17, 2021)
3 changes: 3 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,3 @@
{
    "python.pythonPath": "/usr/bin/python3.8"
}
Binary file modified db.sqlite3
Binary file not shown.
128 changes: 128 additions & 0 deletions exercises/2d_visual_odometry/MyAlgorithm.py
@@ -0,0 +1,128 @@
import threading
import time
import math
import rosbag
import cv2
import numpy as np
from datetime import datetime


# Period of the algorithm loop in milliseconds
time_cycle = 40  # 80

class MyAlgorithm(threading.Thread):

    def __init__(self, bag_readings, pose_obj):
        self.bag_readings = bag_readings
        self.pose_obj = pose_obj
        self.threshold_image = np.zeros((640, 480, 3), np.uint8)
        self.color_image = np.zeros((640, 480, 3), np.uint8)
        self.stop_event = threading.Event()
        self.kill_event = threading.Event()
        self.lock = threading.Lock()
        self.threshold_image_lock = threading.Lock()
        self.color_image_lock = threading.Lock()
        self.pose_lock = threading.Lock()
        threading.Thread.__init__(self, args=self.stop_event)
        self.diff_time = 0

    def getReadings(self, *sensors):
        self.lock.acquire()
        data = self.bag_readings.getData(sensors)
        self.lock.release()
        return data

    def set_predicted_path(self, path):
        self.pose_lock.acquire()
        self.pose_obj.set_pred_path(path)
        self.pose_lock.release()

    def set_predicted_pose(self, x, y, t):
        self.pose_lock.acquire()
        self.predicted_pose = [x, y]
        self.pose_obj.set_pred_pose([x, y], t)
        self.pose_lock.release()

    def get_predicted_pose(self):
        # Return the most recently predicted pose under the lock
        self.pose_lock.acquire()
        pose = self.predicted_pose
        self.pose_lock.release()
        return pose

    def set_processed_image(self, image):
        img = np.copy(image)
        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        self.threshold_image_lock.acquire()
        self.threshold_image = img
        self.threshold_image_lock.release()

    def get_processed_image(self):
        self.threshold_image_lock.acquire()
        img = np.copy(self.threshold_image)
        self.threshold_image_lock.release()
        return img

    def run(self):
        while not self.kill_event.is_set():
            start_time = datetime.now()

            if not self.stop_event.is_set():
                self.algo_start_time = time.time()
                self.algorithm()
                self.algo_stop_time = time.time()
                self.diff_time = self.diff_time + (self.algo_stop_time - self.algo_start_time)
            finish_time = datetime.now()
            dt = finish_time - start_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            # Sleep for the rest of the cycle to keep a fixed iteration period
            if ms < time_cycle:
                time.sleep((time_cycle - ms) / 1000.0)

    def stop(self):
        self.stop_event.set()

    def play(self):
        if self.is_alive():
            self.stop_event.clear()
        else:
            self.start()

    def kill(self):
        self.kill_event.set()

    def algorithm(self):
        # Get readings from the selected sensors
        data = self.getReadings('accelerometer', 'orientation', 'color_img', 'depth_img')
        # data = self.getReadings('stream')  # to stream data from all sensors one by one

        # IMU data
        ax = data.accelerometer['x']
        ay = data.accelerometer['y']
        accelerometer_t = data.accelerometer_t
        orientation_t = data.orientation_t
        qz = data.orientation['qz']
        qw = data.orientation['qw']
        # Color image
        color_image = data.color_img
        # Depth image
        depth_image = data.depth_img
        color_img_t = data.color_img_t

        x = 1
        y = 1
        # Show the processed image on the GUI
        self.set_processed_image(color_image)
        # self.set_processed_image(depth_image)
        # Set the predicted pose
        self.set_predicted_pose(x, y, color_img_t)

        # Set the predicted path at once / reset the previously set predicted
        # poses at once --- path should be an Nx2 numpy array or Python list of [x, y].
        # self.set_predicted_path(path)
120 changes: 120 additions & 0 deletions exercises/2d_visual_odometry/README.md
@@ -0,0 +1,120 @@

# 2D Visual Odometry
The objective of this exercise is to implement the logic of an RGBD (RGB + Depth) visual odometry algorithm. The performance and accuracy of the user's algorithm are shown on the exercise GUI.

[Exercise Documentation Website](https://jderobot.github.io/RoboticsAcademy/exercises/ComputerVision/visual_odometry)

## How to Run:
To launch the exercise, follow the steps below:

* **Download** the rosbag file from [here](http://wiki.jderobot.org/store/slam-datasets/rgbd_dataset_freiburg2_pioneer_slam_truncated.bag).
* **Requirements** - install these packages before proceeding:

    pyqtgraph --- ```sudo pip install pyqtgraph```

    configparser --- ```sudo pip install configparser```

* **Place** the rosbag file in the same directory as this exercise, and set its name (or its full path) in **'visual_odometry.cfg'**.

* **Execute** the exercise with GUI: ```python visual_odom.py```

## How to do the practice:
To carry out the practice, edit the MyAlgorithm.py file and insert your algorithm's logic into it.

### Where to insert the code
[MyAlgorithm.py](MyAlgorithm.py#L93)
```
def execute(self):
    # Demo code (replace with your code)
    print("Running")

    # Get sensor data
    data = self.getReadings('color_img', 'depth_img')

    # Color image
    color_image = data.color_img

    # Depth image
    depth_image = data.depth_img

    # Set the processed image
    self.set_processed_image(color_image)

    # Set the predicted pose
    x = 1
    y = 1
    self.set_predicted_pose(x, y, timestamp)  # timestamp of the predicted position

```

### APIs:

**Sensors available:** 'color_img', 'depth_img', 'orientation', 'accelerometer', 'scan' (laser scan).

* ```data = self.getReadings('color_img', 'depth_img')``` - to get the next available RGB image and depth image from the ROSbag file (only one RGB/depth pair is provided per call).

* ```color_img = data.color_img``` - to get the RGB image from the object 'data'.
* ```depth_img = data.depth_img``` - to get the Depth image from the object 'data'.
* ```color_img_t = data.color_img_t``` - to get the timestamp of the RGB image.
* ```depth_img_t = data.depth_img_t``` - to get the timestamp of the Depth image.


**Similarly,** to read other sensors, pass the required sensor names, separated by commas, when calling ```data = self.getReadings()```.


```data.color_img``` - for RGB color image and ```data.color_img_t``` for its timestamp.

```data.depth_img``` - for depth image and ```data.depth_img_t``` for its timestamp.

```data.accelerometer``` - for accelerometer data and ```data.accelerometer_t``` for its timestamp.

```data.orientation``` - for orientation data and ```data.orientation_t``` for its timestamp.

```data.scan``` - for laser scan data and ```data.scan_t``` for its timestamp.


(You can pass as many sensor names as you need when calling the ```self.getReadings()``` method.)

* ```self.set_processed_image(color_img)``` - to set the processed RGB image to be shown on the GUI.
* ```self.set_predicted_pose(x,y,timestamp)``` - to set the position and timestamp of the pose predicted by the algorithm (x and y should be floating-point numbers).
* ```self.set_predicted_path(path)``` - to set the predicted path at once, or reset the previously set predicted poses at once; path should be an Nx2 numpy array or Python list of [x, y] pairs (see the sketch below). _(optional)_
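
A minimal sketch of the optional path API inside ```MyAlgorithm``` (the coordinate values are placeholders):

```
import numpy as np

# An Nx2 array of [x, y] rows; a plain Python list of pairs also works
path = np.array([[0.0, 0.0],
                 [0.1, 0.05],
                 [0.2, 0.12]])
self.set_predicted_path(path)  # sets (or resets) all predicted poses at once
```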

**How is data from multiple sensors read and provided to the user by the ```self.getReadings()``` method?**

Suppose the user only wants data from the 'color_img', 'depth_img' and 'scan' sensors, and therefore calls ```data = self.getReadings('color_img', 'depth_img', 'scan')```. Internally, the program reads the messages of the ROSbag file chronologically; whenever one of the requested sensor topics is found, its data is stored in the corresponding attribute. Reading continues sequentially until all the sensors requested by the user have been read and stored. Since different sensors publish at different frequencies, the latest reading from a sensor overrides its previous value during this sequential read.

**REMEMBER:** Only the sensors named in the call will carry data. In the example above (```data = self.getReadings('color_img', 'depth_img', 'scan')```), only ```data.color_img```, ```data.depth_img``` and ```data.scan``` will contain their respective sensor data; other attributes such as ```data.accelerometer``` and ```data.orientation``` will contain ```NoneType``` data.
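
For example, a minimal sketch of the selective-read behaviour described above (the sensor names are the ones listed in this README):

```
# Request only the sensors this iteration needs
data = self.getReadings('color_img', 'depth_img', 'scan')

# Requested attributes now hold the latest chronological readings
image = data.color_img
ranges = data.scan

# Sensors that were not requested stay empty
assert data.accelerometer is None
assert data.orientation is None
```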


## Data Types:
* The Color RGB image is provided in 640×480 8-bit RGB format.
* The Depth image is provided in 32-bit floating point precision.
* The Timestamps are floating point numbers.
* The laser scan data is provided in numpy array format.
* Orientation data can be accessed as follows (a short yaw example follows this list):

        qz = data.orientation['qz']

        qw = data.orientation['qw']

  ‘qx’ and ‘qy’ are essentially zero (since it is 2D odometry).

* Accelerometer data can be accessed as follows:

        ax = data.accelerometer['x']

        ay = data.accelerometer['y']

  ‘az’ is essentially zero (since it is 2D odometry).

* For more details about the dataset refer to the original page of the TUM RGBD dataset <https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats#ros_bag>.
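
Because the odometry is planar, the robot's heading can be recovered directly from ```qz``` and ```qw```. A minimal sketch (plain Python, assuming a normalized quaternion):

```
import math

def yaw_from_quaternion(qz, qw):
    # For a rotation purely about the z-axis: qz = sin(yaw/2), qw = cos(yaw/2)
    return 2.0 * math.atan2(qz, qw)

# e.g. yaw = yaw_from_quaternion(data.orientation['qz'], data.orientation['qw'])
```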

## Demonstration video:


[https://www.youtube.com/watch?v=HUFOadsCSRo](https://www.youtube.com/watch?v=HUFOadsCSRo)

*Author:*
* *Debrup Datta <[email protected]>*
143 changes: 143 additions & 0 deletions exercises/2d_visual_odometry/gui/GUI.py
@@ -0,0 +1,143 @@
from cameraWidget import CameraWidget
from logoWidget import LogoWidget
import resources_rc
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QMainWindow
from form import Ui_MainWindow
from PyQt5 import QtGui
import numpy
import time


class MainWindow(QMainWindow, Ui_MainWindow):
    updGUI = pyqtSignal()

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.logo = LogoWidget(self)
        self.logoLayout.addWidget(self.logo)
        self.logo.setVisible(True)
        self.playpause.clicked.connect(self.playpauseClicked)
        self.updGUI.connect(self.updateGUI)
        self.icon_play = QtGui.QIcon()
        self.icon_play.addPixmap(QtGui.QPixmap(":/images/play.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.icon_stop = QtGui.QIcon()
        self.icon_stop.addPixmap(QtGui.QPixmap(":/images/stop.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)

        self.camera = CameraWidget(self)

    def set_bag(self, bag):
        self.bag = bag

    def set_pose(self, pose):
        self.pose_obj = pose

    def setAlgorithm(self, algorithm):
        self.algorithm = algorithm

    def getAlgorithm(self):
        return self.algorithm

    def getRGBImage(self):
        return self.bag.color_img

    def align(self, model, data):
        """Align two trajectories using the method of Horn (closed-form)
        and compute the translational error.

        Input:
        model -- first trajectory (n1x2)
        data -- second trajectory (n2x2)

        Output:
        trans_error -- translational error per point (1xn)
        """
        model = model.T
        data = data.T

        # Both trajectories must have the same length for Horn's method:
        # drop evenly spaced columns from the longer one, then append a
        # zero z-row so the 2D paths become 3D.
        if model.shape[1] > data.shape[1]:
            diff = model.shape[1] - data.shape[1]
            remove = numpy.linspace(start=1, stop=model.shape[1], num=diff, endpoint=False, dtype=int)
            model = numpy.delete(model, remove, axis=1)
        elif model.shape[1] < data.shape[1]:
            diff = data.shape[1] - model.shape[1]
            remove = numpy.linspace(start=1, stop=data.shape[1], num=diff, endpoint=False, dtype=int)
            data = numpy.delete(data, remove, axis=1)
        z = numpy.zeros((1, data.shape[1]))
        data = numpy.vstack((data, z))
        model = numpy.vstack((model, z))

        model_zerocentered = model - model.mean(1).reshape((model.mean(1).shape[0]), 1)
        data_zerocentered = data - data.mean(1).reshape((data.mean(1).shape[0]), 1)

        # Cross-covariance of the zero-centered trajectories
        W = numpy.zeros((3, 3))
        for column in range(model.shape[1]):
            W += numpy.outer(model_zerocentered[:, column], data_zerocentered[:, column])
        U, d, Vh = numpy.linalg.svd(W.transpose())
        S = numpy.matrix(numpy.identity(3))
        if numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0:
            S[2, 2] = -1
        rot = U * S * Vh
        trans = data.mean(1).reshape((data.mean(1).shape[0]), 1) - rot * model.mean(1).reshape((model.mean(1).shape[0]), 1)

        model_aligned = rot * model + trans
        alignment_error = model_aligned - data

        trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error, alignment_error), 0)).A[0]

        return trans_error

    def playpauseClicked(self):
        if self.playpause.isChecked():
            self.playpause.setText("Pause code")
            self.playpause.setIcon(self.icon_stop)
            self.algorithm.play()
        else:
            self.algorithm.stop()

            self.playpause.setText("Play code")
            self.playpause.setIcon(self.icon_play)
            # On pause, score the predicted path against the ground truth
            pred_path = self.pose_obj.get_pred_path()
            actual_path = self.pose_obj.get_actual_path()
            trans_error = self.align(pred_path, actual_path)
            self.median.display(numpy.median(trans_error))
            self.mean.display(numpy.mean(trans_error))
            self.Std_dev.display(numpy.std(trans_error))
            self.rmse.display(numpy.sqrt(numpy.dot(trans_error, trans_error) / len(trans_error)))

    def updateGUI(self):
        if self.bag.current_timestamp is not None:
            time_elapsed = self.bag.current_timestamp - self.bag.init_timestamp
            # The truncated bag is 125.9 s long
            percentage_covered = time_elapsed / 125.9 * 100
            self.progressBar.setValue(percentage_covered)
        else:
            time_elapsed = 0
        try:
            RT_factor = time_elapsed / self.algorithm.diff_time
        except (AttributeError, ZeroDivisionError):
            RT_factor = 0
        self.realtime.display(RT_factor)
        self.camera.updateImage()
        self.plot.update_figure(self.pose_obj.get_pred_path(), self.pose_obj.get_actual_path())
Empty file.