reproduce the original paper's result & faster post-processing using c++/swig
Curtis.Kim committed May 13, 2018
1 parent bd03ac6 commit 949442e
Showing 126 changed files with 89,714 additions and 1,186 deletions.
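
The headline change is reproducing the original paper's accuracy plus a faster post-processing step implemented in C++ and exposed to Python through SWIG. As background, the sketch below shows the generic distutils pattern for building such a SWIG C++ extension; the module and file names (`pafprocess`, `pafprocess.i`, `pafprocess.cpp`) are illustrative assumptions, not necessarily the exact files added by this commit.

```python
# setup.py -- generic pattern for compiling a SWIG-wrapped C++ extension.
# File/module names here are assumptions for illustration only.
from distutils.core import setup, Extension

pafprocess_ext = Extension(
    '_pafprocess',                               # assumed extension name
    sources=['pafprocess.i', 'pafprocess.cpp'],  # assumed SWIG interface + C++ source
    swig_opts=['-c++'],                          # tell SWIG to emit C++ wrappers
    extra_compile_args=['-O3'],
)

setup(name='pafprocess',
      ext_modules=[pafprocess_ext],
      py_modules=['pafprocess'])
```

Built in place (e.g. `python setup.py build_ext --inplace`), the generated module can then replace the pure-Python part-affinity-field parsing.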
4 changes: 3 additions & 1 deletion .gitignore
@@ -111,4 +111,6 @@ models/trained/*/checkpoint
models/trained/*/*.pb
models/trained/*/model-*.data-*
models/trained/*/model-*.index
models/trained/*/model-*.meta
models/trained/*/model-*.meta
models/pretrained/resnet_v2_101/eval.graph
models/pretrained/resnet_v2_101/train.graph
9 changes: 9 additions & 0 deletions README.md
@@ -29,6 +29,13 @@ You need dependencies below.
- tensorflow 1.4.1+
- opencv3, protobuf, python3-tk

### Open-source packages

- slim
- slidingwindow
  - https://github.com/adamrehn/slidingwindow
  - Copied from the repository above, with a few modifications; see the usage sketch below.
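
A minimal sketch of the upstream slidingwindow API, for reference; the copy vendored into this repository may differ slightly from this interface.

```python
import numpy as np
import slidingwindow as sw  # upstream package from adamrehn/slidingwindow

# Generate overlapping crops over an HxWxC image and process each window.
image = np.zeros((368, 656, 3), dtype=np.uint8)   # placeholder image
windows = sw.generate(image, sw.DimOrder.HeightWidthChannel, 256, 0.5)

for window in windows:
    crop = image[window.indices()]  # each window is a view into the image
    # ... run pose inference on `crop` and merge the results ...
```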

### Install

```bash
@@ -143,6 +150,8 @@ See : [etcs/training.md](./etcs/training.md)

[4] Keras Openpose : https://github.com/michalfaber/keras_Realtime_Multi-Person_Pose_Estimation

[5] Keras Openpose2 : https://github.com/kevinlin311tw/keras-openpose-reproduce

### Lifting from the deep

[1] Arxiv Paper : https://arxiv.org/abs/1701.00295
9 changes: 9 additions & 0 deletions etcs/experiments.md
@@ -0,0 +1,9 @@


## COCO Datasets

| Model | Scale | AP | AP 50 | AP 75 | AP medium | AP large | AR | AR 50 | AR 75 | AR medium | AR large |
|----------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|
| CMU | Multi | 0.5067 | 0.7660 | 0.5377 | 0.4927 | 0.5309 | 0.5614 | 0.7900 | 0.5903 | 0.5089 | 0.6347 |
| CMU | Single | 0.5067 | 0.7660 | 0.5377 | 0.4927 | 0.5309 | 0.5614 | 0.7900 | 0.5903 | 0.5089 | 0.6347 |
| Mobilenet thin | Single | 0.2806 | 0.5577 | 0.2474 | 0.2802 | 0.2843 | 0.3214 | 0.5840 | 0.2997 | 0.2946 | 0.3587 |
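
The AP/AR columns follow the standard COCO keypoint evaluation protocol. For reference, a minimal sketch of producing these numbers with pycocotools; the annotation and result file paths are placeholders, and the repository's own evaluation script may organize this differently.

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Placeholder paths: COCO keypoint ground truth and detections in COCO result format.
coco_gt = COCO('person_keypoints_val2014.json')
coco_dt = coco_gt.loadRes('pose_estimation_results.json')

coco_eval = COCOeval(coco_gt, coco_dt, iouType='keypoints')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints AP, AP50, AP75, AP medium/large, AR, AR50, ...
```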
Binary file added images/COCO_val2014_000000000357.jpg
5 changes: 5 additions & 0 deletions models/pretrained/resnet_v2_101/download.sh
@@ -0,0 +1,5 @@
#! /bin/bash

wget http://download.tensorflow.org/models/resnet_v2_101_2017_04_14.tar.gz
tar -xvf resnet_v2_101_2017_04_14.tar.gz
rm resnet_v2_101_2017_04_14.tar.gz
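
The new `.gitignore` entries for `eval.graph` and `train.graph` suggest the tarball unpacks those graph files next to the checkpoint. A rough sketch of restoring the downloaded checkpoint with TF-slim (TF 1.x) follows; the input size, checkpoint name, and scope handling are assumptions and may not match how this repository actually wires the backbone.

```python
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import resnet_v2

# Build the ResNet-v2-101 backbone and restore the checkpoint fetched by download.sh.
images = tf.placeholder(tf.float32, [None, 368, 368, 3])   # placeholder input size
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
    net, end_points = resnet_v2.resnet_v2_101(images, num_classes=None,
                                               is_training=False, global_pool=False)

saver = tf.train.Saver(slim.get_variables_to_restore(include=['resnet_v2_101']))
with tf.Session() as sess:
    saver.restore(sess, 'resnet_v2_101.ckpt')
```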
5 changes: 3 additions & 2 deletions requirements.txt
@@ -1,9 +1,10 @@
argparse
matplotlib
scipy
skimage
tqdm
requests
fire
ast
dill
git+https://github.com/ppwwyyxx/tensorpack.git
git+https://github.com/ppwwyyxx/tensorpack.git
slidingwindow
22 changes: 3 additions & 19 deletions scripts/broadcaster_ros.py
@@ -16,9 +16,6 @@
from networks import model_wh, get_graph_path


scales = [None]


def humans_to_msg(humans):
    persons = Persons()

@@ -52,8 +49,7 @@ def callback_image(data):
        return

    try:
        global scales
        humans = pose_estimator.inference(cv_image, scales)
        humans = pose_estimator.inference(cv_image, resize_to_default=True, upsample_size=resize_out_ratio)
    finally:
        tf_lock.release()

@@ -63,31 +59,20 @@ def callback_image(data):
    msg.header = data.header

    pub_pose.publish(msg)
    # rospy.loginfo(time.time() - et)


def callback_scales(data):
    global scales
    scales = ast.literal_eval(data.data)
    rospy.logdebug('[tf-pose-estimation] scale changed: ' + str(scales))


if __name__ == '__main__':
    global scales

    rospy.loginfo('initialization+')
    rospy.init_node('TfPoseEstimatorROS', anonymous=True, log_level=rospy.INFO)

    # parameters
    image_topic = rospy.get_param('~camera', '')
    model = rospy.get_param('~model', 'cmu')

    resolution = rospy.get_param('~resolution', '432x368')
    scales_str = rospy.get_param('~scales', '[None]')
    scales = ast.literal_eval(scales_str)
    resize_out_ratio = float(rospy.get_param('~resize_out_ratio', '4.0'))
    tf_lock = Lock()

    rospy.loginfo('[TfPoseEstimatorROS] scales(%d)=%s' % (len(scales), str(scales)))

    if not image_topic:
        rospy.logerr('Parameter \'camera\' is not provided.')
        sys.exit(-1)
@@ -106,7 +91,6 @@ def callback_scales(data):
    cv_bridge = CvBridge()

    rospy.Subscriber(image_topic, Image, callback_image, queue_size=1, buff_size=2**24)
    rospy.Subscriber('~scales', String, callback_scales, queue_size=1)
    pub_pose = rospy.Publisher('~pose', Persons, queue_size=1)

    rospy.loginfo('start+')
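
The change above replaces the old scales-list argument with `resize_to_default` and `upsample_size`. A minimal standalone sketch of the same call outside ROS, assuming `TfPoseEstimator` lives in the repository's estimator module and that `model_wh` parses a 'WxH' string as in the node above:

```python
import cv2
from networks import get_graph_path, model_wh   # helpers imported by the ROS node above
from estimator import TfPoseEstimator           # assumed location of the estimator class

# Same call pattern as the updated callback: no scales list, just resize_to_default
# plus an upsampling ratio for the (now C++/SWIG) post-processing stage.
w, h = model_wh('432x368')
estimator = TfPoseEstimator(get_graph_path('cmu'), target_size=(w, h))

image = cv2.imread('images/p1.jpg')              # placeholder image path
humans = estimator.inference(image, resize_to_default=True, upsample_size=4.0)
print('detected %d humans' % len(humans))
```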
10 changes: 5 additions & 5 deletions src/common.py
@@ -100,17 +100,17 @@ def from_coco(human):
    (11, 12), (12, 13), (1, 0), (0, 14), (14, 16), (0, 15), (15, 17), (2, 16), (5, 17)
] # = 19
CocoPairsRender = CocoPairs[:-2]
CocoPairsNetwork = [
    (12, 13), (20, 21), (14, 15), (16, 17), (22, 23), (24, 25), (0, 1), (2, 3), (4, 5),
    (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34, 35), (32, 33), (36, 37), (18, 19), (26, 27)
] # = 19
# CocoPairsNetwork = [
# (12, 13), (20, 21), (14, 15), (16, 17), (22, 23), (24, 25), (0, 1), (2, 3), (4, 5),
# (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34, 35), (32, 33), (36, 37), (18, 19), (26, 27)
# ] # = 19

CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]


def read_imgfile(path, width, height):
def read_imgfile(path, width=None, height=None):
    val_image = cv2.imread(path, cv2.IMREAD_COLOR)
    if width is not None and height is not None:
        val_image = cv2.resize(val_image, (width, height))
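
With the signature change above, `read_imgfile` now resizes only when both dimensions are supplied. A short usage sketch (the image path is a placeholder):

```python
from common import read_imgfile

full_res = read_imgfile('images/p1.jpg')             # keeps the original resolution
net_input = read_imgfile('images/p1.jpg', 432, 368)  # resized to 432x368 for the network
```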