Skip to content

Commit c1fcc2b

Browse files
author
Eric Yang
committed
20190727 Update toy dataset link
1 parent 672db81 commit c1fcc2b

File tree

5 files changed

+19
-13
lines changed

5 files changed

+19
-13
lines changed

PREPS.md

+1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ Make sure you have a GPU card with at least 12 GB of graphics memory. You can decre
2020
We hereby detail the necessary steps to reproduce the human36m experiments. Most people fail at this step, so I hope these instructions will make sure you don't become one of them.
2121

2222
1. I tentatively suggest that users first try downloading the human36m dataset from the [official website](http://vision.imar.ro/human3.6m/description.php). However, manual authorization could take anywhere from 6 days to 6 months, so if you are keen to get things going, you can start by playing with this [toy_dataset](https://pan.baidu.com/s/1szhb9B_8n6p6CeAoPUxnhw). Extraction code: 0o95
23+
Alternative link [Google Drive](https://drive.google.com/open?id=1ssuUje20x1PS5qYwbg1AAloDsVwF1eTW)
2324

2425
2. Unpack the downloaded zip files into a single folder (which I suggest you name `human36m`) and put it under `path-to-your-datasets`.
2526
Open `data_utils/data_washing.py`, change the `root_dir` variable in the main function near line 190 to your h36m dataset path, and run it to perform the standard data augmentations; the washed dataset will be stored in `path-to-your-datasets/human36m_washed`.

README.md

+3
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,9 @@ PyTorch implementation of CloudWalk's recent paper [DenseBody](https://arxiv.org
55

66
**Update on 20190613** A toy dataset has been released to facilitate the reproduction of this project. Check out [`PREPS.md`](PREPS.md) for details.
77

8+
**Update on 20190727** Added an alternative [link](https://drive.google.com/open?id=1ssuUje20x1PS5qYwbg1AAloDsVwF1eTW) for the toy dataset.
9+
10+
811
![paper teaser](teaser/teaser.jpg)
912

1013
### Reproduction results

data_utils/uv_map_generator.py

+11-8
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,7 @@ def UV_interp(self, rgbs):
371371
UV_map: [H * W * 3] Interpolated UV map.
372372
colored_verts: [H * W * 3] Scatter plot of colorized UV vertices
373373
'''
374-
def get_UV_map(self, verts):
374+
def get_UV_map(self, verts, dilate=True):
375375
# normalize all to [0,1]
376376
_min = np.amin(verts, axis=0, keepdims=True)
377377
_max = np.amax(verts, axis=0, keepdims=True)
@@ -383,8 +383,10 @@ def get_UV_map(self, verts):
383383
])
384384
rgbs = verts[vt_to_v_index]
385385

386-
#return self.UV_interp(rgbs), verts_backup
387-
return self._dilate(self.UV_interp(rgbs)), verts_backup
386+
uv_map = self.UV_interp(rgbs)
387+
if dilate:
388+
uv_map = self._dilate(uv_map)
389+
return uv_map, verts_backup
388390

389391
'''
390392
TODO: make it torch.
@@ -419,23 +421,24 @@ def resample(self, UV_map):
419421
if __name__ == '__main__':
420422
# test render module
421423
# change this to the same as in train.py opt.uv_prefix
422-
file_prefix = 'radvani_template'
424+
# file_prefix = 'radvani_template'
423425
#file_prefix = 'vbml_close_template'
424426
#file_prefix = 'vbml_spaced_template'
427+
file_prefix = 'smpl_fbx_template'
425428
generator = UV_Map_Generator(
426-
UV_height=256,
429+
UV_height=512,
427430
UV_pickle=file_prefix+'.pickle'
428431
)
429-
test_folder = '_test_radvani'
432+
test_folder = 'smpl_512'
430433
if not os.path.isdir(test_folder):
431434
os.makedirs(test_folder)
432435

433436
generator.render_UV_atlas('{}/{}_atlas.png'.format(test_folder, file_prefix))
434437
img, verts, rgbs = generator.render_point_cloud('{}/{}.png'.format(test_folder, file_prefix))
435438
verts, rgbs = generator.write_ply('{}/{}.ply'.format(test_folder, file_prefix), verts, rgbs)
436-
uv, _ = generator.get_UV_map(verts)
439+
uv, _ = generator.get_UV_map(verts, dilate=False)
437440
uv = uv.max(axis=2)
438441
print(uv.shape)
439442
binary_mask = np.where(uv > 0, 1., 0.)
440443
binary_mask = (binary_mask * 255).astype(np.uint8)
441-
imsave('{}_UV_mask.png'.format(file_prefix), binary_mask)
444+
imsave('./{}_UV_mask.png'.format(file_prefix), binary_mask)

models/networks.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,8 @@ def acquire_weights(UV_weight_npy):
118118
mask_name = UV_weight_npy.replace('weights.npy', 'mask.png')
119119
print(mask_name)
120120
UV_mask = imread(mask_name)
121-
UV_mask = UV_mask[:,:,0]
121+
if UV_mask.ndim == 3:
122+
UV_mask = UV_mask[:,:,0]
122123
ret, labels = connectedComponents(UV_mask, connectivity=4)
123124
unique, counts = np.unique(labels, return_counts=True)
124125
print(unique, counts)
@@ -404,5 +405,5 @@ def forward(self, x):
404405
return self.conv(x)
405406

406407
if __name__ == '__main__':
407-
acquire_weights('../data_utils/radvani_UV_weights.npy')
408+
acquire_weights('../data_utils/smpl_fbx_template_UV_weights.npy')
408409

train.py

+1-3
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,8 @@ def TrainOptions(debug=False):
2323

2424
parser.add_argument('--data_root', type=str, default=data_root)
2525
parser.add_argument('--checkpoints_dir', type=str, default='checkpoints')
26-
parser.add_argument('--dataset', type=str, default='human36m',
27-
choices=['human36m', 'surreal', 'up3d'])
2826
parser.add_argument('--max_dataset_size', type=int, default=-1)
29-
parser.add_argument('--im_size', type=int, default=256)
27+
parser.add_argument('--im_size', type=int, default=512)
3028
parser.add_argument('--batch_size', type=int, default=batch_size)
3129
parser.add_argument('--name', type=str, default='densebody_resnet_h36m')
3230
parser.add_argument('--uv_map', type=str, default='radvani', choices=['radvani', 'vbml_close', 'vbml_spaced', 'smpl_fbx'])

0 commit comments

Comments
 (0)