4 files changed, +9 -9 lines changed

File tree: projects/DensePose/densepose

@@ -69,7 +69,7 @@ def default_argument_parser():
     # PyTorch still may leave orphan processes in multi-gpu training.
     # Therefore we use a deterministic way to obtain port,
     # so that users are aware of orphan processes by seeing the port occupied.
-    port = 2 ** 15 + 2 ** 14 + hash(os.getuid()) % 2 ** 14
+    port = 2 ** 15 + 2 ** 14 + hash(os.getpid()) % 2 ** 14
     parser.add_argument("--dist-url", default="tcp://127.0.0.1:{}".format(port))
     parser.add_argument(
         "opts",
@@ -334,7 +334,7 @@ at::Tensor ROIAlign_forward_cuda(
   auto output_size = num_rois * pooled_height * pooled_width * channels;
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
-  dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
+  dim3 grid(std::min(at::cuda::ATenCeilDiv((long)output_size, 512L), 4096L));
   dim3 block(512);
 
   if (output.numel() == 0) {
@@ -390,7 +390,7 @@ at::Tensor ROIAlign_backward_cuda(
 
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
-  dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
+  dim3 grid(std::min(at::cuda::ATenCeilDiv((long)grad.numel(), 512L), 4096L));
   dim3 block(512);
 
   // handle possibly empty gradients
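These casts (repeated in the ROIAlignRotated hunks below) all do the same thing. Tensor::numel() returns int64_t, and output_size appears to be int64_t as well, while the 512L and 4096L literals are long; on toolchains where int64_t and long are distinct types, the templated at::cuda::ATenCeilDiv and std::min calls cannot deduce a single argument type, and the (long) cast makes every operand the same type. That rationale is my reading of the change, not something stated in the diff. The launch-size arithmetic itself is just a ceiling division capped at 4096 blocks; a small Python sketch for illustration (not the C++ fix itself):

def grid_blocks(num_elements, threads_per_block=512, max_blocks=4096):
    # Ceiling division: how many blocks of `threads_per_block` cover all elements.
    blocks = (num_elements + threads_per_block - 1) // threads_per_block
    # Capping the grid means each thread may handle several elements when the
    # tensor is large.
    return min(blocks, max_blocks)

assert grid_blocks(513) == 2            # 513 elements need 2 blocks of 512 threads
assert grid_blocks(10_000_000) == 4096  # large tensors hit the 4096-block cap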
@@ -348,7 +348,7 @@ at::Tensor ROIAlignRotated_forward_cuda(
   auto output_size = num_rois * pooled_height * pooled_width * channels;
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
-  dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
+  dim3 grid(std::min(at::cuda::ATenCeilDiv((long)output_size, 512L), 4096L));
   dim3 block(512);
 
   if (output.numel() == 0) {
@@ -403,7 +403,7 @@ at::Tensor ROIAlignRotated_backward_cuda(
 
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
-  dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
+  dim3 grid(std::min(at::cuda::ATenCeilDiv((long)grad.numel(), 512L), 4096L));
   dim3 block(512);
 
   // handle possibly empty gradients
@@ -16,15 +16,15 @@ def get_densepose_metadata():
 
 
 SPLITS = {
-    "densepose_coco_2014_train": ("coco/train2014", "coco/annotations/densepose_train2014.json"),
-    "densepose_coco_2014_minival": ("coco/val2014", "coco/annotations/densepose_minival2014.json"),
+    "densepose_coco_2014_train": ("coco/train2014", "coco/annotations/densepose_coco_2014_train.json"),
+    "densepose_coco_2014_minival": ("coco/val2014", "coco/annotations/densepose_coco_2014_minival.json"),
     "densepose_coco_2014_minival_100": (
         "coco/val2014",
-        "coco/annotations/densepose_minival2014_100.json",
+        "coco/annotations/densepose_coco_2014_minival.json",
     ),
     "densepose_coco_2014_valminusminival": (
         "coco/val2014",
-        "coco/annotations/densepose_valminusminival2014.json",
+        "coco/annotations/densepose_coco_2014_valminusminival.json",
    ),
 }
 
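Each SPLITS entry maps a dataset name to an (image root, annotation JSON) pair relative to the datasets root; this change renames the annotation paths to the densepose_coco_2014_* scheme. As a rough sketch of how such an entry could be resolved to absolute paths (the resolve_split helper and the DETECTRON2_DATASETS default below are assumptions for illustration, not code from this change):

import os

def resolve_split(entry, root=None):
    # Hypothetical helper: join a SPLITS entry with the datasets root
    # (detectron2 conventionally reads it from DETECTRON2_DATASETS).
    root = root or os.environ.get("DETECTRON2_DATASETS", "datasets")
    image_root, json_file = entry
    return os.path.join(root, image_root), os.path.join(root, json_file)

entry = ("coco/train2014", "coco/annotations/densepose_coco_2014_train.json")
print(resolve_split(entry))
# ('datasets/coco/train2014', 'datasets/coco/annotations/densepose_coco_2014_train.json')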