We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 262bd64 · commit ecdbea7 · Copy full SHA for ecdbea7
torch/nn/parallel/distributed.py
@@ -329,7 +329,7 @@ class DistributedDataParallel(Module):
329
Example::
330
331
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
332
- >>> net = torch.nn.DistributedDataParallel(model, pg)
+ >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
333
"""
334
def __init__(self, module, device_ids=None,
335
output_device=None, dim=0, broadcast_buffers=True,
@@ -626,7 +626,7 @@ def no_sync(self):
626
627
628
629
- >>> ddp = torch.nn.DistributedDataParallel(model, pg)
+ >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
630
>>> with ddp.no_sync():
631
>>> for input in inputs:
632
>>> ddp(input).backward() # no synchronization, accumulate grads
0 commit comments