From c3204dd5ffe384d40bb3d5d23febea463088e4c6 Mon Sep 17 00:00:00 2001
From: Doyup Lee <33016192+LeeDoYup@users.noreply.github.com>
Date: Fri, 25 Sep 2020 21:47:15 +0900
Subject: [PATCH] Fix random seed bug of DDP in the ImageNet example

The random seed has to be set in `main_worker`, not in `main()`.
Although the seed is set in `main()`, each process in distributed
training ends up with its own, different seed: `mp.spawn` starts fresh
worker processes, so RNG state seeded in the parent is not inherited.

---
 imagenet/main.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/imagenet/main.py b/imagenet/main.py
index 169d127e68..5f14e97562 100644
--- a/imagenet/main.py
+++ b/imagenet/main.py
@@ -81,9 +81,6 @@ def main():
     args = parser.parse_args()
 
     if args.seed is not None:
-        random.seed(args.seed)
-        torch.manual_seed(args.seed)
-        cudnn.deterministic = True
         warnings.warn('You have chosen to seed training. '
                       'This will turn on the CUDNN deterministic setting, '
                       'which can slow down your training considerably! '
@@ -115,6 +112,11 @@ def main():
 def main_worker(gpu, ngpus_per_node, args):
     global best_acc1
     args.gpu = gpu
+
+    if args.seed is not None:
+        random.seed(args.seed)
+        torch.manual_seed(args.seed)
+        cudnn.deterministic = True
 
     if args.gpu is not None:
         print("Use GPU: {} for training".format(args.gpu))
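
For context, below is a minimal standalone sketch (not part of the patch)
that reproduces the behaviour the commit message describes: seeding in the
parent process does not carry over into workers started with
`torch.multiprocessing.spawn`, while seeding inside each worker does. The
`SEED` constant and the `worker` function are illustrative names, not code
from `imagenet/main.py`.

    # Sketch: RNG state set before mp.spawn is not inherited by workers,
    # because each worker is a fresh interpreter process.
    import random

    import torch
    import torch.multiprocessing as mp

    SEED = 42  # hypothetical seed value, for demonstration only


    def worker(rank, seed_in_worker):
        if seed_in_worker:
            # The fix from the patch: every spawned process seeds its own RNGs.
            random.seed(SEED)
            torch.manual_seed(SEED)
        # Without per-worker seeding, each rank prints different numbers even
        # though the parent seeded its RNGs before calling mp.spawn.
        print("rank {}: random={:.6f}, torch={:.6f}".format(
            rank, random.random(), torch.rand(1).item()))


    if __name__ == "__main__":
        # Seeding here only affects the parent process, mirroring the old
        # behaviour of seeding in main().
        random.seed(SEED)
        torch.manual_seed(SEED)

        print("seed only in parent (ranks diverge):")
        mp.spawn(worker, args=(False,), nprocs=2, join=True)

        print("seed inside each worker (ranks agree):")
        mp.spawn(worker, args=(True,), nprocs=2, join=True)

Note that with the patch every rank seeds its RNGs with the same value, so
each process starts from an identical random state; the workers still see
different training batches because the example partitions the dataset per
rank with `DistributedSampler`.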