From a886e53c54d3eb319eb4dff57860f6edf836c8ae Mon Sep 17 00:00:00 2001
From: Yin Li
Date: Mon, 27 Jul 2020 17:47:58 -0700
Subject: [PATCH] Always use training loss for lr scheduler

---
 map2map/train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/map2map/train.py b/map2map/train.py
index 5c49092..5e6dc6a 100644
--- a/map2map/train.py
+++ b/map2map/train.py
@@ -207,7 +207,7 @@ def gpu_worker(local_rank, node, args):
         if args.val:
             val_loss = validate(epoch, val_loader, model, lag2eul, criterion,
                     logger, device, args)
-            epoch_loss = val_loss
+            #epoch_loss = val_loss
 
         if args.reduce_lr_on_plateau:
             lag_scheduler.step(epoch_loss[0])