Merge branch 'master' of github.com:eelregit/map2map

Conflicts:
	map2map/train.py
Yin Li 2019-12-03 17:52:01 -05:00
commit afeefcaa9e
3 changed files with 7 additions and 7 deletions

map2map/train.py

@@ -1,4 +1,4 @@
-import os
+mport os
 import shutil
 import torch
 from torch.multiprocessing import spawn
@@ -89,8 +89,8 @@ def gpu_worker(local_rank, args):
         #weight_decay=args.weight_decay
     )
     #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
-    scheduler = torch.optim.CyclicLR(optimizer, base_lr=args.lr * 1e-2,
-            max_lr=args.lr)
+    scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,
+            base_lr=args.lr * 1e-2, max_lr=args.lr)

     if args.load_state:
         state = torch.load(args.load_state, map_location=args.device)

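The scheduler fix above matters because CyclicLR lives in torch.optim.lr_scheduler, not in torch.optim, so the removed lines would raise AttributeError at startup. A minimal runnable sketch of the corrected call, with a placeholder model and SGD optimizer standing in for the repo's actual setup (which this hunk does not show):

    import torch

    lr = 0.001                                   # the new --lr from the SLURM scripts below
    model = torch.nn.Linear(3, 3)                # placeholder model
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    # CyclicLR must come from torch.optim.lr_scheduler;
    # torch.optim.CyclicLR does not exist
    scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer, base_lr=lr * 1e-2, max_lr=lr)

    for step in range(10):
        optimizer.step()                         # normally follows loss.backward()
        scheduler.step()                         # CyclicLR steps once per batch

Note that CyclicLR's default cycle_momentum=True requires an optimizer with a momentum parameter, hence SGD with momentum in this sketch.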
[SLURM script — displacement (cosmology.dis) training run]

@@ -9,7 +9,7 @@
 #SBATCH --gres=gpu:v100-32gb:4
 #SBATCH --exclusive
-#SBATCH --nodes=2
+#SBATCH --nodes=4
 #SBATCH --mem=0
 #SBATCH --time=7-00:00:00
@@ -46,7 +46,7 @@ srun m2m.py train \
     --val-in-patterns "$data_root_dir/$in_dir/$val_dirs/$in_files" \
     --val-tgt-patterns "$data_root_dir/$tgt_dir/$val_dirs/$tgt_files" \
     --in-channels 3 --out-channels 3 --norms cosmology.dis --augment \
-    --epochs 1024 --batches 3 --loader-workers 3 --lr 0.0002
+    --epochs 1024 --batches 3 --loader-workers 3 --lr 0.001
 #    --load-state checkpoint.pth

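Doubling --nodes from 2 to 4 raises each job from 8 to 16 V100s given gpu:v100-32gb:4. The train.py hunk above imports spawn and defines gpu_worker(local_rank, args), which suggests one spawned worker per local GPU; a minimal sketch of that pattern (placeholder worker and args, not the repo's code):

    import torch
    from torch.multiprocessing import spawn

    def gpu_worker(local_rank, args):
        # placeholder worker; the real one builds the model, optimizer,
        # and the CyclicLR scheduler shown in the train.py hunk
        print(f'worker started on local GPU {local_rank}')

    if __name__ == '__main__':
        gpus_per_node = max(torch.cuda.device_count(), 1)   # 4 per node in these jobs
        spawn(gpu_worker, args=(None,), nprocs=gpus_per_node)
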
[SLURM script — velocity (cosmology.vel) training run]

@@ -9,7 +9,7 @@
 #SBATCH --gres=gpu:v100-32gb:4
 #SBATCH --exclusive
-#SBATCH --nodes=2
+#SBATCH --nodes=4
 #SBATCH --mem=0
 #SBATCH --time=7-00:00:00
@@ -46,7 +46,7 @@ srun m2m.py train \
     --val-in-patterns "$data_root_dir/$in_dir/$val_dirs/$in_files" \
     --val-tgt-patterns "$data_root_dir/$tgt_dir/$val_dirs/$tgt_files" \
     --in-channels 3 --out-channels 3 --norms cosmology.vel --augment \
-    --epochs 1024 --batches 3 --loader-workers 3 --lr 0.0002
+    --epochs 1024 --batches 3 --loader-workers 3 --lr 0.001
 #    --load-state checkpoint.pth
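
Both scripts leave --load-state checkpoint.pth commented out; the train.py hunk shows the resume path calling torch.load(args.load_state, map_location=args.device). A minimal round-trip sketch of that mechanism (the checkpoint keys here are assumptions, not the repo's format):

    import torch

    model = torch.nn.Linear(3, 3)                # placeholder model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

    # write a checkpoint, then reload it the way --load-state would
    torch.save({'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': 0}, 'checkpoint.pth')

    state = torch.load('checkpoint.pth', map_location=torch.device('cpu'))
    model.load_state_dict(state['model'])        # key names are assumptions
    optimizer.load_state_dict(state['optimizer'])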