Add loglxe loss as log of product of Lagrangian and Eulerian losses
commit 9c8b331bf5
parent 13edf3b96d
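The new combined "lxe" loss is the product of the Lagrangian (displacement) and Eulerian (density) losses, and the quantity actually backpropagated is its logarithm. A minimal sketch of the idea, using torch.nn.MSELoss and dummy tensors as stand-ins for the repository's criterion, model output, and lag2eul mapping (all names below are illustrative, not taken from the diff):

import torch

criterion = torch.nn.MSELoss()  # stand-in for the configured criterion

# dummy stand-ins; in the diff, eul_out/eul_tgt come from lag2eul(lag_out, lag_tgt)
lag_out = torch.randn(2, 8, requires_grad=True)
lag_tgt = torch.randn(2, 8)
eul_out = torch.randn(2, 8, requires_grad=True)
eul_tgt = torch.randn(2, 8)

lag_loss = criterion(lag_out, lag_tgt)
eul_loss = criterion(eul_out, eul_tgt)
loss = lag_loss * eul_loss    # "lxe": the product that is logged per batch/epoch

torch.log(loss).backward()    # the actual training objective is log(loss)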
@@ -133,20 +133,13 @@ def gpu_worker(local_rank, node, args):
     criterion.to(device)
 
     optimizer = import_attr(args.optimizer, optim.__name__, args.callback_at)
-    lag_optimizer = optimizer(
+    optimizer = optimizer(
         model.parameters(),
         lr=args.lr,
         **args.optimizer_args,
     )
-    eul_optimizer = optimizer(
-        model.parameters(),
-        lr=args.lr,
-        **args.optimizer_args,
-    )
-    lag_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
-        lag_optimizer, **args.scheduler_args)
-    eul_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
-        eul_optimizer, **args.scheduler_args)
+    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
+        optimizer, **args.scheduler_args)
 
     if (args.load_state == ckpt_link and not os.path.isfile(ckpt_link)
             or not args.load_state):
@@ -201,8 +194,7 @@ def gpu_worker(local_rank, node, args):
         train_sampler.set_epoch(epoch)
 
         train_loss = train(epoch, train_loader, model, lag2eul, criterion,
-            lag_optimizer, eul_optimizer, lag_scheduler, eul_scheduler,
-            logger, device, args)
+            optimizer, scheduler, logger, device, args)
         epoch_loss = train_loss
 
         if args.val:
@@ -211,14 +203,13 @@ def gpu_worker(local_rank, node, args):
             #epoch_loss = val_loss
 
         if args.reduce_lr_on_plateau:
-            lag_scheduler.step(epoch_loss[0])
-            eul_scheduler.step(epoch_loss[1])
+            scheduler.step(epoch_loss[2])
 
         if rank == 0:
             logger.flush()
 
-            if min_loss is None or torch.prod(epoch_loss) < torch.prod(min_loss):
-                min_loss = epoch_loss
+            if min_loss is None or epoch_loss[2] < min_loss:
+                min_loss = epoch_loss[2]
 
             state = {
                 'epoch': epoch + 1,
@@ -239,14 +230,13 @@ def gpu_worker(local_rank, node, args):
 
 
 def train(epoch, loader, model, lag2eul, criterion,
-        lag_optimizer, eul_optimizer, lag_scheduler, eul_scheduler,
-        logger, device, args):
+        optimizer, scheduler, logger, device, args):
     model.train()
 
     rank = dist.get_rank()
     world_size = dist.get_world_size()
 
-    epoch_loss = torch.zeros(2, dtype=torch.float64, device=device)
+    epoch_loss = torch.zeros(3, dtype=torch.float64, device=device)
 
     for i, (input, target) in enumerate(loader):
         input = input.to(device, non_blocking=True)
@@ -264,59 +254,38 @@ def train(epoch, loader, model, lag2eul, criterion,
         input, output, target = narrow_cast(input, output, target)
 
         lag_out, lag_tgt = output, target
+        eul_out, eul_tgt = lag2eul(lag_out, lag_tgt)
 
-        if i % 2 == 0:
-            lag_loss = criterion(lag_out, lag_tgt)
-            epoch_loss[0] += lag_loss.item()
-
-            with torch.no_grad():
-                eul_out, eul_tgt = lag2eul(lag_out, lag_tgt)
-
-                eul_loss = criterion(eul_out, eul_tgt)
-                epoch_loss[1] += eul_loss.item()
-
-            lag_optimizer.zero_grad()
-            lag_loss.backward()
-            lag_optimizer.step()
-            lag_grads = get_grads(model)
-        else:
-            with torch.no_grad():
-                lag_loss = criterion(lag_out, lag_tgt)
-                epoch_loss[0] += lag_loss.item()
-
-            eul_out, eul_tgt = lag2eul(lag_out, lag_tgt)
-
-            eul_loss = criterion(eul_out, eul_tgt)
-            epoch_loss[1] += eul_loss.item()
-
-            eul_optimizer.zero_grad()
-            eul_loss.backward()
-            eul_optimizer.step()
-            eul_grads = get_grads(model)
+        lag_loss = criterion(lag_out, lag_tgt)
+        eul_loss = criterion(eul_out, eul_tgt)
+        loss = lag_loss * eul_loss
+        epoch_loss[0] += lag_loss.item()
+        epoch_loss[1] += eul_loss.item()
+        epoch_loss[2] += loss.item()
+
+        optimizer.zero_grad()
+        torch.log(loss).backward()  # NOTE actual loss is log(loss)
+        optimizer.step()
+        grads = get_grads(model)
 
         batch = epoch * len(loader) + i + 1
-        if batch % args.log_interval == 0 and batch >= 2:
+        if batch % args.log_interval == 0:
             dist.all_reduce(lag_loss)
             dist.all_reduce(eul_loss)
+            dist.all_reduce(loss)
             lag_loss /= world_size
             eul_loss /= world_size
+            loss /= world_size
             if rank == 0:
                 logger.add_scalar('loss/batch/train/lag', lag_loss.item(),
                                   global_step=batch)
                 logger.add_scalar('loss/batch/train/eul', eul_loss.item(),
                                   global_step=batch)
-                logger.add_scalar('loss/batch/train/lxe',
-                                  lag_loss.item() * eul_loss.item(),
+                logger.add_scalar('loss/batch/train/lxe', loss.item(),
                                   global_step=batch)
 
-                logger.add_scalar('grad/lag/first', lag_grads[0],
-                                  global_step=batch)
-                logger.add_scalar('grad/lag/last', lag_grads[-1],
-                                  global_step=batch)
-                logger.add_scalar('grad/eul/first', eul_grads[0],
-                                  global_step=batch)
-                logger.add_scalar('grad/eul/last', eul_grads[-1],
-                                  global_step=batch)
+                logger.add_scalar('grad/first', grads[0], global_step=batch)
+                logger.add_scalar('grad/last', grads[-1], global_step=batch)
 
     dist.all_reduce(epoch_loss)
     epoch_loss /= len(loader) * world_size
@@ -325,7 +294,7 @@ def train(epoch, loader, model, lag2eul, criterion,
                           global_step=epoch+1)
         logger.add_scalar('loss/epoch/train/eul', epoch_loss[1],
                           global_step=epoch+1)
-        logger.add_scalar('loss/epoch/train/lxe', epoch_loss.prod(),
+        logger.add_scalar('loss/epoch/train/lxe', epoch_loss[2],
                           global_step=epoch+1)
 
         fig = plt_slices(
@@ -346,7 +315,7 @@ def validate(epoch, loader, model, lag2eul, criterion, logger, device, args):
     rank = dist.get_rank()
     world_size = dist.get_world_size()
 
-    epoch_loss = torch.zeros(2, dtype=torch.float64, device=device)
+    epoch_loss = torch.zeros(3, dtype=torch.float64, device=device)
 
     with torch.no_grad():
         for input, target in loader:
@@ -361,14 +330,14 @@ def validate(epoch, loader, model, lag2eul, criterion, logger, device, args):
             input, output, target = narrow_cast(input, output, target)
 
             lag_out, lag_tgt = output, target
-
-            lag_loss = criterion(lag_out, lag_tgt)
-            epoch_loss[0] += lag_loss.item()
-
             eul_out, eul_tgt = lag2eul(lag_out, lag_tgt)
 
+            lag_loss = criterion(lag_out, lag_tgt)
             eul_loss = criterion(eul_out, eul_tgt)
+            loss = lag_loss * eul_loss
+            epoch_loss[0] += lag_loss.item()
             epoch_loss[1] += eul_loss.item()
+            epoch_loss[2] += loss.item()
 
     dist.all_reduce(epoch_loss)
     epoch_loss /= len(loader) * world_size
@@ -377,7 +346,7 @@ def validate(epoch, loader, model, lag2eul, criterion, logger, device, args):
                           global_step=epoch+1)
         logger.add_scalar('loss/epoch/val/eul', epoch_loss[1],
                           global_step=epoch+1)
-        logger.add_scalar('loss/epoch/val/lxe', epoch_loss.prod(),
+        logger.add_scalar('loss/epoch/val/lxe', epoch_loss[2],
                           global_step=epoch+1)
 
         fig = plt_slices(
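Backpropagating the log of the product is the same as backpropagating the sum of the individual log losses, so each term enters the gradient scaled by the inverse of its own value. A quick illustrative check (not part of the commit; dummy losses only):

import torch

x = torch.randn(4, requires_grad=True)
lag_loss = (x ** 2).mean()        # stand-in Lagrangian loss
eul_loss = ((x - 1) ** 2).mean()  # stand-in Eulerian loss

g_prod, = torch.autograd.grad(torch.log(lag_loss * eul_loss), x, retain_graph=True)
g_sum, = torch.autograd.grad(torch.log(lag_loss) + torch.log(eul_loss), x)

assert torch.allclose(g_prod, g_sum)  # identical up to floating-point error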