rework the code to not use the walrus operator because colab's 3.7 does not support it
parent b8f2dfed3c
commit 62e3d71aa7
1 changed file with 5 additions and 2 deletions
@@ -429,13 +429,16 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True

     # Here we use optimizer from saved HN, or we can specify as UI option.
-    if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+    if hypernetwork.optimizer_name in optimizer_dict:
         optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+        optimizer_name = hypernetwork.optimizer_name
     else:
-        print(f"Optimizer type {optimizer_name} is not defined!")
+        print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
         optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
         optimizer_name = 'AdamW'

     if hypernetwork.optimizer_state_dict:  # This line must be changed if Optimizer type can be different from saved optimizer.
         try:
             optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
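For background, the assignment expression ("walrus") operator := was added in Python 3.8, so the one-line form removed here is a SyntaxError on Colab's Python 3.7. Below is a minimal, self-contained sketch of the equivalent rewrite; FakeHypernetwork, optimizer_dict, and learn_rate are hypothetical stand-ins for the real webui objects, not the actual module code.

import torch

# Hypothetical stand-ins; the real objects live in the webui's hypernetwork module.
class FakeHypernetwork:
    optimizer_name = "AdamW"

    def weights(self):
        return [torch.nn.Parameter(torch.zeros(4))]

optimizer_dict = {"AdamW": torch.optim.AdamW, "SGD": torch.optim.SGD}

hypernetwork = FakeHypernetwork()
weights = hypernetwork.weights()
learn_rate = 1e-3

# Python 3.8+ form (fails to parse on 3.7):
#   if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
#       optimizer = optimizer_dict[optimizer_name](params=weights, lr=learn_rate)

# 3.7-compatible form: test the attribute directly and assign optimizer_name in each branch.
if hypernetwork.optimizer_name in optimizer_dict:
    optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=learn_rate)
    optimizer_name = hypernetwork.optimizer_name
else:
    print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
    optimizer = torch.optim.AdamW(params=weights, lr=learn_rate)
    optimizer_name = 'AdamW'

print(optimizer_name)  # -> AdamW

The branch structure mirrors the diff above; the only behavioural difference from the walrus version is that optimizer_name is now assigned explicitly inside each branch instead of as a side effect of the condition.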