Merge pull request #5992 from yuvalabou/F541
Fix F541: f-string without any placeholders
commit b12de850ae
10 changed files with 18 additions and 18 deletions
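
For context, F541 is the pyflakes/flake8 rule (also implemented by Ruff) that flags an f-string containing no {...} placeholders: the f prefix is inert there, so the fix throughout this PR is simply to drop it. A minimal sketch of the pattern; the model_path value below is a hypothetical example, not code from this repository:

# Flagged by F541: the f prefix does nothing because there is no placeholder.
print(f"Loading model from cache")

# Fix applied in this PR: drop the prefix; the runtime value is identical.
print("Loading model from cache")

# A real f-string with a placeholder is untouched by the rule.
model_path = "models/ldsr.ckpt"  # hypothetical path, for illustration only
print(f"Loading model from {model_path}")
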
|
@@ -26,7 +26,7 @@ class LDSR:
         global cached_ldsr_model
 
         if shared.opts.ldsr_cached and cached_ldsr_model is not None:
-            print(f"Loading model from cache")
+            print("Loading model from cache")
             model: torch.nn.Module = cached_ldsr_model
         else:
             print(f"Loading model from {self.modelPath}")

@@ -382,7 +382,7 @@ class VQAutoEncoder(nn.Module):
                 self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
                 logger.info(f'vqgan is loaded from: {model_path} [params]')
             else:
-                raise ValueError(f'Wrong params!')
+                raise ValueError('Wrong params!')
 
 
     def forward(self, x):

@@ -431,7 +431,7 @@ class VQGANDiscriminator(nn.Module):
             elif 'params' in chkpt:
                 self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
             else:
-                raise ValueError(f'Wrong params!')
+                raise ValueError('Wrong params!')
 
     def forward(self, x):
         return self.main(x)

@@ -277,7 +277,7 @@ def load_hypernetwork(filename):
             print(traceback.format_exc(), file=sys.stderr)
     else:
         if shared.loaded_hypernetwork is not None:
-            print(f"Unloading hypernetwork")
+            print("Unloading hypernetwork")
 
         shared.loaded_hypernetwork = None
 

@@ -443,7 +443,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
 
     initial_step = hypernetwork.step or 0
     if initial_step >= steps:
-        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
         return hypernetwork, filename
 
     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)

@@ -599,7 +599,7 @@ def read_info_from_image(image):
 Negative prompt: {json_info["uc"]}
 Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
         except Exception:
-            print(f"Error parsing NovelAI image generation parameters:", file=sys.stderr)
+            print("Error parsing NovelAI image generation parameters:", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
 
     return geninfo, items

@@ -172,7 +172,7 @@ class InterrogateModels:
                         res += ", " + match
 
         except Exception:
-            print(f"Error interrogating", file=sys.stderr)
+            print("Error interrogating", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
             res += "<error>"
 

@@ -137,15 +137,15 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
     except pickle.UnpicklingError:
         print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
-        print(f"-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
-        print(f"You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
+        print("-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
+        print("You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
         return None
 
     except Exception:
         print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
-        print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
-        print(f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
+        print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
+        print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
         return None
 
     return unsafe_torch_load(filename, *args, **kwargs)

@@ -117,13 +117,13 @@ def select_checkpoint():
         return checkpoint_info
 
     if len(checkpoints_list) == 0:
-        print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
+        print("No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
         if shared.cmd_opts.ckpt is not None:
             print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
         print(f" - directory {model_path}", file=sys.stderr)
         if shared.cmd_opts.ckpt_dir is not None:
             print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
-        print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
+        print("Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
         exit(1)
 
     checkpoint_info = next(iter(checkpoints_list.values()))

@@ -324,7 +324,7 @@ def load_model(checkpoint_info=None):
 
     script_callbacks.model_loaded_callback(sd_model)
 
-    print(f"Model loaded.")
+    print("Model loaded.")
     return sd_model
 
 

@@ -359,5 +359,5 @@ def reload_model_weights(sd_model=None, info=None):
     if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
         sd_model.to(devices.device)
 
-    print(f"Weights loaded.")
+    print("Weights loaded.")
     return sd_model

@@ -208,5 +208,5 @@ def reload_vae_weights(sd_model=None, vae_file="auto"):
     if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
         sd_model.to(devices.device)
 
-    print(f"VAE Weights loaded.")
+    print("VAE Weights loaded.")
     return sd_model

@@ -263,7 +263,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
 
     initial_step = embedding.step or 0
     if initial_step >= steps:
-        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
         return embedding, filename
     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
 

@@ -140,7 +140,7 @@ class Script(scripts.Script):
                 try:
                     args = cmdargs(line)
                 except Exception:
-                    print(f"Error parsing line [line] as commandline:", file=sys.stderr)
+                    print(f"Error parsing line {line} as commandline:", file=sys.stderr)
                     print(traceback.format_exc(), file=sys.stderr)
                     args = {"prompt": line}
             else:

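
The last hunk is the one place where the warning pointed at a real bug rather than a harmless prefix: [line] was evidently meant to be a placeholder, so the fix keeps the f-string and writes {line} instead of dropping the f. A minimal sketch, with a hypothetical value for line:

# Hypothetical input line, for illustration only.
line = '--prompt "a cat" --steps 20'

print(f"Error parsing line [line] as commandline:")  # F541: no placeholder, prints the literal text "[line]"
print(f"Error parsing line {line} as commandline:")  # fixed: interpolates the offending line into the message
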