rework the code for lowram a bit
parent 4a216ded43
commit bb295f5478
2 changed files with 4 additions and 11 deletions
modules/sd_models.py
@@ -134,11 +134,7 @@ def load_model_weights(model, checkpoint_info):
 
     print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
-    if shared.cmd_opts.lowram:
-        print("Load to VRAM if GPU is available (low RAM)")
-        pl_sd = torch.load(checkpoint_file)
-    else:
-        pl_sd = torch.load(checkpoint_file, map_location="cpu")
+    pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
 
     if "global_step" in pl_sd:
         print(f"Global Step: {pl_sd['global_step']}")
@@ -164,11 +160,7 @@ def load_model_weights(model, checkpoint_info):
     if os.path.exists(vae_file):
         print(f"Loading VAE weights from: {vae_file}")
 
-        if shared.cmd_opts.lowram:
-            print("Load to VRAM if GPU is available (low RAM)")
-            vae_ckpt = torch.load(vae_file)
-        else:
-            vae_ckpt = torch.load(vae_file, map_location="cpu")
+        vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
 
         vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
 
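Both hunks lean on the same torch.load semantics: map_location="cpu" forces every tensor into system RAM, while map_location=None (the default) restores each tensor to the device it was saved from, so CUDA-saved weights can land directly in VRAM. A minimal standalone sketch of that behaviour, with a hypothetical checkpoint path:

import torch

ckpt_path = "model.ckpt"  # hypothetical path, not from the diff

# Forces every tensor into system RAM, whatever device it was saved from.
sd_ram = torch.load(ckpt_path, map_location="cpu")

# Default mapping: each tensor is restored to its saved device, so weights
# serialized from CUDA go straight to VRAM instead of filling RAM first.
sd_saved_device = torch.load(ckpt_path, map_location=None)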
modules/shared.py
@@ -34,7 +34,7 @@ parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_
 parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
 parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
 parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
-parser.add_argument("--lowram", action='store_true', help="load models to VRM instead of RAM (for machines which have bigger VRM than RAM such as free Google Colab server)")
+parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
 parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
 parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
 parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
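The reworded flag behaves like any other store_true switch; a tiny self-contained demo (not webui code) of how it parses:

import argparse

# Self-contained demo of the flag as reworded above; not webui code.
parser = argparse.ArgumentParser()
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")

print(parser.parse_args([]).lowram)            # False -> weights pinned to RAM ("cpu")
print(parser.parse_args(["--lowram"]).lowram)  # True  -> defer to the checkpoint's saved devices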
@@ -81,6 +81,7 @@ devices.device, devices.device_interrogate, devices.device_gfpgan, devices.devic
 (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer'])
 
 device = devices.device
+weight_load_location = None if cmd_opts.lowram else "cpu"
 
 batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
 parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
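Taken together, the load-location policy is now decided once and reused at every load site. A condensed sketch of the data flow after this commit (cmd_opts and the file paths are stand-ins; the variable names mirror the diff):

import torch

class cmd_opts:  # stand-in for the parsed webui options
    lowram = True  # as if launched with --lowram

# Decided once in shared.py: None defers to the checkpoint's saved devices,
# "cpu" pins the weights to system RAM.
weight_load_location = None if cmd_opts.lowram else "cpu"

# Reused at both load sites in sd_models.py, replacing the duplicated if/else:
pl_sd = torch.load("model.ckpt", map_location=weight_load_location)       # hypothetical path
vae_ckpt = torch.load("model.vae.pt", map_location=weight_load_location)  # hypothetical path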