use command-line-supplied cuda device name instead of cuda:0 for safetensors PR that doesn't fix anything

This commit is contained in:
AUTOMATIC 2023-01-04 15:09:53 +03:00
parent 68fbf4558f
commit 642142556d

View file

@@ -173,7 +173,7 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None
if extension.lower() == ".safetensors":
device = map_location or shared.weight_load_location
if device is None:
-            device = "cuda:0" if torch.cuda.is_available() else "cpu"
+            device = devices.get_cuda_device_string() if torch.cuda.is_available() else "cpu"
pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
else:
pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)