From 2f4c91894d4c0a055c1069b2fda0e4da8fcda188 Mon Sep 17 00:00:00 2001
From: guaneec
Date: Wed, 26 Oct 2022 12:10:30 +0800
Subject: [PATCH 001/147] Remove activation from final layer of HNs
---
modules/hypernetworks/hypernetwork.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index d647ea55..54346b64 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -41,8 +41,8 @@ class HypernetworkModule(torch.nn.Module):
# Add a fully-connected layer
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
- # Add an activation func
- if activation_func == "linear" or activation_func is None:
+ # Add an activation func except last layer
+ if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3:
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
@@ -53,7 +53,7 @@ class HypernetworkModule(torch.nn.Module):
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
- # Add dropout expect last layer
+ # Add dropout except last layer
if use_dropout and i < len(layer_structure) - 3:
linears.append(torch.nn.Dropout(p=0.3))
From c702d4d0df21790199d199818f25c449213ffe0f Mon Sep 17 00:00:00 2001
From: guaneec
Date: Wed, 26 Oct 2022 13:43:04 +0800
Subject: [PATCH 002/147] Fix off-by-one
---
modules/hypernetworks/hypernetwork.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 54346b64..3ce85bb5 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -42,7 +42,7 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func except last layer
- if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3:
+ if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 2:
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
@@ -54,7 +54,7 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout except last layer
- if use_dropout and i < len(layer_structure) - 3:
+ if use_dropout and i < len(layer_structure) - 2:
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
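
The two patches above build the hypernetwork MLP from a layer_structure list such as 1, 2, 1: one Linear per consecutive pair of width multipliers, with the activation skipped once the index reaches len(layer_structure) - 2, i.e. the final layer. A minimal stand-alone sketch of that loop (plain PyTorch, not the repository's exact code):

import torch

def build_mlp(dim, layer_structure=(1, 2, 1), activation=torch.nn.ReLU):
    # One Linear per consecutive pair of width multipliers; the activation is
    # skipped for the last Linear, which is the change these two patches make.
    linears = []
    for i in range(len(layer_structure) - 1):
        linears.append(torch.nn.Linear(int(dim * layer_structure[i]),
                                       int(dim * layer_structure[i + 1])))
        if i < len(layer_structure) - 2:  # equivalent to "not the last layer"
            linears.append(activation())
    return torch.nn.Sequential(*linears)

print(build_mlp(320))  # Linear(320->640), ReLU, Linear(640->320) -- no trailing activation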
From 877d94f97ca5491d8779440769b191e0dcd32c8e Mon Sep 17 00:00:00 2001
From: guaneec
Date: Wed, 26 Oct 2022 14:50:58 +0800
Subject: [PATCH 003/147] Backward compatibility
---
modules/hypernetworks/hypernetwork.py | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 3ce85bb5..dd317085 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -28,7 +28,7 @@ class HypernetworkModule(torch.nn.Module):
"swish": torch.nn.Hardswish,
}
- def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False):
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
@@ -42,7 +42,7 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func except last layer
- if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 2:
+ if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
@@ -105,7 +105,7 @@ class Hypernetwork:
filename = None
name = None
- def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False):
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False):
self.filename = None
self.name = name
self.layers = {}
@@ -116,11 +116,12 @@ class Hypernetwork:
self.activation_func = activation_func
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
+ self.activate_output = activate_output
for size in enable_sizes or []:
self.layers[size] = (
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
)
def weights(self):
@@ -147,6 +148,7 @@ class Hypernetwork:
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
+ state_dict['activate_output'] = self.activate_output
torch.save(state_dict, filename)
@@ -161,12 +163,13 @@ class Hypernetwork:
self.activation_func = state_dict.get('activation_func', None)
self.add_layer_norm = state_dict.get('is_layer_norm', False)
self.use_dropout = state_dict.get('use_dropout', False)
+ self.activate_output = state_dict.get('activate_output', True)
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
- HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
)
self.name = state_dict.get('name', self.name)
From 91bb35b1e6842b30ce7553009c8ecea3643de8d2 Mon Sep 17 00:00:00 2001
From: guaneec
Date: Wed, 26 Oct 2022 15:00:03 +0800
Subject: [PATCH 004/147] Merge fix
---
modules/hypernetworks/hypernetwork.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index eab8b32f..bd171793 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -190,7 +190,7 @@ class Hypernetwork:
print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
- self.use_dropout = state_dict.get('use_dropout', False
+ self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
self.activate_output = state_dict.get('activate_output', True)
From b6a8bb123bd519736306417399f6441e504f1e8b Mon Sep 17 00:00:00 2001
From: guaneec
Date: Wed, 26 Oct 2022 15:15:19 +0800
Subject: [PATCH 005/147] Fix merge
---
modules/hypernetworks/hypernetwork.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index bd171793..2997cead 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -60,7 +60,7 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout except last layer
- if use_dropout and i < len(layer_structure) - 2:
+ if use_dropout and i < len(layer_structure) - 3:
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
@@ -126,7 +126,7 @@ class Hypernetwork:
filename = None
name = None
- def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False)
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False):
self.filename = None
self.name = name
self.layers = {}
From 7bd8581e461623932ffbd5762ee931ee51f798db Mon Sep 17 00:00:00 2001
From: Sihan Wang <31711261+shwang95@users.noreply.github.com>
Date: Wed, 26 Oct 2022 20:32:55 +0800
Subject: [PATCH 006/147] Fix error caused by EXIF transpose when using custom
scripts
Some custom scripts read the image directly, so no image needs to be selected in the UI; this caused an error.
---
modules/img2img.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index 9c0cf23e..86a19f37 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -80,7 +80,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
mask = None
# Use the EXIF orientation of photos taken by smartphones.
- image = ImageOps.exif_transpose(image)
+ if image is not None:
+ image = ImageOps.exif_transpose(image)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
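
The guard simply skips EXIF normalisation when no image was selected in the UI, which is the case for custom scripts that load their own inputs. A minimal sketch of the same pattern, assuming only Pillow:

from typing import Optional
from PIL import Image, ImageOps

def prepare_input(image: Optional[Image.Image]) -> Optional[Image.Image]:
    # Apply the EXIF orientation of photos taken by smartphones, but only
    # when an image was actually provided; otherwise pass None through.
    if image is not None:
        image = ImageOps.exif_transpose(image)
    return image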
From 85fcccc105aa50f1d78de559233eaa9f384608b5 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 26 Oct 2022 22:24:33 +0900
Subject: [PATCH 007/147] Squashed commit of fixing dropout silently
Fix dropout for future hypernetworks
Add kwargs to the Hypernetwork class
Add hypernetwork UI options for Gradio input
Add recommended options
Remove them as options
Revert adding the options in the UI
---
modules/hypernetworks/hypernetwork.py | 25 +++++++++++++++++--------
modules/ui.py | 4 ++--
2 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 2997cead..dd921153 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -34,7 +34,8 @@ class HypernetworkModule(torch.nn.Module):
}
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
- def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False, activate_output=False):
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
+ add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
@@ -60,7 +61,7 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout except last layer
- if use_dropout and i < len(layer_structure) - 3:
+ if 'last_layer_dropout' in kwargs and kwargs['last_layer_dropout'] and use_dropout and i < len(layer_structure) - 2:
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
@@ -126,7 +127,7 @@ class Hypernetwork:
filename = None
name = None
- def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False):
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
self.filename = None
self.name = name
self.layers = {}
@@ -139,11 +140,14 @@ class Hypernetwork:
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
self.activate_output = activate_output
+ self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
for size in enable_sizes or []:
self.layers[size] = (
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
def weights(self):
@@ -172,7 +176,8 @@ class Hypernetwork:
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
state_dict['activate_output'] = self.activate_output
-
+ state_dict['last_layer_dropout'] = self.last_layer_dropout
+
torch.save(state_dict, filename)
def load(self, filename):
@@ -193,12 +198,16 @@ class Hypernetwork:
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
self.activate_output = state_dict.get('activate_output', True)
+ print(f"Activate last layer is set to {self.activate_output}")
+ self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
- HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
- HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
self.name = state_dict.get('name', self.name)
diff --git a/modules/ui.py b/modules/ui.py
index 0a63e357..55cbe859 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1238,8 +1238,8 @@ def create_ui(wrap_gradio_gpu_call):
new_hypernetwork_name = gr.Textbox(label="Name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
- new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys)
- new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
+ new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys)
+ new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Normal is default, for experiments, relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork")
From cc56df996e95c2c82295ab7b9928da2544791220 Mon Sep 17 00:00:00 2001
From: guaneec
Date: Wed, 26 Oct 2022 23:51:51 +0800
Subject: [PATCH 008/147] Fix dropout logic
---
modules/hypernetworks/hypernetwork.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index dd921153..b17598fe 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -35,7 +35,7 @@ class HypernetworkModule(torch.nn.Module):
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
- add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
+ add_layer_norm=False, use_dropout=False, activate_output=False, last_layer_dropout=True):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
@@ -61,7 +61,7 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout except last layer
- if 'last_layer_dropout' in kwargs and kwargs['last_layer_dropout'] and use_dropout and i < len(layer_structure) - 2:
+ if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2):
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
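
With this patch the dropout placement settles on: dropout after every hidden layer except the output layer, and whether the layer feeding the output also gets dropout is controlled by last_layer_dropout. A small stand-alone sketch of that predicate (not the repository's code; indices follow the loop over range(len(layer_structure) - 1)):

def wants_dropout(i, layer_structure, use_dropout, last_layer_dropout):
    # True when a Dropout module should follow the i-th Linear.
    if not use_dropout:
        return False
    if i < len(layer_structure) - 3:          # strictly inner layers
        return True
    return last_layer_dropout and i < len(layer_structure) - 2  # layer feeding the output

# layer_structure = [1, 2, 2, 1] -> Linears at i = 0, 1, 2 (i = 2 is the output layer)
print([wants_dropout(i, [1, 2, 2, 1], True, False) for i in range(3)])  # [True, False, False]
print([wants_dropout(i, [1, 2, 2, 1], True, True) for i in range(3)])   # [True, True, False]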
From 029d7c75436558f1e884bb127caed73caaecb83a Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Thu, 27 Oct 2022 14:44:53 +0900
Subject: [PATCH 009/147] Revert unresolved changes in bias initialization
It should be zeros_ or properly parameterized in the future.
---
modules/hypernetworks/hypernetwork.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index b17598fe..25427a37 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -75,7 +75,7 @@ class HypernetworkModule(torch.nn.Module):
w, b = layer.weight.data, layer.bias.data
if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
- normal_(b, mean=0.0, std=0.005)
+ normal_(b, mean=0.0, std=0)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
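
The commit note says the bias should eventually use zeros_ (or be made configurable); sampling from a normal distribution with std=0 simply fills the tensor with the mean. A minimal sketch of the explicit form, assuming nothing beyond stock PyTorch:

import torch
from torch.nn.init import zeros_

layer = torch.nn.Linear(320, 640)
zeros_(layer.bias.data)  # same effect as filling the bias with the mean 0.0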
From 0089fa5cebe43654e6d8a45d9b880e25826c2a74 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Sat, 29 Oct 2022 21:09:05 +0800
Subject: [PATCH 010/147] Update zh_CN.json
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index e453f5e3..b208ae25 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -482,7 +482,7 @@
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
"Enable Autocomplete": "开启Tag补全",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
- "Roll three": "抽三位出來",
+ "Roll three": "抽三位出来",
"Generate forever": "不停地生成",
"Cancel generate forever": "取消不停地生成"
}
From f512b0828bb3f7d586c2da8caa87506998ed9212 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Sun, 30 Oct 2022 00:45:30 +0900
Subject: [PATCH 011/147] Update zh_CN.json
Update translation content to match 35c45df28b303a05d56a13cb56d4046f08cf8c25
---
localizations/zh_CN.json | 53 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index b208ae25..94907ada 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -91,6 +91,9 @@
"Show Textbox": "显示文本框",
"File with inputs": "含输入内容的文件",
"Prompts": "提示词",
+ "Iterate seed every line": "每行输入都换一个种子",
+ "List of prompt inputs": "提示词输入列表",
+ "Upload prompt inputs": "上传提示词输入文件",
"X type": "X轴类型",
"Nothing": "无",
"Var. seed": "差异随机种子",
@@ -109,6 +112,7 @@
"Eta": "Eta",
"Clip skip": "Clip 跳过",
"Denoising": "去噪",
+ "Cond. Image Mask Weight": "自适应图像蒙版强度",
"X values": "X轴数值",
"Y type": "Y轴类型",
"Y values": "Y轴数值",
@@ -204,6 +208,7 @@
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
+ "Upscale Before Restoring Faces": "放大后再进行面部修复",
"Open output directory": "打开输出目录",
"Send to txt2img": ">> 文生图",
"A merger of the two checkpoints will be generated in your": "合并后的模型(ckpt)会生成在你的",
@@ -237,6 +242,36 @@
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
+ "tanh": "tanh",
+ "sigmoid": "sigmoid",
+ "celu": "celu",
+ "gelu": "gelu",
+ "glu": "glu",
+ "hardshrink": "hardshrink",
+ "hardsigmoid": "hardsigmoid",
+ "hardtanh": "hardtanh",
+ "logsigmoid": "logsigmoid",
+ "logsoftmax": "logsoftmax",
+ "mish": "mish",
+ "prelu": "prelu",
+ "rrelu": "rrelu",
+ "relu6": "relu6",
+ "selu": "selu",
+ "silu": "silu",
+ "softmax": "softmax",
+ "softmax2d": "softmax2d",
+ "softmin": "softmin",
+ "softplus": "softplus",
+ "softshrink": "softshrink",
+ "softsign": "softsign",
+ "tanhshrink": "tanhshrink",
+ "threshold": "阈值",
+ "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "选择初始化层权重的方案. 类relu - Kaiming, 类sigmoid - Xavier 都是比较推荐的选项",
+ "Normal": "正态",
+ "KaimingUniform": "Kaiming均匀",
+ "KaimingNormal": "Kaiming正态",
+ "XavierUniform": "Xavier均匀",
+ "XavierNormal": "Xavier正态",
"Add layer normalization": "添加层标准化",
"Use dropout": "采用 dropout 防止过拟合",
"Overwrite Old Hypernetwork": "覆写旧的 Hypernetwork",
@@ -250,10 +285,15 @@
"Create flipped copies": "生成镜像副本",
"Split oversized images into two": "将过大的图像分为两份",
"Split oversized images": "分割过大的图像",
+ "Auto focal point crop": "自动焦点裁切",
"Use BLIP for caption": "使用 BLIP 生成说明文字(自然语言描述)",
"Use deepbooru for caption": "使用 deepbooru 生成说明文字(tags)",
"Split image threshold": "图像分割阈值",
"Split image overlap ratio": "分割图像重叠的比率",
+ "Focal point face weight": "焦点面部权重",
+ "Focal point entropy weight": "焦点熵权重",
+ "Focal point edges weight": "焦点线条权重",
+ "Create debug image": "生成除错图片",
"Preprocess": "预处理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录",
@@ -280,9 +320,11 @@
"Renew Page": "刷新页面",
"extras": "后处理",
"favorites": "收藏夹",
+ "others": "其他",
"custom fold": "自定义文件夹",
"Load": "载入",
"Images directory": "图像目录",
+ "Dropdown": "下拉列表",
"Prev batch": "上一批",
"Next batch": "下一批",
"First Page": "首页",
@@ -291,7 +333,12 @@
"Next Page": "下一页",
"End Page": "尾页",
"number of images to delete consecutively next": "接下来要连续删除的图像数",
+ "delete next": "删除下一张",
"Delete": "删除",
+ "sort by": "排序方式",
+ "path name": "路径名",
+ "date": "日期",
+ "keyword": "关键词",
"Generate Info": "生成信息",
"File Name": "文件名",
"Collect": "收藏",
@@ -299,12 +346,15 @@
"Date to": "日期至",
"Number": "数量",
"set_index": "设置索引",
+ "load_switch": "载入开关",
+ "turn_page_switch": "翻页开关",
"Checkbox": "勾选框",
"Apply settings": "保存设置",
"Saving images/grids": "保存图像/概览图",
"Always save all generated images": "始终保存所有生成的图像",
"File format for images": "图像的文件格式",
"Images filename pattern": "图像文件名格式",
+ "Add number to filename when saving": "储存的时候在文件名里添加数字",
"Always save all generated image grids": "始终保存所有生成的概览图",
"File format for grids": "概览图的文件格式",
"Add extended info (seed, prompt) to filename when saving grid": "保存概览时将扩展信息(随机种子、提示词)添加到文件名",
@@ -359,6 +409,7 @@
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)",
"Hypernetwork strength": "Hypernetwork 强度",
+ "Inpainting conditioning mask strength": "内补绘制的自适应蒙版强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
"Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的去噪需要更少的迭代步数)",
@@ -390,6 +441,7 @@
"Add model hash to generation information": "将模型的哈希值添加到生成信息",
"Add model name to generation information": "将模型名称添加到生成信息",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "当从文本读取生成参数到 UI(从 PNG 信息或粘贴文本)时,不要更改选定的模型(ckpt)",
+ "Send seed when sending prompt or image to other interface": "将提示词或者图片传送到其他界面的时候同时也把随机种子传送过去",
"Font for image grids that have text": "有文字的概览图使用的字体",
"Enable full page image viewer": "启用整页图像查看器",
"Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像",
@@ -478,6 +530,7 @@
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于内补绘制专用的模型。 决定了蒙版在内补绘制以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
"Enable Autocomplete": "开启Tag补全",
From 44ab954fabb9c1273366ebdca47f8da394d61aab Mon Sep 17 00:00:00 2001
From: random_thoughtss
Date: Sat, 29 Oct 2022 10:02:56 -0700
Subject: [PATCH 012/147] Fix latent upscale highres fix #3888
---
modules/processing.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 548eec29..f18b7db2 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -653,6 +653,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ image_conditioning = self.txt2img_image_conditioning(samples)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
@@ -674,6 +675,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ image_conditioning = self.img2img_image_conditioning(
+ decoded_samples,
+ samples,
+ decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3])
+ )
+
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
@@ -684,11 +691,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
x = None
devices.torch_gc()
- image_conditioning = self.img2img_image_conditioning(
- decoded_samples,
- samples,
- decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3])
- )
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
return samples
From 6e2ce4e735db64afcd0fe637327ca4ec78335706 Mon Sep 17 00:00:00 2001
From: random_thoughtss
Date: Sat, 29 Oct 2022 10:35:51 -0700
Subject: [PATCH 013/147] Added image conditioning to latent upscale. Only
computed if the mask weight is not 1.0 to avoid extra memory. Also includes
some code cleanup.
---
modules/processing.py | 29 +++++++++++------------------
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index f18b7db2..ee0e9e34 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -134,11 +134,7 @@ class StableDiffusionProcessing():
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
- return torch.zeros(
- x.shape[0], 5, 1, 1,
- dtype=x.dtype,
- device=x.device
- )
+ return x.new_zeros(x.shape[0], 5, 1, 1)
height = height or self.height
width = width or self.width
@@ -156,11 +152,7 @@ class StableDiffusionProcessing():
def img2img_image_conditioning(self, source_image, latent_image, image_mask = None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
- return torch.zeros(
- latent_image.shape[0], 5, 1, 1,
- dtype=latent_image.dtype,
- device=latent_image.device
- )
+ return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
# Handle the different mask inputs
if image_mask is not None:
@@ -174,11 +166,10 @@ class StableDiffusionProcessing():
# Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
conditioning_mask = torch.round(conditioning_mask)
else:
- conditioning_mask = torch.ones(1, 1, *source_image.shape[-2:])
+ conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
- conditioning_mask = conditioning_mask.to(source_image.device)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
@@ -653,7 +644,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
- image_conditioning = self.txt2img_image_conditioning(samples)
+
+ # Avoid making the inpainting conditioning unless necessary as
+ # this does need some extra compute to decode / encode the image again.
+ if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
+ image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
+ else:
+ image_conditioning = self.txt2img_image_conditioning(samples)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
@@ -675,11 +672,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
- image_conditioning = self.img2img_image_conditioning(
- decoded_samples,
- samples,
- decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3])
- )
+ image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
shared.state.nextjob()
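
Besides gating the inpainting conditioning on the mask weight, this patch replaces explicit torch.zeros/torch.ones construction with new_zeros/new_ones, so the dummy conditioning automatically inherits dtype and device from the latent it accompanies. A minimal sketch of that idiom, using plain PyTorch and made-up shapes:

import torch

latent = torch.randn(4, 4, 64, 64)                       # stand-in for the sampled latents
dummy_cond = latent.new_zeros(latent.shape[0], 5, 1, 1)  # same dtype/device as latent
full_mask = latent.new_ones(1, 1, *latent.shape[-2:])    # all-ones conditioning mask
assert dummy_cond.dtype == latent.dtype and full_mask.device == latent.device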
From 39f55c3c35873bc7dd9792cb2155746a1c3d4292 Mon Sep 17 00:00:00 2001
From: random_thoughtss
Date: Sat, 29 Oct 2022 14:13:02 -0700
Subject: [PATCH 014/147] Re-add explicit device move
---
modules/processing.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/processing.py b/modules/processing.py
index ee0e9e34..d07e3db9 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -170,6 +170,7 @@ class StableDiffusionProcessing():
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
+ conditioning_mask = conditioning_mask.to(source_image.device).to(source_image.dtype)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
From 2f125b0a97fe1d4fbd8e4c922615d2c8dfd723fb Mon Sep 17 00:00:00 2001
From: batvbs
Date: Sun, 30 Oct 2022 13:07:25 +0800
Subject: [PATCH 015/147] Update zh_CN.json
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 102 +++++++++++++++++++--------------------
1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 94907ada..b3d83707 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -12,16 +12,16 @@
"Stable Diffusion checkpoint": "Stable Diffusion 模型(ckpt)",
"txt2img": "文生图",
"img2img": "图生图",
- "Extras": "后处理",
- "PNG Info": "PNG 信息",
- "Checkpoint Merger": "模型(ckpt)合并工具",
+ "Extras": "更多",
+ "PNG Info": "图片信息",
+ "Checkpoint Merger": "模型(ckpt)合并",
"Train": "训练",
- "Create aesthetic embedding": "生成美术风格 embedding",
+ "Create aesthetic embedding": "生成美术风格",
"Image Browser": "图库浏览器",
"History": "历史记录",
"Settings": "设置",
"Prompt": "提示词",
- "Negative prompt": "反向提示词",
+ "Negative prompt": "负面提示词",
"Run": "运行",
"Skip": "跳过",
"Interrupt": "中止",
@@ -61,10 +61,10 @@
"Firstpass height": "第一遍的高度",
"Denoising strength": "去噪强度",
"Batch count": "批次",
- "Batch size": "批量",
+ "Batch size": "数量",
"CFG Scale": "提示词相关性(CFG Scale)",
"Seed": "随机种子",
- "Extra": "额外参数",
+ "Extra": "▼",
"Variation seed": "差异随机种子",
"Variation strength": "差异强度",
"Resize seed from width": "自宽度缩放随机种子",
@@ -81,7 +81,7 @@
"Slerp angle": "Slerp 角度",
"Is negative text": "是反向提示词",
"Script": "脚本",
- "Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG",
+ "Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG 图片文件",
"Prompt matrix": "提示词矩阵",
"Prompts from file or textbox": "从文本框或文件载入提示词",
"X/Y plot": "X/Y 图表",
@@ -123,7 +123,7 @@
"Save": "保存",
"Send to img2img": ">> 图生图",
"Send to inpaint": ">> 内补绘制",
- "Send to extras": ">> 后处理",
+ "Send to extras": ">> 更多",
"Make Zip when Save?": "保存时生成zip压缩文件?",
"Textbox": "文本框",
"Interrogate\nCLIP": "CLIP\n反推提示词",
@@ -153,9 +153,9 @@
"Input directory": "输入目录",
"Output directory": "输出目录",
"Resize mode": "缩放模式",
- "Just resize": "只缩放",
- "Crop and resize": "缩放并剪裁",
- "Resize and fill": "缩放并填充",
+ "Just resize": "拉伸",
+ "Crop and resize": "裁剪",
+ "Resize and fill": "填充",
"img2img alternative test": "图生图的另一种测试",
"Loopback": "回送",
"Outpainting mk2": "外补绘制第二版",
@@ -185,7 +185,6 @@
"Color variation": "色彩变化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)",
"Tile overlap": "图块重叠的像素(Tile overlap)",
- "Upscaler": "放大算法",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
@@ -203,6 +202,7 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
+ "Upscaler 1": "放大算法 1",
"Upscaler 2": "放大算法 2",
"Upscaler 2 visibility": "放大算法 2 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
@@ -293,11 +293,11 @@
"Focal point face weight": "焦点面部权重",
"Focal point entropy weight": "焦点熵权重",
"Focal point edges weight": "焦点线条权重",
- "Create debug image": "生成除错图片",
+ "Create debug image": "生成调试(debug)图片",
"Preprocess": "预处理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录",
- "[wiki]": "[wiki]",
+ "[wiki]": "[帮助]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 学习率",
"Hypernetwork Learning rate": "Hypernetwork 学习率",
@@ -308,7 +308,7 @@
"Max steps": "最大迭代步数",
"Save an image to log directory every N steps, 0 to disable": "每 N 步保存一个图像到日志目录,0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步将 embedding 的副本保存到日志目录,0 表示禁用",
- "Save images with embedding in PNG chunks": "保存图像并在 PNG 文件中嵌入 embedding 文件",
+ "Save images with embedding in PNG chunks": "保存图像并在 PNG 图片文件中嵌入 embedding 文件",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时从文生图选项卡中读取参数(提示词等)",
"Train Hypernetwork": "训练 Hypernetwork",
"Train Embedding": "训练 Embedding",
@@ -316,10 +316,10 @@
"Create images embedding": "生成图集 embedding",
"txt2img history": "文生图历史记录",
"img2img history": "图生图历史记录",
- "extras history": "后处理历史记录",
+ "extras history": "更多选项卡的历史记录",
"Renew Page": "刷新页面",
- "extras": "后处理",
- "favorites": "收藏夹",
+ "extras": "更多",
+ "favorites": "已保存",
"others": "其他",
"custom fold": "自定义文件夹",
"Load": "载入",
@@ -341,7 +341,7 @@
"keyword": "关键词",
"Generate Info": "生成信息",
"File Name": "文件名",
- "Collect": "收藏",
+ "Collect": "保存",
"Refresh page": "刷新页面",
"Date to": "日期至",
"Number": "数量",
@@ -350,37 +350,37 @@
"turn_page_switch": "翻页开关",
"Checkbox": "勾选框",
"Apply settings": "保存设置",
- "Saving images/grids": "保存图像/概览图",
+ "Saving images/grids": "保存图像/宫格图",
"Always save all generated images": "始终保存所有生成的图像",
"File format for images": "图像的文件格式",
"Images filename pattern": "图像文件名格式",
"Add number to filename when saving": "储存的时候在文件名里添加数字",
- "Always save all generated image grids": "始终保存所有生成的概览图",
- "File format for grids": "概览图的文件格式",
- "Add extended info (seed, prompt) to filename when saving grid": "保存概览时将扩展信息(随机种子、提示词)添加到文件名",
- "Do not save grids consisting of one picture": "只有一张图片时不要保存概览图",
- "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止概览图中出现空位",
- "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概览行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同",
- "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 文件中",
+ "Always save all generated image grids": "始终保存所有生成的宫格图",
+ "File format for grids": "宫格图的文件格式",
+ "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时将扩展信息(随机种子、提示词)添加到文件名",
+ "Do not save grids consisting of one picture": "只有一张图片时不要保存宫格图",
+ "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止宫格图中出现空位",
+ "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同",
+ "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 图片文件中",
"Create a text file next to every image with generation parameters.": "保存图像时在每个图像旁边创建一个文本文件储存生成参数",
"Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本",
"Quality for saved jpeg images": "保存的 jpeg 图像的质量",
- "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG",
- "Use original name for output filename during batch process in extras tab": "在后处理选项卡中的批量处理过程中使用原始名称作为输出文件名",
+ "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG 图片",
+ "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中使用原始名称作为输出文件名",
"When using 'Save' button, only save a single selected image": "使用“保存”按钮时,只保存一个选定的图像",
"Do not add watermark to images": "不要给图像加水印",
"Paths for saving": "保存路径",
"Output directory for images; if empty, defaults to three directories below": "图像的输出目录; 如果为空,则默认为以下三个目录",
"Output directory for txt2img images": "文生图的输出目录",
"Output directory for img2img images": "图生图的输出目录",
- "Output directory for images from extras tab": "后处理的输出目录",
- "Output directory for grids; if empty, defaults to two directories below": "概览图的输出目录; 如果为空,则默认为以下两个目录",
- "Output directory for txt2img grids": "文生图概览的输出目录",
- "Output directory for img2img grids": "图生图概览的输出目录",
+ "Output directory for images from extras tab": "更多选项卡的输出目录",
+ "Output directory for grids; if empty, defaults to two directories below": "宫格图的输出目录; 如果为空,则默认为以下两个目录",
+ "Output directory for txt2img grids": "文生图宫格的输出目录",
+ "Output directory for img2img grids": "图生图宫格的输出目录",
"Directory for saving images using the Save button": "使用“保存”按钮保存图像的目录",
"Saving to a directory": "保存到目录",
"Save images to a subdirectory": "将图像保存到子目录",
- "Save grids to a subdirectory": "将概览图保存到子目录",
+ "Save grids to a subdirectory": "将宫格图保存到子目录",
"When using \"Save\" button, save images to a subdirectory": "使用“保存”按钮时,将图像保存到子目录",
"Directory name pattern": "目录名称格式",
"Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示词数量",
@@ -405,7 +405,7 @@
"Filename word regex": "文件名用词的正则表达式",
"Filename join string": "文件名连接用字符串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每个 epoch 中单个输入图像的重复次数; 仅用于显示 epoch 数",
- "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 到日志目录,0 表示禁用",
+ "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 表格到日志目录,0 表示禁用",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)",
"Hypernetwork strength": "Hypernetwork 强度",
@@ -418,7 +418,7 @@
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器批量生成与生成单个图像时产出相同的图像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性",
- "Filter NSFW content": "过滤成人内容",
+ "Filter NSFW content": "过滤成人内容(NSFW)",
"Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下",
"Interrogate Options": "反推提示词选项",
"Interrogate: keep models in VRAM": "反推: 将模型保存在显存(VRAM)中",
@@ -436,18 +436,18 @@
"Show progressbar": "显示进度条",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 个采样迭代步数显示图像生成进度。设置 0 禁用",
"Show previews of all images generated in a batch as a grid": "以网格的形式预览所有批量生成出来的图像",
- "Show grid in results for web": "在网页的结果中显示概览图",
+ "Show grid in results for web": "在网页的结果中显示宫格图",
"Do not show any images in results for web": "不在网页的结果中显示任何图像",
"Add model hash to generation information": "将模型的哈希值添加到生成信息",
"Add model name to generation information": "将模型名称添加到生成信息",
- "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "当从文本读取生成参数到 UI(从 PNG 信息或粘贴文本)时,不要更改选定的模型(ckpt)",
+ "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到 UI(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)",
"Send seed when sending prompt or image to other interface": "将提示词或者图片传送到其他界面的时候同时也把随机种子传送过去",
- "Font for image grids that have text": "有文字的概览图使用的字体",
+ "Font for image grids that have text": "有文字的宫格图使用的字体",
"Enable full page image viewer": "启用整页图像查看器",
"Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像",
"Show generation progress in window title.": "在窗口标题中显示生成进度",
"Quicksettings list": "快速设置列表",
- "Localization (requires restart)": "本地化(需要重新启动)",
+ "Localization (requires restart)": "本地化翻译(需要保存设置,并重启Gradio)",
"Sampler parameters": "采样器参数",
"Hide samplers in user interface (requires restart)": "在用户界面中隐藏采样器(需要重新启动)",
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪声乘数) ",
@@ -472,8 +472,8 @@
"Download localization template": "下载本地化模板",
"Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无 ui 更新,无重启)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件(仅限自定义脚本、ui.py、js 和 css)",
- "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)",
- "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)",
+ "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nPrompt",
+ "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt",
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
@@ -500,14 +500,14 @@
"This text is used to rotate the feature space of the imgs embs": "此文本用于旋转图集 embeddings 的特征空间",
"Separate values for X axis using commas.": "使用逗号分隔 X 轴的值",
"Separate values for Y axis using commas.": "使用逗号分隔 Y 轴的值",
- "Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 文件",
+ "Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 表格文件",
"Open images output directory": "打开图像输出目录",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
- "fill it with colors of the image": "用图像的颜色填充它",
- "keep whatever was there originally": "保留原来的东西",
- "fill it with latent space noise": "用潜空间的噪声填充它",
- "fill it with latent space zeroes": "用潜空间的零填充它",
+ "fill it with colors of the image": "用图像的颜色(模糊/马赛克)填充它",
+ "keep whatever was there originally": "保留原来的图像,不进行预处理",
+ "fill it with latent space noise": "用潜空间的噪声(随机彩色噪点)填充它",
+ "fill it with latent space zeroes": "用潜空间的零(灰色)填充它",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
@@ -526,7 +526,7 @@
"Input images directory": "输入图像目录",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果启用此选项,水印将不会添加到生成出来的图像中。警告:如果你不添加水印,你的行为可能是不符合专业操守的",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和概览图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
@@ -536,6 +536,6 @@
"Enable Autocomplete": "开启Tag补全",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Roll three": "抽三位出来",
- "Generate forever": "不停地生成",
- "Cancel generate forever": "取消不停地生成"
+ "Generate forever": "无限生成",
+ "Cancel generate forever": "停止无限生成"
}
From 71571e3f055237d71ba2d47756846ad1d73be00c Mon Sep 17 00:00:00 2001
From: random_thoughtss
Date: Sun, 30 Oct 2022 00:35:40 -0700
Subject: [PATCH 016/147] Replaced master branch fix with updated fix.
---
modules/processing.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 3dd44d3a..512c484f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -688,8 +688,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
- image_conditioning = self.txt2img_image_conditioning(x)
-
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
From be27fd4690b1eb6c74da1e31c9696a0f1901fbba Mon Sep 17 00:00:00 2001
From: evshiron
Date: Sun, 30 Oct 2022 17:01:01 +0800
Subject: [PATCH 017/147] Fix progress API broken by the previous rework
---
modules/shared.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/modules/shared.py b/modules/shared.py
index e4f163c1..2c7d28a5 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -4,6 +4,7 @@ import json
import os
import sys
from collections import OrderedDict
+import time
import gradio as gr
import tqdm
@@ -132,6 +133,7 @@ class State:
current_image = None
current_image_sampling_step = 0
textinfo = None
+ time_start = None
def skip(self):
self.skipped = True
@@ -168,6 +170,7 @@ class State:
self.skipped = False
self.interrupted = False
self.textinfo = None
+ self.time_start = time.time()
devices.torch_gc()
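
The rework adds a time_start field that is reset whenever a job begins. One plausible consumer (an assumption, not shown in this patch) is an elapsed-time based ETA for the progress endpoint; a minimal sketch:

import time

class JobState:
    # Hypothetical stand-in mirroring only the fields touched above.
    def __init__(self, sampling_steps=20):
        self.sampling_step = 0
        self.sampling_steps = sampling_steps
        self.time_start = None

    def begin(self):
        self.time_start = time.time()

    def eta_seconds(self):
        # Scale the elapsed time by the fraction of sampling steps remaining.
        done = max(self.sampling_step, 1)
        elapsed = time.time() - self.time_start
        return elapsed / done * (self.sampling_steps - done)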
From 1a4ff2de6a835cd8cc1590bbc1a8dedb5ad37e5b Mon Sep 17 00:00:00 2001
From: evshiron
Date: Sun, 30 Oct 2022 17:02:47 +0800
Subject: [PATCH 018/147] Fix current image in progress API when parallel
 processing is enabled
---
modules/api/api.py | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 6c06d449..97497f3f 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -3,10 +3,9 @@ import uvicorn
from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
from fastapi import APIRouter, Depends, HTTPException
import modules.shared as shared
-from modules import devices
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers
+from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
from modules.extras import run_extras, run_pnginfo
@@ -170,6 +169,16 @@ class Api:
progress = min(progress, 1)
+ # copy from check_progress_call of ui.py
+
+ if shared.parallel_processing_allowed:
+ if shared.state.sampling_step - shared.state.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.state.current_latent is not None:
+ if shared.opts.show_progress_grid:
+ shared.state.current_image = samples_to_image_grid(shared.state.current_latent)
+ else:
+ shared.state.current_image = sample_to_image(shared.state.current_latent)
+ shared.state.current_image_sampling_step = shared.state.sampling_step
+
current_image = None
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)
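
With previews now refreshed even when parallel processing is enabled, a client can poll the progress route and read back the base64-encoded preview. A hypothetical polling loop (the route path and response fields are assumptions, not taken from this diff):

import time
import requests

def poll_progress(base_url="http://127.0.0.1:7860"):
    # Poll until no job remains; current_image, when present, is a base64-encoded preview.
    while True:
        resp = requests.get(f"{base_url}/sdapi/v1/progress").json()
        print(f"progress: {resp.get('progress', 0.0):.0%}, preview: {bool(resp.get('current_image'))}")
        if not resp.get("state", {}).get("job_count", 0):
            break
        time.sleep(1)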
From b5e21e3348163f4a17d4a2e6f97af2c370edc9b3 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Sun, 30 Oct 2022 17:49:17 +0800
Subject: [PATCH 019/147] Update zh_CN.json
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index b3d83707..d07c03a7 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -21,7 +21,7 @@
"History": "历史记录",
"Settings": "设置",
"Prompt": "提示词",
- "Negative prompt": "负面提示词",
+ "Negative prompt": "反向提示词",
"Run": "运行",
"Skip": "跳过",
"Interrupt": "中止",
From 99c4e8d65357ebd9e3eab95e7c6de4a86f125c1a Mon Sep 17 00:00:00 2001
From: batvbs
Date: Sun, 30 Oct 2022 19:36:01 +0800
Subject: [PATCH 020/147] Add commas to long translation strings
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 50 ++++++++++++++++++++--------------------
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index d07c03a7..5472535e 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -60,8 +60,8 @@
"Firstpass width": "第一遍的宽度",
"Firstpass height": "第一遍的高度",
"Denoising strength": "去噪强度",
- "Batch count": "批次",
- "Batch size": "数量",
+ "Batch count": "生成批次",
+ "Batch size": "每批数量",
"CFG Scale": "提示词相关性(CFG Scale)",
"Seed": "随机种子",
"Extra": "▼",
@@ -224,7 +224,7 @@
"Add difference": "添加差分",
"Save as float16": "以 float16 储存",
"See": "查看",
- "wiki": "wiki",
+ "wiki": "帮助",
"for detailed explanation.": "以了解详细说明",
"Create embedding": "生成 embedding",
"Create aesthetic images embedding": "生成美术风格图集 embedding",
@@ -308,8 +308,8 @@
"Max steps": "最大迭代步数",
"Save an image to log directory every N steps, 0 to disable": "每 N 步保存一个图像到日志目录,0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步将 embedding 的副本保存到日志目录,0 表示禁用",
- "Save images with embedding in PNG chunks": "保存图像并在 PNG 图片文件中嵌入 embedding 文件",
- "Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时从文生图选项卡中读取参数(提示词等)",
+ "Save images with embedding in PNG chunks": "保存图像,并在 PNG 图片文件中嵌入 embedding 文件",
+ "Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时,从文生图选项卡中读取参数(提示词等)",
"Train Hypernetwork": "训练 Hypernetwork",
"Train Embedding": "训练 Embedding",
"Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding",
@@ -357,16 +357,16 @@
"Add number to filename when saving": "储存的时候在文件名里添加数字",
"Always save all generated image grids": "始终保存所有生成的宫格图",
"File format for grids": "宫格图的文件格式",
- "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时将扩展信息(随机种子、提示词)添加到文件名",
+ "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时,将扩展信息(随机种子、提示词)添加到文件名",
"Do not save grids consisting of one picture": "只有一张图片时不要保存宫格图",
- "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止宫格图中出现空位",
- "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同",
- "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 图片文件中",
- "Create a text file next to every image with generation parameters.": "保存图像时在每个图像旁边创建一个文本文件储存生成参数",
+ "Prevent empty spots in grid (when set to autodetect)": "(启用自动检测时)防止宫格图中出现空位",
+ "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与每批数量相同",
+ "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息,作为块保存到 png 图片文件中",
+ "Create a text file next to every image with generation parameters.": "保存图像时,在每个图像旁边创建一个文本文件储存生成参数",
"Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本",
"Quality for saved jpeg images": "保存的 jpeg 图像的质量",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG 图片",
- "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中使用原始名称作为输出文件名",
+ "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中,使用原始名称作为输出文件名",
"When using 'Save' button, only save a single selected image": "使用“保存”按钮时,只保存一个选定的图像",
"Do not add watermark to images": "不要给图像加水印",
"Paths for saving": "保存路径",
@@ -391,12 +391,12 @@
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的图块重叠(Tile overlap)像素。低值 = 可见接缝",
"LDSR processing steps. Lower = faster": "LDSR 处理迭代步数。更低 = 更快",
"Upscaler for img2img": "图生图的放大算法",
- "Upscale latent space image when doing hires. fix": "做高分辨率修复时也放大潜空间图像",
+ "Upscale latent space image when doing hires. fix": "做高分辨率修复时,也放大潜空间图像",
"Face restoration": "面部修复",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 权重参数; 0 = 最大效果; 1 = 最小效果",
- "Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后将面部修复模型从显存(VRAM)移至内存(RAM)",
+ "Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后,将面部修复模型从显存(VRAM)移至内存(RAM)",
"System": "系统",
- "VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用",
+ "VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时,每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用",
"Always print all generation info to standard output": "始终将所有生成信息输出到 standard output (一般为控制台)",
"Add a second progress bar to the console that shows progress for an entire job.": "向控制台添加第二个进度条,显示整个作业的进度",
"Training": "训练",
@@ -416,7 +416,7 @@
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 采样器中启用量化以获得更清晰、更清晰的结果。这可能会改变现有的随机种子。需要重新启动才能应用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "强调符:使用 (文字) 使模型更关注该文本,使用 [文字] 使其减少关注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子",
- "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器批量生成与生成单个图像时产出相同的图像",
+ "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器 批量生成与生成单个图像时,产出相同的图像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性",
"Filter NSFW content": "过滤成人内容(NSFW)",
"Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下",
@@ -435,16 +435,16 @@
"User interface": "用户界面",
"Show progressbar": "显示进度条",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 个采样迭代步数显示图像生成进度。设置 0 禁用",
- "Show previews of all images generated in a batch as a grid": "以网格的形式预览所有批量生成出来的图像",
+ "Show previews of all images generated in a batch as a grid": "以网格的形式,预览批量生成的所有图像",
"Show grid in results for web": "在网页的结果中显示宫格图",
"Do not show any images in results for web": "不在网页的结果中显示任何图像",
"Add model hash to generation information": "将模型的哈希值添加到生成信息",
"Add model name to generation information": "将模型名称添加到生成信息",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到 UI(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)",
- "Send seed when sending prompt or image to other interface": "将提示词或者图片传送到其他界面的时候同时也把随机种子传送过去",
+ "Send seed when sending prompt or image to other interface": "将提示词或者图片发送到 >> 其他界面时,把随机种子也传送过去",
"Font for image grids that have text": "有文字的宫格图使用的字体",
"Enable full page image viewer": "启用整页图像查看器",
- "Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像",
+ "Show images zoomed in by default in full page image viewer": "在整页图像查看器中,默认放大显示图像",
"Show generation progress in window title.": "在窗口标题中显示生成进度",
"Quicksettings list": "快速设置列表",
"Localization (requires restart)": "本地化翻译(需要保存设置,并重启Gradio)",
@@ -460,7 +460,7 @@
"sigma noise": "sigma 噪声",
"Eta noise seed delta": "Eta 噪声种子偏移(noise seed delta)",
"Images Browser": "图库浏览器",
- "Preload images at startup": "在启动时预载图像",
+ "Preload images at startup": "在启动时预加载图像",
"Number of columns on the page": "每页列数",
"Number of rows on the page": "每页行数",
"Number of pictures displayed on each page": "每页显示的图像数量",
@@ -478,15 +478,15 @@
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
"Apply selected styles to current prompt": "将所选样式应用于当前提示",
- "Stop processing current image and continue processing.": "停止处理当前图像并继续处理下一个",
- "Stop processing images and return any results accumulated so far.": "停止处理图像并返回迄今为止累积的任何结果",
+ "Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个",
+ "Stop processing images and return any results accumulated so far.": "停止处理图像,并返回迄今为止累积的任何结果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者",
"Do not do anything special": "什么都不做",
"Which algorithm to use to produce the image": "使用哪种算法生成图像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有创意,可以根据迭代步数获得完全不同的图像,将迭代步数设置为高于 30-40 不会有正面作用",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长内补绘制",
"Produce an image that can be tiled.": "生成可用于平铺(tiled)的图像",
- "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
+ "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候,以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
"How many batches of images to create": "创建多少批次的图像",
"How many image to create in a single batch": "每批创建多少图像",
@@ -504,10 +504,10 @@
"Open images output directory": "打开图像输出目录",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
- "fill it with colors of the image": "用图像的颜色(模糊/马赛克)填充它",
+ "fill it with colors of the image": "用图像的颜色(≈模糊/马赛克)填充它",
"keep whatever was there originally": "保留原来的图像,不进行预处理",
- "fill it with latent space noise": "用潜空间的噪声(随机彩色噪点)填充它",
- "fill it with latent space zeroes": "用潜空间的零(灰色)填充它",
+ "fill it with latent space noise": "用潜空间的噪声(≈随机彩色噪点)填充它",
+ "fill it with latent space zeroes": "用潜空间的零(≈灰色)填充它",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
From 4b8a192f680101de247dca79e48974b53bf961fe Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Sat, 29 Oct 2022 16:36:43 +0900
Subject: [PATCH 021/147] add optimizer save option to shared.opts
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index e4f163c1..065b893d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -286,6 +286,7 @@ options_templates.update(options_section(('system', "System"), {
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
From 20194fd9752a280306fb66b57b258609b0918c46 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Sat, 29 Oct 2022 16:56:42 +0900
Subject: [PATCH 022/147] We have duplicate linear now
---
modules/hypernetworks/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index aad09ffc..c2d4b51c 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -9,7 +9,7 @@ from modules import devices, sd_hijack, shared
from modules.hypernetworks import hypernetwork
not_available = ["hardswish", "multiheadattention"]
-keys = ["linear"] + list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
+keys = list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
# Remove illegal characters from name.
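The rationale: "linear" already appears as a key of `hypernetwork.HypernetworkModule.activation_dict`, so prepending it by hand produced a duplicate entry in the dropdown. A tiny illustration with placeholder values:

activation_dict = {"linear": None, "relu": None, "hardswish": None}  # placeholder mapping
not_available = ["hardswish", "multiheadattention"]

keys = list(x for x in activation_dict.keys() if x not in not_available)
print(keys)  # ['linear', 'relu'] -- "linear" shows up exactly once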
From 9d96d7d0a0aa0a966a9aefd24342345eb65952ed Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Sun, 30 Oct 2022 20:39:04 +0900
Subject: [PATCH 023/147] resolve conflicts
---
modules/hypernetworks/hypernetwork.py | 44 +++++++++++++++++++++++----
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index a11e01d6..8f74cdea 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -21,6 +21,7 @@ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_norm
from collections import defaultdict, deque
from statistics import stdev, mean
+optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
@@ -139,6 +140,8 @@ class Hypernetwork:
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
+ self.optimizer_name = None
+ self.optimizer_state_dict = None
for size in enable_sizes or []:
self.layers[size] = (
@@ -171,6 +174,10 @@ class Hypernetwork:
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
+ if self.optimizer_name is not None:
+ state_dict['optimizer_name'] = self.optimizer_name
+ if self.optimizer_state_dict:
+ state_dict['optimizer_state_dict'] = self.optimizer_state_dict
torch.save(state_dict, filename)
@@ -190,7 +197,14 @@ class Hypernetwork:
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
- print(f"Dropout usage is set to {self.use_dropout}" )
+ print(f"Dropout usage is set to {self.use_dropout}")
+ self.optimizer_name = state_dict.get('optimizer_name', 'AdamW')
+ print(f"Optimizer name is {self.optimizer_name}")
+ self.optimizer_state_dict = state_dict.get('optimizer_state_dict', None)
+ if self.optimizer_state_dict:
+ print("Loaded existing optimizer from checkpoint")
+ else:
+ print("No saved optimizer exists in checkpoint")
for size, sd in state_dict.items():
if type(size) == int:
@@ -392,8 +406,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
- # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
- optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
+ # Here we use optimizer from saved HN, or we can specify as UI option.
+ if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+ optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+ else:
+ print(f"Optimizer type {optimizer_name} is not defined!")
+ optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+ optimizer_name = 'AdamW'
+ if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
+ try:
+ optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+ except RuntimeError as e:
+ print("Cannot resume from saved optimizer!")
+ print(e)
steps_without_grad = 0
@@ -455,8 +480,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
# Before saving, change name to match current checkpoint.
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+ hypernetwork.optimizer_name = optimizer_name
+ if shared.opts.save_optimizer_state:
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
-
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
"loss": f"{previous_mean_loss:.7f}",
"learn_rate": scheduler.learn_rate
@@ -514,14 +542,18 @@ Last saved hypernetwork: {html.escape(last_saved_file)}
Last saved image: {html.escape(last_saved_image)}
"""
-
report_statistics(loss_dict)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+ hypernetwork.optimizer_name = optimizer_name
+ if shared.opts.save_optimizer_state:
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
-
+ del optimizer
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
return hypernetwork, filename
+
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
old_hypernetwork_name = hypernetwork.name
old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
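Taken together, the training-side changes look up the optimizer class by name in `torch.optim`, fall back to AdamW when the saved name is unknown, and try to resume any stored state. A condensed, self-contained sketch of that flow (`weights` and `lr` stand in for the real training objects):

import inspect

import torch

optimizer_dict = {name: cls for name, cls in inspect.getmembers(torch.optim, inspect.isclass)
                  if name != "Optimizer"}

def build_optimizer(weights, lr, optimizer_name=None, saved_state=None):
    if optimizer_name in optimizer_dict:
        optimizer = optimizer_dict[optimizer_name](params=weights, lr=lr)
    else:
        # Unknown or missing name: fall back to the previous hard-coded default.
        optimizer = torch.optim.AdamW(params=weights, lr=lr)
        optimizer_name = "AdamW"
    if saved_state:
        try:
            optimizer.load_state_dict(saved_state)
        except RuntimeError as e:
            # Happens when the optimizer type or parameter shapes no longer match the saved state.
            print("Cannot resume from saved optimizer!", e)
    return optimizer, optimizer_name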
From c9bb33dd43dbb9479ff1b70351df14508c89ac60 Mon Sep 17 00:00:00 2001
From: victorca25
Date: Sun, 30 Oct 2022 12:52:50 +0100
Subject: [PATCH 024/147] add resrgan 8x, allow use of 1x and up to 8x extra
 models, move BSRGAN model, add nearest
---
modules/esrgan_model.py | 17 +++++++++++++----
modules/modelloader.py | 3 +++
modules/ui.py | 2 +-
modules/upscaler.py | 17 ++++++++++++++++-
4 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index a13cf6ac..c61669b4 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -50,6 +50,7 @@ def mod2normal(state_dict):
def resrgan2normal(state_dict, nb=23):
# this code is copied from https://github.com/victorca25/iNNfer
if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
+ re8x = 0
crt_net = {}
items = []
for k, v in state_dict.items():
@@ -75,10 +76,18 @@ def resrgan2normal(state_dict, nb=23):
crt_net['model.3.bias'] = state_dict['conv_up1.bias']
crt_net['model.6.weight'] = state_dict['conv_up2.weight']
crt_net['model.6.bias'] = state_dict['conv_up2.bias']
- crt_net['model.8.weight'] = state_dict['conv_hr.weight']
- crt_net['model.8.bias'] = state_dict['conv_hr.bias']
- crt_net['model.10.weight'] = state_dict['conv_last.weight']
- crt_net['model.10.bias'] = state_dict['conv_last.bias']
+
+ if 'conv_up3.weight' in state_dict:
+ # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
+ re8x = 3
+ crt_net['model.9.weight'] = state_dict['conv_up3.weight']
+ crt_net['model.9.bias'] = state_dict['conv_up3.bias']
+
+ crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
+ crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
+ crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
+ crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
+
state_dict = crt_net
return state_dict
diff --git a/modules/modelloader.py b/modules/modelloader.py
index b0f2f33d..e4a6f8ac 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -85,6 +85,9 @@ def cleanup_models():
src_path = os.path.join(root_path, "ESRGAN")
dest_path = os.path.join(models_path, "ESRGAN")
move_files(src_path, dest_path)
+ src_path = os.path.join(models_path, "BSRGAN")
+ dest_path = os.path.join(models_path, "ESRGAN")
+ move_files(src_path, dest_path, ".pth")
src_path = os.path.join(root_path, "gfpgan")
dest_path = os.path.join(models_path, "GFPGAN")
move_files(src_path, dest_path)
diff --git a/modules/ui.py b/modules/ui.py
index 5055ca64..47610f5c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1059,7 +1059,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by'):
- upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
+ upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
with gr.TabItem('Scale to'):
with gr.Group():
with gr.Row():
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 6ab2fb40..83fde7ca 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -10,6 +10,7 @@ import modules.shared
from modules import modelloader, shared
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
+NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
from modules.paths import models_path
@@ -57,7 +58,7 @@ class Upscaler:
dest_w = img.width * scale
dest_h = img.height * scale
for i in range(3):
- if img.width >= dest_w and img.height >= dest_h:
+ if img.width > dest_w and img.height > dest_h:
break
img = self.do_upscale(img, selected_model)
if img.width != dest_w or img.height != dest_h:
@@ -120,3 +121,17 @@ class UpscalerLanczos(Upscaler):
self.name = "Lanczos"
self.scalers = [UpscalerData("Lanczos", None, self)]
+
+class UpscalerNearest(Upscaler):
+ scalers = []
+
+ def do_upscale(self, img, selected_model=None):
+ return img.resize((int(img.width * self.scale), int(img.height * self.scale)), resample=NEAREST)
+
+ def load_model(self, _):
+ pass
+
+ def __init__(self, dirname=None):
+ super().__init__(False)
+ self.name = "Nearest"
+ self.scalers = [UpscalerData("Nearest", None, self)]
\ No newline at end of file
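Two things happen in this patch: RealESRGAN checkpoints that contain a `conv_up3` layer (8x models) have their `conv_hr`/`conv_last` indices shifted by 3, and a new `UpscalerNearest` wraps PIL's nearest-neighbour resampling in the common `Upscaler` interface. A standalone sketch of the latter (the fixed `scale` argument is an assumption; the real class gets its scale from the upscaler framework):

from PIL import Image

# Mirror the module-level constant added to modules/upscaler.py.
NEAREST = Image.Resampling.NEAREST if hasattr(Image, "Resampling") else Image.NEAREST

def nearest_upscale(img: Image.Image, scale: float = 2.0) -> Image.Image:
    # Nearest-neighbour keeps hard pixel edges, which is what you want for pixel art.
    return img.resize((int(img.width * scale), int(img.height * scale)), resample=NEAREST)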
From 5d69f75e5bc5e8908cb6c590055157f8c7d4bb3b Mon Sep 17 00:00:00 2001
From: batvbs
Date: Sun, 30 Oct 2022 21:24:28 +0800
Subject: [PATCH 025/147] Update zh_CN.json
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 5472535e..b796c625 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -524,9 +524,9 @@
"Path to directory with input images": "带有输入图像的路径",
"Path to directory where to write outputs": "进行输出的路径",
"Input images directory": "输入图像目录",
- "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果启用此选项,水印将不会添加到生成出来的图像中。警告:如果你不添加水印,你的行为可能是不符合专业操守的",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
From cb31abcf58ea1f64266e6d821937eed058c35f4d Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Sun, 30 Oct 2022 21:54:31 +0700
Subject: [PATCH 026/147] Settings to select VAE
---
modules/sd_models.py | 31 ++++-------
modules/sd_vae.py | 121 +++++++++++++++++++++++++++++++++++++++++++
modules/shared.py | 8 +--
webui.py | 5 ++
4 files changed, 141 insertions(+), 24 deletions(-)
create mode 100644 modules/sd_vae.py
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f86dc3ed..91ad4b5e 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -8,7 +8,7 @@ from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
-from modules import shared, modelloader, devices, script_callbacks
+from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
@@ -160,12 +160,11 @@ def get_state_dict_from_checkpoint(pl_sd):
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
-
-def load_model_weights(model, checkpoint_info):
+def load_model_weights(model, checkpoint_info, force=False):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
- if checkpoint_info not in checkpoints_loaded:
+ if force or checkpoint_info not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -186,17 +185,7 @@ def load_model_weights(model, checkpoint_info):
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
- vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
-
- if not os.path.exists(vae_file) and shared.cmd_opts.vae_path is not None:
- vae_file = shared.cmd_opts.vae_path
-
- if os.path.exists(vae_file):
- print(f"Loading VAE weights from: {vae_file}")
- vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
- vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
- model.first_stage_model.load_state_dict(vae_dict)
-
+ sd_vae.load_vae(model, checkpoint_file)
model.first_stage_model.to(devices.dtype_vae)
if shared.opts.sd_checkpoint_cache > 0:
@@ -213,7 +202,7 @@ def load_model_weights(model, checkpoint_info):
model.sd_checkpoint_info = checkpoint_info
-def load_model(checkpoint_info=None):
+def load_model(checkpoint_info=None, force=False):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
@@ -234,7 +223,7 @@ def load_model(checkpoint_info=None):
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
- load_model_weights(sd_model, checkpoint_info)
+ load_model_weights(sd_model, checkpoint_info, force=force)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
@@ -252,16 +241,16 @@ def load_model(checkpoint_info=None):
return sd_model
-def reload_model_weights(sd_model, info=None):
+def reload_model_weights(sd_model, info=None, force=False):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
- if sd_model.sd_model_checkpoint == checkpoint_info.filename:
+ if sd_model.sd_model_checkpoint == checkpoint_info.filename and not force:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
checkpoints_loaded.clear()
- load_model(checkpoint_info)
+ load_model(checkpoint_info, force=force)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
@@ -271,7 +260,7 @@ def reload_model_weights(sd_model, info=None):
sd_hijack.model_hijack.undo_hijack(sd_model)
- load_model_weights(sd_model, checkpoint_info)
+ load_model_weights(sd_model, checkpoint_info, force=force)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
new file mode 100644
index 00000000..82764e55
--- /dev/null
+++ b/modules/sd_vae.py
@@ -0,0 +1,121 @@
+import torch
+import os
+from collections import namedtuple
+from modules import shared, devices
+from modules.paths import models_path
+import glob
+
+model_dir = "Stable-diffusion"
+model_path = os.path.abspath(os.path.join(models_path, model_dir))
+vae_dir = "VAE"
+vae_path = os.path.abspath(os.path.join(models_path, vae_dir))
+
+vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
+default_vae_dict = {"auto": "auto", "None": "None"}
+default_vae_list = ["auto", "None"]
+default_vae_values = [default_vae_dict[x] for x in default_vae_list]
+vae_dict = dict(default_vae_dict)
+vae_list = list(default_vae_list)
+first_load = True
+
+def get_filename(filepath):
+ return os.path.splitext(os.path.basename(filepath))[0]
+
+def refresh_vae_list(vae_path=vae_path, model_path=model_path):
+ global vae_dict, vae_list
+ res = {}
+ candidates = [
+ *glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
+ *glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
+ *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True),
+ *glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True)
+ ]
+ if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
+ candidates.append(shared.cmd_opts.vae_path)
+ for filepath in candidates:
+ name = get_filename(filepath)
+ res[name] = filepath
+ vae_list.clear()
+ vae_list.extend(default_vae_list)
+ vae_list.extend(list(res.keys()))
+ vae_dict.clear()
+ vae_dict.update(default_vae_dict)
+ vae_dict.update(res)
+ return vae_list
+
+def load_vae(model, checkpoint_file, vae_file="auto"):
+ global first_load, vae_dict, vae_list
+ # save_settings = False
+
+ # if vae_file argument is provided, it takes priority
+ if vae_file and vae_file not in default_vae_list:
+ if not os.path.isfile(vae_file):
+ vae_file = "auto"
+ # save_settings = True
+ print("VAE provided as function argument doesn't exist")
+ # for the first load, if vae-path is provided, it takes priority and failure is reported
+ if first_load and shared.cmd_opts.vae_path is not None:
+ if os.path.isfile(shared.cmd_opts.vae_path):
+ vae_file = shared.cmd_opts.vae_path
+ # save_settings = True
+ # print("Using VAE provided as command line argument")
+ else:
+ print("VAE provided as command line argument doesn't exist")
+ # else, we load from settings
+ if vae_file == "auto" and shared.opts.sd_vae is not None:
+ # if saved VAE settings isn't recognized, fallback to auto
+ vae_file = vae_dict.get(shared.opts.sd_vae, "auto")
+ # if VAE selected but not found, fallback to auto
+ if vae_file not in default_vae_values and not os.path.isfile(vae_file):
+ vae_file = "auto"
+ print("Selected VAE doesn't exist")
+ # vae-path cmd arg takes priority for auto
+ if vae_file == "auto" and shared.cmd_opts.vae_path is not None:
+ if os.path.isfile(shared.cmd_opts.vae_path):
+ vae_file = shared.cmd_opts.vae_path
+ print("Using VAE provided as command line argument")
+ # if still not found, try look for ".vae.pt" beside model
+ model_path = os.path.splitext(checkpoint_file)[0]
+ if vae_file == "auto":
+ vae_file_try = model_path + ".vae.pt"
+ if os.path.isfile(vae_file_try):
+ vae_file = vae_file_try
+ print("Using VAE found beside selected model")
+ # if still not found, try look for ".vae.ckpt" beside model
+ if vae_file == "auto":
+ vae_file_try = model_path + ".vae.ckpt"
+ if os.path.isfile(vae_file_try):
+ vae_file = vae_file_try
+ print("Using VAE found beside selected model")
+ # No more fallbacks for auto
+ if vae_file == "auto":
+ vae_file = None
+ # Last check, just because
+ if vae_file and not os.path.exists(vae_file):
+ vae_file = None
+
+ if vae_file:
+ print(f"Loading VAE weights from: {vae_file}")
+ vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
+ vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
+ model.first_stage_model.load_state_dict(vae_dict_1)
+
+ # If vae used is not in dict, update it
+ # It will be removed on refresh though
+ if vae_file is not None:
+ vae_opt = get_filename(vae_file)
+ if vae_opt not in vae_dict:
+ vae_dict[vae_opt] = vae_file
+ vae_list.append(vae_opt)
+
+ """
+ # Save current VAE to VAE settings, maybe? will it work?
+ if save_settings:
+ if vae_file is None:
+ vae_opt = "None"
+
+ # shared.opts.sd_vae = vae_opt
+ """
+
+ first_load = False
+ model.first_stage_model.to(devices.dtype_vae)
diff --git a/modules/shared.py b/modules/shared.py
index e4f163c1..06440ac4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -14,7 +14,7 @@ import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
-from modules import sd_samplers, sd_models, localization
+from modules import sd_samplers, sd_models, localization, sd_vae
from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path
@@ -295,6 +295,7 @@ options_templates.update(options_section(('training', "Training"), {
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ "sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": list(sd_vae.vae_list)}, refresh=sd_vae.refresh_vae_list),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
@@ -407,11 +408,12 @@ class Options:
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
- def onchange(self, key, func):
+ def onchange(self, key, func, call=True):
item = self.data_labels.get(key)
item.onchange = func
- func()
+ if call:
+ func()
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
diff --git a/webui.py b/webui.py
index 29530872..27949f3d 100644
--- a/webui.py
+++ b/webui.py
@@ -21,6 +21,7 @@ import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
+import modules.sd_vae
import modules.shared as shared
import modules.txt2img
@@ -74,8 +75,12 @@ def initialize():
modules.scripts.load_scripts()
+ modules.sd_vae.refresh_vae_list()
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
+ # I don't know what needs to be done to only reload VAE, with all those hijacks callbacks, and lowvram,
+ # so for now this reloads the whole model too, and no cache
+ shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model, force=True)), call=False)
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
From 2468039df2c5705039a5c14fd74f1354093bf874 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Sun, 30 Oct 2022 21:58:31 +0700
Subject: [PATCH 027/147] Forgot to add this folder
---
models/VAE/Put VAE here.txt | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 models/VAE/Put VAE here.txt
diff --git a/models/VAE/Put VAE here.txt b/models/VAE/Put VAE here.txt
new file mode 100644
index 00000000..e69de29b
From 0ccc982f096d035ec5c9f406e83bf95311c8349a Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Mon, 31 Oct 2022 00:03:58 +0900
Subject: [PATCH 028/147] re-order content to match the dump
Deprecated content was moved to the bottom to keep (some degree of) backwards compatibility.
---
localizations/zh_CN.json | 69 ++++++++++++++++++++--------------------
1 file changed, 35 insertions(+), 34 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index b796c625..80962f62 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -18,7 +18,6 @@
"Train": "训练",
"Create aesthetic embedding": "生成美术风格",
"Image Browser": "图库浏览器",
- "History": "历史记录",
"Settings": "设置",
"Prompt": "提示词",
"Negative prompt": "反向提示词",
@@ -70,7 +69,6 @@
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
"Open for Clip Aesthetic!": "打开美术风格 Clip!",
- "▼": "▼",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
@@ -87,10 +85,8 @@
"X/Y plot": "X/Y 图表",
"Source embedding to convert": "用于转换的源 Embedding",
"Embedding token": "Embedding 的 token (关键词)",
+ "Output directory": "输出目录",
"Put variable parts at start of prompt": "把变量部分放在提示词文本的开头",
- "Show Textbox": "显示文本框",
- "File with inputs": "含输入内容的文件",
- "Prompts": "提示词",
"Iterate seed every line": "每行输入都换一个种子",
"List of prompt inputs": "提示词输入列表",
"Upload prompt inputs": "上传提示词输入文件",
@@ -119,7 +115,6 @@
"Draw legend": "在图表中包括轴标题",
"Include Separate Images": "包括独立的图像",
"Keep -1 for seeds": "保持随机种子为-1",
- "Drop Image Here": "拖拽图像到此",
"Save": "保存",
"Send to img2img": ">> 图生图",
"Send to inpaint": ">> 内补绘制",
@@ -131,6 +126,7 @@
"Inpaint": "内补绘制",
"Batch img2img": "批量图生图",
"Image for img2img": "图生图的图像",
+ "Drop Image Here": "拖拽图像到此",
"Image for inpainting with mask": "用于内补绘制蒙版内容的图像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
@@ -149,9 +145,7 @@
"Inpaint at full resolution padding, pixels": "以完整分辨率进行内补绘制 - 填补像素",
"Process images in a directory on the same machine where the server is running.": "在服务器主机上的目录中处理图像",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一个空的文件夹为输出目录而非默认的 output 文件夹为输出目录",
- "Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
"Input directory": "输入目录",
- "Output directory": "输出目录",
"Resize mode": "缩放模式",
"Just resize": "拉伸",
"Crop and resize": "裁剪",
@@ -185,6 +179,7 @@
"Color variation": "色彩变化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)",
"Tile overlap": "图块重叠的像素(Tile overlap)",
+ "Upscaler": "Upscaler",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
@@ -202,14 +197,11 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
- "Upscaler 1": "放大算法 1",
- "Upscaler 2": "放大算法 2",
"Upscaler 2 visibility": "放大算法 2 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
"Upscale Before Restoring Faces": "放大后再进行面部修复",
- "Open output directory": "打开输出目录",
"Send to txt2img": ">> 文生图",
"A merger of the two checkpoints will be generated in your": "合并后的模型(ckpt)会生成在你的",
"checkpoint": "模型(ckpt)",
@@ -227,7 +219,6 @@
"wiki": "帮助",
"for detailed explanation.": "以了解详细说明",
"Create embedding": "生成 embedding",
- "Create aesthetic images embedding": "生成美术风格图集 embedding",
"Create hypernetwork": "生成 hypernetwork",
"Preprocess images": "图像预处理",
"Name": "名称",
@@ -283,7 +274,6 @@
"prepend": "放前面",
"append": "放后面",
"Create flipped copies": "生成镜像副本",
- "Split oversized images into two": "将过大的图像分为两份",
"Split oversized images": "分割过大的图像",
"Auto focal point crop": "自动焦点裁切",
"Use BLIP for caption": "使用 BLIP 生成说明文字(自然语言描述)",
@@ -295,13 +285,11 @@
"Focal point edges weight": "焦点线条权重",
"Create debug image": "生成调试(debug)图片",
"Preprocess": "预处理",
- "Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录",
"[wiki]": "[帮助]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 学习率",
"Hypernetwork Learning rate": "Hypernetwork 学习率",
- "Learning rate": "学习率",
"Dataset directory": "数据集目录",
"Log directory": "日志目录",
"Prompt template file": "提示词模版文件",
@@ -314,25 +302,15 @@
"Train Embedding": "训练 Embedding",
"Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding",
"Create images embedding": "生成图集 embedding",
- "txt2img history": "文生图历史记录",
- "img2img history": "图生图历史记录",
- "extras history": "更多选项卡的历史记录",
- "Renew Page": "刷新页面",
- "extras": "更多",
"favorites": "已保存",
"others": "其他",
- "custom fold": "自定义文件夹",
- "Load": "载入",
"Images directory": "图像目录",
"Dropdown": "下拉列表",
- "Prev batch": "上一批",
- "Next batch": "下一批",
"First Page": "首页",
"Prev Page": "上一页",
"Page Index": "页数",
"Next Page": "下一页",
"End Page": "尾页",
- "number of images to delete consecutively next": "接下来要连续删除的图像数",
"delete next": "删除下一张",
"Delete": "删除",
"sort by": "排序方式",
@@ -341,9 +319,8 @@
"keyword": "关键词",
"Generate Info": "生成信息",
"File Name": "文件名",
- "Collect": "保存",
- "Refresh page": "刷新页面",
- "Date to": "日期至",
+ "Move to favorites": "Move to favorites",
+ "Renew Page": "刷新页面",
"Number": "数量",
"set_index": "设置索引",
"load_switch": "载入开关",
@@ -400,7 +377,6 @@
"Always print all generation info to standard output": "始终将所有生成信息输出到 standard output (一般为控制台)",
"Add a second progress bar to the console that shows progress for an entire job.": "向控制台添加第二个进度条,显示整个作业的进度",
"Training": "训练",
- "Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
"Filename word regex": "文件名用词的正则表达式",
"Filename join string": "文件名连接用字符串",
@@ -463,9 +439,7 @@
"Preload images at startup": "在启动时预加载图像",
"Number of columns on the page": "每页列数",
"Number of rows on the page": "每页行数",
- "Number of pictures displayed on each page": "每页显示的图像数量",
"Minimum number of pages per load": "每次加载的最小页数",
- "Number of grids in each row": "每行显示多少格",
"Wildcards": "通配符",
"Use same seed for all images": "为所有图像使用同一个随机种子",
"Request browser notifications": "请求浏览器通知",
@@ -520,7 +494,6 @@
"Result = A * (1 - M) + B * M": "结果 = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "结果 = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "第一个和最后一个数字必须是 1。例:'1, 2, 1'",
- "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
"Path to directory with input images": "带有输入图像的路径",
"Path to directory where to write outputs": "进行输出的路径",
"Input images directory": "输入图像目录",
@@ -537,5 +510,33 @@
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Roll three": "抽三位出来",
"Generate forever": "无限生成",
- "Cancel generate forever": "停止无限生成"
-}
+ "Cancel generate forever": "停止无限生成",
+ "History": "历史记录",
+ "▼": "▼",
+ "Show Textbox": "显示文本框",
+ "File with inputs": "含输入内容的文件",
+ "Prompts": "提示词",
+ "Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
+ "Upscaler 1": "放大算法 1",
+ "Upscaler 2": "放大算法 2",
+ "Open output directory": "打开输出目录",
+ "Create aesthetic images embedding": "生成美术风格图集 embedding",
+ "Split oversized images into two": "将过大的图像分为两份",
+ "Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录",
+ "Learning rate": "学习率",
+ "txt2img history": "文生图历史记录",
+ "img2img history": "图生图历史记录",
+ "extras history": "更多选项卡的历史记录",
+ "extras": "更多",
+ "custom fold": "自定义文件夹",
+ "Load": "载入",
+ "Prev batch": "上一批",
+ "Next batch": "下一批",
+ "number of images to delete consecutively next": "接下来要连续删除的图像数",
+ "Date to": "日期至",
+ "Refresh page": "刷新页面",
+ "Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
+ "Number of pictures displayed on each page": "每页显示的图像数量",
+ "Number of grids in each row": "每行显示多少格",
+ "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练"
+}
\ No newline at end of file
From 9d7b665d3b10193f6f9bad23f7957ef2469f6eaa Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Mon, 31 Oct 2022 00:09:58 +0900
Subject: [PATCH 029/147] update new content
---
localizations/zh_CN.json | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 80962f62..80e7e4cb 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -302,8 +302,8 @@
"Train Embedding": "训练 Embedding",
"Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding",
"Create images embedding": "生成图集 embedding",
- "favorites": "已保存",
- "others": "其他",
+ "Favorites": "已保存",
+ "Others": "其他",
"Images directory": "图像目录",
"Dropdown": "下拉列表",
"First Page": "首页",
@@ -319,7 +319,7 @@
"keyword": "关键词",
"Generate Info": "生成信息",
"File Name": "文件名",
- "Move to favorites": "Move to favorites",
+ "Move to favorites": "移动到已保存",
"Renew Page": "刷新页面",
"Number": "数量",
"set_index": "设置索引",
From e1b2ea6e0012ecc988385fc523d8fb50ea5d6be5 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Sun, 30 Oct 2022 22:11:45 +0700
Subject: [PATCH 030/147] Change VAE search order and thus priority
---
modules/sd_vae.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 82764e55..0767b925 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -25,10 +25,10 @@ def refresh_vae_list(vae_path=vae_path, model_path=model_path):
global vae_dict, vae_list
res = {}
candidates = [
- *glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
*glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
- *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True),
+ *glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True)
+ *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True),
]
if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
candidates.append(shared.cmd_opts.vae_path)
From 32ffc324169b73ede71ad02c5540b28b1564e466 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Mon, 31 Oct 2022 00:29:43 +0900
Subject: [PATCH 031/147] Update zh_CN.json
Put the "▼" entry back.
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 80e7e4cb..f9371c3e 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -69,6 +69,7 @@
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
"Open for Clip Aesthetic!": "打开美术风格 Clip!",
+ "▼": "▼",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
@@ -512,7 +513,6 @@
"Generate forever": "无限生成",
"Cancel generate forever": "停止无限生成",
"History": "历史记录",
- "▼": "▼",
"Show Textbox": "显示文本框",
"File with inputs": "含输入内容的文件",
"Prompts": "提示词",
From d9e4e4d7a09d4aee8ce249a3c8e91ce165b10fa5 Mon Sep 17 00:00:00 2001
From: random_thoughtss
Date: Sun, 30 Oct 2022 15:33:02 -0700
Subject: [PATCH 032/147] Fix non-square full resolution inpainting.
---
modules/masking.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/masking.py b/modules/masking.py
index fd8d9241..a5c4d2da 100644
--- a/modules/masking.py
+++ b/modules/masking.py
@@ -49,7 +49,7 @@ def expand_crop_region(crop_region, processing_width, processing_height, image_w
ratio_processing = processing_width / processing_height
if ratio_crop_region > ratio_processing:
- desired_height = (x2 - x1) * ratio_processing
+ desired_height = (x2 - x1) / ratio_processing
desired_height_diff = int(desired_height - (y2-y1))
y1 -= desired_height_diff//2
y2 += desired_height_diff - desired_height_diff//2
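The one-character change matters because `ratio_processing` is width divided by height: to give the crop the same shape as the processing resolution, its height must be the width divided by that ratio, not multiplied by it. A quick numeric check with illustrative values:

# Target processing resolution 512x768, crop region currently 300x300
# (ratio_crop_region = 1.0 > 0.666..., so the branch shown in the patch runs).
ratio_processing = 512 / 768                    # processing_width / processing_height
crop_width = 300

desired_height = crop_width / ratio_processing  # fixed code: 450.0, i.e. the same 2:3 shape
wrong_height = crop_width * ratio_processing    # old code: 200.0, a squashed crop
print(desired_height, wrong_height)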
From 0018f3ed62927a189c1f162c11a2e7a0cef99c1b Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 12:32:51 +0800
Subject: [PATCH 033/147] Update zh_CN.json
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index b796c625..248245dd 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -59,11 +59,11 @@
"Highres. fix": "高分辨率修复",
"Firstpass width": "第一遍的宽度",
"Firstpass height": "第一遍的高度",
- "Denoising strength": "去噪强度",
+ "Denoising strength": "去噪强度(Denoising strength)",
"Batch count": "生成批次",
"Batch size": "每批数量",
"CFG Scale": "提示词相关性(CFG Scale)",
- "Seed": "随机种子",
+ "Seed": "随机种子(seed)",
"Extra": "▼",
"Variation seed": "差异随机种子",
"Variation strength": "差异强度",
@@ -338,7 +338,7 @@
"sort by": "排序方式",
"path name": "路径名",
"date": "日期",
- "keyword": "关键词",
+ "keyword": "搜索",
"Generate Info": "生成信息",
"File Name": "文件名",
"Collect": "保存",
@@ -487,7 +487,7 @@
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长内补绘制",
"Produce an image that can be tiled.": "生成可用于平铺(tiled)的图像",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候,以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
- "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
+ "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。\n值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
"How many batches of images to create": "创建多少批次的图像",
"How many image to create in a single batch": "每批创建多少图像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 图像应在多大程度上服从提示词 - 较低的值会产生更有创意的结果",
From 7581091ffb14936ce85549ddfee3c2417ffb3c69 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 12:52:03 +0800
Subject: [PATCH 034/147] Denoising strength
---
localizations/zh_CN.json | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 248245dd..3463964a 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -59,7 +59,7 @@
"Highres. fix": "高分辨率修复",
"Firstpass width": "第一遍的宽度",
"Firstpass height": "第一遍的高度",
- "Denoising strength": "去噪强度(Denoising strength)",
+ "Denoising strength": "重绘幅度(Denoising strength)",
"Batch count": "生成批次",
"Batch size": "每批数量",
"CFG Scale": "提示词相关性(CFG Scale)",
@@ -168,13 +168,13 @@
"Original negative prompt": "初始反向提示词",
"Override `Sampling Steps` to the same value as `Decode steps`?": "覆写 `采样迭代步数` 为 `解码迭代步数`?",
"Decode steps": "解码迭代步数",
- "Override `Denoising strength` to 1?": "覆写 `去噪强度` 为 1?",
+ "Override `Denoising strength` to 1?": "覆写 `重绘幅度` 为 1?",
"Decode CFG scale": "解码提示词相关性(CFG scale)",
"Randomness": "随机度",
"Sigma adjustment for finding noise for image": "为寻找图中噪点的 Sigma 调整",
"Loops": "迭代次数",
- "Denoising strength change factor": "去噪强度的调整系数",
- "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推荐设置:采样迭代步数:80-100,采样器:Euler a,去噪强度:0.8",
+ "Denoising strength change factor": "重绘幅度的调整系数",
+ "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推荐设置:采样迭代步数:80-100,采样器:Euler a,重绘幅度:0.8",
"Pixels to expand": "拓展的像素数",
"Outpainting direction": "外补绘制的方向",
"left": "左",
@@ -412,7 +412,7 @@
"Inpainting conditioning mask strength": "内补绘制的自适应蒙版强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
"Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
- "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的去噪需要更少的迭代步数)",
+ "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的重绘幅度需要更少的迭代步数)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 采样器中启用量化以获得更清晰、更清晰的结果。这可能会改变现有的随机种子。需要重新启动才能应用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "强调符:使用 (文字) 使模型更关注该文本,使用 [文字] 使其减少关注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子",
@@ -513,7 +513,7 @@
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
"How many times to repeat processing an image and using it as input for the next iteration": "重复处理图像并用作下次迭代输入的次数",
- "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每个循环中,去噪强度都会乘以该值。<1 表示减少多样性,因此你的这一组图将集中在固定的图像上。>1 意味着增加多样性,因此你的这一组图将变得越来越混乱",
+ "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每个循环中,重绘幅度都会乘以该值。<1 表示减少多样性,因此你的这一组图将集中在固定的图像上。>1 意味着增加多样性,因此你的这一组图将变得越来越混乱",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "使用 SD 放大(SD upscale)时,图块(Tiles)之间应该有多少像素重叠。图块(Tiles)之间需要重叠才可以让它们在合并回一张图像时,没有清晰可见的接缝",
"A directory on the same machine where the server is running.": "与服务器主机上的目录",
"Leave blank to save images to the default path.": "留空以将图像保存到默认路径",
From f719b7d0128930b49d1f16ee44796e275033415b Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Mon, 31 Oct 2022 16:17:48 +0900
Subject: [PATCH 035/147] Update zh_CN.json
Update strings for tag complete (tag autocomplete).
---
localizations/zh_CN.json | 2 ++
1 file changed, 2 insertions(+)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index f9371c3e..3ff2c581 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -507,7 +507,9 @@
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于内补绘制专用的模型。 决定了蒙版在内补绘制以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
+ "Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
+ "Append commas": "附加逗号",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Roll three": "抽三位出来",
"Generate forever": "无限生成",
From b96d0c4e9ecec3c856b9b4ec795dbd0d34fcac51 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 14:42:28 +0700
Subject: [PATCH 036/147] Fix typo from previous commit
---
modules/sd_vae.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 0767b925..2ce44d5f 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -27,8 +27,8 @@ def refresh_vae_list(vae_path=vae_path, model_path=model_path):
candidates = [
*glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
*glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
- *glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True)
- *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True),
+ *glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True),
+ *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True)
]
if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
candidates.append(shared.cmd_opts.vae_path)
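Note on the fix above: inside the candidates list, the two starred iglob expressions were not separated by a comma, so Python joined the lines and parsed them as unpacking the product of two generators, which raises a TypeError as soon as refresh_vae_list() builds the list. A minimal standalone sketch of the corrected construction follows; model_path and vae_path are illustrative placeholders, not the module's real values.

import glob
import os

model_path = "models/Stable-diffusion"   # placeholder paths for illustration
vae_path = "models/VAE"

def vae_candidates(vae_path=vae_path, model_path=model_path):
    # Each starred glob must end with a comma; without it, "*iglob(a) *iglob(b)"
    # is parsed as *(iglob(a) * iglob(b)) and fails at runtime.
    return [
        *glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
        *glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
        *glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True),
        *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True),
    ]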
From 726769da35970f4c100fa7edf11850f9dc059c41 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 15:19:34 +0700
Subject: [PATCH 037/147] Checkpoint cache by combination key of checkpoint and
vae
---
modules/sd_models.py | 27 ++++++++++++++++-----------
modules/sd_vae.py | 8 +++++++-
2 files changed, 23 insertions(+), 12 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 91ad4b5e..850f7b7b 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -160,11 +160,15 @@ def get_state_dict_from_checkpoint(pl_sd):
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
-def load_model_weights(model, checkpoint_info, force=False):
+def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
- if force or checkpoint_info not in checkpoints_loaded:
+ vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
+
+ checkpoint_key = (checkpoint_info, vae_file)
+
+ if checkpoint_key not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -185,24 +189,25 @@ def load_model_weights(model, checkpoint_info, force=False):
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
- sd_vae.load_vae(model, checkpoint_file)
+ sd_vae.load_vae(model, vae_file)
model.first_stage_model.to(devices.dtype_vae)
if shared.opts.sd_checkpoint_cache > 0:
- checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+ checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
else:
- print(f"Loading weights [{sd_model_hash}] from cache")
- checkpoints_loaded.move_to_end(checkpoint_info)
- model.load_state_dict(checkpoints_loaded[checkpoint_info])
+ vae_name = sd_vae.get_filename(vae_file)
+ print(f"Loading weights [{sd_model_hash}] with {vae_name} VAE from cache")
+ checkpoints_loaded.move_to_end(checkpoint_key)
+ model.load_state_dict(checkpoints_loaded[checkpoint_key])
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
-def load_model(checkpoint_info=None, force=False):
+def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
@@ -223,7 +228,7 @@ def load_model(checkpoint_info=None, force=False):
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
- load_model_weights(sd_model, checkpoint_info, force=force)
+ load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
@@ -250,7 +255,7 @@ def reload_model_weights(sd_model, info=None, force=False):
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
checkpoints_loaded.clear()
- load_model(checkpoint_info, force=force)
+ load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
@@ -260,7 +265,7 @@ def reload_model_weights(sd_model, info=None, force=False):
sd_hijack.model_hijack.undo_hijack(sd_model)
- load_model_weights(sd_model, checkpoint_info, force=force)
+ load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 2ce44d5f..e9239326 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -43,7 +43,7 @@ def refresh_vae_list(vae_path=vae_path, model_path=model_path):
vae_dict.update(res)
return vae_list
-def load_vae(model, checkpoint_file, vae_file="auto"):
+def resolve_vae(checkpoint_file, vae_file="auto"):
global first_load, vae_dict, vae_list
# save_settings = False
@@ -94,6 +94,12 @@ def load_vae(model, checkpoint_file, vae_file="auto"):
if vae_file and not os.path.exists(vae_file):
vae_file = None
+ return vae_file
+
+def load_vae(model, vae_file):
+ global first_load, vae_dict, vae_list
+ # save_settings = False
+
if vae_file:
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
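The change above makes the in-RAM checkpoint cache distinguish the same checkpoint loaded with different VAEs by keying checkpoints_loaded on the (checkpoint_info, vae_file) pair rather than on checkpoint_info alone. A minimal sketch of that keying scheme, using an OrderedDict stand-in for the module's cache (the function names and CACHE_SIZE constant are illustrative):

from collections import OrderedDict

checkpoints_loaded = OrderedDict()  # stand-in for the module-level LRU cache
CACHE_SIZE = 1                      # stand-in for shared.opts.sd_checkpoint_cache

def cache_state_dict(checkpoint_info, vae_file, state_dict):
    # The same checkpoint paired with a different VAE gets its own entry.
    checkpoints_loaded[(checkpoint_info, vae_file)] = state_dict
    while len(checkpoints_loaded) > CACHE_SIZE:
        checkpoints_loaded.popitem(last=False)  # evict the least recently used

def fetch_cached(checkpoint_info, vae_file):
    key = (checkpoint_info, vae_file)
    if key not in checkpoints_loaded:
        return None
    checkpoints_loaded.move_to_end(key)  # mark as most recently used
    return checkpoints_loaded[key]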
From 36966e3200943dbf890b5338cfa939df552d3c47 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 15:38:58 +0700
Subject: [PATCH 038/147] Fix #4035
---
modules/sd_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f86dc3ed..a29c8c1a 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -201,7 +201,7 @@ def load_model_weights(model, checkpoint_info):
if shared.opts.sd_checkpoint_cache > 0:
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
- while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+ while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1:
checkpoints_loaded.popitem(last=False) # LRU
else:
print(f"Loading weights [{sd_model_hash}] from cache")
From bf7a699845675eefdabb9cfa40c55398976274ae Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 16:27:27 +0700
Subject: [PATCH 039/147] Fix #4035 for real now
---
modules/sd_models.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index a29c8c1a..b2dd005a 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -165,6 +165,9 @@ def load_model_weights(model, checkpoint_info):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
+ if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+ checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
+
if checkpoint_info not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
@@ -198,16 +201,14 @@ def load_model_weights(model, checkpoint_info):
model.first_stage_model.load_state_dict(vae_dict)
model.first_stage_model.to(devices.dtype_vae)
-
- if shared.opts.sd_checkpoint_cache > 0:
- checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
- while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1:
- checkpoints_loaded.popitem(last=False) # LRU
else:
print(f"Loading weights [{sd_model_hash}] from cache")
- checkpoints_loaded.move_to_end(checkpoint_info)
model.load_state_dict(checkpoints_loaded[checkpoint_info])
+ if shared.opts.sd_checkpoint_cache > 0:
+ while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+ checkpoints_loaded.popitem(last=False) # LRU
+
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
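With the revision above, the outgoing model's weights are snapshotted into the cache (keyed by its sd_checkpoint_info) before the requested checkpoint is loaded, and the LRU is trimmed only afterwards. A simplified sketch of that ordering; switch_checkpoint, load_weights and cache_size are illustrative names, and model is assumed to be a torch module carrying an sd_checkpoint_info attribute:

from collections import OrderedDict

checkpoints_loaded = OrderedDict()  # stand-in for the module-level cache

def switch_checkpoint(model, new_info, load_weights, cache_size=1):
    # 1) Remember the checkpoint the model currently holds before replacing it.
    if cache_size > 0 and hasattr(model, "sd_checkpoint_info"):
        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()

    # 2) Load the requested checkpoint, from the cache when possible.
    if new_info in checkpoints_loaded:
        model.load_state_dict(checkpoints_loaded[new_info])
    else:
        load_weights(model, new_info)

    # 3) Trim the LRU only after the switch, so the fresh snapshot can age out too.
    if cache_size > 0:
        while len(checkpoints_loaded) > cache_size:
            checkpoints_loaded.popitem(last=False)

    model.sd_checkpoint_info = new_info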
From 49f0dd4300381a6878ea81c9d7841174080db48f Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 19:26:38 +0800
Subject: [PATCH 040/147] Revise the disputed parts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index eece36dd..6b7a34e4 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -217,7 +217,7 @@
"Add difference": "添加差分",
"Save as float16": "以 float16 储存",
"See": "查看",
- "wiki": "帮助",
+ "wiki": "wiki文档",
"for detailed explanation.": "以了解详细说明",
"Create embedding": "生成 embedding",
"Create hypernetwork": "生成 hypernetwork",
@@ -303,7 +303,7 @@
"Train Embedding": "训练 Embedding",
"Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding",
"Create images embedding": "生成图集 embedding",
- "Favorites": "已保存",
+ "Favorites": "收藏夹(已保存)",
"Others": "其他",
"Images directory": "图像目录",
"Dropdown": "下拉列表",
@@ -320,7 +320,7 @@
"keyword": "搜索",
"Generate Info": "生成信息",
"File Name": "文件名",
- "Move to favorites": "移动到已保存",
+ "Move to favorites": "移动到收藏夹(保存)",
"Renew Page": "刷新页面",
"Number": "数量",
"set_index": "设置索引",
@@ -479,10 +479,10 @@
"Open images output directory": "打开图像输出目录",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
- "fill it with colors of the image": "用图像的颜色(≈模糊/马赛克)填充它",
+ "fill it with colors of the image": "用图像的颜色(高斯模糊)填充它",
"keep whatever was there originally": "保留原来的图像,不进行预处理",
- "fill it with latent space noise": "用潜空间的噪声(≈随机彩色噪点)填充它",
- "fill it with latent space zeroes": "用潜空间的零(≈灰色)填充它",
+ "fill it with latent space noise": "用潜空间的噪声(图像维度表现为 随机彩色噪点)填充它",
+ "fill it with latent space zeroes": "用潜空间的零(图像维度表现为 无色纯灰)填充它",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
From f65bfd74eacd0b1dac9aaea6cc1cbbbd726a3760 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 19:28:31 +0800
Subject: [PATCH 041/147] outpainting
---
localizations/zh_CN.json | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 6b7a34e4..f82e6eda 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -153,8 +153,8 @@
"Resize and fill": "填充",
"img2img alternative test": "图生图的另一种测试",
"Loopback": "回送",
- "Outpainting mk2": "外补绘制第二版",
- "Poor man's outpainting": "效果稍差的外补绘制",
+ "Outpainting mk2": "向外绘制第二版",
+ "Poor man's outpainting": "效果稍差的向外绘制",
"SD upscale": "使用 SD 放大(SD upscale)",
"should be 2 or lower.": "必须小于等于2",
"Override `Sampling method` to Euler?(this method is built for it)": "覆写 `采样方法` 为 Euler?(这个方法就是为这样做设计的)",
@@ -171,7 +171,7 @@
"Denoising strength change factor": "重绘幅度的调整系数",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推荐设置:采样迭代步数:80-100,采样器:Euler a,重绘幅度:0.8",
"Pixels to expand": "拓展的像素数",
- "Outpainting direction": "外补绘制的方向",
+ "Outpainting direction": "向外绘制的方向",
"left": "左",
"right": "右",
"up": "上",
From faa3639bf1f17eedf458cfe3171d03f8bd8ddc9d Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 20:15:36 +0800
Subject: [PATCH 042/147] Move content that cannot be localized to the bottom
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index f82e6eda..0306bbdc 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -100,7 +100,6 @@
"Prompt order": "提示词顺序",
"Sampler": "采样器",
"Checkpoint name": "模型(ckpt)名",
- "Hypernetwork": "Hypernetwork",
"Hypernet str.": "Hypernetwork 强度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
@@ -198,7 +197,7 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
- "Upscaler 2 visibility": "放大算法 2 可见度",
+ "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
@@ -510,7 +509,6 @@
"Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
"Append commas": "附加逗号",
- "Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Roll three": "抽三位出来",
"Generate forever": "无限生成",
"Cancel generate forever": "停止无限生成",
@@ -519,6 +517,7 @@
"File with inputs": "含输入内容的文件",
"Prompts": "提示词",
"Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
+ "Upscaler": "放大算法",
"Upscaler 1": "放大算法 1",
"Upscaler 2": "放大算法 2",
"Open output directory": "打开输出目录",
@@ -540,5 +539,10 @@
"Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
"Number of pictures displayed on each page": "每页显示的图像数量",
"Number of grids in each row": "每行显示多少格",
- "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练"
-}
\ No newline at end of file
+ "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
+ "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用垂直的管道字符 | 将提示语分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了第一部分,所有的组合都会出现)",
+ "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些Real-ESRGAN模型显示在用户界面。(需要重新启动)",
+ "Face restoration model": "面部修复模型",
+ "Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
+ "Hypernetwork": "超网络(Hypernetwork)"
+}
From 48787dc3d17f2bb498b1ebd6bd179176157c330b Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 20:26:52 +0800
Subject: [PATCH 043/147] inpaint
---
localizations/zh_CN.json | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 0306bbdc..222858d1 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -117,32 +117,32 @@
"Keep -1 for seeds": "保持随机种子为-1",
"Save": "保存",
"Send to img2img": ">> 图生图",
- "Send to inpaint": ">> 内补绘制",
+ "Send to inpaint": ">> 局部重绘",
"Send to extras": ">> 更多",
"Make Zip when Save?": "保存时生成zip压缩文件?",
"Textbox": "文本框",
"Interrogate\nCLIP": "CLIP\n反推提示词",
"Interrogate\nDeepBooru": "DeepBooru\n反推提示词",
- "Inpaint": "内补绘制",
+ "Inpaint": "局部重绘",
"Batch img2img": "批量图生图",
"Image for img2img": "图生图的图像",
"Drop Image Here": "拖拽图像到此",
- "Image for inpainting with mask": "用于内补绘制蒙版内容的图像",
+ "Image for inpainting with mask": "用于提取蒙版的图像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
"Draw mask": "绘制蒙版",
"Upload mask": "上传蒙版",
"Masking mode": "蒙版模式",
- "Inpaint masked": "内补绘制蒙版内容",
- "Inpaint not masked": "内补绘制非蒙版内容",
+ "Inpaint masked": "重绘蒙版内容",
+ "Inpaint not masked": "重绘非蒙版内容",
"Masked content": "蒙版蒙住的内容",
"fill": "填充",
"original": "原图",
"latent noise": "潜空间噪声",
"latent nothing": "潜空间数值零",
- "Inpaint at full resolution": "以完整分辨率进行内补绘制",
- "Inpaint at full resolution padding, pixels": "以完整分辨率进行内补绘制 - 填补像素",
+ "Inpaint at full resolution": "以完整分辨率进行局部重绘",
+ "Inpaint at full resolution padding, pixels": "以完整分辨率进行局部重绘 - 填补像素",
"Process images in a directory on the same machine where the server is running.": "在服务器主机上的目录中处理图像",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一个空的文件夹为输出目录而非默认的 output 文件夹为输出目录",
"Input directory": "输入目录",
@@ -385,7 +385,7 @@
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)",
"Hypernetwork strength": "Hypernetwork 强度",
- "Inpainting conditioning mask strength": "内补绘制的自适应蒙版强度",
+ "Inpainting conditioning mask strength": "局部重绘的自适应蒙版强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
"Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的重绘幅度需要更少的迭代步数)",
@@ -458,7 +458,7 @@
"Do not do anything special": "什么都不做",
"Which algorithm to use to produce the image": "使用哪种算法生成图像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有创意,可以根据迭代步数获得完全不同的图像,将迭代步数设置为高于 30-40 不会有正面作用",
- "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长内补绘制",
+ "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长局部重绘",
"Produce an image that can be tiled.": "生成可用于平铺(tiled)的图像",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候,以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。\n值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
@@ -482,7 +482,7 @@
"keep whatever was there originally": "保留原来的图像,不进行预处理",
"fill it with latent space noise": "用潜空间的噪声(图像维度表现为 随机彩色噪点)填充它",
"fill it with latent space zeroes": "用潜空间的零(图像维度表现为 无色纯灰)填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
@@ -503,7 +503,7 @@
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
- "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于内补绘制专用的模型。 决定了蒙版在内补绘制以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型。 决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
"Autocomplete options": "自动补全选项",
From 6cffcf6b6dfe26f6dc3b886a29c7dd86ec79aa7a Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 20:41:18 +0800
Subject: [PATCH 044/147] Update zh_CN.json
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 222858d1..4ffee177 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -304,6 +304,10 @@
"Create images embedding": "生成图集 embedding",
"Favorites": "收藏夹(已保存)",
"Others": "其他",
+ "Move to favorites": "移动到收藏夹(保存)",
+ "favorites": "收藏夹(已保存)",
+ "others": "其他",
+ "Collect": "收藏(保存)",
"Images directory": "图像目录",
"Dropdown": "下拉列表",
"First Page": "首页",
@@ -319,7 +323,6 @@
"keyword": "搜索",
"Generate Info": "生成信息",
"File Name": "文件名",
- "Move to favorites": "移动到收藏夹(保存)",
"Renew Page": "刷新页面",
"Number": "数量",
"set_index": "设置索引",
@@ -539,6 +542,7 @@
"Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
"Number of pictures displayed on each page": "每页显示的图像数量",
"Number of grids in each row": "每行显示多少格",
+ "Start drawing": "开始绘制",
"how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
"Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用垂直的管道字符 | 将提示语分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了第一部分,所有的组合都会出现)",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些Real-ESRGAN模型显示在用户界面。(需要重新启动)",
From 3ac05d38eb9e67d4c1923f05a2c22139782289fe Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 21:10:27 +0800
Subject: [PATCH 045/147] Bug fix
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The translated text was too long and obscured the tooltip box.
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 4ffee177..beca5bba 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -142,7 +142,7 @@
"latent noise": "潜空间噪声",
"latent nothing": "潜空间数值零",
"Inpaint at full resolution": "以完整分辨率进行局部重绘",
- "Inpaint at full resolution padding, pixels": "以完整分辨率进行局部重绘 - 填补像素",
+ "Inpaint at full resolution padding, pixels": "填补像素",
"Process images in a directory on the same machine where the server is running.": "在服务器主机上的目录中处理图像",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一个空的文件夹为输出目录而非默认的 output 文件夹为输出目录",
"Input directory": "输入目录",
From 7c8c3715f552378cf81ad28f26fad92b37bd153d Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 20:15:33 +0700
Subject: [PATCH 046/147] Fix VAE refresh button stretching out
From https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/3986#issuecomment-1296990601
---
style.css | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/style.css b/style.css
index 8b2211b1..c1b190e9 100644
--- a/style.css
+++ b/style.css
@@ -491,7 +491,7 @@ input[type="range"]{
padding: 0;
}
-#refresh_sd_model_checkpoint, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{
+#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{
max-width: 2.5em;
min-width: 2.5em;
height: 2.4em;
From 965ed08e31a2e401abd34662b758c13863320fa0 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Mon, 31 Oct 2022 21:25:48 +0800
Subject: [PATCH 047/147] Update zh_CN.json
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index beca5bba..b291f8a8 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -143,8 +143,8 @@
"latent nothing": "潜空间数值零",
"Inpaint at full resolution": "以完整分辨率进行局部重绘",
"Inpaint at full resolution padding, pixels": "填补像素",
- "Process images in a directory on the same machine where the server is running.": "在服务器主机上的目录中处理图像",
- "Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一个空的文件夹为输出目录而非默认的 output 文件夹为输出目录",
+ "Process images in a directory on the same machine where the server is running.": "使用服务器主机上的一个目录,作为输入目录处理图像。",
+ "Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一个空的文件夹作为输出目录,而不是使用默认的 output 文件夹作为输出目录。",
"Input directory": "输入目录",
"Resize mode": "缩放模式",
"Just resize": "拉伸",
From 56660f0946600da881a742afa8569fe4559cb5c6 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 11:17:30 +0800
Subject: [PATCH 048/147] Update zh_CN.json
---
localizations/zh_CN.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index b291f8a8..2d70dab1 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -500,9 +500,9 @@
"Path to directory with input images": "带有输入图像的路径",
"Path to directory where to write outputs": "进行输出的路径",
"Input images directory": "输入图像目录",
- "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; 默认请留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果启用此选项,水印将不会添加到生成出来的图像中。警告:如果你不添加水印,你的行为可能是不符合专业操守的",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; 默认请留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
From 24a76340ba14c63b8523c254769c781d4b62c347 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 14:30:59 +0800
Subject: [PATCH 049/147] Update zh_CN.json
---
localizations/zh_CN.json | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 2d70dab1..c23e123b 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -481,10 +481,10 @@
"Open images output directory": "打开图像输出目录",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
- "fill it with colors of the image": "用图像的颜色(高斯模糊)填充它",
+ "fill it with colors of the image": "用图像的颜色(高强度模糊)填充它",
"keep whatever was there originally": "保留原来的图像,不进行预处理",
- "fill it with latent space noise": "用潜空间的噪声(图像维度表现为 随机彩色噪点)填充它",
- "fill it with latent space zeroes": "用潜空间的零(图像维度表现为 无色纯灰)填充它",
+ "fill it with latent space noise": "用潜空间的噪声填充它",
+ "fill it with latent space zeroes": "用潜空间的零填充它",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
From bef1d0e836a337b1673a903be5d872b2d2869514 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 14:32:12 +0800
Subject: [PATCH 050/147] Update localizations/zh_CN.json
Co-authored-by: dtlnor
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index c23e123b..a8b99591 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -544,7 +544,7 @@
"Number of grids in each row": "每行显示多少格",
"Start drawing": "开始绘制",
"how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
- "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用垂直的管道字符 | 将提示语分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了第一部分,所有的组合都会出现)",
+ "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些Real-ESRGAN模型显示在用户界面。(需要重新启动)",
"Face restoration model": "面部修复模型",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
From 19b59d320c144718f2cb65b77c111f0ec82c8ccd Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 14:33:20 +0800
Subject: [PATCH 051/147] Update localizations/zh_CN.json
Co-authored-by: dtlnor
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index a8b99591..d3b572e6 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -127,7 +127,7 @@
"Batch img2img": "批量图生图",
"Image for img2img": "图生图的图像",
"Drop Image Here": "拖拽图像到此",
- "Image for inpainting with mask": "用于提取蒙版的图像",
+ "Image for inpainting with mask": "用于局部重绘并手动画蒙版的图像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
From 6bab858095a01f3cabdcbbcf3f42c913f54f500f Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 18:46:20 +0800
Subject: [PATCH 052/147] Update localizations/zh_CN.json
Co-authored-by: liggest <43201720+liggest@users.noreply.github.com>
---
localizations/zh_CN.json | 1 -
1 file changed, 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index d3b572e6..d6d03e0f 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -179,7 +179,6 @@
"Color variation": "色彩变化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)",
"Tile overlap": "图块重叠的像素(Tile overlap)",
- "Upscaler": "Upscaler",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
From 72ea78cf64a111df025e5b71e2106b8c3dad28c3 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Tue, 1 Nov 2022 23:38:19 +0900
Subject: [PATCH 053/147] Update zh_CN.json
Include new changes.
---
localizations/zh_CN.json | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index d6d03e0f..e700a1ab 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -19,6 +19,7 @@
"Create aesthetic embedding": "生成美术风格",
"Image Browser": "图库浏览器",
"Settings": "设置",
+ "Extensions": "扩展",
"Prompt": "提示词",
"Negative prompt": "反向提示词",
"Run": "运行",
@@ -448,6 +449,20 @@
"Download localization template": "下载本地化模板",
"Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无 ui 更新,无重启)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件(仅限自定义脚本、ui.py、js 和 css)",
+ "Installed": "已安装",
+ "Available": "可用",
+ "Install from URL": "从网址安装",
+ "Apply and restart UI": "应用并重启用户界面",
+ "Check for updates": "检查更新",
+ "Extension": "扩展",
+ "URL": "网址",
+ "Update": "更新",
+ "unknown": "未知",
+ "Load from:": "加载自",
+ "Extension index URL": "扩展列表链接",
+ "URL for extension's git repository": "扩展的 git 仓库链接",
+ "Local directory name": "本地路径名",
+ "Install": "安装",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nPrompt",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt",
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
@@ -508,6 +523,7 @@
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型。 决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
+ "Leave empty for auto": "留空时自动生成",
"Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
"Append commas": "附加逗号",
From 8a62d6431d2b1b2bb92409f0403c56fa423763f4 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Tue, 1 Nov 2022 23:39:02 +0900
Subject: [PATCH 054/147] re-order json content
---
localizations/zh_CN.json | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index e700a1ab..83c4ef3c 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -102,6 +102,7 @@
"Sampler": "采样器",
"Checkpoint name": "模型(ckpt)名",
"Hypernet str.": "Hypernetwork 强度",
+ "Hypernetwork": "超网络(Hypernetwork)",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
@@ -304,10 +305,6 @@
"Create images embedding": "生成图集 embedding",
"Favorites": "收藏夹(已保存)",
"Others": "其他",
- "Move to favorites": "移动到收藏夹(保存)",
- "favorites": "收藏夹(已保存)",
- "others": "其他",
- "Collect": "收藏(保存)",
"Images directory": "图像目录",
"Dropdown": "下拉列表",
"First Page": "首页",
@@ -563,5 +560,8 @@
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些Real-ESRGAN模型显示在用户界面。(需要重新启动)",
"Face restoration model": "面部修复模型",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
- "Hypernetwork": "超网络(Hypernetwork)"
+ "Move to favorites": "移动到收藏夹(保存)",
+ "favorites": "收藏夹(已保存)",
+ "others": "其他",
+ "Collect": "收藏(保存)"
}
From 82ba978d8538e932dec633b61411034886720d16 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 22:47:28 +0800
Subject: [PATCH 055/147] Update zh_CN.json
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index d6d03e0f..3e193024 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -431,7 +431,7 @@
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪声乘数) ",
"eta (noise multiplier) for ancestral samplers": "ancestral 采样器的 eta (噪声乘数)",
"img2img DDIM discretize": "图生图 DDIM 离散化",
- "uniform": "均勻",
+ "uniform": "均匀",
"quad": "二阶",
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
From 534bcfbac83c92be685b3fbd3f427e9bef716a57 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Tue, 1 Nov 2022 22:54:21 +0800
Subject: [PATCH 056/147] Update zh_CN.json
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 3e193024..d8bc91cd 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -453,7 +453,7 @@
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
- "Apply selected styles to current prompt": "将所选样式应用于当前提示",
+ "Apply selected styles to current prompt": "将所选模板风格,应用于当前提示词",
"Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个",
"Stop processing images and return any results accumulated so far.": "停止处理图像,并返回迄今为止累积的任何结果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者",
From 056f06d3738c267b1014e6e8e1ef5bd97af1fb45 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Wed, 2 Nov 2022 12:51:46 +0700
Subject: [PATCH 057/147] Reload VAE without reloading sd checkpoint
---
modules/sd_models.py | 15 ++++---
modules/sd_vae.py | 97 ++++++++++++++++++++++++++++++++++++++++----
webui.py | 4 +-
3 files changed, 98 insertions(+), 18 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 6ab85b65..883639d1 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -159,15 +159,13 @@ def get_state_dict_from_checkpoint(pl_sd):
return pl_sd
-vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
-
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
- checkpoint_key = (checkpoint_info, vae_file)
+ checkpoint_key = checkpoint_info
if checkpoint_key not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
@@ -190,13 +188,12 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
- sd_vae.load_vae(model, vae_file)
- model.first_stage_model.to(devices.dtype_vae)
-
if shared.opts.sd_checkpoint_cache > 0:
+ # if PR #4035 were to get merged, restore base VAE first before caching
checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
+
else:
vae_name = sd_vae.get_filename(vae_file)
print(f"Loading weights [{sd_model_hash}] with {vae_name} VAE from cache")
@@ -207,6 +204,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
+ sd_vae.load_vae(model, vae_file)
+
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
@@ -254,14 +253,14 @@ def load_model(checkpoint_info=None):
return sd_model
-def reload_model_weights(sd_model=None, info=None, force=False):
+def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
- if sd_model.sd_model_checkpoint == checkpoint_info.filename and not force:
+ if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index e9239326..78e14e8a 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -1,26 +1,65 @@
import torch
import os
from collections import namedtuple
-from modules import shared, devices
+from modules import shared, devices, script_callbacks
from modules.paths import models_path
import glob
+
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
vae_dir = "VAE"
vae_path = os.path.abspath(os.path.join(models_path, vae_dir))
+
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
+
+
default_vae_dict = {"auto": "auto", "None": "None"}
default_vae_list = ["auto", "None"]
+
+
default_vae_values = [default_vae_dict[x] for x in default_vae_list]
vae_dict = dict(default_vae_dict)
vae_list = list(default_vae_list)
first_load = True
+
+base_vae = None
+loaded_vae_file = None
+checkpoint_info = None
+
+
+def get_base_vae(model):
+ if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
+ return base_vae
+ return None
+
+
+def store_base_vae(model):
+ global base_vae, checkpoint_info
+ if checkpoint_info != model.sd_checkpoint_info:
+ base_vae = model.first_stage_model.state_dict().copy()
+ checkpoint_info = model.sd_checkpoint_info
+
+
+def delete_base_vae():
+ global base_vae, checkpoint_info
+ base_vae = None
+ checkpoint_info = None
+
+
+def restore_base_vae(model):
+ global base_vae, checkpoint_info
+ if base_vae is not None and checkpoint_info == model.sd_checkpoint_info:
+ load_vae_dict(model, base_vae)
+ delete_base_vae()
+
+
def get_filename(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
+
def refresh_vae_list(vae_path=vae_path, model_path=model_path):
global vae_dict, vae_list
res = {}
@@ -43,6 +82,7 @@ def refresh_vae_list(vae_path=vae_path, model_path=model_path):
vae_dict.update(res)
return vae_list
+
def resolve_vae(checkpoint_file, vae_file="auto"):
global first_load, vae_dict, vae_list
# save_settings = False
@@ -96,24 +136,26 @@ def resolve_vae(checkpoint_file, vae_file="auto"):
return vae_file
-def load_vae(model, vae_file):
- global first_load, vae_dict, vae_list
+
+def load_vae(model, vae_file=None):
+ global first_load, vae_dict, vae_list, loaded_vae_file
# save_settings = False
if vae_file:
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
- model.first_stage_model.load_state_dict(vae_dict_1)
+ load_vae_dict(model, vae_dict_1)
- # If vae used is not in dict, update it
- # It will be removed on refresh though
- if vae_file is not None:
+ # If vae used is not in dict, update it
+ # It will be removed on refresh though
vae_opt = get_filename(vae_file)
if vae_opt not in vae_dict:
vae_dict[vae_opt] = vae_file
vae_list.append(vae_opt)
+ loaded_vae_file = vae_file
+
"""
# Save current VAE to VAE settings, maybe? will it work?
if save_settings:
@@ -124,4 +166,45 @@ def load_vae(model, vae_file):
"""
first_load = False
+
+
+# don't call this from outside
+def load_vae_dict(model, vae_dict_1=None):
+ if vae_dict_1:
+ store_base_vae(model)
+ model.first_stage_model.load_state_dict(vae_dict_1)
+ else:
+ restore_base_vae()
model.first_stage_model.to(devices.dtype_vae)
+
+
+def reload_vae_weights(sd_model=None, vae_file="auto"):
+ from modules import lowvram, devices, sd_hijack
+
+ if not sd_model:
+ sd_model = shared.sd_model
+
+ checkpoint_info = sd_model.sd_checkpoint_info
+ checkpoint_file = checkpoint_info.filename
+ vae_file = resolve_vae(checkpoint_file, vae_file=vae_file)
+
+ if loaded_vae_file == vae_file:
+ return
+
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ sd_model.to(devices.cpu)
+
+ sd_hijack.model_hijack.undo_hijack(sd_model)
+
+ load_vae(sd_model, vae_file)
+
+ sd_hijack.model_hijack.hijack(sd_model)
+ script_callbacks.model_loaded_callback(sd_model)
+
+ if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+ sd_model.to(devices.device)
+
+ print(f"VAE Weights loaded.")
+ return sd_model
diff --git a/webui.py b/webui.py
index 7cb4691b..034777a2 100644
--- a/webui.py
+++ b/webui.py
@@ -81,9 +81,7 @@ def initialize():
modules.sd_vae.refresh_vae_list()
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
- # I don't know what needs to be done to only reload VAE, with all those hijacks callbacks, and lowvram,
- # so for now this reloads the whole model too
- shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(force=True)), call=False)
+ shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
From dd2108fdac2ebf943d4ac3563a49202222b88acf Mon Sep 17 00:00:00 2001
From: Maiko Tan
Date: Wed, 2 Nov 2022 15:04:35 +0800
Subject: [PATCH 058/147] fix: should invoke callback as well in api only mode
---
modules/script_callbacks.py | 3 ++-
webui.py | 2 ++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index da88635b..c28e220e 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -2,6 +2,7 @@ import sys
import traceback
from collections import namedtuple
import inspect
+from typing import Optional
from fastapi import FastAPI
from gradio import Blocks
@@ -62,7 +63,7 @@ def clear_callbacks():
callbacks_image_saved.clear()
callbacks_cfg_denoiser.clear()
-def app_started_callback(demo: Blocks, app: FastAPI):
+def app_started_callback(demo: Optional[Blocks], app: FastAPI):
for c in callbacks_app_started:
try:
c.callback(demo, app)
diff --git a/webui.py b/webui.py
index 84e5c1fd..dc4223dc 100644
--- a/webui.py
+++ b/webui.py
@@ -114,6 +114,8 @@ def api_only():
app.add_middleware(GZipMiddleware, minimum_size=1000)
api = create_api(app)
+ modules.script_callbacks.app_started_callback(None, app)
+
api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861)
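Because app_started_callback now also fires in API-only mode (--nowebui), extension callbacks receive None in place of the Gradio Blocks instance and should guard for it. A minimal sketch of a tolerant callback, assuming the on_app_started registration helper in modules.script_callbacks and an illustrative /myext/ping route:

from typing import Optional

from fastapi import FastAPI
from gradio import Blocks

from modules import script_callbacks

def my_app_started(demo: Optional[Blocks], app: FastAPI):
    # Always safe: register extra API routes on the FastAPI app.
    app.add_api_route("/myext/ping", lambda: {"ok": True}, methods=["GET"])
    # UI-only wiring must be skipped when running with --nowebui.
    if demo is not None:
        pass  # e.g. attach extra Gradio components here

script_callbacks.on_app_started(my_app_started)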
From a5409a6e4bc3eaa9757a7505d4564ad8e0d899ea Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Wed, 2 Nov 2022 14:37:22 +0700
Subject: [PATCH 059/147] Save VAE provided by cmd_opts.vae_path
---
modules/sd_vae.py | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 78e14e8a..71e7a6e6 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -78,27 +78,24 @@ def refresh_vae_list(vae_path=vae_path, model_path=model_path):
vae_list.extend(default_vae_list)
vae_list.extend(list(res.keys()))
vae_dict.clear()
- vae_dict.update(default_vae_dict)
vae_dict.update(res)
+ vae_dict.update(default_vae_dict)
return vae_list
def resolve_vae(checkpoint_file, vae_file="auto"):
global first_load, vae_dict, vae_list
- # save_settings = False
- # if vae_file argument is provided, it takes priority
+ # if vae_file argument is provided, it takes priority, but not saved
if vae_file and vae_file not in default_vae_list:
if not os.path.isfile(vae_file):
vae_file = "auto"
- # save_settings = True
print("VAE provided as function argument doesn't exist")
- # for the first load, if vae-path is provided, it takes priority and failure is reported
+ # for the first load, if vae-path is provided, it takes priority, saved, and failure is reported
if first_load and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
- # save_settings = True
- # print("Using VAE provided as command line argument")
+ shared.opts.data['sd_vae'] = get_filename(vae_file)
else:
print("VAE provided as command line argument doesn't exist")
# else, we load from settings
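The revised resolve_vae() priority is: an explicit vae_file argument wins but is not persisted; on the first load a --vae-path command-line value wins and is written into the sd_vae setting; otherwise the stored setting and auto-detection decide. A condensed sketch of that ordering, with the webui globals replaced by parameters (pick_vae and opts_data are illustrative names):

import os

def pick_vae(vae_file, cmd_vae_path, first_load, opts_data,
             default_vae_list=("auto", "None")):
    # 1) Explicit argument takes priority, but is never saved to settings.
    if vae_file and vae_file not in default_vae_list and not os.path.isfile(vae_file):
        vae_file = "auto"
    # 2) On first load, --vae-path overrides and is persisted to the sd_vae option.
    if first_load and cmd_vae_path is not None and os.path.isfile(cmd_vae_path):
        vae_file = cmd_vae_path
        opts_data["sd_vae"] = os.path.splitext(os.path.basename(vae_file))[0]
    # 3) Otherwise the stored setting / auto-detection logic (omitted here) decides.
    return vae_file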
From b421c5ee605f087d543d7e5e98856da81184d1c7 Mon Sep 17 00:00:00 2001
From: Dynamic
Date: Wed, 2 Nov 2022 17:16:47 +0900
Subject: [PATCH 060/147] Update ko_KR.json
New options in scripts
---
localizations/ko_KR.json | 1 +
1 file changed, 1 insertion(+)
diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json
index d152e575..7bb15ea6 100644
--- a/localizations/ko_KR.json
+++ b/localizations/ko_KR.json
@@ -547,6 +547,7 @@
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지와 그리드의 하위 디렉토리명의 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "옛 방식의 강조 구현을 사용합니다. 옛 시드를 재현하는 데 효과적일 수 있습니다.",
"Use original name for output filename during batch process in extras tab": "부가기능 탭에서 이미지를 여러장 처리 시 결과물 파일명에 기존 파일명 사용하기",
+ "Use same random seed for all lines": "모든 줄에 동일한 시드 사용",
"Use same seed for each image": "각 이미지에 동일한 시드 사용",
"use spaces for tags in deepbooru": "deepbooru에서 태그에 공백 사용",
"User interface": "사용자 인터페이스",
From 4a8cf01f6f7f072cc9c67d6b31662384b212dd9c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 12:12:32 +0300
Subject: [PATCH 061/147] remove duplicate code from #3970
---
modules/api/api.py | 10 +---------
modules/shared.py | 14 ++++++++++++++
modules/ui.py | 10 +---------
3 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index b3d85e46..71c9c160 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -178,15 +178,7 @@ class Api:
progress = min(progress, 1)
- # copy from check_progress_call of ui.py
-
- if shared.parallel_processing_allowed:
- if shared.state.sampling_step - shared.state.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.state.current_latent is not None:
- if shared.opts.show_progress_grid:
- shared.state.current_image = samples_to_image_grid(shared.state.current_latent)
- else:
- shared.state.current_image = sample_to_image(shared.state.current_latent)
- shared.state.current_image_sampling_step = shared.state.sampling_step
+ shared.state.set_current_image()
current_image = None
if shared.state.current_image and not req.skip_current_image:
diff --git a/modules/shared.py b/modules/shared.py
index 04aaa648..e65f6080 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -184,6 +184,20 @@ class State:
devices.torch_gc()
+ """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
+ def set_current_image(self):
+ if not parallel_processing_allowed:
+ return
+
+ if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and self.current_latent is not None:
+ if opts.show_progress_grid:
+ self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
+ else:
+ self.current_image = sd_samplers.sample_to_image(self.current_latent)
+
+ self.current_image_sampling_step = self.sampling_step
+
+
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
diff --git a/modules/ui.py b/modules/ui.py
index 45cd8c3f..784439ba 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -277,15 +277,7 @@ def check_progress_call(id_part):
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
- if shared.parallel_processing_allowed:
-
- if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
- if opts.show_progress_grid:
- shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent)
- else:
- shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
- shared.state.current_image_sampling_step = shared.state.sampling_step
-
+ shared.state.set_current_image()
image = shared.state.current_image
if image is None:
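The duplicated preview logic from api.py and ui.py now lives in a single State.set_current_image(). Below is a minimal, stand-alone sketch of the same pattern; DemoState and its names are illustrative, not repository code.

    class DemoState:
        """Preview state; the API progress endpoint and the UI callback both call set_current_image()."""

        def __init__(self, every_n_steps=5):
            self.every_n_steps = every_n_steps
            self.sampling_step = 0
            self.current_image_sampling_step = 0
            self.current_latent = None
            self.current_image = None

        def set_current_image(self):
            # refresh the preview once enough steps have passed since the last refresh
            if self.current_latent is None:
                return
            if self.sampling_step - self.current_image_sampling_step >= self.every_n_steps:
                self.current_image = f"preview@step{self.sampling_step}"  # stand-in for decoding the latent
                self.current_image_sampling_step = self.sampling_step

    state = DemoState()
    state.current_latent = object()
    for step in range(1, 13):
        state.sampling_step = step
        state.set_current_image()  # the one shared entry point
    print(state.current_image)     # preview@step10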
From 9c67408004ed132637d10321bf44565f82055fd2 Mon Sep 17 00:00:00 2001
From: timntorres <116157310+timntorres@users.noreply.github.com>
Date: Wed, 2 Nov 2022 02:18:21 -0700
Subject: [PATCH 062/147] Allow saving "before-highres-fix" images. (#4150)
* Save image/s before doing highres fix.
---
modules/processing.py | 17 +++++++++++++++--
modules/sd_samplers.py | 5 ++---
modules/shared.py | 1 +
3 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index b541ee2b..2dcf4879 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -521,7 +521,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
- samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
+ # Only Txt2Img needs an extra argument, n, when saving intermediate images pre highres fix.
+ if isinstance(p, StableDiffusionProcessingTxt2Img):
+ samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, n=n)
+ else:
+ samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
samples_ddim = samples_ddim.to(devices.dtype_vae)
x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
@@ -649,7 +653,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, n=0):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
@@ -685,6 +689,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ # Save a copy of the image/s before doing highres fix, if applicable.
+ if opts.save and not self.do_not_save_samples and opts.save_images_before_highres_fix:
+ for i in range(self.batch_size):
+ # This batch's ith image.
+ img = sd_samplers.sample_to_image(samples, i)
+ # Index that accounts for both batch size and batch count.
+ ind = i + self.batch_size*n
+ images.save_image(img, self.outpath_samples, "", self.all_seeds[ind], self.all_prompts[ind], opts.samples_format, suffix=f"-before-highres-fix")
+
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 44d4c189..d7fa89a0 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -93,9 +93,8 @@ def single_sample_to_image(sample):
return Image.fromarray(x_sample)
-def sample_to_image(samples):
- return single_sample_to_image(samples[0])
-
+def sample_to_image(samples, index=0):
+ return single_sample_to_image(samples[index])
def samples_to_image_grid(samples):
return images.image_grid([single_sample_to_image(sample) for sample in samples])
diff --git a/modules/shared.py b/modules/shared.py
index e65f6080..ce991424 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -255,6 +255,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+ "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
From eb5e82c7ddf5e72fa13b83bd1f12d3a07a4de1a4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 12:45:03 +0300
Subject: [PATCH 063/147] do not unnecessarily run VAE one more time when
saving intermediate image with hires fix
---
modules/processing.py | 39 ++++++++++++++++++++-------------------
modules/sd_samplers.py | 1 +
modules/shared.py | 2 +-
scripts/img2imgalt.py | 3 +--
4 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 2dcf4879..3a364b5f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -199,7 +199,7 @@ class StableDiffusionProcessing():
def init(self, all_prompts, all_seeds, all_subseeds):
pass
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
raise NotImplementedError()
def close(self):
@@ -521,11 +521,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
- # Only Txt2Img needs an extra argument, n, when saving intermediate images pre highres fix.
- if isinstance(p, StableDiffusionProcessingTxt2Img):
- samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, n=n)
- else:
- samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
+ samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
samples_ddim = samples_ddim.to(devices.dtype_vae)
x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
@@ -653,7 +649,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, n=0):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
@@ -666,9 +662,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
+ """saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images"""
+ def save_intermediate(image, index):
+ if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
+ return
+
+ if not isinstance(image, Image.Image):
+ image = sd_samplers.sample_to_image(image, index)
+
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
+
if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ for i in range(samples.shape[0]):
+ save_intermediate(samples, i)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
@@ -678,6 +686,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
+
+ save_intermediate(image, i)
+
image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
@@ -689,15 +700,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
- # Save a copy of the image/s before doing highres fix, if applicable.
- if opts.save and not self.do_not_save_samples and opts.save_images_before_highres_fix:
- for i in range(self.batch_size):
- # This batch's ith image.
- img = sd_samplers.sample_to_image(samples, i)
- # Index that accounts for both batch size and batch count.
- ind = i + self.batch_size*n
- images.save_image(img, self.outpath_samples, "", self.all_seeds[ind], self.all_prompts[ind], opts.samples_format, suffix=f"-before-highres-fix")
-
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
@@ -844,8 +846,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
-
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
@@ -856,4 +857,4 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
del x
devices.torch_gc()
- return samples
\ No newline at end of file
+ return samples
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d7fa89a0..c7c414ef 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -96,6 +96,7 @@ def single_sample_to_image(sample):
def sample_to_image(samples, index=0):
return single_sample_to_image(samples[index])
+
def samples_to_image_grid(samples):
return images.image_grid([single_sample_to_image(sample) for sample in samples])
diff --git a/modules/shared.py b/modules/shared.py
index ce991424..01f47e38 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -256,6 +256,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
+ "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
@@ -322,7 +323,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
- "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 88abc093..964b75c7 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -166,8 +166,7 @@ class Script(scripts.Script):
if override_strength:
p.denoising_strength = 1.0
-
- def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
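The save_intermediate closure above accepts either an already-decoded PIL image or a latent batch plus an index, so the VAE decode never runs a second time just to save the "-before-highres-fix" copy. A self-contained sketch of that pattern follows; fake_decode stands in for sd_samplers.sample_to_image and is not repository code.

    from PIL import Image

    def fake_decode(latents, index):
        # stand-in for sd_samplers.sample_to_image(latents, index)
        return Image.new("RGB", (64, 64), color=(40 * index % 256, 0, 0))

    def make_save_intermediate(enabled):
        def save_intermediate(image, index):
            if not enabled:
                return
            if not isinstance(image, Image.Image):
                image = fake_decode(image, index)  # decode only when no PIL image exists yet
            image.save(f"before-highres-{index}.png")
        return save_intermediate

    save_intermediate = make_save_intermediate(enabled=True)
    save_intermediate(["latent0", "latent1"], 1)      # latent-upscale path: decode on demand
    save_intermediate(Image.new("RGB", (64, 64)), 0)  # image-upscale path: reuse the decoded image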
From f2a5cbe6f55592c4c5527b8e0bf99ea8d658f057 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 14:41:29 +0300
Subject: [PATCH 064/147] fix #3986 breaking --no-half-vae
---
modules/sd_models.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 883639d1..5075fadb 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -183,11 +183,20 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
+ vae = model.first_stage_model
+
+ # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
+ if shared.cmd_opts.no_half_vae:
+ model.first_stage_model = None
+
model.half()
+ model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
+ model.first_stage_model.to(devices.dtype_vae)
+
if shared.opts.sd_checkpoint_cache > 0:
# if PR #4035 were to get merged, restore base VAE first before caching
checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
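The fix above detaches the VAE before half() so --no-half-vae keeps its weights in float32, then reattaches it and casts it to the VAE dtype separately. A minimal torch sketch of the same idea, using a hypothetical TinyModel rather than the webui model:

    import torch

    class TinyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.unet = torch.nn.Linear(4, 4)                # stands in for the diffusion model
            self.first_stage_model = torch.nn.Linear(4, 4)   # stands in for the VAE

    model = TinyModel()
    no_half_vae = True                                       # i.e. --no-half-vae was passed

    vae = model.first_stage_model
    if no_half_vae:
        model.first_stage_model = None                       # keep the VAE out of the fp16 conversion
    model.half()
    model.first_stage_model = vae                            # reattach it afterwards

    dtype_vae = torch.float32 if no_half_vae else torch.float16
    model.first_stage_model.to(dtype_vae)

    print(model.unet.weight.dtype)                           # torch.float16
    print(model.first_stage_model.weight.dtype)              # torch.float32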
From 13cbc1622e78d818b77292b6ae589f45f3040e94 Mon Sep 17 00:00:00 2001
From: Dynamic
Date: Wed, 2 Nov 2022 21:00:24 +0900
Subject: [PATCH 065/147] Update ko_KR.json
Fix some changed setting strings and add new ones
---
localizations/ko_KR.json | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json
index 7bb15ea6..874771f9 100644
--- a/localizations/ko_KR.json
+++ b/localizations/ko_KR.json
@@ -43,6 +43,7 @@
"Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용",
"Apply settings": "설정 적용하기",
"Artists to study": "연구할만한 작가들",
+ "auto": "자동",
"Auto focal point crop": "초점 기준 크롭(자동 감지)",
"Autocomplete options": "자동완성 설정",
"Available": "지원되는 확장기능 목록",
@@ -303,7 +304,7 @@
"Modules": "모듈",
"Move face restoration model from VRAM into RAM after processing": "처리가 완료되면 얼굴 보정 모델을 VRAM에서 RAM으로 옮기기",
"Move to favorites": "즐겨찾기로 옮기기",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "하이퍼네트워크 훈련 진행 시 VAE와 CLIP을 RAM으로 옮기기. VRAM이 절약됩니다.",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "훈련 진행 시 가능하면 VAE와 CLIP을 RAM으로 옮기기. VRAM이 절약됩니다.",
"Moved to favorites": "즐겨찾기로 옮겨짐",
"Multiplier (M) - set to 0 to get model A": "배율 (M) - 0으로 적용하면 모델 A를 얻게 됩니다",
"Name": "이름",
@@ -418,6 +419,7 @@
"Save": "저장",
"Save a copy of embedding to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 임베딩을 저장합니다, 비활성화하려면 0으로 설정하십시오.",
"Save a copy of image before applying color correction to img2img results": "이미지→이미지 결과물에 색상 보정을 진행하기 전 이미지의 복사본을 저장하기",
+ "Save a copy of image before applying highres fix.": "고해상도 보정을 진행하기 전 이미지의 복사본을 저장하기",
"Save a copy of image before doing face restoration.": "얼굴 보정을 진행하기 전 이미지의 복사본을 저장하기",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 손실(Loss)을 포함하는 csv 파일을 저장합니다, 비활성화하려면 0으로 설정하십시오.",
"Save an image to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 이미지를 저장합니다, 비활성화하려면 0으로 설정하십시오.",
@@ -541,6 +543,7 @@
"Use BLIP for caption": "캡션에 BLIP 사용",
"Use checkbox to enable the extension; it will be enabled or disabled when you click apply button": "체크박스를 이용해 적용할 확장기능을 선택하세요. 변경사항은 적용 후 UI 재시작 버튼을 눌러야 적용됩니다.",
"Use checkbox to mark the extension for update; it will be updated when you click apply button": "체크박스를 이용해 업데이트할 확장기능을 선택하세요. 업데이트는 적용 후 UI 재시작 버튼을 눌러야 적용됩니다.",
+ "Use cross attention optimizations while training": "훈련 진행 시 크로스 어텐션 최적화 사용",
"Use deepbooru for caption": "캡션에 deepbooru 사용",
"Use dropout": "드롭아웃 사용",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지 파일명 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
From 3178c35224467893cf8dcedb1028c59c6c23db58 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:16:32 +0900
Subject: [PATCH 066/147] resolve conflicts
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 065b893d..959937d7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -285,7 +285,7 @@ options_templates.update(options_section(('system', "System"), {
}))
options_templates.update(options_section(('training', "Training"), {
- "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
+ "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
From 9b5f85ac83f864310fe19c9deab6670bad695b0d Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:18:04 +0900
Subject: [PATCH 067/147] first revert
---
modules/shared.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 959937d7..7e8c552b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -286,7 +286,6 @@ options_templates.update(options_section(('system', "System"), {
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
From 7ea5956ad5fa925f92116e8a3bf78d7f6517b654 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:18:55 +0900
Subject: [PATCH 068/147] now add
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index d8e99f85..7ecb40d8 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -309,6 +309,7 @@ options_templates.update(options_section(('system', "System"), {
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
From e21fcd72fcf147904a1df060226c4df12acf251e Mon Sep 17 00:00:00 2001
From: evshiron
Date: Wed, 2 Nov 2022 22:37:45 +0800
Subject: [PATCH 069/147] add back png info in image api
---
modules/api/api.py | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 71c9c160..ceaf08b0 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -7,8 +7,9 @@ from fastapi import APIRouter, Depends, HTTPException
import modules.shared as shared
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
+from modules.sd_samplers import all_samplers
from modules.extras import run_extras, run_pnginfo
+from PIL import PngImagePlugin
def upscaler_to_index(name: str):
@@ -31,9 +32,21 @@ def setUpscalers(req: dict):
def encode_pil_to_base64(image):
- buffer = io.BytesIO()
- image.save(buffer, format="png")
- return base64.b64encode(buffer.getvalue())
+ with io.BytesIO() as output_bytes:
+
+ # Copy any text-only metadata
+ use_metadata = False
+ metadata = PngImagePlugin.PngInfo()
+ for key, value in image.info.items():
+ if isinstance(key, str) and isinstance(value, str):
+ metadata.add_text(key, value)
+ use_metadata = True
+
+ image.save(
+ output_bytes, "PNG", pnginfo=(metadata if use_metadata else None)
+ )
+ bytes_data = output_bytes.getvalue()
+ return base64.b64encode(bytes_data)
class Api:
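What the reworked encode_pil_to_base64 above preserves is the PNG text chunks (such as the "parameters" chunk holding generation settings). A stand-alone PIL sketch of the round trip; the image and variable names here are illustrative only.

    import base64
    import io

    from PIL import Image, PngImagePlugin

    img = Image.new("RGB", (8, 8))
    img.info["parameters"] = "a cat, Steps: 20, Sampler: Euler a"

    metadata = PngImagePlugin.PngInfo()
    for key, value in img.info.items():
        if isinstance(key, str) and isinstance(value, str):
            metadata.add_text(key, value)          # copy text-only metadata into the PNG chunks

    with io.BytesIO() as buf:
        img.save(buf, "PNG", pnginfo=metadata)
        b64 = base64.b64encode(buf.getvalue())

    decoded = Image.open(io.BytesIO(base64.b64decode(b64)))
    print(decoded.info["parameters"])              # the generation parameters survive the round trip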
From a9e979977a8e3999b01b6a086bb1332ab7ab308b Mon Sep 17 00:00:00 2001
From: Artem Zagidulin
Date: Wed, 2 Nov 2022 19:05:01 +0300
Subject: [PATCH 070/147] process_one
---
modules/processing.py | 3 +++
modules/scripts.py | 16 ++++++++++++++++
2 files changed, 19 insertions(+)
diff --git a/modules/processing.py b/modules/processing.py
index 3a364b5f..72a2ee4e 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -509,6 +509,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if len(prompts) == 0:
break
+ if p.scripts is not None:
+ p.scripts.process_one(p)
+
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
diff --git a/modules/scripts.py b/modules/scripts.py
index 533db45c..9f82efea 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -70,6 +70,13 @@ class Script:
pass
+ def process_one(self, p, *args):
+ """
+ Same as process(), but called for every iteration
+ """
+
+ pass
+
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
@@ -294,6 +301,15 @@ class ScriptRunner:
print(f"Error running process: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ def process_one(self, p):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.process_one(p, *script_args)
+ except Exception:
+ print(f"Error running process_one: {script.filename}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
def postprocess(self, p, processed):
for script in self.alwayson_scripts:
try:
From f1b6ac64e451036fb4dfabe66d79488c56c06776 Mon Sep 17 00:00:00 2001
From: Kyu♥ <3ad4gum@gmail.com>
Date: Wed, 2 Nov 2022 17:24:42 +0100
Subject: [PATCH 071/147] Added option to preview created images on batch
completion.
---
modules/shared.py | 25 ++++++++++++++++---------
modules/ui.py | 2 +-
2 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index d8e99f85..d4cf32a4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -146,6 +146,9 @@ class State:
self.interrupted = True
def nextjob(self):
+ if opts.show_progress_every_n_steps == -1:
+ self.do_set_current_image()
+
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
@@ -186,17 +189,21 @@ class State:
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
+ if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
+ self.do_set_current_image()
+
+ def do_set_current_image(self):
if not parallel_processing_allowed:
return
+ if self.current_latent is None:
+ return
+
+ if opts.show_progress_grid:
+ self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
+ else:
+ self.current_image = sd_samplers.sample_to_image(self.current_latent)
- if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and self.current_latent is not None:
- if opts.show_progress_grid:
- self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
- else:
- self.current_image = sd_samplers.sample_to_image(self.current_latent)
-
- self.current_image_sampling_step = self.sampling_step
-
+ self.current_image_sampling_step = self.sampling_step
state = State()
@@ -351,7 +358,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
- "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
+ "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
diff --git a/modules/ui.py b/modules/ui.py
index 2609857e..29de1e10 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -276,7 +276,7 @@ def check_progress_call(id_part):
image = gr_show(False)
preview_visibility = gr_show(False)
- if opts.show_progress_every_n_steps > 0:
+ if opts.show_progress_every_n_steps != 0:
shared.state.set_current_image()
image = shared.state.current_image
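The split above keeps the every-N-steps throttle in set_current_image(), makes do_set_current_image() refresh unconditionally, and has nextjob() call the latter when the option is -1, so a preview appears once per finished batch instead of during sampling. A stand-alone sketch with illustrative names:

    class PreviewState:
        def __init__(self, show_progress_every_n_steps):
            self.show_progress_every_n_steps = show_progress_every_n_steps
            self.sampling_step = 0
            self.current_image_sampling_step = 0
            self.current_latent = "latent"
            self.previews = []

        def set_current_image(self):
            n = self.show_progress_every_n_steps
            if n > 0 and self.sampling_step - self.current_image_sampling_step >= n:
                self.do_set_current_image()

        def do_set_current_image(self):
            if self.current_latent is None:
                return
            self.previews.append(self.sampling_step)   # stand-in for decoding a preview image
            self.current_image_sampling_step = self.sampling_step

        def nextjob(self):
            if self.show_progress_every_n_steps == -1:
                self.do_set_current_image()
            self.sampling_step = 0
            self.current_image_sampling_step = 0

    s = PreviewState(show_progress_every_n_steps=-1)
    for step in range(1, 21):
        s.sampling_step = step
        s.set_current_image()   # never fires while sampling when the option is -1
    s.nextjob()                 # but the finished batch gets one preview here
    print(s.previews)           # [20]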
From c07f1d0d7821f85b9ce1419992c118963d605bd7 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 2 Nov 2022 16:59:10 +0000
Subject: [PATCH 072/147] Convert callbacks into a private map, add utility
functions for removing callbacks
---
modules/script_callbacks.py | 68 ++++++++++++++++++++++---------------
1 file changed, 41 insertions(+), 27 deletions(-)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index c28e220e..4a7fb944 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -46,25 +46,23 @@ class CFGDenoiserParams:
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
-callbacks_app_started = []
-callbacks_model_loaded = []
-callbacks_ui_tabs = []
-callbacks_ui_settings = []
-callbacks_before_image_saved = []
-callbacks_image_saved = []
-callbacks_cfg_denoiser = []
+__callback_map = dict(
+ callbacks_app_started=[],
+ callbacks_model_loaded=[],
+ callbacks_ui_tabs=[],
+ callbacks_ui_settings=[],
+ callbacks_before_image_saved=[],
+ callbacks_image_saved=[],
+ callbacks_cfg_denoiser=[]
+)
def clear_callbacks():
- callbacks_model_loaded.clear()
- callbacks_ui_tabs.clear()
- callbacks_ui_settings.clear()
- callbacks_before_image_saved.clear()
- callbacks_image_saved.clear()
- callbacks_cfg_denoiser.clear()
+ for callback_list in __callback_map.values():
+ callback_list.clear()
def app_started_callback(demo: Optional[Blocks], app: FastAPI):
- for c in callbacks_app_started:
+ for c in __callback_map['callbacks_app_started']:
try:
c.callback(demo, app)
except Exception:
@@ -72,7 +70,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI):
def model_loaded_callback(sd_model):
- for c in callbacks_model_loaded:
+ for c in __callback_map['callbacks_model_loaded']:
try:
c.callback(sd_model)
except Exception:
@@ -82,7 +80,7 @@ def model_loaded_callback(sd_model):
def ui_tabs_callback():
res = []
- for c in callbacks_ui_tabs:
+ for c in __callback_map['callbacks_ui_tabs']:
try:
res += c.callback() or []
except Exception:
@@ -92,7 +90,7 @@ def ui_tabs_callback():
def ui_settings_callback():
- for c in callbacks_ui_settings:
+ for c in __callback_map['callbacks_ui_settings']:
try:
c.callback()
except Exception:
@@ -100,7 +98,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams):
- for c in callbacks_before_image_saved:
+ for c in __callback_map['callbacks_before_image_saved']:
try:
c.callback(params)
except Exception:
@@ -108,7 +106,7 @@ def before_image_saved_callback(params: ImageSaveParams):
def image_saved_callback(params: ImageSaveParams):
- for c in callbacks_image_saved:
+ for c in __callback_map['callbacks_image_saved']:
try:
c.callback(params)
except Exception:
@@ -116,7 +114,7 @@ def image_saved_callback(params: ImageSaveParams):
def cfg_denoiser_callback(params: CFGDenoiserParams):
- for c in callbacks_cfg_denoiser:
+ for c in __callback_map['callbacks_cfg_denoiser']:
try:
c.callback(params)
except Exception:
@@ -129,17 +127,33 @@ def add_callback(callbacks, fun):
callbacks.append(ScriptCallback(filename, fun))
+
+def remove_current_script_callbacks():
+ stack = [x for x in inspect.stack() if x.filename != __file__]
+ filename = stack[0].filename if len(stack) > 0 else 'unknown file'
+ if filename == 'unknown file':
+ return
+ for callback_list in __callback_map.values():
+ for callback_to_remove in [cb for cb in callback_list if cb.script == filename]:
+ callback_list.remove(callback_to_remove)
+
+
+def remove_callbacks_for_function(callback_func):
+ for callback_list in __callback_map.values():
+ for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]:
+ callback_list.remove(callback_to_remove)
+
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
- add_callback(callbacks_app_started, callback)
+ add_callback(__callback_map['callbacks_app_started'], callback)
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
passed as an argument"""
- add_callback(callbacks_model_loaded, callback)
+ add_callback(__callback_map['callbacks_model_loaded'], callback)
def on_ui_tabs(callback):
@@ -152,13 +166,13 @@ def on_ui_tabs(callback):
title is tab text displayed to user in the UI
elem_id is HTML id for the tab
"""
- add_callback(callbacks_ui_tabs, callback)
+ add_callback(__callback_map['callbacks_ui_tabs'], callback)
def on_ui_settings(callback):
"""register a function to be called before UI settings are populated; add your settings
by using shared.opts.add_option(shared.OptionInfo(...)) """
- add_callback(callbacks_ui_settings, callback)
+ add_callback(__callback_map['callbacks_ui_settings'], callback)
def on_before_image_saved(callback):
@@ -166,7 +180,7 @@ def on_before_image_saved(callback):
The callback is called with one argument:
- params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
"""
- add_callback(callbacks_before_image_saved, callback)
+ add_callback(__callback_map['callbacks_before_image_saved'], callback)
def on_image_saved(callback):
@@ -174,7 +188,7 @@ def on_image_saved(callback):
The callback is called with one argument:
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
"""
- add_callback(callbacks_image_saved, callback)
+ add_callback(__callback_map['callbacks_image_saved'], callback)
def on_cfg_denoiser(callback):
@@ -182,5 +196,5 @@ def on_cfg_denoiser(callback):
The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
"""
- add_callback(callbacks_cfg_denoiser, callback)
+ add_callback(__callback_map['callbacks_cfg_denoiser'], callback)
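With all callbacks in one private dict keyed by event name, clearing them or removing every entry that points at a given function becomes a single loop instead of one line per list. A stand-alone sketch of that pattern (the map below holds only two demo keys, not the full set):

    from collections import namedtuple

    ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])

    __callback_map = dict(callbacks_model_loaded=[], callbacks_ui_settings=[])

    def add_callback(callbacks, fun, script="demo_extension.py"):
        callbacks.append(ScriptCallback(script, fun))

    def remove_callbacks_for_function(callback_func):
        for callback_list in __callback_map.values():
            for cb in [cb for cb in callback_list if cb.callback == callback_func]:
                callback_list.remove(cb)

    def on_model_loaded(sd_model):
        print("model loaded:", sd_model)

    add_callback(__callback_map['callbacks_model_loaded'], on_model_loaded)
    remove_callbacks_for_function(on_model_loaded)
    print(__callback_map['callbacks_model_loaded'])   # [] - the handler was unregistered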
From 1c9db534bd4ca7facb2b99cc3ad5bbbedf5e539e Mon Sep 17 00:00:00 2001
From: Martucci <73501718+M-art-ucci@users.noreply.github.com>
Date: Wed, 2 Nov 2022 14:35:37 -0300
Subject: [PATCH 073/147] pt_BR minor issue with last comma
and a few translation tweaks
---
localizations/pt_BR.json | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/localizations/pt_BR.json b/localizations/pt_BR.json
index d869170a..c8ef1b11 100644
--- a/localizations/pt_BR.json
+++ b/localizations/pt_BR.json
@@ -17,7 +17,7 @@
"Checkpoint Merger": "Fusão de Checkpoint",
"Train": "Treinar",
"Settings": "Configurações",
- "Extensions": "Extensions",
+ "Extensions": "Extensões",
"Prompt": "Prompt",
"Negative prompt": "Prompt negativo",
"Run": "Executar",
@@ -57,7 +57,7 @@
"Highres. fix": "Ajuste de alta resolução",
"Firstpass width": "Primeira Passagem da largura",
"Firstpass height": "Primeira Passagem da altura",
- "Denoising strength": "Denoising strength",
+ "Denoising strength": "Força do denoise",
"Batch count": "Quantidade por lote",
"Batch size": "Quantidade de lotes",
"CFG Scale": "Escala CFG",
@@ -480,6 +480,6 @@
"This string will be used to join split words into a single line if the option above is enabled.": "Esta string será usada para unir palavras divididas em uma única linha se a opção acima estiver habilitada.",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Aplicável somente para modelos de inpaint. Determina quanto deve mascarar da imagem original para inpaint e img2img. 1.0 significa totalmente mascarado, que é o comportamento padrão. 0.0 significa uma condição totalmente não mascarada. Valores baixos ajudam a preservar a composição geral da imagem, mas vai encontrar dificuldades com grandes mudanças.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Lista de nomes de configurações, separados por vírgulas, para configurações que devem ir para a barra de acesso rápido na parte superior, em vez da guia de configuração usual. Veja modules/shared.py para nomes de configuração. Necessita reinicialização para aplicar.",
- "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se este valor for diferente de zero, ele será adicionado à seed e usado para inicializar o RNG para ruídos ao usar amostragens com Tempo Estimado. Você pode usar isso para produzir ainda mais variações de imagens ou pode usar isso para combinar imagens de outro software se souber o que está fazendo."
+ "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se este valor for diferente de zero, ele será adicionado à seed e usado para inicializar o RNG para ruídos ao usar amostragens com Tempo Estimado. Você pode usar isso para produzir ainda mais variações de imagens ou pode usar isso para combinar imagens de outro software se souber o que está fazendo.",
"Leave empty for auto": "Deixar desmarcado para automático"
}
From de64146ad2fc2030a4cd3545676f9e18c93b8b18 Mon Sep 17 00:00:00 2001
From: Artem Zagidulin
Date: Wed, 2 Nov 2022 21:30:50 +0300
Subject: [PATCH 074/147] add iteration number
---
modules/processing.py | 2 +-
modules/scripts.py | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 72a2ee4e..17f4a5ec 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -510,7 +510,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
break
if p.scripts is not None:
- p.scripts.process_one(p)
+ p.scripts.process_one(p, n)
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
diff --git a/modules/scripts.py b/modules/scripts.py
index 9f82efea..7aa0d56a 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -70,7 +70,7 @@ class Script:
pass
- def process_one(self, p, *args):
+ def process_one(self, p, n, *args):
"""
Same as process(), but called for every iteration
"""
@@ -301,11 +301,11 @@ class ScriptRunner:
print(f"Error running process: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
- def process_one(self, p):
+ def process_one(self, p, n):
for script in self.alwayson_scripts:
try:
script_args = p.script_args[script.args_from:script.args_to]
- script.process_one(p, *script_args)
+ script.process_one(p, n, *script_args)
except Exception:
print(f"Error running process_one: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
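After this change, every always-on script receives process_one(p, n) once per batch-count iteration n, and an exception in one script is logged without stopping the others. A stand-alone sketch of that dispatch; the script classes and the dict standing in for the processing object are illustrative, not repository code.

    import sys
    import traceback

    class DemoScript:
        filename = "demo_script.py"

        def process_one(self, p, n, *args):
            p["cfg_scale"] = p["cfg_scale"] + 0.5 * n   # e.g. nudge CFG on each iteration

    class BrokenScript:
        filename = "broken_script.py"

        def process_one(self, p, n, *args):
            raise RuntimeError("boom")

    def run_process_one(scripts, p, n):
        for script in scripts:
            try:
                script.process_one(p, n)
            except Exception:
                print(f"Error running process_one: {script.filename}", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)

    p = {"cfg_scale": 7.0}
    for n in range(3):                       # batch-count iterations
        run_process_one([DemoScript(), BrokenScript()], p, n)
    print(p["cfg_scale"])                    # 8.5, adjusted every iteration despite the broken script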
From 32d95c1129127ed7b8d5f03b464943fb4ace4342 Mon Sep 17 00:00:00 2001
From: Riccardo Giovanetti <29801031+Harvester62@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:42:40 +0100
Subject: [PATCH 075/147] Italian localization - Additions and Updates
Updated localization with the latest version of these Scripts/Extensions:
unprompted (new script)
img2tiles
random
random grid
Some new options in the Extras and Settings have been translated too.
---
localizations/it_IT.json | 32 ++++++++++++++++++++++++--------
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/localizations/it_IT.json b/localizations/it_IT.json
index 49489f40..de599a85 100644
--- a/localizations/it_IT.json
+++ b/localizations/it_IT.json
@@ -104,6 +104,7 @@
"Seed travel": "Interpolazione semi",
"Shift attention": "Sposta l'attenzione",
"Text to Vector Graphics": "Da testo a grafica vettoriale",
+ "Unprompted": "Unprompted",
"X/Y plot": "Grafico X/Y",
"X/Y/Z plot": "Grafico X/Y/Z",
"Dynamic Prompting v0.13.6": "Prompt dinamici v0.13.6",
@@ -195,7 +196,7 @@
"Beta distribution (VP only)": "Distribuzione Beta (Solo CV)",
"Beta min (VP only)": "Beta min (Solo CV)",
"Epsilon (VP only)": "Epsilon (Solo CV)",
- "Running in txt2img mode:": "Esecuzione in modalità txt2img:",
+ "Running in txt2img mode:": "Esecusione in modalità txt2img:",
"Render these video formats:": "Renderizza in questi formati:",
"GIF": "GIF",
"MP4": "MP4",
@@ -259,6 +260,7 @@
"Save results as video": "Salva i risultati come video",
"Frames per second": "Fotogrammi al secondo",
"Iterate seed every line": "Iterare il seme per ogni riga",
+ "Use same random seed for all lines": "Usa lo stesso seme casuale per tutte le righe",
"List of prompt inputs": "Elenco di prompt di input",
"Upload prompt inputs": "Carica un file contenente i prompt di input",
"n": "Esegui n volte",
@@ -294,6 +296,13 @@
"Transparent PNG": "PNG trasparente",
"Noise Tolerance": "Tolleranza al rumore",
"Quantize": "Quantizzare",
+ "Dry Run": "Esecuzione a vuoto (Debug)",
+ "NEW!": "NUOVO!",
+ "Premium Fantasy Card Template": "Premium Fantasy Card Template",
+ "is now available.": "è ora disponibile.",
+ "Generate a wide variety of creatures and characters in the style of a fantasy card game. Perfect for heroes, animals, monsters, and even crazy hybrids.": "Genera un'ampia varietà di creature e personaggi nello stile di un gioco di carte fantasy. Perfetto per eroi, animali, mostri e persino ibridi incredibili.",
+ "Learn More ➜": "Per saperne di più ➜",
+ "Purchases help fund the continued development of Unprompted. Thank you for your support!": "Gli acquisti aiutano a finanziare il continuo sviluppo di Unprompted. Grazie per il vostro sostegno!",
"X type": "Parametro asse X",
"Nothing": "Niente",
"Var. seed": "Seme della variazione",
@@ -424,6 +433,7 @@
"Sigma adjustment for finding noise for image": "Regolazione Sigma per trovare il rumore per l'immagine",
"Tile size": "Dimensione piastrella",
"Tile overlap": "Sovrapposizione piastrella",
+ "New seed for each tile": "Nuovo seme per ogni piastrella",
"alternate img2img imgage": "Immagine alternativa per img2img",
"interpolation values": "Valori di interpolazione",
"Refinement loops": "Cicli di affinamento",
@@ -455,8 +465,9 @@
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Aumenterà l'immagine al doppio delle dimensioni; utilizzare i cursori di larghezza e altezza per impostare la dimensione della piastrella",
"Upscaler": "Ampliamento immagine",
"Lanczos": "Lanczos",
+ "Nearest": "Nearest",
"LDSR": "LDSR",
- "ESRGAN_4x": "ESRGAN_4x",
+ "BSRGAN": "BSRGAN",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"SwinIR 4x": "SwinIR 4x",
@@ -808,6 +819,7 @@
"image_path": "Percorso immagine",
"mp4_path": "Percorso MP4",
"Click here after the generation to show the video": "Clicca qui dopo la generazione per mostrare il video",
+ "NOTE: If the 'Generate' button doesn't work, go in Settings and click 'Restart Gradio and Refresh...'.": "NOTA: se il pulsante 'Genera' non funziona, vai in Impostazioni e fai clic su 'Riavvia Gradio e Aggiorna...'.",
"Save Settings": "Salva le impostazioni",
"Load Settings": "Carica le impostazioni",
"Path relative to the webui folder." : "Percorso relativo alla cartella webui.",
@@ -922,8 +934,8 @@
"Renew Page": "Aggiorna la pagina",
"Number": "Numero",
"set_index": "Imposta indice",
- "load_switch": "load_switch",
- "turn_page_switch": "turn_page_switch",
+ "load_switch": "Carica",
+ "turn_page_switch": "Volta pagina",
"Checkbox": "Casella di controllo",
"Checkbox Group": "Seleziona immagini per",
"artists": "Artisti",
@@ -956,6 +968,8 @@
"Save text information about generation parameters as chunks to png files": "Salva le informazioni di testo dei parametri di generazione come blocchi nel file png",
"Create a text file next to every image with generation parameters.": "Crea un file di testo assieme a ogni immagine con i parametri di generazione.",
"Save a copy of image before doing face restoration.": "Salva una copia dell'immagine prima di eseguire il restauro dei volti.",
+ "Save a copy of image before applying highres fix.": "Salva una copia dell'immagine prima di applicare la correzione ad alta risoluzione.",
+ "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la correzione del colore ai risultati di img2img",
"Quality for saved jpeg images": "Qualità delle immagini salvate in formato JPEG",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Se l'immagine PNG è più grande di 4 MB o qualsiasi dimensione è maggiore di 4000, ridimensiona e salva la copia come JPG",
"Use original name for output filename during batch process in extras tab": "Usa il nome originale per il nome del file di output durante l'elaborazione a lotti nella scheda 'Extra'",
@@ -997,12 +1011,14 @@
"Filename join string": "Stringa per unire le parole estratte dal nome del file",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare",
+ "Use cross attention optimizations while training": "Usa le ottimizzazioni di controllo dell'attenzione incrociato durante l'allenamento",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoint da memorizzare nella RAM",
+ "SD VAE": "SD VAE",
+ "auto": "auto",
"Hypernetwork strength": "Forza della Iperrete",
"Inpainting conditioning mask strength": "Forza della maschera di condizionamento del Inpainting",
"Apply color correction to img2img results to match original colors.": "Applica la correzione del colore ai risultati di img2img in modo che corrispondano ai colori originali.",
- "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la correzione del colore ai risultati di img2img",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Con img2img, esegue esattamente la quantità di passi specificata dalla barra di scorrimento (normalmente se ne effettuano di meno con meno riduzione del rumore).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Abilita la quantizzazione nei campionatori K per risultati più nitidi e puliti. Questo può cambiare i semi esistenti. Richiede il riavvio per applicare la modifica.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Enfasi: utilizzare (testo) per fare in modo che il modello presti maggiore attenzione al testo e [testo] per fargli prestare meno attenzione",
@@ -1194,8 +1210,8 @@
"Hue:0": "Hue:0",
"S:0": "S:0",
"L:0": "L:0",
- "Load Canvas": "Carica Tela",
- "saveCanvas": "Salva Tela",
+ "Load Canvas": "Carica Canvas",
+ "Save Canvas": "Salva Canvas",
"latest": "aggiornato",
"behind": "da aggiornare",
"Description": "Descrizione",
@@ -1214,4 +1230,4 @@
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Calcola il punteggio estetico per le immagini generate utilizzando il predittore del punteggio estetico CLIP+MLP basato su Chad Scorer",
"Lets you edit captions in training datasets.": "Consente di modificare i sottotitoli nei set di dati di addestramento.",
"Time taken:": "Tempo impiegato:"
-}
+}
\ No newline at end of file
From 2ac25ea64f31fd0e7dea35d27a52f3646618c3b6 Mon Sep 17 00:00:00 2001
From: digburn
Date: Wed, 2 Nov 2022 21:52:23 +0000
Subject: [PATCH 076/147] fix: Add required parameter to API extras route
---
modules/api/models.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/api/models.py b/modules/api/models.py
index 9ee42a17..9069c0ac 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -131,6 +131,7 @@ class ExtrasBaseRequest(BaseModel):
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
+ upscale_first: bool = Field(default=True, title="Upscale first", description="Should the upscaler run before restoring faces?")
class ExtraBaseResponse(BaseModel):
html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
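The added field gives API callers the same switch as the "Upscale Before Restoring Faces" checkbox, defaulting to True. A small pydantic sketch of the behaviour; ExtrasDemoRequest below only mirrors the relevant fields and is not the repo's ExtrasBaseRequest.

    from pydantic import BaseModel, Field

    class ExtrasDemoRequest(BaseModel):
        upscaling_resize: float = Field(default=2, title="Upscaling Factor")
        upscale_first: bool = Field(default=True, title="Upscale first",
                                    description="Should the upscaler run before restoring faces?")

    print(ExtrasDemoRequest())                      # upscale_first=True by default
    print(ExtrasDemoRequest(upscale_first=False))   # callers can switch it off per request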
From b2c08283bc92a62a66117923b0b13b9b968b6870 Mon Sep 17 00:00:00 2001
From: innovaciones
Date: Wed, 2 Nov 2022 16:14:44 -0600
Subject: [PATCH 077/147] New strings, some tweaks and fixes
---
localizations/es_ES.json | 422 ++++++++++++++++++++++++++++++++-------
1 file changed, 348 insertions(+), 74 deletions(-)
diff --git a/localizations/es_ES.json b/localizations/es_ES.json
index d9ca4ef7..9ba66c5b 100644
--- a/localizations/es_ES.json
+++ b/localizations/es_ES.json
@@ -5,18 +5,21 @@
"❮": "❮",
"❯": "❯",
"Loading...": "Cargando...",
- "view": "mostrar",
- "api": "api",
- "•": "•",
- "built with gradio": "Construido con Gradio",
+ "view": "Mostrar ",
+ "api": "API",
+ "•": " • ",
+ "Construido con Gradio": "Construido con Gradio",
"Stable Diffusion checkpoint": "Stable Diffusion checkpoint",
"txt2img": "txt2img",
"img2img": "img2img",
"Extras": "Extras",
- "PNG Info": "PNG Info",
- "Checkpoint Merger": "Checkpoint Merger",
- "Train": "Train",
+ "PNG Info": "Info PNG",
+ "Checkpoint Merger": "Fusionar Checkpoints",
+ "Train": "Entrenar",
+ "Deforum": "Deforum",
+ "Image Browser": "Navegador de Imágenes",
"Settings": "Ajustes",
+ "Extensions": "Extensiones",
"Prompt": "Prompt",
"Negative prompt": "Prompt negativo",
"Run": "Ejecutar",
@@ -27,15 +30,15 @@
"Style 2": "Estilo 2",
"Label": "Etiqueta",
"File": "Archivo",
- "Drop File Here": "Suelta el Archivo Aquí",
+ "Coloque el archivo aquí": "Suelta el archivo aquí",
"-": "-",
"o": "o",
- "Click to Upload": "Click para Cargar",
+ "Haga click para cargar": "Haz click para cargar",
"Image": "Imagen",
"Check progress": "Comprobar progreso",
"Check progress (first)": "Comprobar progreso (inicial)",
"Sampling Steps": "Sampling Steps",
- "Sampling method": "Sampling method",
+ "Sampling method": "Método de Sampling",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
@@ -57,33 +60,34 @@
"Firstpass width": "Ancho original",
"Firstpass height": "Alto original",
"Denoising strength": "Denoising strength",
- "Batch count": "Batch count",
- "Batch size": "Batch size",
- "CFG Scale": "CFG Scale",
+ "Batch count": "Cantidad del Batch",
+ "Batch size": "Tamaño del Batch",
+ "CFG Scale": "Escala CFG",
"Seed": "Seed",
"Extra": "Extra",
- "Variation seed": "Variation seed",
- "Variation strength": "Variation strength",
+ "Variation seed": "Seed de variación",
+ "Variation strength": "Fuerza de variación",
"Resize seed from width": "Redimensionar seed del ancho",
"Resize seed from height": "Redimensionar seed del alto",
"Script": "Script",
"None": "Ninguno",
- "Prompt matrix": "Prompt matrix",
+ "Prompt matrix": "Prompt en matriz",
"Prompts from file or textbox": "Prompts desde archivo o campo de texto",
- "X/Y plot": "X/Y plot",
+ "X/Y plot": "Tabla X/Y",
"Put variable parts at start of prompt": "Poner partes variables al inicio del prompt",
- "Show Textbox": "Mostrar Campo de texto",
- "File with inputs": "Archivo con inputs",
- "Prompts": "Prompts",
- "X type": "X type",
+ "Iterate seed every line": "Repetir seed en cada línea",
+ "Use same random seed for all lines": "Utiliza el mismo seed aleatorio para todas las líneas",
+ "List of prompt inputs": "Listado de prompts",
+ "Upload prompt inputs": "Cargar archivo de prompts",
+ "X type": "X",
"Nothing": "Nada",
"Var. seed": "Var. seed",
- "Var. strength": "Var. strength",
+ "Var. strength": "Var. fuerza",
"Steps": "Steps",
"Prompt S/R": "Prompt S/R",
"Prompt order": "Prompt order",
"Sampler": "Sampler",
- "Checkpoint name": "Checkpoint name",
+ "Checkpoint name": "Nombre Checkpoint",
"Hypernetwork": "Hypernetwork",
"Hypernet str.": "Hypernet str.",
"Sigma Churn": "Sigma Churn",
@@ -93,13 +97,13 @@
"Eta": "Eta",
"Clip skip": "Clip skip",
"Denoising": "Denoising",
- "X values": "X values",
- "Y type": "Y type",
- "Y values": "Y values",
+ "Cond. Image Mask Weight": "Cond. Image Mask Weight",
+ "X values": "Valores X",
+ "Y type": "Y",
+ "Y values": "Valores Y",
"Draw legend": "Agregar leyenda",
"Include Separate Images": "Incluir Imágenes Separadas",
"Keep -1 for seeds": "Mantener -1 para seeds",
- "Drop Image Here": "Suelta la Imagen Aquí",
"Save": "Guardar",
"Send to img2img": "Enviar a img2img",
"Send to inpaint": "Enviar a inpaint",
@@ -110,22 +114,23 @@
"Inpaint": "Inpaint",
"Batch img2img": "Batch img2img",
"Image for img2img": "Imagen para img2img",
- "Image for inpainting with mask": "Imagen para inpainting con mask",
- "Mask": "Mask",
- "Mask blur": "Mask blur",
- "Mask mode": "Mask mode",
- "Draw mask": "Dibujar mask",
- "Upload mask": "Cargar mask",
- "Masking mode": "Masking mode",
- "Inpaint masked": "Inpaint masked",
- "Inpaint not masked": "Inpaint not masked",
- "Masked content": "Masked content",
- "fill": "fill",
+ "Coloque la imagen aquí": "Suelta la imagen aquí",
+ "Image for inpainting with mask": "Imagen para inpainting con máscara",
+ "Mask": "Máscara",
+ "Mask blur": "Difuminar máscara",
+ "Mask mode": "Modo máscara",
+ "Draw mask": "Dibujar máscara",
+ "Upload mask": "Cargar máscara",
+ "Masking mode": "Modo de enmascarado",
+ "Inpaint masked": "Inpaint con enmascarado",
+ "Inpaint not masked": "Inpaint sin enmascarado",
+ "Masked content": "Contenido enmascarado",
+ "fill": "rellenar",
"original": "original",
"latent noise": "latent noise",
"latent nothing": "latent nothing",
"Inpaint at full resolution": "Inpaint a resolución completa",
- "Inpaint at full resolution padding, pixels": "Inpaint a resolución completa padding, pixeles",
+ "Inpaint at full resolution padding, pixels": "Inpaint a resolución completa con relleno, en pixeles",
"Process images in a directory on the same machine where the server is running.": "Procesa imágenes en un directorio en la misma máquina donde se ejecuta el servidor.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "Usa un directorio de salida vacío para guardar imágenes normalmente en lugar de escribir en el directorio de salida.",
"Input directory": "Directorio de entrada",
@@ -139,15 +144,16 @@
"Outpainting mk2": "Outpainting mk2",
"Poor man's outpainting": "Poor man's outpainting",
"SD upscale": "SD upscale",
+ "Deforum-webui (use tab extension instead!)": "Deforum-webui (utiliza la extensión en su lugar!)",
"should be 2 or lower.": "debe ser 2 o menos.",
- "Override `Sampling method` to Euler?(this method is built for it)": "Anular `Sampling method` a Euler? (este método está diseñado para ello)",
- "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Anular `prompt` al mismo valor `prompt original`? (y `prompt negativo`)",
+ "Override `Sampling method` to Euler?(this method is built for it)": "Invalidar `Sampling method` a Euler? (este método está diseñado para ello)",
+ "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Invalidar `prompt` al mismo valor `prompt original`? (y `prompt negativo`)",
"Original prompt": "Prompt original",
"Original negative prompt": "Promp negativo original",
- "Override `Sampling Steps` to the same value as `Decode steps`?": "Anular `Sampling Steps` al mismo valor de `Decode steps`?",
+ "Override `Sampling Steps` to the same value as `Decode steps`?": "Invalidar `Sampling Steps` al mismo valor de `Decode steps`?",
"Decode steps": "Decode steps",
- "Override `Denoising strength` to 1?": "Anular `Denoising strength` a 1?",
- "Decode CFG scale": "Decode CFG scale",
+ "Override `Denoising strength` to 1?": "Invalidar `Denoising strength` a 1?",
+ "Decode CFG scale": "Decodificar escala CFG",
"Randomness": "Aleatoriedad",
"Sigma adjustment for finding noise for image": "Ajuste Sigma para encontrar ruido para la imagen.",
"Loops": "Loops",
@@ -165,29 +171,35 @@
"Tile overlap": "Solapar mosaicos",
"Upscaler": "Upscaler",
"Lanczos": "Lanczos",
+ "Nearest": "Nearest",
"LDSR": "LDSR",
- "SwinIR 4x": "SwinIR 4x",
+ "ESRGAN_4x": "ESRGAN_4x",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
- "ESRGAN_4x": "ESRGAN_4x",
+ "SwinIR 4x": "SwinIR 4x",
+ "Deforum v0.5-webui-beta": "Deforum v0.5-webui-beta",
+ "This script is deprecated. Please use the full Deforum extension instead.": "Este script está obsoleto. Utiliza la extensión completa de Deforum en su lugar.",
+ "Update instructions:": "Instrucciones para actualizar:",
+ "github.com/deforum-art/deforum-for-automatic1111-webui/blob/automatic1111-webui/README.md": "github.com/deforum-art/deforum-for-automatic1111-webui/blob/automatic1111-webui/README.md",
+ "discord.gg/deforum": "discord.gg/deforum",
"Single Image": "Imagen Única",
"Batch Process": "Batch Process",
"Batch from Directory": "Batch desde Directorio",
"Source": "Origen",
- "Show result images": "Mostrar resultados de imágenes",
+ "Show result images": "Mostrar imágenes generadas",
"Scale by": "Escalar por",
"Scale to": "Escalar a",
"Resize": "Redimensionar",
"Crop to fit": "Recortar para ajustar",
- "Upscaler 2 visibility": "Upscaler 2 visibilidad",
- "GFPGAN visibility": "GFPGAN visibilidad",
- "CodeFormer visibility": "CodeFormer visibilidad",
- "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer peso (0 = efecto máximo, 1 = efecto mínimo)",
- "Open output directory": "Abrir directorio de salida",
+ "Upscaler 2 visibility": "Visibilidad Upscaler 2",
+ "GFPGAN visibility": "Visibilidad GFPGAN",
+ "CodeFormer visibility": "Visibilidad CodeFormer",
+ "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Influencia CodeFormer (0 = efecto máximo, 1 = efecto mínimo)",
+ "Upscale Before Restoring Faces": "Escalar antes de Restaurar Rostros",
"Send to txt2img": "Enviar a txt2img",
- "A merger of the two checkpoints will be generated in your": "Se generará una fusión de los dos checkpoints en su",
- "checkpoint": "checkpoint",
- "directory.": "directorio.",
+ "A merger of the two checkpoints will be generated in your": "Se generará una fusión de los dos checkpoints en tu ",
+ "checkpoint": "directorio ",
+ "directory.": "de modelos.",
"Primary model (A)": "Modelo primario (A)",
"Secondary model (B)": "Modelo secundario (B)",
"Tertiary model (C)": "Modelo terciario (C)",
@@ -215,6 +227,36 @@
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
+ "tanh": "tanh",
+ "sigmoid": "sigmoid",
+ "celu": "celu",
+ "gelu": "gelu",
+ "glu": "glu",
+ "hardshrink": "hardshrink",
+ "hardsigmoid": "hardsigmoid",
+ "hardtanh": "hardtanh",
+ "logsigmoid": "logsigmoid",
+ "logsoftmax": "logsoftmax",
+ "mish": "mish",
+ "prelu": "prelu",
+ "rrelu": "rrelu",
+ "relu6": "relu6",
+ "selu": "selu",
+ "silu": "silu",
+ "softmax": "softmax",
+ "softmax2d": "softmax2d",
+ "softmin": "softmin",
+ "softplus": "softplus",
+ "softshrink": "softshrink",
+ "softsign": "softsign",
+ "tanhshrink": "tanhshrink",
+ "threshold": "threshold",
+ "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleccionar inicialización de modelos de capa. relu-like - Kaiming, sigmoid-like - Xavier es el recomendado",
+ "Normal": "Normal",
+ "KaimingUniform": "KaimingUniform",
+ "KaimingNormal": "KaimingNormal",
+ "XavierUniform": "XavierUniform",
+ "XavierNormal": "XavierNormal",
"Add layer normalization": "Agregar normalización de capa",
"Use dropout": "Usar dropout",
"Overwrite Old Hypernetwork": "Sobrescribir Hypernetwork Anterior",
@@ -226,11 +268,16 @@
"prepend": "anteponer",
"append": "añadir",
"Create flipped copies": "Crear copias volteadas",
- "Split oversized images": "Dividir imágenes grandes",
- "Use BLIP for caption": "Usar BLIP para subtítulo",
- "Use deepbooru for caption": "Usar deepbooru para subtítulo",
- "Split image threshold": "Threshold imagen dividida",
- "Split image overlap ratio": "Overlap ratio imagen dividida",
+ "Split oversized images": "Dividir imágenes muy grandes",
+ "Auto focal point crop": "Recorte de punto focal automático",
+ "Use BLIP for caption": "Usar BLIP para leyenda",
+ "Use deepbooru for caption": "Usar deepbooru para leyenda",
+ "Split image threshold": "Umbral en imagen dividida",
+ "Split image overlap ratio": "Relación de superposición en imagen dividida",
+ "Focal point face weight": "Peso de la cara del punto focal",
+ "Focal point entropy weight": "Focal point entropy weight",
+ "Focal point edges weight": "Focal point edges weight",
+ "Create debug image": "Crear imagen de depuración",
"Preprocess": "Preproceso",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Entrenar un embedding o Hypernetwork; debes especificar un directorio con un conjunto de imágenes con una proporción de 1:1 ",
"[wiki]": "[wiki]",
@@ -245,13 +292,194 @@
"Save a copy of embedding to log directory every N steps, 0 to disable": "Guarda una copia de embedding en el directorio log cada N pasos, 0 para deshabilitar",
"Save images with embedding in PNG chunks": "Guarda imágenes con embedding en fragmentos PNG",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "Leer parámetros (prompt, etc...) desde la pestaña txt2img al hacer vistas previas",
- "Train Hypernetwork": "Train Hypernetwork",
- "Train Embedding": "Train Embedding",
+ "Train Hypernetwork": "Entrenar Hypernetwork",
+ "Train Embedding": "Entrenar Embedding",
+ "Info and links": "Información y enlaces",
+ "▼": "▼",
+ "Made by deforum.github.io, port for AUTOMATIC1111's webui maintained by kabachuha": "Desarrolado por deforum.github.io, port para AUTOMATIC1111's webui mantenido por kabachuha",
+ "Original Deforum Github repo github.com/deforum/stable-diffusion": "Deforum Github repo github.com/deforum/stable-diffusion",
+ "This fork for auto1111's webui github.com/deforum-art/deforum-for-automatic1111-webui": "Fork para auto1111's webui github.com/deforum-art/deforum-for-automatic1111-webui",
+ "Join the official Deforum Discord discord.gg/deforum to share your creations and suggestions": "Únete al Discord oficial Deforum discord.gg/deforum para compartir tus creaciones y sugerencias",
+ "User guide for v0.5 docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit": "Guía de usuario v0.5 docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit",
+ "Math keyframing explanation docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing": "Explicación de fotogramas matemáticos docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing",
+ "Keyframes": "Keyframes",
+ "Prompts": "Prompts",
+ "Init": "Init",
+ "Video output": "Salida de vídeo",
+ "Run settings": "Ejecutar ajustes",
+ "Import settings from file": "Importar ajustes desde archivo",
+ "Override settings": "Invalidar ajustes",
+ "Custom settings file": "Archivo de ajustes personalizados",
+ "Sampling settings": "Ajustes de sampling",
+ "override_these_with_webui": "override_these_with_webui",
+ "W": "Ancho",
+ "H": "Alto",
+ "seed": "seed",
+ "sampler": "sampler",
+ "Enable extras": "Activar extras",
+ "subseed": "subseed",
+ "subseed_strength": "subseed_strength",
+ "steps": "steps",
+ "ddim_eta": "ddim_eta",
+ "n_batch": "n_batch",
+ "make_grid": "make_grid",
+ "grid_rows": "grid_rows",
+ "save_settings": "save_settings",
+ "save_samples": "save_samples",
+ "display_samples": "display_samples",
+ "save_sample_per_step": "save_sample_per_step",
+ "show_sample_per_step": "show_sample_per_step",
+ "Batch settings": "Ajustes de batch",
+ "batch_name": "batch_name",
+ "filename_format": "filename_format",
+ "seed_behavior": "seed_behavior",
+ "iter": "iter",
+ "fixed": "fixed",
+ "random": "random",
+ "schedule": "schedule",
+ "Animation settings": "Ajustes de animación",
+ "animation_mode": "animation_mode",
+ "2D": "2D",
+ "3D": "3D",
+ "Video Input": "Entrada de Video",
+ "max_frames": "max_frames",
+ "border": "border",
+ "replicate": "replicate",
+ "wrap": "wrap",
+ "Motion parameters:": "Parámetros de movimiento:",
+ "2D and 3D settings": "Ajustes 2D y 3D",
+ "angle": "angle",
+ "zoom": "zoom",
+ "translation_x": "translation_x",
+ "translation_y": "translation_y",
+ "3D settings": "Ajustes 3D",
+ "translation_z": "translation_z",
+ "rotation_3d_x": "rotation_3d_x",
+ "rotation_3d_y": "rotation_3d_y",
+ "rotation_3d_z": "rotation_3d_z",
+ "Prespective flip — Low VRAM pseudo-3D mode:": "Prespective flip — Modo Low VRAM pseudo-3D:",
+ "flip_2d_perspective": "flip_2d_perspective",
+ "perspective_flip_theta": "perspective_flip_theta",
+ "perspective_flip_phi": "perspective_flip_phi",
+ "perspective_flip_gamma": "perspective_flip_gamma",
+ "perspective_flip_fv": "perspective_flip_fv",
+ "Generation settings:": "Ajustes de generación:",
+ "noise_schedule": "noise_schedule",
+ "strength_schedule": "strength_schedule",
+ "contrast_schedule": "contrast_schedule",
+ "cfg_scale_schedule": "cfg_scale_schedule",
+ "3D Fov settings:": "Ajustes 3D Fov:",
+ "fov_schedule": "fov_schedule",
+ "near_schedule": "near_schedule",
+ "far_schedule": "far_schedule",
+ "To enable seed schedule select seed behavior — 'schedule'": "Para habilitar el seed schedule, selecciona el comportamiento del seed — 'schedule'",
+ "seed_schedule": "seed_schedule",
+ "Coherence:": "Coherencia:",
+ "color_coherence": "color_coherence",
+ "Match Frame 0 HSV": "Match Frame 0 HSV",
+ "Match Frame 0 LAB": "Match Frame 0 LAB",
+ "Match Frame 0 RGB": "Match Frame 0 RGB",
+ "diffusion_cadence": "diffusion_cadence",
+ "3D Depth Warping:": "3D Depth Warping:",
+ "use_depth_warping": "use_depth_warping",
+ "midas_weight": "midas_weight",
+ "near_plane": "near_plane",
+ "far_plane": "far_plane",
+ "fov": "fov",
+ "padding_mode": "padding_mode",
+ "reflection": "reflection",
+ "zeros": "zeros",
+ "sampling_mode": "sampling_mode",
+ "bicubic": "bicubic",
+ "bilinear": "bilinear",
+ "nearest": "nearest",
+ "save_depth_maps": "save_depth_maps",
+ "`animation_mode: None` batches on list of *prompts*. (Batch mode disabled atm, only animation_prompts are working)": "`animation_mode: None` batches en lista de *prompts*. (Modo batch deshabilitado por el momento, solamente animation_prompts esta funcionando)",
+ "*Important change from vanilla Deforum!*": "*Cambios importantes en Deforum!*",
+ "This script uses the built-in webui weighting settings.": "Este script utiliza la configuración de pesos integrados.",
+ "So if you want to use math functions as prompt weights,": "Entonces, si deseas usar funciones matemáticas con pesos en los prompts,",
+ "keep the values above zero in both parts": "manten los valores por encima de cero en ambas partes",
+ "Negative prompt part can be specified with --neg": "La parte de prompt negativo se puede especificar utilizando --neg",
+ "batch_prompts (disabled atm)": "batch_prompts (deshabilitado por el momento)",
+ "animation_prompts": "animation_prompts",
+ "Init settings": "Ajustes Init",
+ "use_init": "use_init",
+ "from_img2img_instead_of_link": "from_img2img_instead_of_link",
+ "strength_0_no_init": "strength_0_no_init",
+ "strength": "strength",
+ "init_image": "init_image",
+ "use_mask": "use_mask",
+ "use_alpha_as_mask": "use_alpha_as_mask",
+ "invert_mask": "invert_mask",
+ "overlay_mask": "overlay_mask",
+ "mask_file": "mask_file",
+ "mask_brightness_adjust": "mask_brightness_adjust",
+ "mask_overlay_blur": "mask_overlay_blur",
+ "Video Input:": "Entrada de Video:",
+ "video_init_path": "video_init_path",
+ "extract_nth_frame": "extract_nth_frame",
+ "overwrite_extracted_frames": "overwrite_extracted_frames",
+ "use_mask_video": "use_mask_video",
+ "video_mask_path": "video_mask_path",
+ "Interpolation (turned off atm)": "Interpolación (apagado por el momento)",
+ "interpolate_key_frames": "interpolate_key_frames",
+ "interpolate_x_frames": "interpolate_x_frames",
+ "Resume animation:": "Reanudar animación:",
+ "resume_from_timestring": "resume_from_timestring",
+ "resume_timestring": "resume_timestring",
+ "Video output settings": "Ajustes video de salida",
+ "skip_video_for_run_all": "skip_video_for_run_all",
+ "fps": "fps",
+ "output_format": "output_format",
+ "PIL gif": "PIL gif",
+ "FFMPEG mp4": "FFMPEG mp4",
+ "ffmpeg_location": "ffmpeg_location",
+ "add_soundtrack": "add_soundtrack",
+ "soundtrack_path": "soundtrack_path",
+ "use_manual_settings": "use_manual_settings",
+ "render_steps": "render_steps",
+ "max_video_frames": "max_video_frames",
+ "path_name_modifier": "path_name_modifier",
+ "x0_pred": "x0_pred",
+ "x": "x",
+ "image_path": "image_path",
+ "mp4_path": "mp4_path",
+ "Click here after the generation to show the video": "Haz click aquí después de la generación para mostrar el video",
+ "NOTE: If the 'Generate' button doesn't work, go in Settings and click 'Restart Gradio and Refresh...'.": "NOTA: Si el botón 'Generar' no funciona, ve a los Ajustes y presiona 'Reinciar Gradio y Refrescar...'.",
+ "Save Settings": "Guardar Ajustes",
+ "Load Settings": "Cargar Ajustes",
+ "Path relative to the webui folder": "Ruta relativa al folder principal",
+ "Save Video Settings": "Guardar Ajustes de Video",
+ "Load Video Settings": "Cargar Ajustes de Video",
+ "Favorites": "Favoritos",
+ "Others": "Otros",
+ "Images directory": "Directorio de Imágenes",
+ "Dropdown": "Menú desplegable",
+ "First Page": "Primera Página",
+ "Prev Page": "Página Anterior",
+ "Page Index": "Índice de Página",
+ "Next Page": "Página Siguiente",
+ "End Page": "Última Página",
+ "delete next": "eliminar siguiente",
+ "Delete": "Eliminar",
+ "sort by": "ordenar por",
+ "path name": "nombre de ruta",
+ "date": "fecha",
+ "keyword": "palabra clave",
+ "Generate Info": "Generar Info",
+ "File Name": "Nombre de Archivo",
+ "Move to favorites": "Mover a favoritos",
+ "Renew Page": "Recargar Página",
+ "Number": "Número",
+ "set_index": "set_index",
+ "load_switch": "load_switch",
+ "turn_page_switch": "turn_page_switch",
+ "Checkbox": "Checkbox",
"Apply settings": "Aplicar ajustes",
"Saving images/grids": "Guardar imágenes/grids",
"Always save all generated images": "Siempre guardar imágenes generadas",
"File format for images": "Formato de archivo para imágenes",
- "Images filename pattern": "Patrón nombre archivo imágenes",
+ "Images filename pattern": "Patrón en nombre archivo imágenes",
"Add number to filename when saving": "Agregar número al nombre de archivo al guardar",
"Always save all generated image grids": "Siempre guardar grids de imágenes generadas",
"File format for grids": "Formato de archivo para grids",
@@ -262,6 +490,8 @@
"Save text information about generation parameters as chunks to png files": "Guardar información de texto sobre parámetros de generación como fragmentos en archivos png",
"Create a text file next to every image with generation parameters.": "Crear un archivo de texto junto a cada imagen con parámetros de generación.",
"Save a copy of image before doing face restoration.": "Guardar una copia de la imagen antes de restaurar rostro.",
+ "Save a copy of image before applying highres fix.": "Guardar una copia de la imagen antes de aplicar highres fix.",
+ "Save a copy of image before applying color correction to img2img results": "Guarda una copia de la imagen antes de aplicar la corrección de color a los resultados de img2img",
"Quality for saved jpeg images": "Calidad para imágenes jpeg guardadas",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Si la imagen PNG es más grande de 4 MB o cualquier dimensión es más grande que 4000, reduce la escala y guarda la copia como JPG",
"Use original name for output filename during batch process in extras tab": "Use el nombre original para el nombre del archivo de salida durante el batch process en la pestaña extras",
@@ -291,23 +521,26 @@
"Upscaler for img2img": "Upscaler para img2img",
"Upscale latent space image when doing hires. fix": "Upscale latent space al aplicar hires. fix",
"Face restoration": "Restauración de rostro",
- "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer parámetro peso; 0 = máximo efecto; 1 = mínimo efecto",
+ "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Parámetro influencia CodeFormer; 0 = máximo efecto; 1 = mínimo efecto",
"Move face restoration model from VRAM into RAM after processing": "Mover modelo de restauración de rostro del VRAM al RAM después de procesar",
"System": "Sistema",
"VRAM usage polls per second during generation. Set to 0 to disable.": "Sondeos de uso de VRAM por segundo durante la generación. Establecer en 0 para deshabilitar.",
"Always print all generation info to standard output": "Imprime siempre toda la información de generación en la salida estándar",
"Add a second progress bar to the console that shows progress for an entire job.": "Agrega una segunda barra de progreso a la consola que muestra el progreso de un trabajo completo.",
"Training": "Entrenamiento",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Mover VAE y CLIP al RAM al entrenar un hypernetwork. Ahorra VRAM.",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "Mover VAE y CLIP al RAM al entrenar cuando sea posible. Ahorra VRAM.",
"Filename word regex": "Filename word regex",
"Filename join string": "Filename join string",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Número de repeticiones para una sola imagen de entrada por epoch; utilizado solo para mostrar el número epoch",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Guarda un csv que contenga la pérdida en el directorio log cada N pasos, 0 para deshabilitar",
+ "Use cross attention optimizations while training": "Utiliza optimizaciones de atención cruzada durante el entrenamiento",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoints al cache en RAM",
+ "SD VAE": "SD VAE",
+ "auto": "auto",
"Hypernetwork strength": "Hypernetwork strength",
+ "Inpainting conditioning mask strength": "Fuerza de la máscara en acondicionamiento Inpainting",
"Apply color correction to img2img results to match original colors.": "Aplica la corrección de color a los resultados de img2img para que coincidan con los colores originales.",
- "Save a copy of image before applying color correction to img2img results": "Guarda una copia de la imagen antes de aplicar la corrección de color a los resultados de img2img",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Con img2img, hace exactamente la cantidad de pasos que especifica el slider (normalmente haría menos con menos eliminación de ruido).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Habilita la cuantificación en K samplers para obtener resultados más nítidos y limpios. Esto puede cambiar los seeds existentes. Requiere reiniciar para aplicar.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Énfasis: utiliza (texto) para que el modelo preste más atención al texto y [texto] para que preste menos atención",
@@ -324,7 +557,7 @@
"Interrogate: minimum description length (excluding artists, etc..)": "Interrogar: longitud mínima de la descripción (excluyendo artistas, etc.)",
"Interrogate: maximum description length": "Interrogar: longitud máxima de la descripción",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: número máximo de líneas en el archivo de texto (0 = Sin límite)",
- "Interrogate: deepbooru score threshold": "Interrogar: deepbooru score threshold",
+ "Interrogate: deepbooru score threshold": "Interrogar: deepbooru umbral de puntuación",
"Interrogate: deepbooru sort alphabetically": "Interrogar: deepbooru ordenar alfabéticamente",
"use spaces for tags in deepbooru": "usar espacios para etiquetas en deepbooru",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "corchetes de escape (\\) en deepbooru (por lo que se usan como corchetes literales y no para enfatizar)",
@@ -337,13 +570,26 @@
"Add model hash to generation information": "Agregar hash de modelo a la información de generación",
"Add model name to generation information": "Agregar nombre de modelo a la información de generación",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Al leer los parámetros de generación del texto en la interfaz de usuario (desde PNG Info o texto pegado), no cambia el modelo/checkpoint seleccionado.",
+ "Send seed when sending prompt or image to other interface": "Enviar seed cuando se envíe el prompt o imagen a otra interfase",
"Font for image grids that have text": "Tipografía para grids de imágenes que tienen texto",
"Enable full page image viewer": "Habilitar visor de imágenes de página completa",
"Show images zoomed in by default in full page image viewer": "Mostrar imágenes ampliadas de forma predeterminada en el visor de imágenes de página completa",
"Show generation progress in window title.": "Muestra el progreso de la generación en el título de la ventana del navegador.",
"Quicksettings list": "Lista de ajustes rápidos",
"Localization (requires restart)": "Traducción (requiere reiniciar)",
- "Sampler parameters": "Parámetros sampler",
+ "fr_FR": "fr_FR",
+ "tr_TR": "tr_TR",
+ "it_IT": "it_IT",
+ "de_DE": "de_DE",
+ "ru_RU": "ru_RU",
+ "ja_JP": "ja_JP",
+ "es_ES": "es_ES",
+ "ko_KR": "ko_KR",
+ "zh_TW": "zh_TW",
+ "zh_CN": "zh_CN",
+ "ar_AR": "ar_AR",
+ "pt_BR": "pt_BR",
+ "Sampler parameters": "Parámetros del sampler",
"Hide samplers in user interface (requires restart)": "Ocultar samplers en interfaz de usuario (requiere reiniciar)",
"eta (noise multiplier) for DDIM": "eta (noise multiplier) para DDIM",
"eta (noise multiplier) for ancestral samplers": "eta (noise multiplier) para ancestral samplers",
@@ -354,10 +600,35 @@
"sigma tmin": "sigma tmin",
"sigma noise": "sigma noise",
"Eta noise seed delta": "Eta noise seed delta",
+ "Images Browser": "Navegador de Imágenes",
+ "Preload images at startup": "Precargar imágenes al iniciar",
+ "Number of columns on the page": "Número de columnas en la página",
+ "Number of rows on the page": "Número de filas en la página",
+ "Minimum number of pages per load": "Número mínimo de páginas por carga",
"Request browser notifications": "Solicitar notificaciones del navegador",
"Download localization template": "Descargar plantilla de traducción",
"Reload custom script bodies (No ui updates, No restart)": "Recargar custom script bodies (Sin actualizar UI, Sin reiniciar)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Reiniciar Gradio y Refrescar componentes (Custom Scripts, ui.py, js y css)",
+ "Installed": "Instaladas",
+ "Available": "Disponibles",
+ "Install from URL": "Instalar desde URL",
+ "Apply and restart UI": "Aplicar y reiniciar UI",
+ "Check for updates": "Buscar actualizaciones",
+ "Extension": "Extensión",
+ "URL": "URL",
+ "Update": "Actualizar",
+ "deforum-for-automatic1111-webui": "deforum-for-automatic1111-webui",
+ "https://github.com/deforum-art/deforum-for-automatic1111-webui": "https://github.com/deforum-art/deforum-for-automatic1111-webui",
+ "unknown": "desconocido",
+ "stable-diffusion-webui-images-browser": "stable-diffusion-webui-images-browser",
+ "https://github.com/yfszzx/stable-diffusion-webui-images-browser": "https://github.com/yfszzx/stable-diffusion-webui-images-browser",
+ "Load from:": "Cargar desde:",
+ "Extension index URL": "URL índice de extensiones",
+ "URL for extension's git repository": "URL repositorio git de extensión",
+ "Local directory name": "Nombre directorio local",
+ "Install": "Instalar",
+ "Ver": "Ver",
+ "Entrenar un embedding o Hypernetwork; debes especificar un directorio con un conjunto de imágenes con una proporción de 1:1": "Entrenar un embedding o Hypernetwork; debes especificar un directorio con un conjunto de imágenes con una proporción de 1:1",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (presiona Ctrl+Enter o Alt+Enter para generar)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (presiona Ctrl+Enter o Alt+Enter para generar)",
"Add a random artist to the prompt.": "Agregar un artista aleatorio al prompt.",
@@ -387,16 +658,16 @@
"Separate values for Y axis using commas.": "Separar valores para Y usando comas.",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Escribe la imagen en un directorio (predeterminado: log/images) y los parámetros de generación en el archivo csv.",
"Open images output directory": "Abrir directorio de imágenes",
- "How much to blur the mask before processing, in pixels.": "Cuánto blur al mask antes de procesar, en píxeles.",
- "What to put inside the masked area before processing it with Stable Diffusion.": "Qué poner dentro del área con mask antes de procesarlo con Stable Diffusion.",
+ "How much to blur the mask before processing, in pixels.": "Cuánto difuminado a la máscara antes de procesarla, en píxeles.",
+ "What to put inside the masked area before processing it with Stable Diffusion.": "Qué poner dentro del área enmascarada antes de procesarla con Stable Diffusion.",
"fill it with colors of the image": "rellenarlo con los colores de la imagen",
"keep whatever was there originally": "mantener lo que estaba allí originalmente",
"fill it with latent space noise": "rellenarlo con latent space noise",
"fill it with latent space zeroes": "rellenarlo con latent space zeroes",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Upscale la región con mask a la resolución objetivo, vuelve a pintar, reduce la escala hacia atrás y pégala en la imagen original",
- "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Cambia el tamaño de la imagen a la resolución destino. A menos que la altura y el ancho coincidan, obtendrás una relación de aspecto incorrecta.",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Escalar la región con máscara a la resolución objetivo, vuelve a pintar, reduce la escala hacia atrás y pégala en la imagen original",
+ "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Cambia el tamaño de la imagen a la resolución objetivo. A menos que la altura y el ancho coincidan, obtendrás una relación de aspecto incorrecta.",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Cambia el tamaño de la imagen para que la totalidad de la resolución destino se llene con la imagen. Recorta las partes que sobresalen.",
- "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Cambia el tamaño de la imagen para que la totalidad de la imagen esté dentro de la resolución de destino. Rellena el espacio vacío con los colores de la imagen.",
+ "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Cambia el tamaño de la imagen para que la totalidad de la imagen esté dentro de la resolución objetivo. Rellena el espacio vacío con los colores de la imagen.",
"How many times to repeat processing an image and using it as input for the next iteration": "Cuántas veces repetir el procesamiento de una imagen y usarla como entrada para la próxima iteración",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "En modo loopback, en cada bucle, la fuerza de eliminación de ruido se multiplica por este valor. <1 significa variedad decreciente, por lo que su secuencia convergerá en una imagen fija. >1 significa aumentar la variedad, por lo que su secuencia se volverá cada vez más caótica.",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Para SD upscale, cuánta superposición en píxeles debe haber entre mosaicos. Los mosaicos se superponen de modo que cuando se fusionan nuevamente en una imagen, no hay una unión claramente visible.",
@@ -407,12 +678,15 @@
"1st and last digit must be 1. ex:'1, 2, 1'": "Primer y último dígito debe ser 1. ej:'1, 2, 1'",
"Path to directory with input images": "Ruta al directorio con imágenes de entrada",
"Path to directory where to write outputs": "Ruta al directorio donde escribir salidas",
+ "Input images directory": "Directorio de imágenes de entrada",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "Usa las siguientes etiquetas para definir cómo se eligen los nombres de archivo para las imágenes: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; dejar vacío para utilizar predeterminados.",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Si esta opción está habilitada, el watermark no se agregará a las imágenes creadas. Advertencia: si no agregas un watermark, es posible que te estés comportando de manera poco ética.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "Usa las siguiente etiquetas para definir cómo los subdirectorios para imágenes y grids son seleccionados: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; dejar vacío para utilizar predeterminados.",
"Restore low quality faces using GFPGAN neural network": "Restaurar rostros de baja calidad utilizando GFPGAN neural network",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Esta expresión regular se usará para extraer palabras del nombre de archivo y se unirán usando la opción a continuación en el texto de la etiqueta que se usa para el entrenamiento. Dejar vacío para mantener el texto del nombre de archivo tal como está.",
"This string will be used to join split words into a single line if the option above is enabled.": "Esta cadena se usará para unir palabras divididas en una sola línea si la opción anterior está habilitada.",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Solo se aplica a modelos inpainting. Determina con qué fuerza enmascarar la imagen original para inpainting en img2img. 1.0 significa totalmente enmascarado, que es el comportamiento predeterminado. 0.0 significa un condicionamiento totalmente desenmascarado. Los valores más bajos ayudarán a preservar la composición general de la imagen, pero tendrán problemas con los grandes cambios.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Lista de nombres de configuración, separados por comas, para configuraciones que deben ir a la barra de acceso rápido en la parte superior, en lugar de la pestaña de configuración habitual. Ver modules/shared.py para configurar los nombres. Requiere reiniciar para aplicar.",
- "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Si este valor no es cero, se agregará al seed y se usará para inicializar RNG para ruidos cuando se usan samplers con Eta. Puedes usar esto para producir aún más variaciones de imágenes, o puedes usar esto para hacer coincidir imágenes de otro software si sabes lo que estás haciendo."
+ "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Si este valor no es cero, se agregará al seed y se usará para inicializar RNG para ruidos cuando se usan samplers con Eta. Puedes usar esto para producir aún más variaciones de imágenes, o puedes usar esto para hacer coincidir imágenes de otro software si sabes lo que estás haciendo.",
+ "Leave empty for auto": "Dejar vacío para automático"
}
From 17315499eab7b3e5d6f36f0de5ef3bfc09230d46 Mon Sep 17 00:00:00 2001
From: Riccardo Giovanetti <29801031+Harvester62@users.noreply.github.com>
Date: Wed, 2 Nov 2022 23:15:17 +0100
Subject: [PATCH 078/147] Italian localization - Additions and Updates (fix
typos)
Updated localization with the latest version of these Scripts/Extensions:
unprompted (new)
img2tiles
random
random grid
Some new options in the Extras and Settings have been translated too.
P.S.: I fixed a couple of typos. By mistake, I also uploaded this file to the main branch of my fork and didn't know how to revert the commit. Sorry for the mess.
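For anyone maintaining these localization files, here is a minimal sketch (not part of this patch) of how one might list entries whose value is still identical to the English key; the filename and helper name are assumptions for illustration, and note that identical entries can also be intentional (sampler names, parameter names, proper nouns).

    import json

    def identical_entries(path="it_IT.json"):
        # Keys are the original English UI strings; values are the translations.
        with open(path, encoding="utf-8") as f:
            data = json.load(f)
        # Entries where key == value are candidates for review, not necessarily errors.
        return [key for key, value in data.items() if key == value]

    if __name__ == "__main__":
        for key in identical_entries():
            print(key)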
---
it_IT.json | 1233 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 1233 insertions(+)
create mode 100644 it_IT.json
diff --git a/it_IT.json b/it_IT.json
new file mode 100644
index 00000000..a78fd574
--- /dev/null
+++ b/it_IT.json
@@ -0,0 +1,1233 @@
+{
+ "⤡": "⤡",
+ "⊞": "⊞",
+ "×": "×",
+ "❮": "❮",
+ "❯": "❯",
+ "Loading...": "Caricamento...",
+ "view": "mostra ",
+ "api": "API",
+ "•": " • ",
+ "built with gradio": " Sviluppato con Gradio",
+ "Stable Diffusion checkpoint": "Stable Diffusion checkpoint",
+ "txt2img": "txt2img",
+ "img2img": "img2img",
+ "Extras": "Extra",
+ "PNG Info": "Info PNG",
+ "Checkpoint Merger": "Miscelatore di Checkpoint",
+ "Train": "Addestramento",
+ "Create aesthetic embedding": "Crea incorporamento estetico",
+ "Dataset Tag Editor": "Dataset Tag Editor",
+ "Deforum": "Deforum",
+ "Artists To Study": "Artisti per studiare",
+ "Image Browser": "Galleria immagini",
+ "Inspiration": "Ispirazione",
+ "Settings": "Impostazioni",
+ "Extensions": "Estensioni",
+ "Prompt": "Prompt",
+ "Negative prompt": "Prompt negativo",
+ "Run": "Esegui",
+ "Skip": "Salta",
+ "Interrupt": "Interrompi",
+ "Generate": "Genera",
+ "Style 1": "Stile 1",
+ "Style 2": "Stile 2",
+ "Label": "Etichetta",
+ "File": "File",
+ "Drop File Here": "Trascina il file qui",
+ "-": "-",
+ "or": "o",
+ "Click to Upload": "Clicca per caricare",
+ "Image": "Immagine",
+ "Check progress": "Controlla i progressi",
+ "Check progress (first)": "Controlla i progressi (primo)",
+ "Sampling Steps": "Passi di campionamento",
+ "Sampling method": "Metodo di campionamento",
+ "Euler a": "Euler a",
+ "Euler": "Euler",
+ "LMS": "LMS",
+ "Heun": "Heun",
+ "DPM2": "DPM2",
+ "DPM2 a": "DPM2 a",
+ "DPM fast": "DPM fast",
+ "DPM adaptive": "DPM adaptive",
+ "LMS Karras": "LMS Karras",
+ "DPM2 Karras": "DPM2 Karras",
+ "DPM2 a Karras": "DPM2 a Karras",
+ "DDIM": "DDIM",
+ "PLMS": "PLMS",
+ "Width": "Larghezza",
+ "Height": "Altezza",
+ "Restore faces": "Restaura i volti",
+ "Tiling": "Piastrellatura",
+ "Highres. fix": "Correzione alta risoluzione",
+ "Firstpass width": "Larghezza del primo passaggio",
+ "Firstpass height": "Altezza del primo passaggio",
+ "Denoising strength": "Forza del Denoising",
+ "Batch count": "Lotti di immagini",
+ "Batch size": "Immagini per lotto",
+ "CFG Scale": "Scala CFG",
+ "Seed": "Seme",
+ "Extra": "Extra",
+ "Variation seed": "Seme della variazione",
+ "Variation strength": "Forza della variazione",
+ "Resize seed from width": "Ridimensiona il seme dalla larghezza",
+ "Resize seed from height": "Ridimensiona il seme dall'altezza",
+ "Open for Clip Aesthetic!": "Apri per Gradienti Estetici (CLIP)",
+ "▼": "▼",
+ "Aesthetic weight": "Estetica - Peso",
+ "Aesthetic steps": "Estetica - Passi",
+ "Aesthetic learning rate": "Estetica - Tasso di apprendimento",
+ "Slerp interpolation": "Interpolazione Slerp",
+ "Aesthetic imgs embedding": "Estetica - Incorporamento di immagini",
+ "None": "Niente",
+ "Aesthetic text for imgs": "Estetica - Testo per le immagini",
+ "Slerp angle": "Angolo Slerp",
+ "Is negative text": "È un testo negativo",
+ "Script": "Script",
+ "Random grid": "Generaz. casuale (griglia)",
+ "Random": "Generaz. casuale (no griglia)",
+ "StylePile": "StylePile",
+ "Advanced prompt matrix": "Matrice di prompt avanzata",
+ "Advanced Seed Blending": "Miscelazione Semi Avanzata",
+ "Alternate Sampler Noise Schedules": "Metodi alternativi di campionamento del rumore",
+ "Animator v6": "Animator v6",
+ "Asymmetric tiling": "Piastrellatura asimmetrica",
+ "Custom code": "Codice personalizzato",
+ "Embedding to Shareable PNG": "Incorporamento convertito in PNG condivisibile",
+ "Force symmetry": "Forza la simmetria",
+ "Prompts interpolation": "Interpola Prompt",
+ "Prompt matrix": "Matrice dei prompt",
+ "Prompt morph": "Metamorfosi del prompt",
+ "Prompts from file or textbox": "Prompt da file o da casella di testo",
+ "To Infinity and Beyond": "Verso l'infinito e oltre",
+ "Seed travel": "Interpolazione semi",
+ "Shift attention": "Sposta l'attenzione",
+ "Text to Vector Graphics": "Da testo a grafica vettoriale",
+ "Unprompted": "Unprompted",
+ "X/Y plot": "Grafico X/Y",
+ "X/Y/Z plot": "Grafico X/Y/Z",
+ "Dynamic Prompting v0.13.6": "Prompt dinamici v0.13.6",
+ "Create inspiration images": "Crea immagini di ispirazione",
+ "step1 min/max": "Passi min(o max)",
+ "step2 min/max": "Passi max (o min)",
+ "step cnt": "Q.tà di Passi",
+ "cfg1 min/max": "CFG min (o max)",
+ "cfg2 min/max": "CFG max (o min)",
+ "cfg cnt": "Q.tà di CFG",
+ "Draw legend": "Disegna legenda",
+ "Include Separate Images": "Includi immagini separate",
+ "Keep -1 for seeds": "Mantieni sempre il seme a -1",
+ "x/y change": "Inverti ordine assi X/Y (Passi/CFG)",
+ "Loops": "Cicli",
+ "Focus on:": "Focus su:",
+ "No focus": "Nessun Focus",
+ "Portraits (tick Restore faces above for best results)": "Ritratti (selezionare 'Restaura volti' in alto per ottenere i migliori risultati)",
+ "Feminine and extra attractive (tick Restore faces above for best results)": "Femminile ed estremamente attraente (selezionare 'Restaura volti' per ottenere i migliori risultati)",
+ "Masculine and extra attractive (tick Restore faces above for best results)": "Maschile ed estremamente attraente (selezionare 'Restaura volti' per ottenere i migliori risultati)",
+ "Monsters": "Mostri",
+ "Robots": "Robot",
+ "Retrofuturistic": "Retrofuturistico",
+ "Propaganda": "Propaganda",
+ "Landscapes": "Paesaggi",
+ "Hints": "Suggerimenti",
+ "Image type": "Tipo di immagine",
+ "Not set": "Non impostato",
+ "Photography": "Fotografia",
+ "Digital art": "Arte digitale",
+ "3D Rendering": "3D Rendering",
+ "Painting": "Dipinto",
+ "Sketch": "Schizzo",
+ "Classic Comics": "Fumetti classici",
+ "Modern Comics": "Fumetti moderni",
+ "Manga": "Manga",
+ "Vector art": "Arte vettoriale",
+ "Visual style": "Stile visivo",
+ "Realism": "Realismo",
+ "Photorealism": "Fotorealismo",
+ "Hyperrealism": "Iperrealismo",
+ "Surrealism": "Surrealismo",
+ "Modern Art": "Arte moderna",
+ "Fauvism": "Fauvismo",
+ "Futurism": "Futurismo",
+ "Painterly": "Pittorico",
+ "Pointillisme": "Puntinismo",
+ "Abstract": "Astratto",
+ "Pop Art": "Pop Art",
+ "Impressionist": "Impressionista",
+ "Cubism": "Cubismo",
+ "Linocut": "Linoleografia",
+ "Fantasy": "Fantasia",
+ "Colors": "Colori",
+ "Chaotic": "Caotico",
+ "Primary colors": "Colori primari",
+ "Colorful": "Colorato",
+ "Vivid": "Vivido",
+ "Muted colors": "Colori tenui",
+ "Low contrast": "Basso contrasto",
+ "Desaturated": "Desaturato",
+ "Grayscale": "Scala di grigi",
+ "Black and white": "Bianco e nero",
+ "Infrared": "Infrarosso",
+ "Complementary": "Colori complementari",
+ "Non-complementary": "Colori non complementari",
+ "View": "Visuale",
+ "Tilt shift": "Tilt shift",
+ "Wide-angle": "Angolo ampio",
+ "Portrait": "Ritratto",
+ "Macro": "Macro",
+ "Microscopic": "Microscopico",
+ "Isometric": "Isometrico",
+ "Panorama": "Panorama",
+ "Aerial photograph": "Fotografia aerea",
+ "Artist focus (not quite finished, not sure it helps)": "Focus sull'artista (non del tutto finito, non è sicuro che aiuti)",
+ "B/W Photograpy": "Fotografia B/N",
+ "Portrait photo": "Foto ritratto",
+ "Usage: a wearing ": "Utilizzo: a wearing ",
+ "Seeds": "Semi",
+ "Noise Scheduler": "Pianificazione del rumore",
+ "Default": "Predefinito",
+ "Karras": "Karras",
+ "Exponential": "Esponenziale",
+ "Variance Preserving": "Conservazione della Varianza",
+ "Sigma min": "Sigma min",
+ "Sigma max": "Sigma max",
+ "Sigma rho (Karras only)": "Sigma rho (Solo Karras)",
+ "Beta distribution (VP only)": "Distribuzione Beta (Solo CV)",
+ "Beta min (VP only)": "Beta min (Solo CV)",
+ "Epsilon (VP only)": "Epsilon (Solo CV)",
+ "Running in txt2img mode:": "Esecuzione in modalità txt2img:",
+ "Render these video formats:": "Renderizza in questi formati:",
+ "GIF": "GIF",
+ "MP4": "MP4",
+ "WEBM": "WEBM",
+ "Animation Parameters": "Parametri animazione",
+ "Total Animation Length (s)": "Durata totale dell'animazione (s)",
+ "Framerate": "Frequenza dei fotogrammi",
+ "Smoothing_Frames": "Fotogrammi da appianare",
+ "Add_Noise": "Aggiungi rumore",
+ "Noise Strength": "Intensità del rumore",
+ "Initial Parameters": "Parametri iniziali",
+ "Denoising Strength": "Intensità di riduzione del rumore",
+ "Seed_March": "Seed_March",
+ "Zoom Factor (scale/s)": "Fattore di ingrandimento (scala/s)",
+ "X Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse X (pixel/s)",
+ "Y Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse Y (pixel/s)",
+ "Rotation (deg/s)": "Rotazione (gradi/s)",
+ "Prompt Template, applied to each keyframe below": "Modello di prompt, applicato a ciascun fotogramma chiave qui di seguito",
+ "Positive Prompts": "Prompt positivi",
+ "Negative Prompts": "Prompt negativi",
+ "Props, Stamps": "Immagini Clipart da diffondere (prop), o da applicare in post elaborazione e non essere diffuse (stamp).",
+ "Poper_Folder:": "Cartella Immagini Clipart (PNG trasparenti):",
+ "Supported Keyframes:": "Fotogrammi chiave supportati:",
+ "time_s | source | video, images, img2img | path": "time_s | source | video, images, img2img | path",
+ "time_s | prompt | positive_prompts | negative_prompts": "time_s | prompt | positive_prompts | negative_prompts",
+ "time_s | template | positive_prompts | negative_prompts": "time_s | template | positive_prompts | negative_prompts",
+ "time_s | transform | zoom | x_shift | y_shift | rotation": "time_s | transform | zoom | x_shift | y_shift | rotation",
+ "time_s | seed | new_seed_int": "time_s | seed | new_seed_int",
+ "time_s | noise | added_noise_strength": "time_s | noise | added_noise_strength",
+ "time_s | denoise | denoise_value": "time_s | denoise | denoise_value",
+ "time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name": "time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name",
+ "time_s | clear_text | textblock_name": "time_s | clear_text | textblock_name",
+ "time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation": "time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation",
+ "time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation": "time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation",
+ "time_s | clear_stamp | stamp_name": "time_s | clear_stamp | stamp_name",
+ "time_s | col_set": "time_s | col_set",
+ "time_s | col_clear": "time_s | col_clear",
+ "time_s | model | model": "time_s | model | model",
+ "img2img_mode": "Modalità img2img",
+ "Keyframes:": "Fotogrammi chiave:",
+ "Tile X": "Piastrella asse X",
+ "Tile Y": "Piastrella asse Y",
+ "Python code": "Codice Python",
+ "Source embedding to convert": "Incorporamento sorgente da convertire",
+ "Embedding token": "Token Incorporamento",
+ "Output directory": "Cartella di output",
+ "Horizontal symmetry": "Simmetria orizzontale",
+ "Vertical symmetry": "Simmetria verticale",
+ "Alt. symmetry method (blending)": "Metodo di simmetria alternativo (miscelazione)",
+ "Apply every n steps": "Applica ogni n passi",
+ "Skip last n steps": "Salta gli ultimi n passi",
+ "Interpolation prompt": "Prompt di interpolazione",
+ "Number of images": "Numero di immagini",
+ "Make a gif": "Crea GIF",
+ "Duration of images (ms)": "Durata delle immagini (ms)",
+ "Put variable parts at start of prompt": "Inserisce le parti variabili all'inizio del prompt",
+ "Keyframe Format:": "Formato dei fotogrammi chiave:",
+ "Seed | Prompt or just Prompt": "Seme | Prompt o semplicemente Prompt",
+ "Prompt list": "Elenco dei prompt",
+ "Number of images between keyframes": "Numero di immagini tra fotogrammi chiave",
+ "Save results as video": "Salva i risultati come video",
+ "Frames per second": "Fotogrammi al secondo",
+ "Iterate seed every line": "Iterare il seme per ogni riga",
+ "Use same random seed for all lines": "Usa lo stesso seme casuale per tutte le righe",
+ "List of prompt inputs": "Elenco di prompt di input",
+ "Upload prompt inputs": "Carica un file contenente i prompt di input",
+ "n": "Esegui n volte",
+ "Destination seed(s) (Comma separated)": "Seme/i di destinazione (separati da virgola)",
+ "Only use Random seeds (Unless comparing paths)": "Usa solo semi casuali (a meno che non si confrontino i percorsi)",
+ "Number of random seed(s)": "Numero di semi casuali",
+ "Compare paths (Separate travels from 1st seed to each destination)": "Confronta percorsi (transizioni separate dal primo seme a ciascuna destinazione)",
+ "Steps": "Passi",
+ "Loop back to initial seed": "Ritorna al seme iniziale",
+ "Bump seed (If > 0 do a Compare Paths but only one image. No video)": "Modula seme (se > 0 mescola il seme iniziale con quelli di destinazione ma solo un'immagine. Nessun video)",
+ "Show generated images in ui": "Mostra le immagini generate nell'interfaccia utente",
+ "\"Hug the middle\" during interpolation": "\"Hug the middle\" durante l'interpolazione. Rende l'interpolazione un po' più veloce all'inizio e alla fine. A volte può produrre video più fluidi, il più delle volte no.",
+ "Allow the default Euler a Sampling method. (Does not produce good results)": "Consenti Euler_a come metodo di campionamento predefinito. (Non produce buoni risultati)",
+ "Illustration": "Illustrazione",
+ "Logo": "Logo",
+ "Drawing": "Disegno",
+ "Artistic": "Artistico",
+ "Tattoo": "Tatuaggio",
+ "Gothic": "Gotico",
+ "Anime": "Anime",
+ "Cartoon": "Cartoon",
+ "Sticker": "Etichetta",
+ "Gold Pendant": "Ciondolo in oro",
+ "None - prompt only": "Nessuno - solo prompt",
+ "Enable Vectorizing": "Abilita vettorizzazione",
+ "Output format": "Formato di output",
+ "svg": "svg",
+ "pdf": "pdf",
+ "White is Opaque": "Il bianco è opaco",
+ "Cut white margin from input": "Taglia il margine bianco dall'input",
+ "Keep temp images": "Conserva le immagini temporanee",
+ "Threshold": "Soglia",
+ "Transparent PNG": "PNG trasparente",
+ "Noise Tolerance": "Tolleranza al rumore",
+ "Quantize": "Quantizzare",
+ "Dry Run": "Esecuzione a vuoto (Debug)",
+ "NEW!": "NUOVO!",
+ "Premium Fantasy Card Template": "Premium Fantasy Card Template",
+ "is now available.": "è ora disponibile.",
+ "Generate a wide variety of creatures and characters in the style of a fantasy card game. Perfect for heroes, animals, monsters, and even crazy hybrids.": "Genera un'ampia varietà di creature e personaggi nello stile di un gioco di carte fantasy. Perfetto per eroi, animali, mostri e persino ibridi incredibili.",
+ "Learn More ➜": "Per saperne di più ➜",
+ "Purchases help fund the continued development of Unprompted. Thank you for your support!": "Gli acquisti aiutano a finanziare il continuo sviluppo di Unprompted. Grazie per il vostro sostegno!",
+ "X type": "Parametro asse X",
+ "Nothing": "Niente",
+ "Var. seed": "Seme della variazione",
+ "Var. strength": "Forza della variazione",
+ "Prompt S/R": "Cerca e Sostituisci nel Prompt",
+ "Prompt order": "In ordine di prompt",
+ "Sampler": "Campionatore",
+ "Checkpoint name": "Nome del checkpoint",
+ "Hypernetwork": "Iperrete",
+ "Hypernet str.": "Forza della Iperrete",
+ "Sigma Churn": "Sigma Churn",
+ "Sigma noise": "Sigma noise",
+ "Eta": "ETA",
+ "Clip skip": "Salta CLIP",
+ "Denoising": "Riduzione del rumore",
+ "Cond. Image Mask Weight": "Peso maschera immagine condizionale",
+ "X values": "Valori per X",
+ "Y type": "Parametro asse Y",
+ "Y values": "Valori per Y",
+ "Z type": "Parametro asse Z",
+ "Z values": "Valori per Z",
+ "Combinatorial generation": "Generazione combinatoria",
+ "Combinatorial batches": "Lotti combinatori",
+ "Magic prompt": "Prompt magico",
+ "Fixed seed": "Seme fisso",
+ "Combinations": "Combinazioni",
+ "Choose a number of terms from a list, in this case we choose two artists": "Scegli un numero di termini da un elenco, in questo caso scegliamo due artisti",
+ "{{2$artist1|artist2|artist3}}": "{{2$artist1|artist2|artist3}}",
+ "If $ is not provided, then 1$ is assumed.\n\n A range can be provided:": "Se $ non viene fornito, si presume 1$.\n\n È possibile fornire un intervallo di valori:",
+ "{{1-3$artist1|artist2|artist3}}": "{{1-3$artist1|artist2|artist3}}",
+ "In this case, a random number of artists between 1 and 3 is chosen.": "In questo caso viene scelto un numero casuale di artisti compreso tra 1 e 3.",
+ "Wildcards": "Termini jolly",
+ "If the groups wont drop down click": "Se i gruppi non vengono visualizzati, clicca",
+ "here": "qui",
+ "to fix the issue.": "per correggere il problema.",
+ "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards": "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards",
+ "You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in scripts/wildcards.": "Puoi aggiungere termini jolly creando un file di testo con un termine per riga e nominandolo, per esempio, mywildcards.txt. Inseriscilo in scripts/wildcards.",
+ "__/mywildcards__": "__/mywildcards__",
+ "will then become available.": "diverrà quindi disponibile.",
+ "Artist or styles name list. '.txt' files with one name per line": "Elenco nomi di artisti o stili. File '.txt' con un nome per riga",
+ "Prompt words before artist or style name": "Parole chiave prima del nome dell'artista o dello stile",
+ "Prompt words after artist or style name": "Parole chiave dopo il nome dell'artista o dello stile",
+ "Negative Prompt": "Prompt negativo",
+ "Save": "Salva",
+ "Send to img2img": "Invia a img2img",
+ "Send to inpaint": "Invia a Inpaint",
+ "Send to extras": "Invia a Extra",
+ "Make Zip when Save?": "Crea un file ZIP quando si usa 'Salva'",
+ "Textbox": "Casella di testo",
+ "Interrogate\nCLIP": "Interroga\nCLIP",
+ "Interrogate\nDeepBooru": "Interroga\nDeepBooru",
+ "Inpaint": "Inpaint",
+ "Batch img2img": "img2img in lotti",
+ "Image for img2img": "Immagine per img2img",
+ "Drop Image Here": "Trascina l'immagine qui",
+ "Image for inpainting with mask": "Immagine per inpainting con maschera",
+ "Mask": "Maschera",
+ "Mask blur": "Sfocatura maschera",
+ "Mask mode": "Modalità maschera",
+ "Draw mask": "Disegna maschera",
+ "Upload mask": "Carica maschera",
+ "Masking mode": "Modalità mascheratura",
+ "Inpaint masked": "Inpaint mascherato",
+ "Inpaint not masked": "Inpaint non mascherato",
+ "Masked content": "Contenuto mascherato",
+ "fill": "riempi",
+ "original": "originale",
+ "latent noise": "rumore nello spazio latente",
+ "latent nothing": "nulla nello spazio latente",
+ "Inpaint at full resolution": "Inpaint alla massima risoluzione",
+ "Inpaint at full resolution padding, pixels": "Inpaint con riempimento a piena risoluzione, pixel",
+ "Process images in a directory on the same machine where the server is running.": "Elabora le immagini in una cartella sulla stessa macchina su cui è in esecuzione il server.",
+ "Use an empty output directory to save pictures normally instead of writing to the output directory.": "Usa una cartella di output vuota per salvare normalmente le immagini invece di scrivere nella cartella di output.",
+ "Input directory": "Cartella di Input",
+ "Resize mode": "Modalità di ridimensionamento",
+ "Just resize": "Ridimensiona solamente",
+ "Crop and resize": "Ritaglia e ridimensiona",
+ "Resize and fill": "Ridimensiona e riempie",
+ "Advanced loopback": "Advanced loopback",
+ "External Image Masking": "Immagine esterna per la mascheratura",
+ "img2img alternative test": "Test alternativo per img2img",
+ "img2tiles": "img2tiles",
+ "Interpolate": "Interpola immagini",
+ "Loopback": "Rielaborazione ricorsiva",
+ "Loopback and Superimpose": "Rielabora ricorsivamente e sovraimponi",
+ "Alpha Canvas": "Alpha Canvas",
+ "Outpainting mk2": "Outpainting mk2",
+ "Poor man's outpainting": "Poor man's outpainting",
+ "SD upscale": "Ampliamento SD",
+ "txt2mask v0.1.1": "txt2mask v0.1.1",
+ "[C] Video to video": "[C] Video to video",
+ "Videos": "Filmati",
+ "Deforum-webui (use tab extension instead!)": "Deforum-webui (usa piuttosto la scheda Deforum delle estensioni!)",
+ "Use first image colors (custom color correction)": "Usa i colori della prima immagine (correzione del colore personalizzata)",
+ "Denoising strength change factor (overridden if proportional used)": "Fattore di variazione dell'intensità di riduzione del rumore (sovrascritto se si usa proporzionale)",
+ "Zoom level": "Livello di Zoom",
+ "Direction X": "Direzione X",
+ "Direction Y": "Direzione Y",
+ "Denoising strength start": "Intensità di riduzione del rumore - Inizio",
+ "Denoising strength end": "Intensità di riduzione del rumore - Fine",
+ "Denoising strength proportional change starting value": "Intensità di riduzione del rumore - Valore iniziale della variazione proporzionale",
+ "Denoising strength proportional change ending value (0.1 = disabled)": "Intensità di riduzione del rumore - Valore finale della variazione proporzionale (0.1 = disabilitato)",
+ "Saturation enhancement per image": "Miglioramento della saturazione per ciascuna immagine",
+ "Use sine denoising strength variation": "Utilizzare la variazione sinusoidale dell'intensità di riduzione del rumore",
+ "Phase difference": "Differenza di Fase",
+ "Denoising strength exponentiation": "Esponenziazione dell'intensità di riduzione del rumore",
+ "Use sine zoom variation": "Usa la variazione sinusoidale dello zoom",
+ "Zoom exponentiation": "Esponeniazione dello Zoom",
+ "Use multiple prompts": "Usa prompt multipli",
+ "Same seed per prompt": "Stesso seme per ogni prompt",
+ "Same seed for everything": "Stesso seme per tutto",
+ "Original init image for everything": "Immagine originale di inizializzazione per tutto",
+ "Multiple prompts : 1 line positive, 1 line negative, leave a blank line for no negative": "Prompt multipli: 1 riga positivo, 1 riga negativo, lasciare una riga vuota per nessun negativo",
+ "Running in img2img mode:": "Esecuzione in modalità img2img:",
+ "Masking preview size": "Dimensione dell'anteprima della mascheratura",
+ "Draw new mask on every run": "Disegna una nuova maschera ad ogni esecuzione",
+ "Process non-contigious masks separately": "Elaborare le maschere non contigue separatamente",
+ "should be 2 or lower.": "dovrebbe essere 2 o inferiore.",
+ "Override `Sampling method` to Euler?(this method is built for it)": "Sovrascrivi il 'Metodo di campionamento' con Eulero? (questo metodo è stato creato per questo)",
+ "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Sovrascrivi `prompt` con lo stesso valore del `prompt originale`? (e `prompt negativo`)",
+ "Original prompt": "Prompt originale",
+ "Original negative prompt": "Prompt negativo originale",
+ "Override `Sampling Steps` to the same val due as `Decode steps`?": "Sovrascrivere 'Passi di campionamento' allo stesso valore di 'Passi di decodifica'?",
+ "Decode steps": "Passi di decodifica",
+ "Override `Denoising strength` to 1?": "Sostituisci 'Forza di denoising' a 1?",
+ "Decode CFG scale": "Scala CFG di decodifica",
+ "Randomness": "Casualità",
+ "Sigma adjustment for finding noise for image": "Regolazione Sigma per trovare il rumore per l'immagine",
+ "Tile size": "Dimensione piastrella",
+ "Tile overlap": "Sovrapposizione piastrella",
+ "New seed for each tile": "Nuovo seme per ogni piastrella",
+ "alternate img2img imgage": "Immagine alternativa per img2img",
+ "interpolation values": "Valori di interpolazione",
+ "Refinement loops": "Cicli di affinamento",
+ "Loopback alpha": "Trasparenza rielaborazione ricorsiva",
+ "Border alpha": "Trasparenza del bordo",
+ "Blending strides": "Passi di fusione",
+ "Reuse Seed": "Riusa il seme",
+ "One grid": "Singola griglia",
+ "Interpolate VarSeed": "Interpola il seme della variazione",
+ "Paste on mask": "Incolla sulla maschera",
+ "Inpaint all": "Inpaint tutto",
+ "Interpolate in latent": "Interpola nello spazio latente",
+ "Denoising strength change factor": "Fattore di variazione dell'intensità di denoising",
+ "Superimpose alpha": "Sovrapporre Alpha",
+ "Show extra settings": "Mostra impostazioni aggiuntive",
+ "Reuse seed": "Riusa il seme",
+ "CFG decay factor": "Fattore di decadimento CFG",
+ "CFG target": "CFG di destinazione",
+ "Show/Hide AlphaCanvas": "Mostra/Nascondi AlphaCanvas",
+ "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Impostazioni consigliate: Passi di campionamento: 80-100, Campionatore: Euler a, Intensità denoising: 0.8",
+ "Pixels to expand": "Pixel da espandere",
+ "Outpainting direction": "Direzione di Outpainting",
+ "left": "sinistra",
+ "right": "destra",
+ "up": "sopra",
+ "down": "sotto",
+ "Fall-off exponent (lower=higher detail)": "Esponente di decremento (più basso=maggior dettaglio)",
+ "Color variation": "Variazione di colore",
+ "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Aumenterà l'immagine al doppio delle dimensioni; utilizzare i cursori di larghezza e altezza per impostare la dimensione della piastrella",
+ "Upscaler": "Ampliamento immagine",
+ "Lanczos": "Lanczos",
+ "Nearest": "Nearest",
+ "LDSR": "LDSR",
+ "BSRGAN": "BSRGAN",
+ "ScuNET GAN": "ScuNET GAN",
+ "ScuNET PSNR": "ScuNET PSNR",
+ "SwinIR 4x": "SwinIR 4x",
+ "Mask prompt": "Prompt maschera",
+ "Negative mask prompt": "Prompt maschera negativa",
+ "Mask precision": "Precisione della maschera",
+ "Mask padding": "Estendi i bordi della maschera",
+ "Brush mask mode": "Modalità pennello maschera",
+ "discard": "Scarta",
+ "add": "Aggiungi",
+ "subtract": "Sottrai",
+ "Show mask in output?": "Mostra maschera in uscita?",
+ "If you like my work, please consider showing your support on": "Se ti piace il mio lavoro, per favore considera di mostrare il tuo supporto su ",
+ "Patreon": "Patreon",
+ "Input file path": "Percorso file di input",
+ "CRF (quality, less is better, x264 param)": "CRF (qualità, meno è meglio, x264 param)",
+ "FPS": "FPS",
+ "Seed step size": "Ampiezza del gradiente del seme",
+ "Seed max distance": "Distanza massima del seme",
+ "Start time": "Orario di inizio",
+ "End time": "Orario di fine",
+ "End Prompt Blend Trigger Percent": "Percentuale di innesco del mix col prompt finale",
+ "Prompt end": "Prompt finale",
+ "Smooth video": "Rendi il filmato fluido",
+ "Seconds": "Secondi",
+ "Zoom": "Zoom",
+ "Rotate": "Ruota",
+ "Degrees": "Gradi",
+ "Is the Image Tiled?": "L'immagine è piastrellata?",
+ "TranslateX": "Traslazione X",
+ "Left": "Sinistra",
+ "PercentX": "Percentuale X",
+ "TranslateY": "Traslazione Y",
+ "Up": "Sopra",
+ "PercentY": "Percentuale Y",
+ "Show generated pictures in ui": "Mostra le immagini generate nell'interfaccia utente",
+ "Deforum v0.5-webui-beta": "Deforum v0.5-webui-beta",
+ "This script is deprecated. Please use the full Deforum extension instead.": "Questo script è obsoleto. Utilizzare invece l'estensione Deforum completa.",
+ "Update instructions:": "Istruzioni per l'aggiornamento:",
+ "github.com/deforum-art/deforum-for-automatic1111-webui/blob/automatic1111-webui/README.md": "github.com/deforum-art/deforum-for-automatic1111-webui/blob/automatic1111-webui/README.md",
+ "discord.gg/deforum": "discord.gg/deforum",
+ "Single Image": "Singola immagine",
+ "Batch Process": "Elaborare a lotti",
+ "Batch from Directory": "Lotto da cartella",
+ "Source": "Sorgente",
+ "Show result images": "Mostra le immagini dei risultati",
+ "Scale by": "Scala di",
+ "Scale to": "Scala a",
+ "Resize": "Ridimensiona",
+ "Crop to fit": "Ritaglia per adattare",
+ "Upscaler 2 visibility": "Visibilità Ampliamento immagine 2",
+ "GFPGAN visibility": "Visibilità GFPGAN",
+ "CodeFormer visibility": "Visibilità CodeFormer",
+ "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso di CodeFormer (0 = effetto massimo, 1 = effetto minimo)",
+ "Upscale Before Restoring Faces": "Amplia prima di restaurare i volti",
+ "Send to txt2img": "Invia a txt2img",
+ "A merger of the two checkpoints will be generated in your": "I due checkpoint verranno fusi nella cartella dei",
+ "checkpoint": "checkpoint",
+ "directory.": ".",
+ "Primary model (A)": "Modello Primario (A)",
+ "Secondary model (B)": "Modello Secondario (B)",
+ "Tertiary model (C)": "Modello Terziario (C)",
+ "Custom Name (Optional)": "Nome personalizzato (facoltativo)",
+ "Multiplier (M) - set to 0 to get model A": "Moltiplicatore (M): impostare a 0 per ottenere il modello A",
+ "Interpolation Method": "Metodo di interpolazione",
+ "Weighted sum": "Somma pesata",
+ "Add difference": "Aggiungi differenza",
+ "Save as float16": "Salva come float16",
+ "See": "Consulta la ",
+ "wiki": "wiki",
+ "for detailed explanation.": " per una spiegazione dettagliata.",
+ "Create embedding": "Crea Incorporamento",
+ "Create hypernetwork": "Crea Iperrete",
+ "Preprocess images": "Preprocessa le immagini",
+ "Name": "Nome",
+ "Initialization text": "Testo di inizializzazione",
+ "Number of vectors per token": "Numero di vettori per token",
+ "Overwrite Old Embedding": "Sovrascrivi il vecchio incorporamento",
+ "Modules": "Moduli",
+ "Enter hypernetwork layer structure": "Immettere la struttura del livello della Iperrete",
+ "Select activation function of hypernetwork": "Selezionare la funzione di attivazione della Iperrete",
+ "linear": "lineare",
+ "relu": "relu",
+ "leakyrelu": "leakyrelu",
+ "elu": "elu",
+ "swish": "swish",
+ "tanh": "tanh",
+ "sigmoid": "sigmoid",
+ "celu": "celu",
+ "gelu": "gelu",
+ "glu": "glu",
+ "hardshrink": "hardshrink",
+ "hardsigmoid": "hardsigmoid",
+ "hardtanh": "hardtanh",
+ "logsigmoid": "logsigmoid",
+ "logsoftmax": "logsoftmax",
+ "mish": "mish",
+ "prelu": "prelu",
+ "rrelu": "rrelu",
+ "relu6": "relu6",
+ "selu": "selu",
+ "silu": "silu",
+ "softmax": "softmax",
+ "softmax2d": "softmax2d",
+ "softmin": "softmin",
+ "softplus": "softplus",
+ "softshrink": "softshrink",
+ "softsign": "softsign",
+ "tanhshrink": "tanhshrink",
+ "threshold": "soglia",
+ "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleziona inizializzazione dei pesi dei livelli. relu-like - Kaiming, Si consiglia sigmoid-like - Xavier",
+ "Normal": "Normale",
+ "KaimingUniform": "KaimingUniform",
+ "KaimingNormal": "KaimingNormal",
+ "XavierUniform": "XavierUniform",
+ "XavierNormal": "XavierNormal",
+ "Add layer normalization": "Aggiunge la normalizzazione del livello",
+ "Use dropout": "Usa Dropout",
+ "Overwrite Old Hypernetwork": "Sovrascrive la vecchia Iperrete",
+ "Source directory": "Cartella sorgente",
+ "Destination directory": "Cartella di destinazione",
+ "Existing Caption txt Action": "Azione sul testo della didascalia esistente",
+ "ignore": "ignora",
+ "copy": "copia",
+ "prepend": "anteporre",
+ "append": "appendere",
+ "Create flipped copies": "Crea copie specchiate",
+ "Split oversized images": "Dividi immagini di grandi dimensioni",
+ "Auto focal point crop": "Ritaglio automatico al punto focale",
+ "Use BLIP for caption": "Usa BLIP per la didascalia",
+ "Use deepbooru for caption": "Usa deepbooru per la didascalia",
+ "Split image threshold": "Soglia di divisione dell'immagine",
+ "Split image overlap ratio": "Rapporto di sovrapposizione dell'immagine",
+ "Focal point face weight": "Peso della faccia del punto focale",
+ "Focal point entropy weight": "Peso dell'entropia del punto focale",
+ "Focal point edges weight": "Peso dei bordi del punto focale",
+ "Create debug image": "Crea immagine di debug",
+ "Preprocess": "Preprocessa",
+ "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Addestra un Incorporamento o Iperrete; è necessario specificare una directory con un set di immagini con rapporto 1:1",
+ "[wiki]": "[wiki]",
+ "Embedding": "Incorporamento",
+ "Embedding Learning rate": "Tasso di apprendimento Incorporamento",
+ "Hypernetwork Learning rate": "Tasso di apprendimento Iperrete",
+ "Dataset directory": "Cartella del Dataset",
+ "Log directory": "Cartella del registro",
+ "Prompt template file": "File modello prompt",
+ "Max steps": "Passi massimi",
+ "Save an image to log directory every N steps, 0 to disable": "Salva un'immagine nella cartella del registro ogni N passaggi, 0 per disabilitare",
+ "Save a copy of embedding to log directory every N steps, 0 to disable": "Salva una copia dell'incorporamento nella cartella del registro ogni N passaggi, 0 per disabilitare",
+ "Save images with embedding in PNG chunks": "Salva le immagini con l'incorporamento in blocchi PNG",
+ "Read parameters (prompt, etc...) from txt2img tab when making previews": "Legge i parametri (prompt, ecc...) dalla scheda txt2img durante la creazione delle anteprime",
+ "Train Hypernetwork": "Addestra Iperrete",
+ "Train Embedding": "Addestra Incorporamento",
+ "Create an aesthetic embedding out of any number of images": "Crea un'incorporamento estetico da qualsiasi numero di immagini",
+ "Create images embedding": "Crea incorporamento di immagini",
+ "-1": "-1",
+ "This extension works well with text captions in comma-separated style (such as the tags generated by DeepBooru interrogator).": "Questa estensione funziona bene con i sottotitoli di testo in stile separato da virgole (come i tag generati dall'interrogatore DeepBooru).",
+ "Save all changes": "Salva tutte le modifiche",
+ "Backup original text file (original file will be renamed like filename.000, .001, .002, ...)": "Backup del file di testo originale (il file originale verrà rinominato come nomefile.000, .001, .002, ...)",
+ "Note:": "Note:",
+ "New text file will be created if you are using filename as captions.": "Verrà creato un nuovo file di testo se si utilizza il nome del file come didascalia.",
+ "Results": "Risultati",
+ "Load": "Carica",
+ "Dataset Images": "Immagini del Dataset",
+ "Filter and Edit Tags": "Filtra e modifica i tag",
+ "Edit Caption of Selected Image": "Modifica la didascalia dell'immagine selezionata",
+ "Search tags / Filter images by tags": "Cerca tag / Filtra le immagini per tag",
+ "Search Tags": "Cerca tag",
+ "Clear all filters": "Rimuovi tutti i filtri",
+ "Sort by": "Ordina per",
+ "Alphabetical Order": "Ordine alfabetico",
+ "Frequency": "Frequenza",
+ "Sort Order": "Ordinamento",
+ "Ascending": "Ascendente",
+ "Descending": "Discendente",
+ "Filter Images by Tags": "Filtra le immagini per tag",
+ "Edit tags in filtered images": "Modifica i tag nelle immagini filtrate",
+ "Selected Tags": "Tag selezionati",
+ "Edit Tags": "Modificare i tag",
+ "Apply changes to filtered images": "Applica le modifiche alle immagini filtrate",
+ "Append additional tags to the beginning": "Aggiungi tag addizionali all'inizio",
+ "1. The selected tags are displayed in comma separated style.": "1. I tag selezionati vengono visualizzati in uno stile separato da virgole.",
+ "2. When changes are applied, all tags in each displayed images are replaced.": "2. Quando vengono applicate le modifiche, tutti i tag in ciascuna immagine visualizzata vengono sostituiti.",
+ "3. If you change some tags into blank, they will be erased.": "3. Se modifichi alcuni tag con uno spazio vuoto, verranno cancellati.",
+ "4. If you add some tags to the end, they will be appended to the end/beginning of the text file.": "4. Se aggiungi dei tag alla fine, questi verranno aggiunti alla fine/inizio del file di testo.",
+ "5. Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "5. Le modifiche non vengono applicate ai file di testo finché non viene premuto il pulsante \"Salva tutte le modifiche\"..",
+ "ex A.": "esempio A.",
+ "Original Text = \"A, A, B, C\" Selected Tags = \"B, A\" Edit Tags = \"X, Y\"": "Testo originale = \"A, A, B, C\" Tag selezionati = \"B, A\" Modifica tag = \"X, Y\"",
+ "Result = \"Y, Y, X, C\" (B->X, A->Y)": "Risultato = \"Y, Y, X, C\" (B->X, A->Y)",
+ "ex B.": "esempio B.",
+ "Original Text = \"A, B, C\" Selected Tags = \"(nothing)\" Edit Tags = \"X, Y\"": "Testo originale = \"A, B, C\" Tag selezionati = \"(nothing)\" Modifica tag = \"X, Y\"",
+ "Result = \"A, B, C, X, Y\" (add X and Y to the end (default))": "Risultato = \"A, B, C, X, Y\" (aggiunge X e Y alla fine (predefinito))",
+ "Result = \"X, Y, A, B, C\" (add X and Y to the beginning (\"Append additional tags to the beginning\" checked))": "Risultato = \"X, Y, A, B, C\" (aggiunge X e Y all'inizio (\"Aggiungi tag addizionali all'inizio\" selezionato))",
+ "ex C.": "esempio C.",
+ "Original Text = \"A, B, C, D, E\" Selected Tags = \"A, B, D\" Edit Tags = \", X, \"": "Testo originale = \"A, B, C, D, E\" Tag selezionati = \"A, B, D\" Modifica tag = \", X, \"",
+ "Result = \"X, C, E\" (A->\"\", B->X, D->\"\")": "Risultato = \"X, C, E\" (A->\"\", B->X, D->\"\")",
+ "Caption of Selected Image": "Didascalia dell'immagine selezionata",
+ "Copy caption": "Copia didascalia",
+ "Edit Caption": "Modifica didascalia",
+ "Apply changes to selected image": "Applica le modifiche all'immagine selezionata",
+ "Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "Le modifiche non vengono applicate ai file di testo finché non viene premuto il pulsante \"Salva tutte le modifiche\".",
+ "Info and links": "Info e link",
+ "Made by deforum.github.io, port for AUTOMATIC1111's webui maintained by kabachuha": "Realizzato da deforum.github.io, port per l'interfaccia web di AUTOMATIC1111 manutenuto da kabachuha",
+ "Original Deforum Github repo github.com/deforum/stable-diffusion": "Repository Github originale di Deforum github.com/deforum/stable-diffusion",
+ "This fork for auto1111's webui github.com/deforum-art/deforum-for-automatic1111-webui": "Questo fork è per l'interfaccia web di AUTOMATIC1111 github.com/deforum-art/deforum-for-automatic1111-webui",
+ "Join the official Deforum Discord discord.gg/deforum to share your creations and suggestions": "Unisciti al canale Discord ufficiale di Deforum discord.gg/deforum per condividere le tue creazioni e suggerimenti",
+ "User guide for v0.5 docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit": "Manuale d'uso per la versione 0.5 docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit",
+ "Math keyframing explanation docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing": "Spiegazione della matematica dei fotogrammi chiave docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing",
+ "Keyframes": "Fotogrammi chiave",
+ "Prompts": "Prompt",
+ "Init": "Inizializzare",
+ "Video output": "Uscita video",
+ "Run settings": "Esegui le impostazioni",
+ "Import settings from file": "Importa impostazioni da file",
+ "Override settings": "Sostituisci le impostazioni",
+ "Custom settings file": "File delle impostazioni personalizzate",
+ "Sampling settings": "Impostazioni di campionamento",
+ "override_these_with_webui": "Sovrascrivi con Web UI",
+ "W": "L",
+ "H": "A",
+ "seed": "Seme",
+ "sampler": "Campionatore",
+ "Enable extras": "Abilita 'Extra'",
+ "subseed": "Sub seme",
+ "subseed_strength": "Intensità subseme",
+ "steps": "Passi",
+ "ddim_eta": "ETA DDIM",
+ "n_batch": "Numero lotto",
+ "make_grid": "Crea griglia",
+ "grid_rows": "Righe griglia",
+ "save_settings": "Salva impostazioni",
+ "save_samples": "Salva i campioni",
+ "display_samples": "Mostra i campioni",
+ "save_sample_per_step": "Salva campioni per passo",
+ "show_sample_per_step": "Mostra campioni per passo",
+ "Batch settings": "Impostazioni lotto",
+ "batch_name": "Nome del lotto",
+ "filename_format": "Formato nome del file",
+ "seed_behavior": "Comportamento seme",
+ "iter": "Iterativo",
+ "fixed": "Fisso",
+ "random": "Casuale",
+ "schedule": "Pianificato",
+ "Animation settings": "Impostazioni animazione",
+ "animation_mode": "Modalità animazione",
+ "2D": "2D",
+ "3D": "3D",
+ "Video Input": "Ingresso video",
+ "max_frames": "Fotogrammi max",
+ "border": "Bordo",
+ "replicate": "Replica",
+ "wrap": "Impacchetta",
+ "Motion parameters:": "Parametri di movimento:",
+ "2D and 3D settings": "Impostazioni 2D e 3D",
+ "angle": "Angolo",
+ "zoom": "Zoom",
+ "translation_x": "Traslazione X",
+ "translation_y": "Traslazione Y",
+ "3D settings": "Impostazioni 3D",
+ "translation_z": "Traslazione Z",
+ "rotation_3d_x": "Rotazione 3D X",
+ "rotation_3d_y": "Rotazione 3D Y",
+ "rotation_3d_z": "Rotazione 3D Z",
+ "Prespective flip — Low VRAM pseudo-3D mode:": "Inversione prospettica: modalità pseudo-3D a bassa VRAM:",
+ "flip_2d_perspective": "Inverti prospettiva 2D",
+ "perspective_flip_theta": "Inverti prospettiva theta",
+ "perspective_flip_phi": "Inverti prospettiva phi",
+ "perspective_flip_gamma": "Inverti prospettiva gamma",
+ "perspective_flip_fv": "Inverti prospettiva fv",
+ "Generation settings:": "Impostazioni di generazione:",
+ "noise_schedule": "Pianificazione del rumore",
+ "strength_schedule": "Intensità della pianificazione",
+ "contrast_schedule": "Contrasto della pianificazione",
+ "cfg_scale_schedule": "Pianificazione della scala CFG",
+ "3D Fov settings:": "Impostazioni del campo visivo 3D:",
+ "fov_schedule": "Pianificazione del campo visivo",
+ "near_schedule": "Pianificazione da vicino",
+ "far_schedule": "Pianificazione da lontano",
+ "To enable seed schedule select seed behavior — 'schedule'": "Per abilitare la pianificazione del seme, seleziona il comportamento del seme — 'pianifica'",
+ "seed_schedule": "Pianificazione del seme",
+ "Coherence:": "Coerenza:",
+ "color_coherence": "Coerenza del colore",
+ "Match Frame 0 HSV": "Uguaglia HSV del fotogramma 0",
+ "Match Frame 0 LAB": "Uguaglia LAB del fotogramma 0",
+ "Match Frame 0 RGB": "Uguaglia RGB del fotogramma 0",
+ "diffusion_cadence": "Cadenza di diffusione",
+ "3D Depth Warping:": "Deformazione della profondità 3D:",
+ "use_depth_warping": "Usa la deformazione della profondità",
+ "midas_weight": "Peso MIDAS",
+ "near_plane": "Piano vicino",
+ "far_plane": "Piano lontano",
+ "fov": "Campo visivo",
+ "padding_mode": "Modalità di riempimento",
+ "reflection": "Rifletti",
+ "zeros": "Zeri",
+ "sampling_mode": "Modalità di campionamento",
+ "bicubic": "bicubic",
+ "bilinear": "bilinear",
+ "nearest": "nearest",
+ "save_depth_maps": "Salva le mappe di profondità",
+ "`animation_mode: None` batches on list of *prompts*. (Batch mode disabled atm, only animation_prompts are working)": "`modalità animazione: Nessuno` si inserisce nell'elenco di *prompt*. (Modalità batch disabilitata atm, funzionano solo i prompt di animazione)",
+ "*Important change from vanilla Deforum!*": "*Importante cambiamento rispetto alla versione originale di Deforum!*",
+ "This script uses the built-in webui weighting settings.": "Questo script utilizza le impostazioni di pesatura webui integrate.",
+ "So if you want to use math functions as prompt weights,": "Quindi, se vuoi usare le funzioni matematiche come pesi dei prompt,",
+ "keep the values above zero in both parts": "mantenere i valori sopra lo zero in entrambe le parti",
+ "Negative prompt part can be specified with --neg": "La parte negativa del prompt può essere specificata con --neg",
+ "batch_prompts (disabled atm)": "Prompt in lotti (al momento è disabilitato)",
+ "animation_prompts": "Prompt animazione",
+ "Init settings": "Impostazioni iniziali",
+ "use_init": "Usa le impostazioni iniziali",
+ "from_img2img_instead_of_link": "da img2img invece che da link",
+ "strength_0_no_init": "Intensità 0 nessuna inizializzazione",
+ "strength": "Intensità",
+ "init_image": "Immagine di inizializzazione",
+ "use_mask": "Usa maschera",
+ "use_alpha_as_mask": "Usa alpha come maschera",
+ "invert_mask": "Inverti la maschera",
+ "overlay_mask": "Sovrapponi la maschera",
+ "mask_file": "File della maschera",
+ "mask_brightness_adjust": "Regola la luminosità della maschera",
+ "mask_overlay_blur": "Sfocatura della sovrapposizione della maschera",
+ "Video Input:": "Ingresso video:",
+ "video_init_path": "Percorso del video di inizializzazione",
+ "extract_nth_frame": "Estrai ogni ennesimo fotogramma",
+ "overwrite_extracted_frames": "Sovrascrivi i fotogrammi estratti",
+ "use_mask_video": "Usa maschera video",
+ "video_mask_path": "Percorso della maschera video",
+ "Interpolation (turned off atm)": "Interpolazione (attualmente spento)",
+ "interpolate_key_frames": "Interpola fotogrammi chiave",
+ "interpolate_x_frames": "Interpola x fotogrammi",
+ "Resume animation:": "Riprendi l'animazione:",
+ "resume_from_timestring": "Riprendi da stringa temporale",
+ "resume_timestring": "Stringa temporale",
+ "Video output settings": "Impostazioni uscita video",
+ "skip_video_for_run_all": "Salta il video per eseguire tutto",
+ "fps": "FPS",
+ "output_format": "Formato di uscita",
+ "PIL gif": "PIL GIF",
+ "FFMPEG mp4": "FFMPEG MP4",
+ "ffmpeg_location": "Percorso ffmpeg",
+ "add_soundtrack": "Aggiungi colonna sonora",
+ "soundtrack_path": "Percorso colonna sonora",
+ "use_manual_settings": "Usa impostazioni manuali",
+ "render_steps": "Passi di renderizzazione",
+ "max_video_frames": "Numero max fotogrammi video",
+ "path_name_modifier": "Modificatore del nome del percorso",
+ "x0_pred": "x0_pred",
+ "x": "x",
+ "image_path": "Percorso immagine",
+ "mp4_path": "Percorso MP4",
+ "Click here after the generation to show the video": "Clicca qui dopo la generazione per mostrare il video",
+ "NOTE: If the 'Generate' button doesn't work, go in Settings and click 'Restart Gradio and Refresh...'.": "NOTA: se il pulsante 'Genera' non funziona, vai in Impostazioni e fai clic su 'Riavvia Gradio e Aggiorna...'.",
+ "Save Settings": "Salva le impostazioni",
+ "Load Settings": "Carica le impostazioni",
+ "Path relative to the webui folder." : "Percorso relativo alla cartella webui.",
+ "Save Video Settings": "Salva impostazioni video",
+ "Load Video Settings": "Carica impostazioni video",
+ "dog": "cane",
+ "house": "casa",
+ "portrait": "ritratto",
+ "spaceship": "nave spaziale",
+ "anime": "anime",
+ "cartoon": "cartoon",
+ "digipa-high-impact": "digipa-high-impact",
+ "digipa-med-impact": "digipa-med-impact",
+ "digipa-low-impact": "digipa-low-impact",
+ "fareast": "estremo oriente",
+ "fineart": "fineart",
+ "scribbles": "scarabocchi",
+ "special": "special",
+ "ukioe": "ukioe",
+ "weird": "strano",
+ "black-white": "bianco e nero",
+ "nudity": "nudità",
+ "c": "c",
+ "Get Images": "Ottieni immagini",
+ "dog-anime": "dog-anime",
+ "dog-cartoon": "dog-cartoon",
+ "dog-digipa-high-impact": "dog-digipa-high-impact",
+ "dog-digipa-med-impact": "dog-digipa-med-impact",
+ "dog-digipa-low-impact": "dog-digipa-low-impact",
+ "dog-fareast": "dog-fareast",
+ "dog-fineart": "dog-fineart",
+ "dog-scribbles": "dog-scribbles",
+ "dog-special": "dog-special",
+ "dog-ukioe": "dog-ukioe",
+ "dog-weird": "dog-weird",
+ "dog-black-white": "dog-black-white",
+ "dog-nudity": "dog-nudity",
+ "dog-c": "dog-c",
+ "dog-n": "dog-n",
+ "house-anime": "house-anime",
+ "house-cartoon": "house-cartoon",
+ "house-digipa-high-impact": "house-digipa-high-impact",
+ "house-digipa-med-impact": "house-digipa-med-impact",
+ "house-digipa-low-impact": "house-digipa-low-impact",
+ "house-fareast": "house-fareast",
+ "house-fineart": "house-fineart",
+ "house-scribbles": "house-scribbles",
+ "house-special": "house-special",
+ "house-ukioe": "house-ukioe",
+ "house-weird": "house-weird",
+ "house-black-white": "house-black-white",
+ "house-nudity": "house-nudity",
+ "house-c": "house-c",
+ "house-n": "house-n",
+ "portrait-anime": "portrait-anime",
+ "portrait-cartoon": "portrait-cartoon",
+ "portrait-digipa-high-impact": "portrait-digipa-high-impact",
+ "portrait-digipa-med-impact": "portrait-digipa-med-impact",
+ "portrait-digipa-low-impact": "portrait-digipa-low-impact",
+ "portrait-fareast": "portrait-fareast",
+ "portrait-fineart": "portrait-fineart",
+ "portrait-scribbles": "portrait-scribbles",
+ "portrait-special": "portrait-special",
+ "portrait-ukioe": "portrait-ukioe",
+ "portrait-weird": "portrait-weird",
+ "portrait-black-white": "portrait-black-white",
+ "portrait-nudity": "portrait-nudity",
+ "portrait-c": "portrait-c",
+ "portrait-n": "portrait-n",
+ "spaceship-anime": "spaceship-anime",
+ "spaceship-cartoon": "spaceship-cartoon",
+ "spaceship-digipa-high-impact": "spaceship-digipa-high-impact",
+ "spaceship-digipa-med-impact": "spaceship-digipa-med-impact",
+ "spaceship-digipa-low-impact": "spaceship-digipa-low-impact",
+ "spaceship-fareast": "spaceship-fareast",
+ "spaceship-fineart": "spaceship-fineart",
+ "spaceship-scribbles": "spaceship-scribbles",
+ "spaceship-special": "spaceship-special",
+ "spaceship-ukioe": "spaceship-ukioe",
+ "spaceship-weird": "spaceship-weird",
+ "spaceship-black-white": "spaceship-black-white",
+ "spaceship-nudity": "spaceship-nudity",
+ "spaceship-c": "spaceship-c",
+ "spaceship-n": "spaceship-n",
+ "artists to study extension by camenduru |": "Estensione 'Artisti per studiare' a cura di camenduru |",
+ "github": "Github",
+ "|": "|",
+ "twitter": "Twitter",
+ "youtube": "Youtube",
+ "hi-res images": "Immagini in alta risoluzione",
+ "All images generated with CompVis/stable-diffusion-v1-4 +": "Tutte le immagini sono state generate con CompVis/stable-diffusion-v1-4 +",
+ "artists.csv": "artists.csv",
+ "| License: Attribution 4.0 International (CC BY 4.0)": "| Licenza: Attribution 4.0 International (CC BY 4.0)",
+ "Favorites": "Preferiti",
+ "Others": "Altre immagini",
+ "Images directory": "Cartella immagini",
+ "Dropdown": "Elenco cartelle",
+ "First Page": "Prima pagina",
+ "Prev Page": "Pagina precedente",
+ "Page Index": "Indice pagina",
+ "Next Page": "Pagina successiva",
+ "End Page": "Ultima pagina",
+ "delete next": "Cancella successivo",
+ "Delete": "Elimina",
+ "sort by": "Ordina per",
+ "path name": "Nome percorso",
+ "date": "Data",
+ "keyword": "Parola chiave",
+ "Generate Info": "Genera Info",
+ "File Name": "Nome del file",
+ "Move to favorites": "Aggiungi ai preferiti",
+ "Renew Page": "Aggiorna la pagina",
+ "Number": "Numero",
+ "set_index": "Imposta indice",
+ "load_switch": "Carica",
+ "turn_page_switch": "Volta pagina",
+ "Checkbox": "Casella di controllo",
+ "Checkbox Group": "Seleziona immagini per",
+ "artists": "Artisti",
+ "flavors": "Stili",
+ "mediums": "Tecniche",
+ "movements": "Movimenti artistici",
+ "All": "Tutto",
+ "Exclude abandoned": "Escludi scartati",
+ "Abandoned": "Scartati",
+ "Key word": "Parola chiave",
+ "Get inspiration": "Ispirami",
+ "to txt2img": "Invia a txt2img",
+ "to img2img": "Invia a img2img",
+ "Collect": "Salva nei preferiti",
+ "Don't show again": "Scarta",
+ "Move out": "Rimuovi",
+ "set button": "Pulsante imposta",
+ "Apply settings": "Applica le impostazioni",
+ "Saving images/grids": "Salva immagini/griglie",
+ "Always save all generated images": "Salva sempre tutte le immagini generate",
+ "File format for images": "Formato del file delle immagini",
+ "Images filename pattern": "Modello del nome dei file immagine",
+ "Add number to filename when saving": "Aggiungi un numero al nome del file al salvataggio",
+ "Always save all generated image grids": "Salva sempre tutte le griglie di immagini generate",
+ "File format for grids": "Formato del file per le griglie",
+ "Add extended info (seed, prompt) to filename when saving grid": "Aggiungi informazioni estese (seme, prompt) al nome del file durante il salvataggio della griglia",
+ "Do not save grids consisting of one picture": "Non salvare le griglie composte da una sola immagine",
+ "Prevent empty spots in grid (when set to autodetect)": "Previeni spazi vuoti nella griglia (se impostato su rilevamento automatico)",
+ "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Numero di righe della griglia; utilizzare -1 per il rilevamento automatico e 0 per essere uguale alla dimensione del lotto",
+ "Save text information about generation parameters as chunks to png files": "Salva le informazioni di testo dei parametri di generazione come blocchi nel file png",
+ "Create a text file next to every image with generation parameters.": "Crea un file di testo assieme a ogni immagine con i parametri di generazione.",
+ "Save a copy of image before doing face restoration.": "Salva una copia dell'immagine prima di eseguire il restauro dei volti.",
+ "Save a copy of image before applying highres fix.": "Salva una copia dell'immagine prima di applicare la correzione ad alta risoluzione.",
+ "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la correzione del colore ai risultati di img2img",
+ "Quality for saved jpeg images": "Qualità delle immagini salvate in formato JPEG",
+ "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Se l'immagine PNG è più grande di 4 MB o qualsiasi dimensione è maggiore di 4000, ridimensiona e salva la copia come JPG",
+ "Use original name for output filename during batch process in extras tab": "Usa il nome originale per il nome del file di output durante l'elaborazione a lotti nella scheda 'Extra'",
+ "When using 'Save' button, only save a single selected image": "Usando il pulsante 'Salva', verrà salvata solo la singola immagine selezionata",
+ "Do not add watermark to images": "Non aggiungere la filigrana alle immagini",
+ "Paths for saving": "Percorsi di salvataggio",
+ "Output directory for images; if empty, defaults to three directories below": "Cartella di output per le immagini; se vuoto, per impostazione predefinita verranno usate le cartelle seguenti",
+ "Output directory for txt2img images": "Cartella di output per le immagini txt2img",
+ "Output directory for img2img images": "Cartella di output per le immagini img2img",
+ "Output directory for images from extras tab": "Cartella di output per le immagini dalla scheda 'Extra'",
+ "Output directory for grids; if empty, defaults to two directories below": "Cartella di output per le griglie; se vuoto, per impostazione predefinita veranno usate cartelle seguenti",
+ "Output directory for txt2img grids": "Cartella di output per le griglie txt2img",
+ "Output directory for img2img grids": "Cartella di output per le griglie img2img",
+ "Directory for saving images using the Save button": "Cartella dove salvare le immagini usando il pulsante 'Salva'",
+ "Saving to a directory": "Salva in una cartella",
+ "Save images to a subdirectory": "Salva le immagini in una sotto cartella",
+ "Save grids to a subdirectory": "Salva le griglie in una sotto cartella",
+ "When using \"Save\" button, save images to a subdirectory": "Usando il pulsante \"Salva\", le immagini verranno salvate in una sotto cartella",
+ "Directory name pattern": "Modello del nome della cartella",
+ "Max prompt words for [prompt_words] pattern": "Numero massimo di parole del prompt per il modello [prompt_words]",
+ "Upscaling": "Ampliamento",
+ "Tile size for ESRGAN upscalers. 0 = no tiling.": "Dimensione piastrella per ampliamento ESRGAN. 0 = nessuna piastrellatura.",
+ "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Sovrapposizione delle piastrelle, in pixel per gli ampliamenti ESRGAN. Valori bassi = cucitura visibile.",
+ "Tile size for all SwinIR.": "Dimensione piastrella per tutti gli SwinIR.",
+ "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Sovrapposizione delle piastrelle, in pixel per SwinIR. Valori bassi = cucitura visibile.",
+ "LDSR processing steps. Lower = faster": "Fasi di elaborazione LDSR. Più basso = più veloce",
+ "Upscaler for img2img": "Metodo di ampliamento per img2img",
+ "Upscale latent space image when doing hires. fix": "Amplia l'immagine nello spazio latente durante la correzione in alta risoluzione",
+ "Face restoration": "Restauro del viso",
+ "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Peso di CodeFormer; 0 = effetto massimo; 1 = effetto minimo",
+ "Move face restoration model from VRAM into RAM after processing": "Sposta il modello di restauro facciale dalla VRAM alla RAM dopo l'elaborazione",
+ "System": "Sistema",
+ "VRAM usage polls per second during generation. Set to 0 to disable.": "Verifiche al secondo sull'utilizzo della VRAM durante la generazione. Impostare a 0 per disabilitare.",
+ "Always print all generation info to standard output": "Stampa sempre tutte le informazioni di generazione sul output standard",
+ "Add a second progress bar to the console that shows progress for an entire job.": "Aggiungi una seconda barra di avanzamento alla console che mostra l'avanzamento complessivo del lavoro.",
+ "Training": "Addestramento",
+ "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Sposta VAE e CLIP nella RAM durante l'addestramento di Iperreti. Risparmia VRAM.",
+ "Filename word regex": "Espressione regolare per estrarre parole dal nome del file",
+ "Filename join string": "Stringa per unire le parole estratte dal nome del file",
+ "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca",
+ "Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare",
+ "Use cross attention optimizations while training": "Usa le ottimizzazioni di controllo dell'attenzione incrociato durante l'addestramento",
+ "Stable Diffusion": "Stable Diffusion",
+ "Checkpoints to cache in RAM": "Checkpoint da memorizzare nella RAM",
+ "SD VAE": "SD VAE",
+ "auto": "auto",
+ "Hypernetwork strength": "Forza della Iperrete",
+ "Inpainting conditioning mask strength": "Forza della maschera di condizionamento del Inpainting",
+ "Apply color correction to img2img results to match original colors.": "Applica la correzione del colore ai risultati di img2img in modo che corrispondano ai colori originali.",
+ "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Con img2img, esegue esattamente la quantità di passi specificata dalla barra di scorrimento (normalmente se ne effettuano di meno con meno riduzione del rumore).",
+ "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Abilita la quantizzazione nei campionatori K per risultati più nitidi e puliti. Questo può cambiare i semi esistenti. Richiede il riavvio per applicare la modifica.",
+ "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Enfasi: utilizzare (testo) per fare in modo che il modello presti maggiore attenzione al testo e [testo] per fargli prestare meno attenzione",
+ "Use old emphasis implementation. Can be useful to reproduce old seeds.": "Usa la vecchia implementazione dell'enfasi. Può essere utile per riprodurre vecchi semi.",
+ "Make K-diffusion samplers produce same images in a batch as when making a single image": "Fa sì che i campionatori di diffusione K producano le stesse immagini in un lotto come quando si genera una singola immagine",
+ "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Aumenta la coerenza disattivando dall'ultima virgola all'indietro di n token quando si utilizzano più di 75 token",
+ "Filter NSFW content": "Filtra i contenuti NSFW",
+ "Stop At last layers of CLIP model": "Fermati agli ultimi livelli del modello CLIP",
+ "Interrogate Options": "Opzioni di interrogazione",
+ "Interrogate: keep models in VRAM": "Interroga: mantieni i modelli nella VRAM",
+ "Interrogate: use artists from artists.csv": "Interroga: utilizza artisti dal file artisti.csv",
+ "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interroga: include la classifica delle corrispondenze dei tag del modello nei risultati (non ha effetto sulle interrogazioni basate su didascalie).",
+ "Interrogate: num_beams for BLIP": "Interroga: num_beams per BLIP",
+ "Interrogate: minimum description length (excluding artists, etc..)": "Interroga: lunghezza minima della descrizione (esclusi artisti, ecc..)",
+ "Interrogate: maximum description length": "Interroga: lunghezza massima della descrizione",
+ "CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: numero massimo di righe nel file di testo (0 = Nessun limite)",
+ "Interrogate: deepbooru score threshold": "Interroga: soglia del punteggio deepbooru",
+ "Interrogate: deepbooru sort alphabetically": "Interroga: deepbooru ordinato alfabeticamente",
+ "use spaces for tags in deepbooru": "usa gli spazi per i tag in deepbooru",
+ "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "Effettua l'escape (\\) delle parentesi in deepbooru (così vengono usate come parentesi letterali e non per enfatizzare)",
+ "User interface": "Interfaccia Utente",
+ "Show progressbar": "Mostra la barra di avanzamento",
+ "Show image creation progress every N sampling steps. Set 0 to disable.": "Mostra l'avanzamento della generazione dell'immagine ogni N passaggi di campionamento. Impostare a 0 per disabilitare.",
+ "Show previews of all images generated in a batch as a grid": "Mostra le anteprime di tutte le immagini generate in un lotto come una griglia",
+ "Show grid in results for web": "Mostra la griglia nei risultati per il web",
+ "Do not show any images in results for web": "Non mostrare alcuna immagine nei risultati per il web",
+ "Add model hash to generation information": "Aggiungi l'hash del modello alle informazioni sulla generazione",
+ "Add model name to generation information": "Aggiungi il nome del modello alle informazioni sulla generazione",
+ "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Durante la lettura dei parametri di generazione dal testo nell'interfaccia utente (da informazioni PNG o testo incollato), non modificare il modello/checkpoint selezionato.",
+ "Send seed when sending prompt or image to other interface": "Invia il seme quando si invia un prompt o un'immagine a un'altra interfaccia",
+ "Font for image grids that have text": "Font per griglie di immagini con testo",
+ "Enable full page image viewer": "Abilita la visualizzazione delle immagini a pagina intera",
+ "Show images zoomed in by default in full page image viewer": "Mostra le immagini ingrandite per impostazione predefinita nella visualizzazione a pagina intera",
+ "Show generation progress in window title.": "Mostra l'avanzamento della generazione nel titolo della finestra.",
+ "Quicksettings list": "Elenco delle impostazioni rapide",
+ "Localization (requires restart)": "Localizzazione (richiede il riavvio)",
+ "ar_AR": "ar_AR",
+ "de_DE": "de_DE",
+ "es_ES": "es_ES",
+ "fr_FR": "fr_FR",
+ "it_IT": "it_IT",
+ "ja_JP": "ja_JP",
+ "ko_KR": "ko_KR",
+ "pt_BR": "pt_BR",
+ "ru_RU": "ru_RU",
+ "tr_TR": "tr_TR",
+ "zh_CN": "zh_CN",
+ "zh_TW": "zh_TW",
+ "Sampler parameters": "Parametri del campionatore",
+ "Hide samplers in user interface (requires restart)": "Nascondi campionatori nell'interfaccia utente (richiede il riavvio)",
+ "eta (noise multiplier) for DDIM": "ETA (moltiplicatore di rumore) per DDIM",
+ "eta (noise multiplier) for ancestral samplers": "ETA (moltiplicatore di rumore) per campionatori ancestrali",
+ "img2img DDIM discretize": "discretizzazione DDIM per img2img",
+ "uniform": "uniforme",
+ "quad": "quad",
+ "sigma churn": "sigma churn",
+ "sigma tmin": "sigma tmin",
+ "sigma noise": "sigma noise",
+ "Eta noise seed delta": "ETA del delta del seme del rumore",
+ "Number of columns on image gallery": "Numero di colonne nella galleria di immagini",
+ "Aesthetic Image Scorer": "Punteggio delle immagini estetiche",
+ "Save score as EXIF or PNG Info Chunk": "Salva il punteggio come info EXIF o PNG",
+ "aesthetic_score": "Punteggio estetico",
+ "cfg_scale": "Scala CFG",
+ "sd_model_hash": "Hash del modello SD",
+ "hash": "Hash",
+ "Save tags (Windows only)": "Salva etichette (solo Windows)",
+ "Save category (Windows only)": "Salva categoria (solo Windows)",
+ "Save generation params text": "Salva testo parametri di generazione",
+ "Force CPU (Requires Custom Script Reload)": "Forza CPU (richiede il ricaricamento dello script personalizzato)",
+ "Images Browser": "Galleria immagini",
+ "Preload images at startup": "Precarica le immagini all'avvio",
+ "Number of columns on the page": "Numero di colonne nella pagina",
+ "Number of rows on the page": "Numero di righe nella pagina",
+ "Minimum number of pages per load": "Numero minimo di pagine da caricare",
+ "Maximum number of samples, used to determine which folders to skip when continue running the create script": "Numero massimo di campioni, utilizzato per determinare quali cartelle ignorare quando si continua a eseguire lo script di creazione",
+ "Use same seed for all images": "Usa lo stesso seme per tutte le immagini",
+ "Request browser notifications": "Richiedi le notifiche del browser",
+ "Download localization template": "Scarica il modello per la localizzazione",
+ "Reload custom script bodies (No ui updates, No restart)": "Ricarica gli script personalizzati (nessun aggiornamento dell'interfaccia utente, nessun riavvio)",
+ "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Riavvia Gradio e aggiorna i componenti (solo script personalizzati, ui.py, js e css)",
+ "Installed": "Installato",
+ "Available": "Disponibile",
+ "Install from URL": "Installa da URL",
+ "Apply and restart UI": "Applica e riavvia l'interfaccia utente",
+ "Check for updates": "Controlla aggiornamenti",
+ "Extension": "Estensione",
+ "URL": "URL",
+ "Update": "Aggiorna",
+ "aesthetic-gradients": "Gradienti Estetici (CLIP)",
+ "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients",
+ "unknown": "sconosciuto",
+ "dataset-tag-editor": "Dataset Tag Editor",
+ "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git": "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git",
+ "deforum-for-automatic1111-webui": "Deforum",
+ "https://github.com/deforum-art/deforum-for-automatic1111-webui": "https://github.com/deforum-art/deforum-for-automatic1111-webui",
+ "sd-dynamic-prompts": "Prompt dinamici",
+ "https://github.com/adieyal/sd-dynamic-prompts": "https://github.com/adieyal/sd-dynamic-prompts",
+ "stable-diffusion-webui-aesthetic-image-scorer": "Punteggio immagini estetiche",
+ "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer",
+ "stable-diffusion-webui-artists-to-study": "Artisti per studiare",
+ "https://github.com/camenduru/stable-diffusion-webui-artists-to-study": "https://github.com/camenduru/stable-diffusion-webui-artists-to-study",
+ "stable-diffusion-webui-images-browser": "Galleria immagini",
+ "https://github.com/yfszzx/stable-diffusion-webui-images-browser": "https://github.com/yfszzx/stable-diffusion-webui-images-browser",
+ "stable-diffusion-webui-inspiration": "Ispirazione",
+ "https://github.com/yfszzx/stable-diffusion-webui-inspiration": "https://github.com/yfszzx/stable-diffusion-webui-inspiration",
+ "tag-autocomplete": "Autocompletamento etichette",
+ "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git",
+ "wildcards": "Termini Jolly",
+ "https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git",
+ "Load from:": "Carica da:",
+ "Extension index URL": "URL dell'indice delle Estensioni",
+ "URL for extension's git repository": "URL del repository GIT dell'estensione",
+ "Local directory name": "Nome cartella locale",
+ "Install": "Installa",
+ "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (premi Ctrl+Invio o Alt+Invio per generare)",
+ "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (premere Ctrl+Invio o Alt+Invio per generare)",
+ "Add a random artist to the prompt.": "Aggiungi un artista casuale al prompt.",
+ "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Leggere i parametri di generazione dal prompt o dall'ultima generazione se il prompt è vuoto ed inserirli nell'interfaccia utente.",
+ "Save style": "Salva stile",
+ "Apply selected styles to current prompt": "Applica gli stili selezionati al prompt corrente",
+ "Stop processing current image and continue processing.": "Interrompe l'elaborazione dell'immagine corrente e continua l'elaborazione.",
+ "Stop processing images and return any results accumulated so far.": "Interrompe l'elaborazione delle immagini e restituisce tutti i risultati accumulati finora.",
+ "Style to apply; styles have components for both positive and negative prompts and apply to both": "Stile da applicare; gli stili hanno componenti sia per i prompt positivi che per quelli negativi e si applicano a entrambi",
+ "Do not do anything special": "Non fa nulla di speciale",
+ "Which algorithm to use to produce the image": "Quale algoritmo utilizzare per produrre l'immagine",
+ "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - molto creativo, si può ottenere un'immagine completamente diversa a seconda del numero di passi, impostare i passi su un valore superiore a 30-40 non aiuta",
+ "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - il migliore per inpainting",
+ "Produce an image that can be tiled.": "Produce un'immagine che può essere piastrellata.",
+ "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Utilizza un processo in due fasi per creare parzialmente un'immagine con una risoluzione inferiore, aumentare la scala e quindi migliorarne i dettagli senza modificare la composizione",
+ "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Determina quanto poco l'algoritmo dovrebbe rispettare dovrebbe il contenuto dell'immagine. A 0, non cambierà nulla e a 1 otterrai un'immagine non correlata. Con valori inferiori a 1.0 l'elaborazione richiederà meno passaggi di quelli specificati dalla barra di scorrimento dei passi di campionamento.",
+ "How many batches of images to create": "Quanti lotti di immagini generare",
+ "How many image to create in a single batch": "Quante immagini generare in un singolo lotto",
+ "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - quanto fortemente l'immagine deve conformarsi al prompt: valori più bassi producono risultati più creativi",
+ "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Un valore che determina l'output del generatore di numeri casuali: se create un'immagine con gli stessi parametri e seme di un'altra immagine, otterrete lo stesso risultato",
+ "Set seed to -1, which will cause a new random number to be used every time": "Imposta il seme su -1, che farà sì che ogni volta venga utilizzato un nuovo numero casuale",
+ "Reuse seed from last generation, mostly useful if it was randomed": "Riusa il seme dell'ultima generazione, utile soprattutto se casuale",
+ "Seed of a different picture to be mixed into the generation.": "Seme di un'immagine diversa da miscelare nella generazione.",
+ "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Quanto è forte la variazione da produrre. A 0, non ci sarà alcun effetto. A 1, otterrai l'intera immagine con il seme della variazione (tranne per i campionatori ancestrali, dove otterrai solo una leggera variazione).",
+ "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Prova a produrre un'immagine simile a quella che sarebbe stata prodotta con lo stesso seme alla risoluzione specificata",
+ "This text is used to rotate the feature space of the imgs embs": "Questo testo viene utilizzato per ruotare lo spazio delle funzioni delle immagini incorporate",
+ "How many times to repeat processing an image and using it as input for the next iteration": "Quante volte ripetere l'elaborazione di un'immagine e utilizzarla come input per l'iterazione successiva",
+ "Hello, StylePile here.\nUntil some weird bug gets fixed you will see this even if the script itself is not active. Meanwhile, some hints to take your artwork to new heights:\nUse the 'Focus on' dropdown to select complex presets. Toggle selections below (with or without Focus) to affect your results. Mix and match to get some interesting results. \nAnd some general Stable Diffusion tips that will take your designs to next level:\nYou can add parenthesis to make parts of the prompt stronger. So (((cute))) kitten will make it extra cute (try it out). This is alsow important if a style is affecting your original prompt too much. Make that prompt stronger by adding parenthesis around it, like this: ((promt)).\nYou can type promts like [A|B] to sequentially use terms one after another on each step. So, like [cat|dog] will produce a hybrid catdog. And [A:B:0.4] to switch to other terms after the first one has been active for a certain percentage of steps. So [cat:dog:0.4] will build a cat 40% of the time and then start turning it into a dog. This needs more steps to work properly.": "Salve, qui è StylePile.\nFinché qualche strano bug non verrà risolto, vedrai questo testo anche se lo script non è attivo. Nel frattempo, alcuni suggerimenti per portare la tua grafica a nuovi livelli:\nUtilizza il menu a discesa 'Focus on' per selezionare valori predefiniti complessi. Attiva o disattiva le selezioni seguenti (con o senza Focus) per influire sui risultati. Mescola e abbina per ottenere risultati interessanti. \nE alcuni suggerimenti generali su Stable Diffusion che porteranno i tuoi risultati a un livello superiore:\nPuoi aggiungere parentesi per aumentare l'influenza di certe parti del prompt. Quindi '(((cute))) kitten' lo renderà molto carino (fai delle prove). Questo è importante quando uno stile influisce troppo sul prompt originale. Rendi più forte quel prompt aggiungendo delle parentesi intorno ad esso, così: ((promt)).\nPuoi digitare prompt nel formato [A|B] per usare in sequenza i termini uno dopo l'altro in ogni passaggio. Quindi, come [cat|dog] produrrà un 'canegatto' ibrido. E [A:B:0.4] per passare ad altri termini dopo che il primo è stato attivo per una certa percentuale di passaggi. Quindi [cat:dog:0.4] genererà un gatto il 40% dei passaggi e poi inizierà a trasformarlo in un cane. Sono richiesti più passaggi perchè funzioni correttamente.",
+ "Enter one prompt per line. Blank lines will be ignored.": "Immettere un prompt per riga. Le righe vuote verranno ignorate.",
+ "Separate values for X axis using commas.": "Separare i valori per l'asse X usando le virgole.",
+ "Separate values for Y axis using commas.": "Separare i valori per l'asse Y usando le virgole.",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "Separa un elenco di parole con virgole e lo script eseguirà una variazione di prompt con quelle parole per ogni loro possibile ordine",
+ "Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva l'immagine/i in una cartella (predefinita - log/images) ed i parametri di generazione in un file CSV.",
+ "Open images output directory": "Apri la cartella di output delle immagini",
+ "How much to blur the mask before processing, in pixels.": "Quanto sfocare la maschera prima dell'elaborazione, in pixel.",
+ "What to put inside the masked area before processing it with Stable Diffusion.": "Cosa mettere all'interno dell'area mascherata prima di elaborarla con Stable Diffusion.",
+ "fill it with colors of the image": "riempi con i colori dell'immagine",
+ "keep whatever was there originally": "conserva tutto ciò che c'era in origine",
+ "fill it with latent space noise": "riempi di rumore spaziale latente",
+ "fill it with latent space zeroes": "riempi con zeri di spazio latente",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Ingrandisce la regione mascherata per raggiungere la risoluzione, esegue la pittura, riduce la scala e incolla nell'immagine originale",
+ "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Ridimensiona l'immagine alla risoluzione di destinazione. A meno che altezza e larghezza non corrispondano, otterrai proporzioni errate.",
+ "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Ridimensionare l'immagine in modo che l'intera risoluzione di destinazione sia riempita con l'immagine. Ritaglia le parti che sporgono.",
+ "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Ridimensiona l'immagine in modo che l'intera immagine rientri nella risoluzione di destinazione. Riempi lo spazio vuoto con i colori dell'immagine.",
+ "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Per l'upscaling SD, quanta sovrapposizione in pixel dovrebbe esserci tra le piastrelle. Le piastrelle si sovrappongono in modo che quando vengono unite nuovamente in un'immagine, non ci siano giunture chiaramente visibili.",
+ "Process an image, use it as an input, repeat.": "Elabora un'immagine, usala come input, ripeti.",
+ "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "In modalità rielaborazione ricorsiva, su ogni ciclo la forza di denoising viene moltiplicata per questo valore. <1 significa varietà decrescente in modo che la sequenza converga su un'immagine fissa. >1 significa aumentare la varietà in modo che la tua sequenza diventi sempre più caotica.",
+ "A directory on the same machine where the server is running.": "Una cartella sulla stessa macchina su cui è in esecuzione il server.",
+ "Leave blank to save images to the default path.": "Lascia vuoto per salvare le immagini nel percorso predefinito.",
+ "Result = A * (1 - M) + B * M": "Risultato = A * (1 - M) + B * M",
+ "Result = A + (B - C) * M": "Risultato = A + (B - C) * M",
+ "1st and last digit must be 1. ex:'1, 2, 1'": "La prima e l'ultima cifra devono essere 1. Es.:'1, 2, 1'",
+ "Path to directory with input images": "Percorso della cartella con immagini di input",
+ "Path to directory where to write outputs": "Percorso della cartella in cui scrivere i risultati",
+ "C:\\directory\\of\\datasets": "C:\\cartella\\del\\dataset",
+ "Input images directory": "Cartella di input delle immagini",
+ "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Usa i seguenti tag per definire come vengono scelti i nomi dei file per le immagini: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed ], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; lasciare vuoto per usare l'impostazione predefinita.",
+ "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Se questa opzione è abilitata, la filigrana non verrà aggiunta alle immagini create. Attenzione: se non aggiungi la filigrana, potresti comportarti in modo non etico.",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Utilizzare i seguenti tag per definire come vengono scelte le sottodirectory per le immagini e le griglie: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; lasciare vuoto per usare l'impostazione predefinita.",
+ "Restore low quality faces using GFPGAN neural network": "Ripristina volti di bassa qualità utilizzando la rete neurale GFPGAN",
+ "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Questa espressione regolare verrà utilizzata per estrarre le parole dal nome del file e verranno unite utilizzando l'opzione seguente nel testo dell'etichetta utilizzato per l'addestramento. Lascia vuoto per mantenere il testo del nome del file così com'è.",
+ "This string will be used to join split words into a single line if the option above is enabled.": "Questa stringa verrà utilizzata per unire le parole divise in un'unica riga se l'opzione sopra è abilitata.",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Si applica solo ai modelli di pittura. Determina con quale forza mascherare l'immagine originale per inpainting e img2img. 1.0 significa completamente mascherato, che è il comportamento predefinito. 0.0 significa un condizionamento completamente non mascherato. Valori più bassi aiuteranno a preservare la composizione generale dell'immagine, ma avranno difficoltà con grandi cambiamenti.",
+ "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Elenco dei nomi delle impostazioni, separati da virgole, per le impostazioni che dovrebbero essere visualizzate nella barra di accesso rapido in alto, anziché nella normale scheda delle impostazioni. Vedi modules/shared.py per impostare i nomi. Richiede il riavvio per applicare.",
+ "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se questo valore è diverso da zero, verrà aggiunto al seed e utilizzato per inizializzare il generatore di numeri casuali per il rumore quando si utilizzano campionatori con ETA. Puoi usarlo per produrre ancora più variazioni di immagini, oppure puoi usarlo per abbinare le immagini di altri software se sai cosa stai facendo.",
+ "Leave empty for auto": "Lasciare vuoto per automatico",
+ "Autocomplete options": "Opzioni di autocompletamento",
+ "Enable Autocomplete": "Abilita autocompletamento",
+ "Append commas": "Aggiungi virgole",
+ "AlphaCanvas": "AlphaCanvas",
+ "Close": "Chiudi",
+ "Grab Results": "Ottieni risultati",
+ "Apply Patch": "Applica Patch",
+ "Hue:0": "Hue:0",
+ "S:0": "S:0",
+ "L:0": "L:0",
+ "Load Canvas": "Carica Canvas",
+ "Save Canvas": "Salva Canvas",
+ "latest": "aggiornato",
+ "behind": "da aggiornare",
+ "Description": "Descrizione",
+ "Action": "Azione",
+ "Aesthetic Gradients": "Gradienti estetici",
+ "Create an embedding from one or few pictures and use it to apply their style to generated images.": "Crea un incorporamento da una o poche immagini e usalo per applicare il loro stile alle immagini generate.",
+ "Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "Estensione del campione. Consente di utilizzare la sintassi __name__ nel prompt per ottenere una riga casuale da un file denominato name.txt nella cartella dei termini jolly. Vedi anche 'Prompt dinamici' per funzionalità simili.",
+ "Dynamic Prompts": "Prompt dinamici",
+ "Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "Implementa un modello di linguaggio espressivo per la generazione di prompt casuale o combinatoria insieme a funzionalità per supportare cartelle strutturate contenenti termini jolly.",
+ "Image browser": "Galleria immagini",
+ "Provides an interface to browse created images in the web browser.": "Fornisce un'interfaccia nel browser web per sfogliare le immagini create.",
+ "Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "Visualizza in modo casuale le immagini dello stile tipico dell'artista o dei generi artistici, dopo la selezione vengono visualizzate più immagini di questo artista o genere. Così non dovete preoccuparvi della difficoltà di scegliere lo stile artistico giusto quando create.",
+ "The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Il porting ufficiale di Deforum, uno script completo per animazioni 2D e 3D, che supporta sequenze di fotogrammi chiave, parametri matematici dinamici (anche all'interno dei prompt), mascheramento dinamico, stima della profondità e warping.",
+ "Artists to study": "Artisti per studiare",
+ "Shows a gallery of generated pictures by artists separated into categories.": "Mostra una galleria di immagini generate dagli artisti suddivise in categorie.",
+ "Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Calcola il punteggio estetico per le immagini generate utilizzando il predittore del punteggio estetico CLIP+MLP basato su Chad Scorer",
+ "Lets you edit captions in training datasets.": "Consente di modificare i sottotitoli nei set di dati di addestramento.",
+ "Time taken:": "Tempo impiegato:"
+}
\ No newline at end of file
From de11709479431df6e5b78afd83c70dac7bc8c2e6 Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Thu, 3 Nov 2022 07:34:23 +0800
Subject: [PATCH 079/147] Inpaint at full resolution
---
localizations/zh_CN.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index d8bc91cd..f91bd02a 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -141,7 +141,7 @@
"original": "原图",
"latent noise": "潜空间噪声",
"latent nothing": "潜空间数值零",
- "Inpaint at full resolution": "以完整分辨率进行局部重绘",
+ "Inpaint at full resolution": "全分辨率局部重绘",
"Inpaint at full resolution padding, pixels": "填补像素",
"Process images in a directory on the same machine where the server is running.": "使用服务器主机上的一个目录,作为输入目录处理图像。",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一个空的文件夹作为输出目录,而不是使用默认的 output 文件夹作为输出目录。",
@@ -484,7 +484,7 @@
"keep whatever was there originally": "保留原来的图像,不进行预处理",
"fill it with latent space noise": "用潜空间的噪声填充它",
"fill it with latent space zeroes": "用潜空间的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中。请注意,填补像素 仅对 全分辨率局部重绘 生效。",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
From 29c43935fb110d2a329b31ec9bc7bbe344e7a8e2 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Thu, 3 Nov 2022 11:17:44 +0900
Subject: [PATCH 080/147] unify translation style
---
localizations/zh_CN.json | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 04e63c79..7f8f4ba3 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -37,8 +37,8 @@
"Image": "图像",
"Check progress": "查看进度",
"Check progress (first)": "(首次)查看进度",
- "Sampling Steps": "采样迭代步数",
- "Sampling method": "采样方法",
+ "Sampling Steps": "采样迭代步数 (Steps)",
+ "Sampling method": "采样方法 (Sampler)",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
@@ -101,8 +101,8 @@
"Prompt order": "提示词顺序",
"Sampler": "采样器",
"Checkpoint name": "模型(ckpt)名",
- "Hypernet str.": "Hypernetwork 强度",
"Hypernetwork": "超网络(Hypernetwork)",
+ "Hypernet str.": "超网络(Hypernetwork) 强度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
@@ -260,10 +260,10 @@
"threshold": "阈值",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "选择初始化层权重的方案. 类relu - Kaiming, 类sigmoid - Xavier 都是比较推荐的选项",
"Normal": "正态",
- "KaimingUniform": "Kaiming均匀",
- "KaimingNormal": "Kaiming正态",
- "XavierUniform": "Xavier均匀",
- "XavierNormal": "Xavier正态",
+ "KaimingUniform": "Kaiming 均匀",
+ "KaimingNormal": "Kaiming 正态",
+ "XavierUniform": "Xavier 均匀",
+ "XavierNormal": "Xavier 正态",
"Add layer normalization": "添加层标准化",
"Use dropout": "采用 dropout 防止过拟合",
"Overwrite Old Hypernetwork": "覆写旧的 Hypernetwork",
@@ -395,7 +395,7 @@
"Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器 批量生成与生成单个图像时,产出相同的图像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性",
"Filter NSFW content": "过滤成人内容(NSFW)",
- "Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下",
+ "Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下 (Clip skip)",
"Interrogate Options": "反推提示词选项",
"Interrogate: keep models in VRAM": "反推: 将模型保存在显存(VRAM)中",
"Interrogate: use artists from artists.csv": "反推: 使用 artists.csv 中的艺术家",
@@ -416,7 +416,7 @@
"Do not show any images in results for web": "不在网页的结果中显示任何图像",
"Add model hash to generation information": "将模型的哈希值添加到生成信息",
"Add model name to generation information": "将模型名称添加到生成信息",
- "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到 UI(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)",
+ "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到用户界面(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)",
"Send seed when sending prompt or image to other interface": "将提示词或者图片发送到 >> 其他界面时,把随机种子也传送过去",
"Font for image grids that have text": "有文字的宫格图使用的字体",
"Enable full page image viewer": "启用整页图像查看器",
@@ -434,7 +434,7 @@
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
"sigma noise": "sigma 噪声",
- "Eta noise seed delta": "Eta 噪声种子偏移(noise seed delta)",
+ "Eta noise seed delta": "Eta 噪声种子偏移(ENSD - Eta noise seed delta)",
"Images Browser": "图库浏览器",
"Preload images at startup": "在启动时预加载图像",
"Number of columns on the page": "每页列数",
@@ -444,7 +444,7 @@
"Use same seed for all images": "为所有图像使用同一个随机种子",
"Request browser notifications": "请求浏览器通知",
"Download localization template": "下载本地化模板",
- "Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无 ui 更新,无重启)",
+ "Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无用户界面更新,无重启)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件(仅限自定义脚本、ui.py、js 和 css)",
"Installed": "已安装",
"Available": "可用",
@@ -557,7 +557,7 @@
"Start drawing": "开始绘制",
"how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
"Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
- "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些Real-ESRGAN模型显示在用户界面。(需要重新启动)",
+ "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些 Real-ESRGAN 模型显示在网页用户界面。(需要重新启动)",
"Face restoration model": "面部修复模型",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Move to favorites": "移动到收藏夹(保存)",
From 313e14de04d9955c6ad077341feceb0fc7f2f1d3 Mon Sep 17 00:00:00 2001
From: Chris OBryan <13701027+cobryan05@users.noreply.github.com>
Date: Wed, 2 Nov 2022 21:37:43 -0500
Subject: [PATCH 081/147] extras - skip unnecessary second hash of image
There is no need to re-hash the input image on each iteration of the loop.
This also reverts PR #4026, since the cache hits it avoided were determined
to be valid after all.
---
modules/extras.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/modules/extras.py b/modules/extras.py
index 8e2ab35c..71b93a06 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -136,12 +136,13 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
def run_upscalers_blend(params: List[UpscaleParams], image: Image.Image, info: str) -> Tuple[Image.Image, str]:
blended_result: Image.Image = None
+ image_hash: str = hash(np.array(image.getdata()).tobytes())
for upscaler in params:
upscale_args = (upscaler.upscaler_idx, upscaling_resize, resize_mode,
upscaling_resize_w, upscaling_resize_h, upscaling_crop)
- cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()),
+ cache_key = LruCache.Key(image_hash=image_hash,
info_hash=hash(info),
- args_hash=hash((upscale_args, upscale_first)))
+ args_hash=hash(upscale_args))
cached_entry = cached_images.get(cache_key)
if cached_entry is None:
res = upscale(image, *upscale_args)
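Taken on its own, the pattern here is: hash the input image once before the loop and let only the per-upscaler arguments vary in the cache key. A minimal standalone sketch of that pattern (CacheKey and the placeholder upscale result are illustrative stand-ins, not the module's real LruCache API):

    from collections import namedtuple

    # Stand-in for the cache key; names are hypothetical, for illustration only.
    CacheKey = namedtuple("CacheKey", ["image_hash", "info_hash", "args_hash"])

    def blend_upscalers(image_bytes: bytes, info: str, upscaler_args_list, cache: dict):
        # Hash the input image once; it is identical for every upscaler in the loop.
        image_hash = hash(image_bytes)
        results = []
        for args in upscaler_args_list:  # each args is a hashable tuple of settings
            key = CacheKey(image_hash=image_hash, info_hash=hash(info), args_hash=hash(args))
            if key not in cache:
                cache[key] = ("upscaled-with", args)  # placeholder for the real upscale() call
            results.append(cache[key])
        return results

    # Repeated args hit the cache instead of recomputing, and the image is hashed only once.
    print(blend_upscalers(b"raw image bytes", "info", [("ESRGAN", 2), ("ESRGAN", 2)], {}))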
From dcf73cf779b8c72e022082a7178254b3e3ec110c Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Thu, 3 Nov 2022 11:45:24 +0900
Subject: [PATCH 082/147] Update zh_CN.json
- re-order some elements
- update with new content
---
localizations/zh_CN.json | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 7f8f4ba3..30373160 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -90,6 +90,7 @@
"Output directory": "输出目录",
"Put variable parts at start of prompt": "把变量部分放在提示词文本的开头",
"Iterate seed every line": "每行输入都换一个种子",
+ "Use same random seed for all lines": "每行输入都使用同一个随机种子",
"List of prompt inputs": "提示词输入列表",
"Upload prompt inputs": "上传提示词输入文件",
"X type": "X轴类型",
@@ -320,6 +321,7 @@
"keyword": "搜索",
"Generate Info": "生成信息",
"File Name": "文件名",
+ "Move to favorites": "移动到收藏夹(保存)",
"Renew Page": "刷新页面",
"Number": "数量",
"set_index": "设置索引",
@@ -341,6 +343,8 @@
"Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息,作为块保存到 png 图片文件中",
"Create a text file next to every image with generation parameters.": "保存图像时,在每个图像旁边创建一个文本文件储存生成参数",
"Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本",
+ "Save a copy of image before applying highres fix.": "在做高分辨率修复之前保存初始图像副本",
+ "Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
"Quality for saved jpeg images": "保存的 jpeg 图像的质量",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG 图片",
"Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中,使用原始名称作为输出文件名",
@@ -377,17 +381,19 @@
"Always print all generation info to standard output": "始终将所有生成信息输出到 standard output (一般为控制台)",
"Add a second progress bar to the console that shows progress for an entire job.": "向控制台添加第二个进度条,显示整个作业的进度",
"Training": "训练",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM)如果可行的话,节省显存(VRAM)",
"Filename word regex": "文件名用词的正则表达式",
"Filename join string": "文件名连接用字符串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每个 epoch 中单个输入图像的重复次数; 仅用于显示 epoch 数",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 表格到日志目录,0 表示禁用",
+ "Use cross attention optimizations while training": "训练时开启 cross attention 优化",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)",
+ "SD VAE": "模型的 VAE (SD VAE)",
+ "auto": "自动",
"Hypernetwork strength": "Hypernetwork 强度",
"Inpainting conditioning mask strength": "局部重绘的自适应蒙版强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
- "Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的重绘幅度需要更少的迭代步数)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 采样器中启用量化以获得更清晰、更清晰的结果。这可能会改变现有的随机种子。需要重新启动才能应用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "强调符:使用 (文字) 使模型更关注该文本,使用 [文字] 使其减少关注",
@@ -560,8 +566,9 @@
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些 Real-ESRGAN 模型显示在网页用户界面。(需要重新启动)",
"Face restoration model": "面部修复模型",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
- "Move to favorites": "移动到收藏夹(保存)",
"favorites": "收藏夹(已保存)",
"others": "其他",
- "Collect": "收藏(保存)"
+ "Collect": "收藏(保存)",
+ "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
+
}
From 53e72e15f0916eb63a5e231ef0e749e9c2d0e531 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Thu, 3 Nov 2022 12:27:32 +0900
Subject: [PATCH 083/147] polish translation content
---
localizations/zh_CN.json | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 30373160..eee27793 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -111,7 +111,7 @@
"Eta": "Eta",
"Clip skip": "Clip 跳过",
"Denoising": "去噪",
- "Cond. Image Mask Weight": "自适应图像蒙版强度",
+ "Cond. Image Mask Weight": "图像调节屏蔽度",
"X values": "X轴数值",
"Y type": "Y轴类型",
"Y values": "Y轴数值",
@@ -392,7 +392,7 @@
"SD VAE": "模型的 VAE (SD VAE)",
"auto": "自动",
"Hypernetwork strength": "Hypernetwork 强度",
- "Inpainting conditioning mask strength": "局部重绘的自适应蒙版强度",
+ "Inpainting conditioning mask strength": "局部重绘时图像调节的蒙版屏蔽强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的重绘幅度需要更少的迭代步数)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 采样器中启用量化以获得更清晰、更清晰的结果。这可能会改变现有的随机种子。需要重新启动才能应用",
@@ -523,7 +523,7 @@
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
- "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型。 决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型(模型后缀为 inpainting.ckpt 的模型)。 决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽原图,这是默认行为。 0.0 表示完全不屏蔽让原图进行图像调节。 较低的值将有助于保持原图的整体构图,但很难遇到较大的变化。",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
"Leave empty for auto": "留空时自动生成",
@@ -569,6 +569,5 @@
"favorites": "收藏夹(已保存)",
"others": "其他",
"Collect": "收藏(保存)",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
-
+ "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)"
}
From 7a2e36b583ef9eaefa44322e16faff6f9f1af169 Mon Sep 17 00:00:00 2001
From: Bruno Seoane
Date: Thu, 3 Nov 2022 00:51:22 -0300
Subject: [PATCH 084/147] Add config and lists endpoints
---
modules/api/api.py | 97 ++++++++++++++++++++++++++++++++++++++++---
modules/api/models.py | 70 +++++++++++++++++++++++++++++--
2 files changed, 159 insertions(+), 8 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 71c9c160..ed2dce5d 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -2,14 +2,17 @@ import base64
import io
import time
import uvicorn
-from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
-from fastapi import APIRouter, Depends, HTTPException
+from threading import Lock
+from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
+from fastapi import APIRouter, Depends, FastAPI, HTTPException
import modules.shared as shared
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
+from modules.sd_samplers import all_samplers
from modules.extras import run_extras, run_pnginfo
-
+from modules.sd_models import checkpoints_list
+from modules.realesrgan_model import get_realesrgan_models
+from typing import List
def upscaler_to_index(name: str):
try:
@@ -37,7 +40,7 @@ def encode_pil_to_base64(image):
class Api:
- def __init__(self, app, queue_lock):
+ def __init__(self, app: FastAPI, queue_lock: Lock):
self.router = APIRouter()
self.app = app
self.queue_lock = queue_lock
@@ -48,6 +51,19 @@ class Api:
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
+ self.app.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+ self.app.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
+ self.app.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
+ self.app.add_api_route("/sdapi/v1/info", self.get_info, methods=["GET"])
+ self.app.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
+ self.app.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
+ self.app.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
+ self.app.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
+ self.app.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
+ self.app.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
+ self.app.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem])
+ self.app.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
+ self.app.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@@ -190,6 +206,77 @@ class Api:
shared.state.interrupt()
return {}
+
+ def get_config(self):
+ options = {}
+ for key in shared.opts.data.keys():
+ metadata = shared.opts.data_labels.get(key)
+ if(metadata is not None):
+ options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)})
+ else:
+ options.update({key: shared.opts.data.get(key, None)})
+
+ return options
+
+ def set_config(self, req: OptionsModel):
+ reqDict = vars(req)
+ for o in reqDict:
+ setattr(shared.opts, o, reqDict[o])
+
+ shared.opts.save(shared.config_filename)
+ return
+
+ def get_cmd_flags(self):
+ return vars(shared.cmd_opts)
+
+ def get_info(self):
+
+ return {
+ "hypernetworks": [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks],
+ "face_restorers": [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers],
+ "realesrgan_models":[{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)],
+ "promp_styles":[shared.prompt_styles.styles[k] for k in shared.prompt_styles.styles],
+ "artists_categories": shared.artist_db.cats,
+ # "artists": [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
+ }
+
+ def get_samplers(self):
+ return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers]
+
+ def get_upscalers(self):
+ upscalers = []
+
+ for upscaler in shared.sd_upscalers:
+ u = upscaler.scaler
+ upscalers.append({"name":u.name, "model_name":u.model_name, "model_path":u.model_path, "model_url":u.model_url})
+
+ return upscalers
+
+ def get_sd_models(self):
+ return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()]
+
+ def get_hypernetworks(self):
+ return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
+
+ def get_face_restorers(self):
+ return [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers]
+
+ def get_realesrgan_models(self):
+ return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]
+
+ def get_promp_styles(self):
+ styleList = []
+ for k in shared.prompt_styles.styles:
+ style = shared.prompt_styles.styles[k]
+ styleList.append({"name":style[0], "prompt": style[1], "negative_prompr": style[2]})
+
+ return styleList
+
+ def get_artists_categories(self):
+ return shared.artist_db.cats
+
+ def get_artists(self):
+ return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
def launch(self, server_name, port):
self.app.include_router(self.router)
diff --git a/modules/api/models.py b/modules/api/models.py
index 9ee42a17..b54b188a 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,11 +1,10 @@
import inspect
-from click import prompt
from pydantic import BaseModel, Field, create_model
-from typing import Any, Optional
+from typing import Any, Optional, Union
from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
-from modules.shared import sd_upscalers
+from modules.shared import sd_upscalers, opts, parser
API_NOT_ALLOWED = [
"self",
@@ -165,3 +164,68 @@ class ProgressResponse(BaseModel):
eta_relative: float = Field(title="ETA in secs")
state: dict = Field(title="State", description="The current state snapshot")
current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
+
+fields = {}
+for key, value in opts.data.items():
+ metadata = opts.data_labels.get(key)
+ optType = opts.typemap.get(type(value), type(value))
+
+ if (metadata is not None):
+ fields.update({key: (Optional[optType], Field(
+ default=metadata.default ,description=metadata.label))})
+ else:
+ fields.update({key: (Optional[optType], Field())})
+
+OptionsModel = create_model("Options", **fields)
+
+flags = {}
+_options = vars(parser)['_option_string_actions']
+for key in _options:
+ if(_options[key].dest != 'help'):
+ flag = _options[key]
+ _type = str
+ if(_options[key].default != None): _type = type(_options[key].default)
+ flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
+
+FlagsModel = create_model("Flags", **flags)
+
+class SamplerItem(BaseModel):
+ name: str = Field(title="Name")
+ aliases: list[str] = Field(title="Aliases")
+ options: dict[str, str] = Field(title="Options")
+
+class UpscalerItem(BaseModel):
+ name: str = Field(title="Name")
+ model_name: str | None = Field(title="Model Name")
+ model_path: str | None = Field(title="Path")
+ model_url: str | None = Field(title="URL")
+
+class SDModelItem(BaseModel):
+ title: str = Field(title="Title")
+ model_name: str = Field(title="Model Name")
+ hash: str = Field(title="Hash")
+ filename: str = Field(title="Filename")
+ config: str = Field(title="Config file")
+
+class HypernetworkItem(BaseModel):
+ name: str = Field(title="Name")
+ path: str | None = Field(title="Path")
+
+class FaceRestorerItem(BaseModel):
+ name: str = Field(title="Name")
+ cmd_dir: str | None = Field(title="Path")
+
+class RealesrganItem(BaseModel):
+ name: str = Field(title="Name")
+ path: str | None = Field(title="Path")
+ scale: int | None = Field(title="Scale")
+
+class PromptStyleItem(BaseModel):
+ name: str = Field(title="Name")
+ prompt: str | None = Field(title="Prompt")
+ negative_prompt: str | None = Field(title="Negative Prompt")
+
+class ArtistItem(BaseModel):
+ name: str = Field(title="Name")
+ score: float = Field(title="Score")
+ category: str = Field(title="Category")
\ No newline at end of file
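For reference, a sketch of exercising the new routes from a client once the server is running with the API enabled; the base URL and the option key tweaked below are assumptions for illustration, not part of the patch:

    import requests

    BASE = "http://127.0.0.1:7860"  # assumed default local address; adjust to your setup

    # Read-only listings added by this patch
    samplers = requests.get(f"{BASE}/sdapi/v1/samplers").json()
    models = requests.get(f"{BASE}/sdapi/v1/sd-models").json()
    print([s["name"] for s in samplers])
    print([m["title"] for m in models])

    # Options round-trip: GET the full options object, tweak a field, POST it back.
    # set_config applies every field it receives, so send the whole object, not a fragment.
    opts = requests.get(f"{BASE}/sdapi/v1/options").json()
    opts["samples_format"] = "png"  # example key; use any key present in the GET response
    requests.post(f"{BASE}/sdapi/v1/options", json=opts)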
From 743fffa3d6c2e9e6bb5f48093a4c88f3b53e001d Mon Sep 17 00:00:00 2001
From: Bruno Seoane
Date: Thu, 3 Nov 2022 00:52:01 -0300
Subject: [PATCH 085/147] Remove unused endpoint
---
modules/api/api.py | 12 ------------
1 file changed, 12 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index ed2dce5d..a49f3755 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -54,7 +54,6 @@ class Api:
self.app.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
self.app.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
self.app.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
- self.app.add_api_route("/sdapi/v1/info", self.get_info, methods=["GET"])
self.app.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
self.app.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
self.app.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
@@ -229,17 +228,6 @@ class Api:
def get_cmd_flags(self):
return vars(shared.cmd_opts)
- def get_info(self):
-
- return {
- "hypernetworks": [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks],
- "face_restorers": [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers],
- "realesrgan_models":[{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)],
- "promp_styles":[shared.prompt_styles.styles[k] for k in shared.prompt_styles.styles],
- "artists_categories": shared.artist_db.cats,
- # "artists": [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
- }
-
def get_samplers(self):
return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers]
From e33d6cbddd08870e348d10a58af41fb677a39fd6 Mon Sep 17 00:00:00 2001
From: Ju1-js <40339350+Ju1-js@users.noreply.github.com>
Date: Wed, 2 Nov 2022 21:04:49 -0700
Subject: [PATCH 086/147] Make extension manager Remote links open a new tab
---
modules/ui_extensions.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index ab807722..a81de9a7 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -86,7 +86,7 @@ def extension_table():
code += f"""
<td>{html.escape(ext.name)}</td>
- <td><a href="{html.escape(ext.remote or '')}">{html.escape(ext.remote or '')}</a></td>
+ <td><a href="{html.escape(ext.remote or '')}" target="_blank">{html.escape(ext.remote or '')}</a></td>
<td>{ext_status}</td>
"""
From 792b72fd6b6686e77cea09781852528fb15f40b4 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Thu, 3 Nov 2022 13:03:08 +0800
Subject: [PATCH 087/147] Update zh_CN.json
---
localizations/zh_CN.json | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index f91bd02a..4a06941e 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -484,7 +484,7 @@
"keep whatever was there originally": "保留原来的图像,不进行预处理",
"fill it with latent space noise": "用潜空间的噪声填充它",
"fill it with latent space zeroes": "用潜空间的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中。请注意,填补像素 仅对 全分辨率局部重绘 生效。",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中。\n请注意,填补像素 仅对 全分辨率局部重绘 生效。",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
@@ -508,6 +508,7 @@
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型。 决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
+
"Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
"Append commas": "附加逗号",
@@ -541,11 +542,29 @@
"Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
"Number of pictures displayed on each page": "每页显示的图像数量",
"Number of grids in each row": "每行显示多少格",
+
"Start drawing": "开始绘制",
"how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
"Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些Real-ESRGAN模型显示在用户界面。(需要重新启动)",
"Face restoration model": "面部修复模型",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
- "Hypernetwork": "超网络(Hypernetwork)"
+ "Hypernetwork": "超网络(Hypernetwork)",
+ "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "迭代改进生成的图像多少次;更高的值需要更长的时间;非常低的值会产生不好的结果",
+ "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在图像上画一个蒙版,脚本会根据提示重新生成蒙版区域的内容",
+ "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常提升图像,将结果分割成瓦片,用img2img改进每个瓦片,将整个图像合并回来",
+ "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "创建一个网格,图像将有不同的参数。使用下面的输入来指定哪些参数将由列和行共享",
+ "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "运行Python代码。仅限高级用户。必须用 --allow-code 来运行程序,这样才能工作。",
+ "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "用逗号隔开一个单词列表,第一个单词将被用作关键词:脚本将在提示中搜索这个单词,并用其他单词替换它。",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "用逗号分开一个单词列表,脚本将用这些单词的每一个可能的顺序制作一个变体的提示。",
+ "Reconstruct prompt from existing image and put it into the prompt field.": "从现有的图像中重构提示,并将其放入提示字段。",
+ "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "在[prompt_words]选项中设置要使用的最大字数;注意:如果字数太长,可能会超过系统可处理的文件路径的最大长度",
+ "Process an image, use it as an input, repeat.": "处理一张图片,将其作为输入,重复。",
+ "Insert selected styles into prompt fields": "在提示字段中插入选定的样式",
+ "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "将当前的提示语保存为样式。如果你在文本中添加标记{prompt},当你将来使用该样式时,该样式会将其作为你的提示的占位符。",
+ "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在制作图像之前从检查点加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为检查点名称。建议与Y轴一起使用以减少切换。",
+ "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序的显存(VRAM)分配的峰值量 / GPU的总显存(VRAM)(峰值利用率%)。",
+ "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "缩放潜在空间中的图像。另一种方法是,从潜在表示中产生完整的图像,提高其比例,然后再将其移回潜在空间。",
+
+ "----": "----"
}
From fb1374791bf4b4c9b49de5378f29b12fdabcac97 Mon Sep 17 00:00:00 2001
From: Billy Cao
Date: Thu, 3 Nov 2022 13:08:11 +0800
Subject: [PATCH 088/147] Fix --nowebui argument being ineffective
---
launch.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/launch.py b/launch.py
index 958336f2..b061bce6 100644
--- a/launch.py
+++ b/launch.py
@@ -217,12 +217,15 @@ def tests(argv):
proc.kill()
-def start_webui():
- print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
+def start():
+ print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
import webui
- webui.webui()
+ if '--nowebui' in sys.argv:
+ webui.api_only()
+ else:
+ webui.webui()
if __name__ == "__main__":
prepare_enviroment()
- start_webui()
+ start()
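The dispatch itself is only an argv check made before importing the heavy webui module. A tiny standalone sketch of the same pattern (the two entry points below are placeholders standing in for webui.api_only() and webui.webui()):

    import sys

    def api_only():
        print("would start just the API server")

    def full_webui():
        print("would start the full web UI")

    def start():
        # Inspect raw argv before importing anything heavy, as the patched launch.py does.
        nowebui = "--nowebui" in sys.argv
        print(f"Launching {'API server' if nowebui else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
        (api_only if nowebui else full_webui)()

    if __name__ == "__main__":
        start()  # e.g. run as: python this_sketch.py --nowebui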
From 0b143c1163a96b193a4e8512be9c5831c661a50d Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Thu, 3 Nov 2022 14:30:53 +0900
Subject: [PATCH 089/147] Separate .optim file from model
---
modules/hypernetworks/hypernetwork.py | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 8f74cdea..63c25de8 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -161,6 +161,7 @@ class Hypernetwork:
def save(self, filename):
state_dict = {}
+ optimizer_saved_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
@@ -175,9 +176,10 @@ class Hypernetwork:
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
if self.optimizer_name is not None:
- state_dict['optimizer_name'] = self.optimizer_name
+ optimizer_saved_dict['optimizer_name'] = self.optimizer_name
if self.optimizer_state_dict:
- state_dict['optimizer_state_dict'] = self.optimizer_state_dict
+ optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+ torch.save(optimizer_saved_dict, filename + '.optim')
torch.save(state_dict, filename)
@@ -198,9 +200,11 @@ class Hypernetwork:
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}")
- self.optimizer_name = state_dict.get('optimizer_name', 'AdamW')
+
+ optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
+ self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
print(f"Optimizer name is {self.optimizer_name}")
- self.optimizer_state_dict = state_dict.get('optimizer_state_dict', None)
+ self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
if self.optimizer_state_dict:
print("Loaded existing optimizer from checkpoint")
else:
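The shape of the change: optimizer state is persisted in a sidecar file next to the checkpoint instead of inside it, and is loaded back only if that sidecar exists. A minimal sketch of the pattern with torch (function names and dict keys are chosen for illustration):

    import os
    import torch

    def save_with_optim(model, optimizer, filename):
        # Model weights go to the main file...
        torch.save(model.state_dict(), filename)
        # ...optimizer state goes to a sidecar so the main checkpoint stays lean and portable.
        torch.save({"optimizer_name": type(optimizer).__name__,
                    "optimizer_state_dict": optimizer.state_dict()}, filename + ".optim")

    def load_with_optim(model, optimizer, filename):
        model.load_state_dict(torch.load(filename, map_location="cpu"))
        optim_path = filename + ".optim"
        saved = torch.load(optim_path, map_location="cpu") if os.path.exists(optim_path) else {}
        state = saved.get("optimizer_state_dict")
        if state is not None:
            optimizer.load_state_dict(state)  # resume training where it left off
        return saved.get("optimizer_name", "AdamW")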
From 1764ac3c8bc482bd575987850e96630d9115e51a Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Thu, 3 Nov 2022 14:49:26 +0900
Subject: [PATCH 090/147] use hash to check valid optim
---
modules/hypernetworks/hypernetwork.py | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 63c25de8..4230b8cf 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -177,11 +177,12 @@ class Hypernetwork:
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
if self.optimizer_name is not None:
optimizer_saved_dict['optimizer_name'] = self.optimizer_name
- if self.optimizer_state_dict:
- optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
- torch.save(optimizer_saved_dict, filename + '.optim')
torch.save(state_dict, filename)
+ if self.optimizer_state_dict:
+ optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
+ optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+ torch.save(optimizer_saved_dict, filename + '.optim')
def load(self, filename):
self.filename = filename
@@ -204,7 +205,10 @@ class Hypernetwork:
optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
print(f"Optimizer name is {self.optimizer_name}")
- self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+ if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
+ self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+ else:
+ self.optimizer_state_dict = None
if self.optimizer_state_dict:
print("Loaded existing optimizer from checkpoint")
else:
@@ -229,7 +233,7 @@ def list_hypernetworks(path):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
- res[name] = filename
+ res[name + f"({sd_models.model_hash(filename)})"] = filename
return res
@@ -375,6 +379,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
else:
hypernetwork_dir = None
+ hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
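The guard added here: the sidecar .optim is only trusted when it records the same short hash as the model file it accompanies, so optimizer state left over from an older save is ignored. A sketch of that check, using a plain sha256 of the whole file as a stand-in for the repo's sd_models.model_hash():

    import hashlib
    import os
    import torch

    def file_hash(filename: str) -> str:
        # Generic stand-in for sd_models.model_hash(): a short digest of the file contents.
        h = hashlib.sha256()
        with open(filename, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()[:8]

    def load_optimizer_state(filename: str):
        optim_path = filename + ".optim"
        if not os.path.exists(optim_path):
            return None
        saved = torch.load(optim_path, map_location="cpu")
        # Reuse the optimizer state only if it was saved alongside this exact model file.
        if saved.get("hash") != file_hash(filename):
            return None
        return saved.get("optimizer_state_dict")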
From fd785eab48e3e5b4762d46e0c537e60492cbee20 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Thu, 3 Nov 2022 15:51:33 +0900
Subject: [PATCH 091/147] polish translation content
---
localizations/zh_CN.json | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 94ebe1b6..6ca50fbd 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -146,8 +146,8 @@
"latent nothing": "潜空间数值零",
"Inpaint at full resolution": "全分辨率局部重绘",
"Inpaint at full resolution padding, pixels": "填补像素",
- "Process images in a directory on the same machine where the server is running.": "使用服务器主机上的一个目录,作为输入目录处理图像。",
- "Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一个空的文件夹作为输出目录,而不是使用默认的 output 文件夹作为输出目录。",
+ "Process images in a directory on the same machine where the server is running.": "使用服务器主机上的一个目录,作为输入目录处理图像",
+ "Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一个空的文件夹作为输出目录,而不是使用默认的 output 文件夹作为输出目录",
"Input directory": "输入目录",
"Resize mode": "缩放模式",
"Just resize": "拉伸",
@@ -502,7 +502,7 @@
"keep whatever was there originally": "保留原来的图像,不进行预处理",
"fill it with latent space noise": "用潜空间的噪声填充它",
"fill it with latent space zeroes": "用潜空间的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中。\n请注意,填补像素 仅对 全分辨率局部重绘 生效。",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中。\n请注意,填补像素 仅对 全分辨率局部重绘 生效",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
@@ -523,8 +523,8 @@
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
- "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型(模型后缀为 inpainting.ckpt 的模型)。 决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽原图,这是默认行为。 0.0 表示完全不屏蔽让原图进行图像调节。 较低的值将有助于保持原图的整体构图,但很难遇到较大的变化。",
- "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型(模型后缀为 inpainting.ckpt 的模型)。决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽原图,这是默认行为。0.0 表示完全不屏蔽让原图进行图像调节。较低的值将有助于保持原图的整体构图,但很难遇到较大的变化",
+ "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置项名称的列表,以逗号分隔,该设置会移动到顶部的快速访问栏,而不是默认的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
"Leave empty for auto": "留空时自动生成",
"Autocomplete options": "自动补全选项",
@@ -573,19 +573,19 @@
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
"How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "迭代改进生成的图像多少次;更高的值需要更长的时间;非常低的值会产生不好的结果",
"Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在图像上画一个蒙版,脚本会根据提示重新生成蒙版区域的内容",
- "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常提升图像,将结果分割成瓦片,用img2img改进每个瓦片,将整个图像合并回来",
+ "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常放大图像,将结果分割成图块(tiles),用图生图改进每个图块(tiles),最后将整个图像合并回来",
"Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "创建一个网格,图像将有不同的参数。使用下面的输入来指定哪些参数将由列和行共享",
- "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "运行Python代码。仅限高级用户。必须用 --allow-code 来运行程序,这样才能工作。",
- "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "用逗号隔开一个单词列表,第一个单词将被用作关键词:脚本将在提示中搜索这个单词,并用其他单词替换它。",
- "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "用逗号分开一个单词列表,脚本将用这些单词的每一个可能的顺序制作一个变体的提示。",
- "Reconstruct prompt from existing image and put it into the prompt field.": "从现有的图像中重构提示,并将其放入提示字段。",
- "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "在[prompt_words]选项中设置要使用的最大字数;注意:如果字数太长,可能会超过系统可处理的文件路径的最大长度",
- "Process an image, use it as an input, repeat.": "处理一张图片,将其作为输入,重复。",
- "Insert selected styles into prompt fields": "在提示字段中插入选定的样式",
- "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "将当前的提示语保存为样式。如果你在文本中添加标记{prompt},当你将来使用该样式时,该样式会将其作为你的提示的占位符。",
- "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在制作图像之前从检查点加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为检查点名称。建议与Y轴一起使用以减少切换。",
- "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序的显存(VRAM)分配的峰值量 / GPU的总显存(VRAM)(峰值利用率%)。",
- "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "缩放潜在空间中的图像。另一种方法是,从潜在表示中产生完整的图像,提高其比例,然后再将其移回潜在空间。",
+ "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "运行 Python 代码。仅限老手使用。必须以 --allow-code 来开启程序,才能使其运行",
+ "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "以逗号分割的单词列表,第一个单词将被用作关键词:脚本将在提示词中搜索这个单词,并用其他单词替换它",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "以逗号分割的单词列表,脚本会排列出这些单词的所有排列方式,并加入提示词各生成一次",
+ "Reconstruct prompt from existing image and put it into the prompt field.": "从现有的图像中重构出提示词,并将其放入提示词的输入文本框",
+ "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "设置在[prompt_words]选项中要使用的最大字数;注意:如果字数太长,可能会超过系统可处理的文件路径的最大长度",
+ "Process an image, use it as an input, repeat.": "处理一张图像,将其作为输入,并重复",
+ "Insert selected styles into prompt fields": "在提示词中插入选定的模版风格",
+ "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "将当前的提示词保存为模版风格。如果你在文本中添加{prompt}标记,那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
+ "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成图像之前从模型(ckpt)中加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为模型(ckpt)名称。建议用在Y轴上以减少过程中模型的切换",
+ "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch 分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序分配的显存(VRAM)的峰值量 / GPU 的总显存(VRAM)(峰值利用率%)",
+ "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
"----": "----"
}
From 5bbef814adbe1aa827fb1c312c6e8183dae99d7c Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Thu, 3 Nov 2022 16:47:37 +0800
Subject: [PATCH 092/147] Update localizations/zh_CN.json
Co-authored-by: dtlnor
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 6ca50fbd..7abd793d 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -145,7 +145,7 @@
"latent noise": "潜空间噪声",
"latent nothing": "潜空间数值零",
"Inpaint at full resolution": "全分辨率局部重绘",
- "Inpaint at full resolution padding, pixels": "填补像素",
+ "Inpaint at full resolution padding, pixels": "预留像素",
"Process images in a directory on the same machine where the server is running.": "使用服务器主机上的一个目录,作为输入目录处理图像",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一个空的文件夹作为输出目录,而不是使用默认的 output 文件夹作为输出目录",
"Input directory": "输入目录",
From 8db85d597e2d930e338250052e76c9bc1d68a8ef Mon Sep 17 00:00:00 2001
From: batvbs <60730393+batvbs@users.noreply.github.com>
Date: Thu, 3 Nov 2022 16:47:45 +0800
Subject: [PATCH 093/147] Update localizations/zh_CN.json
Co-authored-by: dtlnor
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 7abd793d..cb70b094 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -502,7 +502,7 @@
"keep whatever was there originally": "保留原来的图像,不进行预处理",
"fill it with latent space noise": "用潜空间的噪声填充它",
"fill it with latent space zeroes": "用潜空间的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做局部重绘,缩小后粘贴到原始图像中。\n请注意,填补像素 仅对 全分辨率局部重绘 生效",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域(包括预留像素长度的缓冲区域)放大到目标分辨率,进行局部重绘。\n然后缩小并粘贴回原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
From 7e5f1562ecb08557c2997bdc4fd26a77e41b0e25 Mon Sep 17 00:00:00 2001
From: byzod
Date: Thu, 3 Nov 2022 18:54:25 +0800
Subject: [PATCH 094/147] Update edit-attention.js
Fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/3904
(Something of a workaround; the proper fix would be to add a unique id or class name to those prompt boxes)
---
javascript/edit-attention.js | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index c0d29a74..b947cbec 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -1,7 +1,6 @@
addEventListener('keydown', (event) => {
let target = event.originalTarget || event.composedPath()[0];
- if (!target.hasAttribute("placeholder")) return;
- if (!target.placeholder.toLowerCase().includes("prompt")) return;
+ if (!target.matches("#toprow textarea.gr-text-input[placeholder]")) return;
if (! (event.metaKey || event.ctrlKey)) return;
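For context, here is a minimal sketch (not part of the patch; the helper names are illustrative only) contrasting the old and new target checks, assuming the webui markup keeps the "#toprow" container and "gr-text-input" class that the new selector references:

    // Old check: any element whose placeholder text mentioned "prompt" passed,
    // so textboxes added by extensions could also trigger the attention hotkeys.
    function oldFilter(target) {
        if (!target.hasAttribute("placeholder")) return false;
        return target.placeholder.toLowerCase().includes("prompt");
    }

    // New check: only placeholder-bearing textareas with the gr-text-input class
    // that sit inside the #toprow container (the main prompt boxes) pass.
    function newFilter(target) {
        return target.matches("#toprow textarea.gr-text-input[placeholder]");
    }

Element.matches() evaluates the selector against the element's position in the document, so the ancestor part ("#toprow") is honoured even though only the event target itself is tested.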
From 70714be4301d9873c7f90eab65c71aa06182ee5b Mon Sep 17 00:00:00 2001
From: batvbs
Date: Thu, 3 Nov 2022 19:28:25 +0800
Subject: [PATCH 095/147] Move content that cannot be localized to the bottom
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 70 ++++++++++++++++++++++------------------
1 file changed, 38 insertions(+), 32 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index cb70b094..6d6720c9 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -70,7 +70,6 @@
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
"Open for Clip Aesthetic!": "打开美术风格 Clip!",
- "▼": "▼",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
@@ -468,10 +467,6 @@
"Install": "安装",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nPrompt",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt",
- "Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
- "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
- "Save style": "储存为模版风格",
- "Apply selected styles to current prompt": "将所选模板风格,应用于当前提示词",
"Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个",
"Stop processing images and return any results accumulated so far.": "停止处理图像,并返回迄今为止累积的任何结果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者",
@@ -533,14 +528,47 @@
"Roll three": "抽三位出来",
"Generate forever": "无限生成",
"Cancel generate forever": "停止无限生成",
+ "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
+ "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
+ "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "迭代改进生成的图像多少次;更高的值需要更长的时间;非常低的值会产生不好的结果",
+ "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在图像上画一个蒙版,脚本会根据提示重新生成蒙版区域的内容",
+ "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常放大图像,将结果分割成图块(tiles),用图生图改进每个图块(tiles),最后将整个图像合并回来",
+ "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "创建一个网格,图像将有不同的参数。使用下面的输入来指定哪些参数将由列和行共享",
+ "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "运行 Python 代码。仅限老手使用。必须以 --allow-code 来开启程序,才能使其运行",
+ "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "以逗号分割的单词列表,第一个单词将被用作关键词:脚本将在提示词中搜索这个单词,并用其他单词替换它",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "以逗号分割的单词列表,脚本会排列出这些单词的所有排列方式,并加入提示词各生成一次",
+ "Reconstruct prompt from existing image and put it into the prompt field.": "从现有的图像中重构出提示词,并将其放入提示词的输入文本框",
+ "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "设置在[prompt_words]选项中要使用的最大字数;注意:如果字数太长,可能会超过系统可处理的文件路径的最大长度",
+ "Process an image, use it as an input, repeat.": "处理一张图像,将其作为输入,并重复",
+ "Insert selected styles into prompt fields": "在提示词中插入选定的模版风格",
+ "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "将当前的提示词保存为模版风格。如果你在文本中添加{prompt}标记,那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
+ "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成图像之前从模型(ckpt)中加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为模型(ckpt)名称。建议用在Y轴上以减少过程中模型的切换",
+ "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch 分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序分配的显存(VRAM)的峰值量 / GPU 的总显存(VRAM)(峰值利用率%)",
+ "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
+ "Upscaler": "放大算法",
+ "Start drawing": "开始绘制",
+
+
+ "----无效----": "----以下内容无法被翻译,Bug----",
+ "Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
+ "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
+ "Save style": "储存为模版风格",
+ "Apply selected styles to current prompt": "将所选模板风格,应用于当前提示词",
+ "Upscaler 1": "放大算法 1",
+ "Upscaler 2": "放大算法 2",
+ "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
+ "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些 Real-ESRGAN 模型显示在网页用户界面。(需要重新启动)",
+ "Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
+ "Face restoration model": "面部修复模型",
+
+
+ "----已移除----": "----以下内容在webui新版本已移除----",
+ "▼": "▼",
"History": "历史记录",
"Show Textbox": "显示文本框",
"File with inputs": "含输入内容的文件",
"Prompts": "提示词",
"Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
- "Upscaler": "放大算法",
- "Upscaler 1": "放大算法 1",
- "Upscaler 2": "放大算法 2",
"Open output directory": "打开输出目录",
"Create aesthetic images embedding": "生成美术风格图集 embedding",
"Split oversized images into two": "将过大的图像分为两份",
@@ -560,32 +588,10 @@
"Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
"Number of pictures displayed on each page": "每页显示的图像数量",
"Number of grids in each row": "每行显示多少格",
-
- "Start drawing": "开始绘制",
- "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
- "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
- "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些 Real-ESRGAN 模型显示在网页用户界面。(需要重新启动)",
- "Face restoration model": "面部修复模型",
- "Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"favorites": "收藏夹(已保存)",
"others": "其他",
"Collect": "收藏(保存)",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
- "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "迭代改进生成的图像多少次;更高的值需要更长的时间;非常低的值会产生不好的结果",
- "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在图像上画一个蒙版,脚本会根据提示重新生成蒙版区域的内容",
- "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常放大图像,将结果分割成图块(tiles),用图生图改进每个图块(tiles),最后将整个图像合并回来",
- "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "创建一个网格,图像将有不同的参数。使用下面的输入来指定哪些参数将由列和行共享",
- "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "运行 Python 代码。仅限老手使用。必须以 --allow-code 来开启程序,才能使其运行",
- "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "以逗号分割的单词列表,第一个单词将被用作关键词:脚本将在提示词中搜索这个单词,并用其他单词替换它",
- "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "以逗号分割的单词列表,脚本会排列出这些单词的所有排列方式,并加入提示词各生成一次",
- "Reconstruct prompt from existing image and put it into the prompt field.": "从现有的图像中重构出提示词,并将其放入提示词的输入文本框",
- "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "设置在[prompt_words]选项中要使用的最大字数;注意:如果字数太长,可能会超过系统可处理的文件路径的最大长度",
- "Process an image, use it as an input, repeat.": "处理一张图像,将其作为输入,并重复",
- "Insert selected styles into prompt fields": "在提示词中插入选定的模版风格",
- "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "将当前的提示词保存为模版风格。如果你在文本中添加{prompt}标记,那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
- "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成图像之前从模型(ckpt)中加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为模型(ckpt)名称。建议用在Y轴上以减少过程中模型的切换",
- "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch 分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序分配的显存(VRAM)的峰值量 / GPU 的总显存(VRAM)(峰值利用率%)",
- "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
- "----": "----"
+
+ "--------": "--------"
}
From 8b6a9035d56fb4ce03f74e133246838f70383663 Mon Sep 17 00:00:00 2001
From: Eugenio Buffo <58123757+EugenioBuffo@users.noreply.github.com>
Date: Thu, 3 Nov 2022 13:26:59 +0100
Subject: [PATCH 096/147] Delete it_IT.json
---
it_IT.json | 1233 ----------------------------------------------------
1 file changed, 1233 deletions(-)
delete mode 100644 it_IT.json
diff --git a/it_IT.json b/it_IT.json
deleted file mode 100644
index a78fd574..00000000
--- a/it_IT.json
+++ /dev/null
@@ -1,1233 +0,0 @@
-{
- "⤡": "⤡",
- "⊞": "⊞",
- "×": "×",
- "❮": "❮",
- "❯": "❯",
- "Loading...": "Caricamento...",
- "view": "mostra ",
- "api": "API",
- "•": " • ",
- "built with gradio": " Sviluppato con Gradio",
- "Stable Diffusion checkpoint": "Stable Diffusion checkpoint",
- "txt2img": "txt2img",
- "img2img": "img2img",
- "Extras": "Extra",
- "PNG Info": "Info PNG",
- "Checkpoint Merger": "Miscelatore di Checkpoint",
- "Train": "Addestramento",
- "Create aesthetic embedding": "Crea incorporamento estetico",
- "Dataset Tag Editor": "Dataset Tag Editor",
- "Deforum": "Deforum",
- "Artists To Study": "Artisti per studiare",
- "Image Browser": "Galleria immagini",
- "Inspiration": "Ispirazione",
- "Settings": "Impostazioni",
- "Extensions": "Estensioni",
- "Prompt": "Prompt",
- "Negative prompt": "Prompt negativo",
- "Run": "Esegui",
- "Skip": "Salta",
- "Interrupt": "Interrompi",
- "Generate": "Genera",
- "Style 1": "Stile 1",
- "Style 2": "Stile 2",
- "Label": "Etichetta",
- "File": "File",
- "Drop File Here": "Trascina il file qui",
- "-": "-",
- "or": "o",
- "Click to Upload": "Clicca per caricare",
- "Image": "Immagine",
- "Check progress": "Controlla i progressi",
- "Check progress (first)": "Controlla i progressi (primo)",
- "Sampling Steps": "Passi di campionamento",
- "Sampling method": "Metodo di campionamento",
- "Euler a": "Euler a",
- "Euler": "Euler",
- "LMS": "LMS",
- "Heun": "Heun",
- "DPM2": "DPM2",
- "DPM2 a": "DPM2 a",
- "DPM fast": "DPM fast",
- "DPM adaptive": "DPM adaptive",
- "LMS Karras": "LMS Karras",
- "DPM2 Karras": "DPM2 Karras",
- "DPM2 a Karras": "DPM2 a Karras",
- "DDIM": "DDIM",
- "PLMS": "PLMS",
- "Width": "Larghezza",
- "Height": "Altezza",
- "Restore faces": "Restaura i volti",
- "Tiling": "Piastrellatura",
- "Highres. fix": "Correzione alta risoluzione",
- "Firstpass width": "Larghezza del primo passaggio",
- "Firstpass height": "Altezza del primo passaggio",
- "Denoising strength": "Forza del Denoising",
- "Batch count": "Lotti di immagini",
- "Batch size": "Immagini per lotto",
- "CFG Scale": "Scala CFG",
- "Seed": "Seme",
- "Extra": "Extra",
- "Variation seed": "Seme della variazione",
- "Variation strength": "Forza della variazione",
- "Resize seed from width": "Ridimensiona il seme dalla larghezza",
- "Resize seed from height": "Ridimensiona il seme dall'altezza",
- "Open for Clip Aesthetic!": "Apri per Gradienti Estetici (CLIP)",
- "▼": "▼",
- "Aesthetic weight": "Estetica - Peso",
- "Aesthetic steps": "Estetica - Passi",
- "Aesthetic learning rate": "Estetica - Tasso di apprendimento",
- "Slerp interpolation": "Interpolazione Slerp",
- "Aesthetic imgs embedding": "Estetica - Incorporamento di immagini",
- "None": "Niente",
- "Aesthetic text for imgs": "Estetica - Testo per le immagini",
- "Slerp angle": "Angolo Slerp",
- "Is negative text": "È un testo negativo",
- "Script": "Script",
- "Random grid": "Generaz. casuale (griglia)",
- "Random": "Generaz. casuale (no griglia)",
- "StylePile": "StylePile",
- "Advanced prompt matrix": "Matrice di prompt avanzata",
- "Advanced Seed Blending": "Miscelazione Semi Avanzata",
- "Alternate Sampler Noise Schedules": "Metodi alternativi di campionamento del rumore",
- "Animator v6": "Animator v6",
- "Asymmetric tiling": "Piastrellatura asimmetrica",
- "Custom code": "Codice personalizzato",
- "Embedding to Shareable PNG": "Incorporamento convertito in PNG condivisibile",
- "Force symmetry": "Forza la simmetria",
- "Prompts interpolation": "Interpola Prompt",
- "Prompt matrix": "Matrice dei prompt",
- "Prompt morph": "Metamorfosi del prompt",
- "Prompts from file or textbox": "Prompt da file o da casella di testo",
- "To Infinity and Beyond": "Verso l'infinito e oltre",
- "Seed travel": "Interpolazione semi",
- "Shift attention": "Sposta l'attenzione",
- "Text to Vector Graphics": "Da testo a grafica vettoriale",
- "Unprompted": "Unprompted",
- "X/Y plot": "Grafico X/Y",
- "X/Y/Z plot": "Grafico X/Y/Z",
- "Dynamic Prompting v0.13.6": "Prompt dinamici v0.13.6",
- "Create inspiration images": "Crea immagini di ispirazione",
- "step1 min/max": "Passi min(o max)",
- "step2 min/max": "Passi max (o min)",
- "step cnt": "Q.tà di Passi",
- "cfg1 min/max": "CFG min (o max)",
- "cfg2 min/max": "CFG max (o min)",
- "cfg cnt": "Q.tà di CFG",
- "Draw legend": "Disegna legenda",
- "Include Separate Images": "Includi immagini separate",
- "Keep -1 for seeds": "Mantieni sempre il seme a -1",
- "x/y change": "Inverti ordine assi X/Y (Passi/CFG)",
- "Loops": "Cicli",
- "Focus on:": "Focus su:",
- "No focus": "Nessun Focus",
- "Portraits (tick Restore faces above for best results)": "Ritratti (selezionare 'Restaura volti' in alto per ottenere i migliori risultati)",
- "Feminine and extra attractive (tick Restore faces above for best results)": "Femminile ed estremamente attraente (selezionare 'Restaura volti' per ottenere i migliori risultati)",
- "Masculine and extra attractive (tick Restore faces above for best results)": "Maschile ed estremamente attraente (selezionare 'Restaura volti' per ottenere i migliori risultati)",
- "Monsters": "Mostri",
- "Robots": "Robot",
- "Retrofuturistic": "Retrofuturistico",
- "Propaganda": "Propaganda",
- "Landscapes": "Paesaggi",
- "Hints": "Suggerimenti",
- "Image type": "Tipo di immagine",
- "Not set": "Non impostato",
- "Photography": "Fotografia",
- "Digital art": "Arte digitale",
- "3D Rendering": "3D Rendering",
- "Painting": "Dipinto",
- "Sketch": "Schizzo",
- "Classic Comics": "Fumetti classici",
- "Modern Comics": "Fumetti moderni",
- "Manga": "Manga",
- "Vector art": "Arte vettoriale",
- "Visual style": "Stile visivo",
- "Realism": "Realismo",
- "Photorealism": "Fotorealismo",
- "Hyperrealism": "Iperrealismo",
- "Surrealism": "Surrealismo",
- "Modern Art": "Arte moderna",
- "Fauvism": "Fauvismo",
- "Futurism": "Futurismo",
- "Painterly": "Pittorico",
- "Pointillisme": "Puntinismo",
- "Abstract": "Astratto",
- "Pop Art": "Pop Art",
- "Impressionist": "Impressionista",
- "Cubism": "Cubismo",
- "Linocut": "Linoleografia",
- "Fantasy": "Fantasia",
- "Colors": "Colori",
- "Chaotic": "Caotico",
- "Primary colors": "Colori primari",
- "Colorful": "Colorato",
- "Vivid": "Vivido",
- "Muted colors": "Colori tenui",
- "Low contrast": "Basso contrasto",
- "Desaturated": "Desaturato",
- "Grayscale": "Scala di grigi",
- "Black and white": "Bianco e nero",
- "Infrared": "Infrarosso",
- "Complementary": "Colori complementari",
- "Non-complementary": "Colori non complementari",
- "View": "Visuale",
- "Tilt shift": "Tilt shift",
- "Wide-angle": "Angolo ampio",
- "Portrait": "Ritratto",
- "Macro": "Macro",
- "Microscopic": "Microscopico",
- "Isometric": "Isometrico",
- "Panorama": "Panorama",
- "Aerial photograph": "Fotografia aerea",
- "Artist focus (not quite finished, not sure it helps)": "Focus sull'artista (non del tutto finito, non è sicuro che aiuti)",
- "B/W Photograpy": "Fotografia B/N",
- "Portrait photo": "Foto ritratto",
- "Usage: a wearing ": "Utilizzo: a wearing ",
- "Seeds": "Semi",
- "Noise Scheduler": "Pianificazione del rumore",
- "Default": "Predefinito",
- "Karras": "Karras",
- "Exponential": "Esponenziale",
- "Variance Preserving": "Conservazione della Varianza",
- "Sigma min": "Sigma min",
- "Sigma max": "Sigma max",
- "Sigma rho (Karras only)": "Sigma rho (Solo Karras)",
- "Beta distribution (VP only)": "Distribuzione Beta (Solo CV)",
- "Beta min (VP only)": "Beta min (Solo CV)",
- "Epsilon (VP only)": "Epsilon (Solo CV)",
- "Running in txt2img mode:": "Esecuzione in modalità txt2img:",
- "Render these video formats:": "Renderizza in questi formati:",
- "GIF": "GIF",
- "MP4": "MP4",
- "WEBM": "WEBM",
- "Animation Parameters": "Parametri animazione",
- "Total Animation Length (s)": "Durata totale dell'animazione (s)",
- "Framerate": "Frequenza dei fotogrammi",
- "Smoothing_Frames": "Fotogrammi da appianare",
- "Add_Noise": "Aggiungi rumore",
- "Noise Strength": "Intensità del rumore",
- "Initial Parameters": "Parametri iniziali",
- "Denoising Strength": "Intensità di riduzione del rumore",
- "Seed_March": "Seed_March",
- "Zoom Factor (scale/s)": "Fattore di ingrandimento (scala/s)",
- "X Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse X (pixel/s)",
- "Y Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse Y (pixel/s)",
- "Rotation (deg/s)": "Rotazione (gradi/s)",
- "Prompt Template, applied to each keyframe below": "Modello di prompt, applicato a ciascun fotogramma chiave qui di seguito",
- "Positive Prompts": "Prompt positivi",
- "Negative Prompts": "Prompt negativi",
- "Props, Stamps": "Immagini Clipart da diffondere (prop), o da applicare in post elaborazione e non essere diffuse (stamp).",
- "Poper_Folder:": "Cartella Immagini Clipart (PNG trasparenti):",
- "Supported Keyframes:": "Fotogrammi chiave supportati:",
- "time_s | source | video, images, img2img | path": "time_s | source | video, images, img2img | path",
- "time_s | prompt | positive_prompts | negative_prompts": "time_s | prompt | positive_prompts | negative_prompts",
- "time_s | template | positive_prompts | negative_prompts": "time_s | template | positive_prompts | negative_prompts",
- "time_s | transform | zoom | x_shift | y_shift | rotation": "time_s | transform | zoom | x_shift | y_shift | rotation",
- "time_s | seed | new_seed_int": "time_s | seed | new_seed_int",
- "time_s | noise | added_noise_strength": "time_s | noise | added_noise_strength",
- "time_s | denoise | denoise_value": "time_s | denoise | denoise_value",
- "time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name": "time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name",
- "time_s | clear_text | textblock_name": "time_s | clear_text | textblock_name",
- "time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation": "time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation",
- "time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation": "time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation",
- "time_s | clear_stamp | stamp_name": "time_s | clear_stamp | stamp_name",
- "time_s | col_set": "time_s | col_set",
- "time_s | col_clear": "time_s | col_clear",
- "time_s | model | model": "time_s | model | model",
- "img2img_mode": "Modalità img2img",
- "Keyframes:": "Fotogrammi chiave:",
- "Tile X": "Piastrella asse X",
- "Tile Y": "Piastrella asse Y",
- "Python code": "Codice Python",
- "Source embedding to convert": "Incorporamento sorgente da convertire",
- "Embedding token": "Token Incorporamento",
- "Output directory": "Cartella di output",
- "Horizontal symmetry": "Simmetria orizzontale",
- "Vertical symmetry": "Simmetria verticale",
- "Alt. symmetry method (blending)": "Metodo di simmetria alternativo (miscelazione)",
- "Apply every n steps": "Applica ogni n passi",
- "Skip last n steps": "Salta gli ultimi n passi",
- "Interpolation prompt": "Prompt di interpolazione",
- "Number of images": "Numero di immagini",
- "Make a gif": "Crea GIF",
- "Duration of images (ms)": "Durata delle immagini (ms)",
- "Put variable parts at start of prompt": "Inserisce le parti variabili all'inizio del prompt",
- "Keyframe Format:": "Formato dei fotogrammi chiave:",
- "Seed | Prompt or just Prompt": "Seme | Prompt o semplicemente Prompt",
- "Prompt list": "Elenco dei prompt",
- "Number of images between keyframes": "Numero di immagini tra fotogrammi chiave",
- "Save results as video": "Salva i risultati come video",
- "Frames per second": "Fotogrammi al secondo",
- "Iterate seed every line": "Iterare il seme per ogni riga",
- "Use same random seed for all lines": "Usa lo stesso seme casuale per tutte le righe",
- "List of prompt inputs": "Elenco di prompt di input",
- "Upload prompt inputs": "Carica un file contenente i prompt di input",
- "n": "Esegui n volte",
- "Destination seed(s) (Comma separated)": "Seme/i di destinazione (separati da virgola)",
- "Only use Random seeds (Unless comparing paths)": "Usa solo semi casuali (a meno che non si confrontino i percorsi)",
- "Number of random seed(s)": "Numero di semi casuali",
- "Compare paths (Separate travels from 1st seed to each destination)": "Confronta percorsi (transizioni separate dal primo seme a ciascuna destinazione)",
- "Steps": "Passi",
- "Loop back to initial seed": "Ritorna al seme iniziale",
- "Bump seed (If > 0 do a Compare Paths but only one image. No video)": "Modula seme (se > 0 mescola il seme iniziale con quelli di destinazione ma solo un'immagine. Nessun video)",
- "Show generated images in ui": "Mostra le immagini generate nell'interfaccia utente",
- "\"Hug the middle\" during interpolation": "\"Hug the middle\" durante l'interpolazione. Rende l'interpolazione un po' più veloce all'inizio e alla fine. A volte può produrre video più fluidi, il più delle volte no.",
- "Allow the default Euler a Sampling method. (Does not produce good results)": "Consenti Euler_a come metodo di campionamento predefinito. (Non produce buoni risultati)",
- "Illustration": "Illustrazione",
- "Logo": "Logo",
- "Drawing": "Disegno",
- "Artistic": "Artistico",
- "Tattoo": "Tatuaggio",
- "Gothic": "Gotico",
- "Anime": "Anime",
- "Cartoon": "Cartoon",
- "Sticker": "Etichetta",
- "Gold Pendant": "Ciondolo in oro",
- "None - prompt only": "Nessuno - solo prompt",
- "Enable Vectorizing": "Abilita vettorizzazione",
- "Output format": "Formato di output",
- "svg": "svg",
- "pdf": "pdf",
- "White is Opaque": "Il bianco è opaco",
- "Cut white margin from input": "Taglia il margine bianco dall'input",
- "Keep temp images": "Conserva le immagini temporanee",
- "Threshold": "Soglia",
- "Transparent PNG": "PNG trasparente",
- "Noise Tolerance": "Tolleranza al rumore",
- "Quantize": "Quantizzare",
- "Dry Run": "Esecuzione a vuoto (Debug)",
- "NEW!": "NUOVO!",
- "Premium Fantasy Card Template": "Premium Fantasy Card Template",
- "is now available.": "è ora disponibile.",
- "Generate a wide variety of creatures and characters in the style of a fantasy card game. Perfect for heroes, animals, monsters, and even crazy hybrids.": "Genera un'ampia varietà di creature e personaggi nello stile di un gioco di carte fantasy. Perfetto per eroi, animali, mostri e persino ibridi incredibili.",
- "Learn More ➜": "Per saperne di più ➜",
- "Purchases help fund the continued development of Unprompted. Thank you for your support!": "Gli acquisti aiutano a finanziare il continuo sviluppo di Unprompted. Grazie per il vostro sostegno!",
- "X type": "Parametro asse X",
- "Nothing": "Niente",
- "Var. seed": "Seme della variazione",
- "Var. strength": "Forza della variazione",
- "Prompt S/R": "Cerca e Sostituisci nel Prompt",
- "Prompt order": "In ordine di prompt",
- "Sampler": "Campionatore",
- "Checkpoint name": "Nome del checkpoint",
- "Hypernetwork": "Iperrete",
- "Hypernet str.": "Forza della Iperrete",
- "Sigma Churn": "Sigma Churn",
- "Sigma noise": "Sigma noise",
- "Eta": "ETA",
- "Clip skip": "Salta CLIP",
- "Denoising": "Riduzione del rumore",
- "Cond. Image Mask Weight": "Peso maschera immagine condizionale",
- "X values": "Valori per X",
- "Y type": "Parametro asse Y",
- "Y values": "Valori per Y",
- "Z type": "Parametro asse Z",
- "Z values": "Valori per Z",
- "Combinatorial generation": "Generazione combinatoria",
- "Combinatorial batches": "Lotti combinatori",
- "Magic prompt": "Prompt magico",
- "Fixed seed": "Seme fisso",
- "Combinations": "Combinazioni",
- "Choose a number of terms from a list, in this case we choose two artists": "Scegli un numero di termini da un elenco, in questo caso scegliamo due artisti",
- "{{2$artist1|artist2|artist3}}": "{{2$artist1|artist2|artist3}}",
- "If $ is not provided, then 1$ is assumed.\n\n A range can be provided:": "Se $ non viene fornito, si presume 1$.\n\n È possibile fornire un intervallo di valori:",
- "{{1-3$artist1|artist2|artist3}}": "{{1-3$artist1|artist2|artist3}}",
- "In this case, a random number of artists between 1 and 3 is chosen.": "In questo caso viene scelto un numero casuale di artisti compreso tra 1 e 3.",
- "Wildcards": "Termini jolly",
- "If the groups wont drop down click": "Se i gruppi non vengono visualizzati, clicca",
- "here": "qui",
- "to fix the issue.": "per correggere il problema.",
- "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards": "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards",
- "You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in scripts/wildcards.": "Puoi aggiungere termini jolly creando un file di testo con un termine per riga e nominandolo, per esempio, mywildcards.txt. Inseriscilo in scripts/wildcards.",
- "__/mywildcards__": "__/mywildcards__",
- "will then become available.": "diverrà quindi disponibile.",
- "Artist or styles name list. '.txt' files with one name per line": "Elenco nomi di artisti o stili. File '.txt' con un nome per riga",
- "Prompt words before artist or style name": "Parole chiave prima del nome dell'artista o dello stile",
- "Prompt words after artist or style name": "Parole chiave dopo il nome dell'artista o dello stile",
- "Negative Prompt": "Prompt negativo",
- "Save": "Salva",
- "Send to img2img": "Invia a img2img",
- "Send to inpaint": "Invia a Inpaint",
- "Send to extras": "Invia a Extra",
- "Make Zip when Save?": "Crea un file ZIP quando si usa 'Salva'",
- "Textbox": "Casella di testo",
- "Interrogate\nCLIP": "Interroga\nCLIP",
- "Interrogate\nDeepBooru": "Interroga\nDeepBooru",
- "Inpaint": "Inpaint",
- "Batch img2img": "img2img in lotti",
- "Image for img2img": "Immagine per img2img",
- "Drop Image Here": "Trascina l'immagine qui",
- "Image for inpainting with mask": "Immagine per inpainting con maschera",
- "Mask": "Maschera",
- "Mask blur": "Sfocatura maschera",
- "Mask mode": "Modalità maschera",
- "Draw mask": "Disegna maschera",
- "Upload mask": "Carica maschera",
- "Masking mode": "Modalità mascheratura",
- "Inpaint masked": "Inpaint mascherato",
- "Inpaint not masked": "Inpaint non mascherato",
- "Masked content": "Contenuto mascherato",
- "fill": "riempi",
- "original": "originale",
- "latent noise": "rumore nello spazio latente",
- "latent nothing": "nulla nello spazio latente",
- "Inpaint at full resolution": "Inpaint alla massima risoluzione",
- "Inpaint at full resolution padding, pixels": "Inpaint con riempimento a piena risoluzione, pixel",
- "Process images in a directory on the same machine where the server is running.": "Elabora le immagini in una cartella sulla stessa macchina su cui è in esecuzione il server.",
- "Use an empty output directory to save pictures normally instead of writing to the output directory.": "Usa una cartella di output vuota per salvare normalmente le immagini invece di scrivere nella cartella di output.",
- "Input directory": "Cartella di Input",
- "Resize mode": "Modalità di ridimensionamento",
- "Just resize": "Ridimensiona solamente",
- "Crop and resize": "Ritaglia e ridimensiona",
- "Resize and fill": "Ridimensiona e riempie",
- "Advanced loopback": "Advanced loopback",
- "External Image Masking": "Immagine esterna per la mascheratura",
- "img2img alternative test": "Test alternativo per img2img",
- "img2tiles": "img2tiles",
- "Interpolate": "Interpola immagini",
- "Loopback": "Rielaborazione ricorsiva",
- "Loopback and Superimpose": "Rielabora ricorsivamente e sovraimponi",
- "Alpha Canvas": "Alpha Canvas",
- "Outpainting mk2": "Outpainting mk2",
- "Poor man's outpainting": "Poor man's outpainting",
- "SD upscale": "Ampliamento SD",
- "txt2mask v0.1.1": "txt2mask v0.1.1",
- "[C] Video to video": "[C] Video to video",
- "Videos": "Filmati",
- "Deforum-webui (use tab extension instead!)": "Deforum-webui (usa piuttosto la scheda Deforum delle estensioni!)",
- "Use first image colors (custom color correction)": "Usa i colori della prima immagine (correzione del colore personalizzata)",
- "Denoising strength change factor (overridden if proportional used)": "Fattore di variazione dell'intensità di riduzione del rumore (sovrascritto se si usa proporzionale)",
- "Zoom level": "Livello di Zoom",
- "Direction X": "Direzione X",
- "Direction Y": "Direzione Y",
- "Denoising strength start": "Intensità di riduzione del rumore - Inizio",
- "Denoising strength end": "Intensità di riduzione del rumore - Fine",
- "Denoising strength proportional change starting value": "Intensità di riduzione del rumore - Valore iniziale della variazione proporzionale",
- "Denoising strength proportional change ending value (0.1 = disabled)": "Intensità di riduzione del rumore - Valore finale della variazione proporzionale (0.1 = disabilitato)",
- "Saturation enhancement per image": "Miglioramento della saturazione per ciascuna immagine",
- "Use sine denoising strength variation": "Utilizzare la variazione sinusoidale dell'intensità di riduzione del rumore",
- "Phase difference": "Differenza di Fase",
- "Denoising strength exponentiation": "Esponenziazione dell'intensità di riduzione del rumore",
- "Use sine zoom variation": "Usa la variazione sinusoidale dello zoom",
- "Zoom exponentiation": "Esponeniazione dello Zoom",
- "Use multiple prompts": "Usa prompt multipli",
- "Same seed per prompt": "Stesso seme per ogni prompt",
- "Same seed for everything": "Stesso seme per tutto",
- "Original init image for everything": "Immagine originale di inizializzazione per tutto",
- "Multiple prompts : 1 line positive, 1 line negative, leave a blank line for no negative": "Prompt multipli: 1 riga positivo, 1 riga negativo, lasciare una riga vuota per nessun negativo",
- "Running in img2img mode:": "Esecuzione in modalità img2img:",
- "Masking preview size": "Dimensione dell'anteprima della mascheratura",
- "Draw new mask on every run": "Disegna una nuova maschera ad ogni esecuzione",
- "Process non-contigious masks separately": "Elaborare le maschere non contigue separatamente",
- "should be 2 or lower.": "dovrebbe essere 2 o inferiore.",
- "Override `Sampling method` to Euler?(this method is built for it)": "Sovrascrivi il 'Metodo di campionamento' con Eulero? (questo metodo è stato creato per questo)",
- "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Sovrascrivi `prompt` con lo stesso valore del `prompt originale`? (e `prompt negativo`)",
- "Original prompt": "Prompt originale",
- "Original negative prompt": "Prompt negativo originale",
- "Override `Sampling Steps` to the same val due as `Decode steps`?": "Sovrascrivere 'Passi di campionamento' allo stesso valore di 'Passi di decodifica'?",
- "Decode steps": "Passi di decodifica",
- "Override `Denoising strength` to 1?": "Sostituisci 'Forza di denoising' a 1?",
- "Decode CFG scale": "Scala CFG di decodifica",
- "Randomness": "Casualità",
- "Sigma adjustment for finding noise for image": "Regolazione Sigma per trovare il rumore per l'immagine",
- "Tile size": "Dimensione piastrella",
- "Tile overlap": "Sovrapposizione piastrella",
- "New seed for each tile": "Nuovo seme per ogni piastrella",
- "alternate img2img imgage": "Immagine alternativa per img2img",
- "interpolation values": "Valori di interpolazione",
- "Refinement loops": "Cicli di affinamento",
- "Loopback alpha": "Trasparenza rielaborazione ricorsiva",
- "Border alpha": "Trasparenza del bordo",
- "Blending strides": "Passi di fusione",
- "Reuse Seed": "Riusa il seme",
- "One grid": "Singola griglia",
- "Interpolate VarSeed": "Interpola il seme della variazione",
- "Paste on mask": "Incolla sulla maschera",
- "Inpaint all": "Inpaint tutto",
- "Interpolate in latent": "Interpola nello spazio latente",
- "Denoising strength change factor": "Fattore di variazione dell'intensità di denoising",
- "Superimpose alpha": "Sovrapporre Alpha",
- "Show extra settings": "Mostra impostazioni aggiuntive",
- "Reuse seed": "Riusa il seme",
- "CFG decay factor": "Fattore di decadimento CFG",
- "CFG target": "CFG di destinazione",
- "Show/Hide AlphaCanvas": "Mostra/Nascondi AlphaCanvas",
- "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Impostazioni consigliate: Passi di campionamento: 80-100, Campionatore: Euler a, Intensità denoising: 0.8",
- "Pixels to expand": "Pixel da espandere",
- "Outpainting direction": "Direzione di Outpainting",
- "left": "sinistra",
- "right": "destra",
- "up": "sopra",
- "down": "sotto",
- "Fall-off exponent (lower=higher detail)": "Esponente di decremento (più basso=maggior dettaglio)",
- "Color variation": "Variazione di colore",
- "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Aumenterà l'immagine al doppio delle dimensioni; utilizzare i cursori di larghezza e altezza per impostare la dimensione della piastrella",
- "Upscaler": "Ampliamento immagine",
- "Lanczos": "Lanczos",
- "Nearest": "Nearest",
- "LDSR": "LDSR",
- "BSRGAN": "BSRGAN",
- "ScuNET GAN": "ScuNET GAN",
- "ScuNET PSNR": "ScuNET PSNR",
- "SwinIR 4x": "SwinIR 4x",
- "Mask prompt": "Prompt maschera",
- "Negative mask prompt": "Prompt maschera negativa",
- "Mask precision": "Precisione della maschera",
- "Mask padding": "Estendi i bordi della maschera",
- "Brush mask mode": "Modalità pennello maschera",
- "discard": "Scarta",
- "add": "Aggiungi",
- "subtract": "Sottrai",
- "Show mask in output?": "Mostra maschera in uscita?",
- "If you like my work, please consider showing your support on": "Se ti piace il mio lavoro, per favore considera di mostrare il tuo supporto su ",
- "Patreon": "Patreon",
- "Input file path": "Percorso file di input",
- "CRF (quality, less is better, x264 param)": "CRF (qualità, meno è meglio, x264 param)",
- "FPS": "FPS",
- "Seed step size": "Ampiezza del gradiente del seme",
- "Seed max distance": "Distanza massima del seme",
- "Start time": "Orario di inizio",
- "End time": "Orario di fine",
- "End Prompt Blend Trigger Percent": "Percentuale di innesco del mix col prompt finale",
- "Prompt end": "Prompt finale",
- "Smooth video": "Rendi il filmato fluido",
- "Seconds": "Secondi",
- "Zoom": "Zoom",
- "Rotate": "Ruota",
- "Degrees": "Gradi",
- "Is the Image Tiled?": "L'immagine è piastrellata?",
- "TranslateX": "Traslazione X",
- "Left": "Sinistra",
- "PercentX": "Percentuale X",
- "TranslateY": "Traslazione Y",
- "Up": "Sopra",
- "PercentY": "Percentuale Y",
- "Show generated pictures in ui": "Mostra le immagini generate nell'interfaccia utente",
- "Deforum v0.5-webui-beta": "Deforum v0.5-webui-beta",
- "This script is deprecated. Please use the full Deforum extension instead.": "Questo script è obsoleto. Utilizzare invece l'estensione Deforum completa.",
- "Update instructions:": "Istruzioni per l'aggiornamento:",
- "github.com/deforum-art/deforum-for-automatic1111-webui/blob/automatic1111-webui/README.md": "github.com/deforum-art/deforum-for-automatic1111-webui/blob/automatic1111-webui/README.md",
- "discord.gg/deforum": "discord.gg/deforum",
- "Single Image": "Singola immagine",
- "Batch Process": "Elaborare a lotti",
- "Batch from Directory": "Lotto da cartella",
- "Source": "Sorgente",
- "Show result images": "Mostra le immagini dei risultati",
- "Scale by": "Scala di",
- "Scale to": "Scala a",
- "Resize": "Ridimensiona",
- "Crop to fit": "Ritaglia per adattare",
- "Upscaler 2 visibility": "Visibilità Ampliamento immagine 2",
- "GFPGAN visibility": "Visibilità GFPGAN",
- "CodeFormer visibility": "Visibilità CodeFormer",
- "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso di CodeFormer (0 = effetto massimo, 1 = effetto minimo)",
- "Upscale Before Restoring Faces": "Amplia prima di restaurare i volti",
- "Send to txt2img": "Invia a txt2img",
- "A merger of the two checkpoints will be generated in your": "I due checkpoint verranno fusi nella cartella dei",
- "checkpoint": "checkpoint",
- "directory.": ".",
- "Primary model (A)": "Modello Primario (A)",
- "Secondary model (B)": "Modello Secondario (B)",
- "Tertiary model (C)": "Modello Terziario (C)",
- "Custom Name (Optional)": "Nome personalizzato (facoltativo)",
- "Multiplier (M) - set to 0 to get model A": "Moltiplicatore (M): impostare a 0 per ottenere il modello A",
- "Interpolation Method": "Metodo di interpolazione",
- "Weighted sum": "Somma pesata",
- "Add difference": "Aggiungi differenza",
- "Save as float16": "Salva come float16",
- "See": "Consulta la ",
- "wiki": "wiki",
- "for detailed explanation.": " per una spiegazione dettagliata.",
- "Create embedding": "Crea Incorporamento",
- "Create hypernetwork": "Crea Iperrete",
- "Preprocess images": "Preprocessa le immagini",
- "Name": "Nome",
- "Initialization text": "Testo di inizializzazione",
- "Number of vectors per token": "Numero di vettori per token",
- "Overwrite Old Embedding": "Sovrascrivi il vecchio incorporamento",
- "Modules": "Moduli",
- "Enter hypernetwork layer structure": "Immettere la struttura del livello della Iperrete",
- "Select activation function of hypernetwork": "Selezionare la funzione di attivazione della Iperrete",
- "linear": "lineare",
- "relu": "relu",
- "leakyrelu": "leakyrelu",
- "elu": "elu",
- "swish": "swish",
- "tanh": "tanh",
- "sigmoid": "sigmoid",
- "celu": "celu",
- "gelu": "gelu",
- "glu": "glu",
- "hardshrink": "hardshrink",
- "hardsigmoid": "hardsigmoid",
- "hardtanh": "hardtanh",
- "logsigmoid": "logsigmoid",
- "logsoftmax": "logsoftmax",
- "mish": "mish",
- "prelu": "prelu",
- "rrelu": "rrelu",
- "relu6": "relu6",
- "selu": "selu",
- "silu": "silu",
- "softmax": "softmax",
- "softmax2d": "softmax2d",
- "softmin": "softmin",
- "softplus": "softplus",
- "softshrink": "softshrink",
- "softsign": "softsign",
- "tanhshrink": "tanhshrink",
- "threshold": "soglia",
- "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleziona inizializzazione dei pesi dei livelli. relu-like - Kaiming, Si consiglia sigmoid-like - Xavier",
- "Normal": "Normale",
- "KaimingUniform": "KaimingUniform",
- "KaimingNormal": "KaimingNormal",
- "XavierUniform": "XavierUniform",
- "XavierNormal": "XavierNormal",
- "Add layer normalization": "Aggiunge la normalizzazione del livello",
- "Use dropout": "Usa Dropout",
- "Overwrite Old Hypernetwork": "Sovrascrive la vecchia Iperrete",
- "Source directory": "Cartella sorgente",
- "Destination directory": "Cartella di destinazione",
- "Existing Caption txt Action": "Azione sul testo della didascalia esistente",
- "ignore": "ignora",
- "copy": "copia",
- "prepend": "anteporre",
- "append": "appendere",
- "Create flipped copies": "Crea copie specchiate",
- "Split oversized images": "Dividi immagini di grandi dimensioni",
- "Auto focal point crop": "Ritaglio automatico al punto focale",
- "Use BLIP for caption": "Usa BLIP per la didascalia",
- "Use deepbooru for caption": "Usa deepbooru per la didascalia",
- "Split image threshold": "Soglia di divisione dell'immagine",
- "Split image overlap ratio": "Rapporto di sovrapposizione dell'immagine",
- "Focal point face weight": "Peso della faccia del punto focale",
- "Focal point entropy weight": "Peso dell'entropia del punto focale",
- "Focal point edges weight": "Peso dei bordi del punto focale",
- "Create debug image": "Crea immagine di debug",
- "Preprocess": "Preprocessa",
- "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Addestra un Incorporamento o Iperrete; è necessario specificare una directory con un set di immagini con rapporto 1:1",
- "[wiki]": "[wiki]",
- "Embedding": "Incorporamento",
- "Embedding Learning rate": "Tasso di apprendimento Incorporamento",
- "Hypernetwork Learning rate": "Tasso di apprendimento Iperrete",
- "Dataset directory": "Cartella del Dataset",
- "Log directory": "Cartella del registro",
- "Prompt template file": "File modello prompt",
- "Max steps": "Passi massimi",
- "Save an image to log directory every N steps, 0 to disable": "Salva un'immagine nella cartella del registro ogni N passaggi, 0 per disabilitare",
- "Save a copy of embedding to log directory every N steps, 0 to disable": "Salva una copia dell'incorporamento nella cartella del registro ogni N passaggi, 0 per disabilitare",
- "Save images with embedding in PNG chunks": "Salva le immagini con l'incorporamento in blocchi PNG",
- "Read parameters (prompt, etc...) from txt2img tab when making previews": "Legge i parametri (prompt, ecc...) dalla scheda txt2img durante la creazione delle anteprime",
- "Train Hypernetwork": "Addestra Iperrete",
- "Train Embedding": "Addestra Incorporamento",
- "Create an aesthetic embedding out of any number of images": "Crea un'incorporamento estetico da qualsiasi numero di immagini",
- "Create images embedding": "Crea incorporamento di immagini",
- "-1": "-1",
- "This extension works well with text captions in comma-separated style (such as the tags generated by DeepBooru interrogator).": "Questa estensione funziona bene con i sottotitoli di testo in stile separato da virgole (come i tag generati dall'interrogatore DeepBooru).",
- "Save all changes": "Salva tutte le modifiche",
- "Backup original text file (original file will be renamed like filename.000, .001, .002, ...)": "Backup del file di testo originale (il file originale verrà rinominato come nomefile.000, .001, .002, ...)",
- "Note:": "Note:",
- "New text file will be created if you are using filename as captions.": "Verrà creato un nuovo file di testo se si utilizza il nome del file come didascalia.",
- "Results": "Risultati",
- "Load": "Carica",
- "Dataset Images": "Immagini del Dataset",
- "Filter and Edit Tags": "Filtra e modifica i tag",
- "Edit Caption of Selected Image": "Modifica la didascalia dell'immagine selezionata",
- "Search tags / Filter images by tags": "Cerca tag / Filtra le immagini per tag",
- "Search Tags": "Cerca tag",
- "Clear all filters": "Rimuovi tutti i filtri",
- "Sort by": "Ordina per",
- "Alphabetical Order": "Ordine alfabetico",
- "Frequency": "Frequenza",
- "Sort Order": "Ordinamento",
- "Ascending": "Ascendente",
- "Descending": "Discendente",
- "Filter Images by Tags": "Filtra le immagini per tag",
- "Edit tags in filtered images": "Modifica i tag nelle immagini filtrate",
- "Selected Tags": "Tag selezionati",
- "Edit Tags": "Modificare i tag",
- "Apply changes to filtered images": "Applica le modifiche alle immagini filtrate",
- "Append additional tags to the beginning": "Aggiungi tag addizionali all'inizio",
- "1. The selected tags are displayed in comma separated style.": "1. I tag selezionati vengono visualizzati in uno stile separato da virgole.",
- "2. When changes are applied, all tags in each displayed images are replaced.": "2. Quando vengono applicate le modifiche, tutti i tag in ciascuna immagine visualizzata vengono sostituiti.",
- "3. If you change some tags into blank, they will be erased.": "3. Se modifichi alcuni tag con uno spazio vuoto, verranno cancellati.",
- "4. If you add some tags to the end, they will be appended to the end/beginning of the text file.": "4. Se aggiungi dei tag alla fine, questi verranno aggiunti alla fine/inizio del file di testo.",
- "5. Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "5. Le modifiche non vengono applicate ai file di testo finché non viene premuto il pulsante \"Salva tutte le modifiche\"..",
- "ex A.": "esempio A.",
- "Original Text = \"A, A, B, C\" Selected Tags = \"B, A\" Edit Tags = \"X, Y\"": "Testo originale = \"A, A, B, C\" Tag selezionati = \"B, A\" Modifica tag = \"X, Y\"",
- "Result = \"Y, Y, X, C\" (B->X, A->Y)": "Risultato = \"Y, Y, X, C\" (B->X, A->Y)",
- "ex B.": "esempio B.",
- "Original Text = \"A, B, C\" Selected Tags = \"(nothing)\" Edit Tags = \"X, Y\"": "Testo originale = \"A, B, C\" Tag selezionati = \"(nothing)\" Modifica tag = \"X, Y\"",
- "Result = \"A, B, C, X, Y\" (add X and Y to the end (default))": "Risultato = \"A, B, C, X, Y\" (aggiunge X e Y alla fine (predefinito))",
- "Result = \"X, Y, A, B, C\" (add X and Y to the beginning (\"Append additional tags to the beginning\" checked))": "Risultato = \"X, Y, A, B, C\" (aggiunge X e Y all'inizio (\"Aggiungi tag addizionali all'inizio\" selezionato))",
- "ex C.": "esempio C.",
- "Original Text = \"A, B, C, D, E\" Selected Tags = \"A, B, D\" Edit Tags = \", X, \"": "Testo originale = \"A, B, C, D, E\" Tag selezionati = \"A, B, D\" Modifica tag = \", X, \"",
- "Result = \"X, C, E\" (A->\"\", B->X, D->\"\")": "Risultato = \"X, C, E\" (A->\"\", B->X, D->\"\")",
- "Caption of Selected Image": "Didascalia dell'immagine selezionata",
- "Copy caption": "Copia didascalia",
- "Edit Caption": "Modifica didascalia",
- "Apply changes to selected image": "Applica le modifiche all'immagine selezionata",
- "Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "Le modifiche non vengono applicate ai file di testo finché non viene premuto il pulsante \"Salva tutte le modifiche\".",
- "Info and links": "Info e link",
- "Made by deforum.github.io, port for AUTOMATIC1111's webui maintained by kabachuha": "Realizzato da deforum.github.io, port per l'interfaccia web di AUTOMATIC1111 manutenuto da kabachuha",
- "Original Deforum Github repo github.com/deforum/stable-diffusion": "Repository Github originale di Deforum github.com/deforum/stable-diffusion",
- "This fork for auto1111's webui github.com/deforum-art/deforum-for-automatic1111-webui": "Questo fork è per l'interfaccia web di AUTOMATIC1111 github.com/deforum-art/deforum-for-automatic1111-webui",
- "Join the official Deforum Discord discord.gg/deforum to share your creations and suggestions": "Unisciti al canale Discord ufficiale di Deforum discord.gg/deforum per condividere le tue creazioni e suggerimenti",
- "User guide for v0.5 docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit": "Manuale d'uso per la versione 0.5 docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit",
- "Math keyframing explanation docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing": "Spiegazione della matematica dei fotogrammi chiave docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing",
- "Keyframes": "Fotogrammi chiave",
- "Prompts": "Prompt",
- "Init": "Inizializzare",
- "Video output": "Uscita video",
- "Run settings": "Esegui le impostazioni",
- "Import settings from file": "Importa impostazioni da file",
- "Override settings": "Sostituisci le impostazioni",
- "Custom settings file": "File delle impostazioni personalizzate",
- "Sampling settings": "Impostazioni di campionamento",
- "override_these_with_webui": "Sovrascrivi con Web UI",
- "W": "L",
- "H": "A",
- "seed": "Seme",
- "sampler": "Campionatore",
- "Enable extras": "Abilita 'Extra'",
- "subseed": "Sub seme",
- "subseed_strength": "Intensità subseme",
- "steps": "Passi",
- "ddim_eta": "ETA DDIM",
- "n_batch": "Numero lotto",
- "make_grid": "Crea griglia",
- "grid_rows": "Righe griglia",
- "save_settings": "Salva impostazioni",
- "save_samples": "Salva i campioni",
- "display_samples": "Mostra i campioni",
- "save_sample_per_step": "Salva campioni per passo",
- "show_sample_per_step": "Mostra campioni per passo",
- "Batch settings": "Impostazioni lotto",
- "batch_name": "Nome del lotto",
- "filename_format": "Formato nome del file",
- "seed_behavior": "Comportamento seme",
- "iter": "Iterativo",
- "fixed": "Fisso",
- "random": "Casuale",
- "schedule": "Pianificato",
- "Animation settings": "Impostazioni animazione",
- "animation_mode": "Modalità animazione",
- "2D": "2D",
- "3D": "3D",
- "Video Input": "Ingresso video",
- "max_frames": "Fotogrammi max",
- "border": "Bordo",
- "replicate": "Replica",
- "wrap": "Impacchetta",
- "Motion parameters:": "Parametri di movimento:",
- "2D and 3D settings": "Impostazioni 2D e 3D",
- "angle": "Angolo",
- "zoom": "Zoom",
- "translation_x": "Traslazione X",
- "translation_y": "Traslazione Y",
- "3D settings": "Impostazioni 3D",
- "translation_z": "Traslazione Z",
- "rotation_3d_x": "Rotazione 3D X",
- "rotation_3d_y": "Rotazione 3D Y",
- "rotation_3d_z": "Rotazione 3D Z",
- "Prespective flip — Low VRAM pseudo-3D mode:": "Inversione prospettica: modalità pseudo-3D a bassa VRAM:",
- "flip_2d_perspective": "Inverti prospettiva 2D",
- "perspective_flip_theta": "Inverti prospettiva theta",
- "perspective_flip_phi": "Inverti prospettiva phi",
- "perspective_flip_gamma": "Inverti prospettiva gamma",
- "perspective_flip_fv": "Inverti prospettiva fv",
- "Generation settings:": "Impostazioni di generazione:",
- "noise_schedule": "Pianificazione del rumore",
- "strength_schedule": "Intensità della pianificazione",
- "contrast_schedule": "Contrasto della pianificazione",
- "cfg_scale_schedule": "Pianificazione della scala CFG",
- "3D Fov settings:": "Impostazioni del campo visivo 3D:",
- "fov_schedule": "Pianificazione del campo visivo",
- "near_schedule": "Pianificazione da vicino",
- "far_schedule": "Pianificazione da lontano",
- "To enable seed schedule select seed behavior — 'schedule'": "Per abilitare la pianificazione del seme, seleziona il comportamento del seme — 'pianifica'",
- "seed_schedule": "Pianificazione del seme",
- "Coherence:": "Coerenza:",
- "color_coherence": "Coerenza del colore",
- "Match Frame 0 HSV": "Uguaglia HSV del fotogramma 0",
- "Match Frame 0 LAB": "Uguaglia LAB del fotogramma 0",
- "Match Frame 0 RGB": "Uguaglia RGB del fotogramma 0",
- "diffusion_cadence": "Cadenza di diffusione",
- "3D Depth Warping:": "Deformazione della profondità 3D:",
- "use_depth_warping": "Usa la deformazione della profondità",
- "midas_weight": "Peso MIDAS",
- "near_plane": "Piano vicino",
- "far_plane": "Piano lontano",
- "fov": "Campo visivo",
- "padding_mode": "Modalità di riempimento",
- "reflection": "Rifletti",
- "zeros": "Zeri",
- "sampling_mode": "Modalità di campionamento",
- "bicubic": "bicubic",
- "bilinear": "bilinear",
- "nearest": "nearest",
- "save_depth_maps": "Salva le mappe di profondità",
- "`animation_mode: None` batches on list of *prompts*. (Batch mode disabled atm, only animation_prompts are working)": "`modalità animazione: Nessuno` si inserisce nell'elenco di *prompt*. (Modalità batch disabilitata atm, funzionano solo i prompt di animazione)",
- "*Important change from vanilla Deforum!*": "*Importante cambiamento rispetto alla versione originale di Deforum!*",
- "This script uses the built-in webui weighting settings.": "Questo script utilizza le impostazioni di pesatura webui integrate.",
- "So if you want to use math functions as prompt weights,": "Quindi, se vuoi usare le funzioni matematiche come pesi dei prompt,",
- "keep the values above zero in both parts": "mantenere i valori sopra lo zero in entrambe le parti",
- "Negative prompt part can be specified with --neg": "La parte negativa del prompt può essere specificata con --neg",
- "batch_prompts (disabled atm)": "Prompt in lotti (al momento è disabilitato)",
- "animation_prompts": "Prompt animazione",
- "Init settings": "Impostazioni iniziali",
- "use_init": "Usa le impostazioni iniziali",
- "from_img2img_instead_of_link": "da img2img invece che da link",
- "strength_0_no_init": "Intensità 0 nessuna inizializzazione",
- "strength": "Intensità",
- "init_image": "Immagine di inizializzazione",
- "use_mask": "Usa maschera",
- "use_alpha_as_mask": "Usa alpha come maschera",
- "invert_mask": "Inverti la maschera",
- "overlay_mask": "Sovrapponi la maschera",
- "mask_file": "File della maschera",
- "mask_brightness_adjust": "Regola la luminosità della maschera",
- "mask_overlay_blur": "Sfocatura della sovrapposizione della maschera",
- "Video Input:": "Ingresso video:",
- "video_init_path": "Percorso del video di inizializzazione",
- "extract_nth_frame": "Estrai ogni ennesimo fotogramma",
- "overwrite_extracted_frames": "Sovrascrivi i fotogrammi estratti",
- "use_mask_video": "Usa maschera video",
- "video_mask_path": "Percorso della maschera video",
- "Interpolation (turned off atm)": "Interpolazione (attualmente spento)",
- "interpolate_key_frames": "Interpola fotogrammi chiave",
- "interpolate_x_frames": "Interpola x fotogrammi",
- "Resume animation:": "Riprendi l'animazione:",
- "resume_from_timestring": "Riprendi da stringa temporale",
- "resume_timestring": "Stringa temporale",
- "Video output settings": "Impostazioni uscita video",
- "skip_video_for_run_all": "Salta il video per eseguire tutto",
- "fps": "FPS",
- "output_format": "Formato di uscita",
- "PIL gif": "PIL GIF",
- "FFMPEG mp4": "FFMPEG MP4",
- "ffmpeg_location": "Percorso ffmpeg",
- "add_soundtrack": "Aggiungi colonna sonora",
- "soundtrack_path": "Percorso colonna sonora",
- "use_manual_settings": "Usa impostazioni manuali",
- "render_steps": "Passi di renderizzazione",
- "max_video_frames": "Numero max fotogrammi video",
- "path_name_modifier": "Modificatore del nome del percorso",
- "x0_pred": "x0_pred",
- "x": "x",
- "image_path": "Percorso immagine",
- "mp4_path": "Percorso MP4",
- "Click here after the generation to show the video": "Clicca qui dopo la generazione per mostrare il video",
- "NOTE: If the 'Generate' button doesn't work, go in Settings and click 'Restart Gradio and Refresh...'.": "NOTA: se il pulsante 'Genera' non funziona, vai in Impostazioni e fai clic su 'Riavvia Gradio e Aggiorna...'.",
- "Save Settings": "Salva le impostazioni",
- "Load Settings": "Carica le impostazioni",
- "Path relative to the webui folder." : "Percorso relativo alla cartella webui.",
- "Save Video Settings": "Salva impostazioni video",
- "Load Video Settings": "Carica impostazioni video",
- "dog": "cane",
- "house": "casa",
- "portrait": "ritratto",
- "spaceship": "nave spaziale",
- "anime": "anime",
- "cartoon": "cartoon",
- "digipa-high-impact": "digipa-high-impact",
- "digipa-med-impact": "digipa-med-impact",
- "digipa-low-impact": "digipa-low-impact",
- "fareast": "estremo oriente",
- "fineart": "fineart",
- "scribbles": "scarabocchi",
- "special": "special",
- "ukioe": "ukioe",
- "weird": "strano",
- "black-white": "bianco e nero",
- "nudity": "nudità",
- "c": "c",
- "Get Images": "Ottieni immagini",
- "dog-anime": "dog-anime",
- "dog-cartoon": "dog-cartoon",
- "dog-digipa-high-impact": "dog-digipa-high-impact",
- "dog-digipa-med-impact": "dog-digipa-med-impact",
- "dog-digipa-low-impact": "dog-digipa-low-impact",
- "dog-fareast": "dog-fareast",
- "dog-fineart": "dog-fineart",
- "dog-scribbles": "dog-scribbles",
- "dog-special": "dog-special",
- "dog-ukioe": "dog-ukioe",
- "dog-weird": "dog-weird",
- "dog-black-white": "dog-black-white",
- "dog-nudity": "dog-nudity",
- "dog-c": "dog-c",
- "dog-n": "dog-n",
- "house-anime": "house-anime",
- "house-cartoon": "house-cartoon",
- "house-digipa-high-impact": "house-digipa-high-impact",
- "house-digipa-med-impact": "house-digipa-med-impact",
- "house-digipa-low-impact": "house-digipa-low-impact",
- "house-fareast": "house-fareast",
- "house-fineart": "house-fineart",
- "house-scribbles": "house-scribbles",
- "house-special": "house-special",
- "house-ukioe": "house-ukioe",
- "house-weird": "house-weird",
- "house-black-white": "house-black-white",
- "house-nudity": "house-nudity",
- "house-c": "house-c",
- "house-n": "house-n",
- "portrait-anime": "portrait-anime",
- "portrait-cartoon": "portrait-cartoon",
- "portrait-digipa-high-impact": "portrait-digipa-high-impact",
- "portrait-digipa-med-impact": "portrait-digipa-med-impact",
- "portrait-digipa-low-impact": "portrait-digipa-low-impact",
- "portrait-fareast": "portrait-fareast",
- "portrait-fineart": "portrait-fineart",
- "portrait-scribbles": "portrait-scribbles",
- "portrait-special": "portrait-special",
- "portrait-ukioe": "portrait-ukioe",
- "portrait-weird": "portrait-weird",
- "portrait-black-white": "portrait-black-white",
- "portrait-nudity": "portrait-nudity",
- "portrait-c": "portrait-c",
- "portrait-n": "portrait-n",
- "spaceship-anime": "spaceship-anime",
- "spaceship-cartoon": "spaceship-cartoon",
- "spaceship-digipa-high-impact": "spaceship-digipa-high-impact",
- "spaceship-digipa-med-impact": "spaceship-digipa-med-impact",
- "spaceship-digipa-low-impact": "spaceship-digipa-low-impact",
- "spaceship-fareast": "spaceship-fareast",
- "spaceship-fineart": "spaceship-fineart",
- "spaceship-scribbles": "spaceship-scribbles",
- "spaceship-special": "spaceship-special",
- "spaceship-ukioe": "spaceship-ukioe",
- "spaceship-weird": "spaceship-weird",
- "spaceship-black-white": "spaceship-black-white",
- "spaceship-nudity": "spaceship-nudity",
- "spaceship-c": "spaceship-c",
- "spaceship-n": "spaceship-n",
- "artists to study extension by camenduru |": "Estensione 'Artisti per studiare' a cura di camenduru |",
- "github": "Github",
- "|": "|",
- "twitter": "Twitter",
- "youtube": "Youtube",
- "hi-res images": "Immagini in alta risoluzione",
- "All images generated with CompVis/stable-diffusion-v1-4 +": "Tutte le immagini sono state generate con CompVis/stable-diffusion-v1-4 +",
- "artists.csv": "artists.csv",
- "| License: Attribution 4.0 International (CC BY 4.0)": "| Licenza: Attribution 4.0 International (CC BY 4.0)",
- "Favorites": "Preferiti",
- "Others": "Altre immagini",
- "Images directory": "Cartella immagini",
- "Dropdown": "Elenco cartelle",
- "First Page": "Prima pagina",
- "Prev Page": "Pagina precedente",
- "Page Index": "Indice pagina",
- "Next Page": "Pagina successiva",
- "End Page": "Ultima pagina",
- "delete next": "Cancella successivo",
- "Delete": "Elimina",
- "sort by": "Ordina per",
- "path name": "Nome percorso",
- "date": "Data",
- "keyword": "Parola chiave",
- "Generate Info": "Genera Info",
- "File Name": "Nome del file",
- "Move to favorites": "Aggiungi ai preferiti",
- "Renew Page": "Aggiorna la pagina",
- "Number": "Numero",
- "set_index": "Imposta indice",
- "load_switch": "Carica",
- "turn_page_switch": "Volta pagina",
- "Checkbox": "Casella di controllo",
- "Checkbox Group": "Seleziona immagini per",
- "artists": "Artisti",
- "flavors": "Stili",
- "mediums": "Tecniche",
- "movements": "Movimenti artistici",
- "All": "Tutto",
- "Exclude abandoned": "Escludi scartati",
- "Abandoned": "Scartati",
- "Key word": "Parola chiave",
- "Get inspiration": "Ispirami",
- "to txt2img": "Invia a txt2img",
- "to img2img": "Invia a img2img",
- "Collect": "Salva nei preferiti",
- "Don't show again": "Scarta",
- "Move out": "Rimuovi",
- "set button": "Pulsante imposta",
- "Apply settings": "Applica le impostazioni",
- "Saving images/grids": "Salva immagini/griglie",
- "Always save all generated images": "Salva sempre tutte le immagini generate",
- "File format for images": "Formato del file delle immagini",
- "Images filename pattern": "Modello del nome dei file immagine",
- "Add number to filename when saving": "Aggiungi un numero al nome del file al salvataggio",
- "Always save all generated image grids": "Salva sempre tutte le griglie di immagini generate",
- "File format for grids": "Formato del file per le griglie",
- "Add extended info (seed, prompt) to filename when saving grid": "Aggiungi informazioni estese (seme, prompt) al nome del file durante il salvataggio della griglia",
- "Do not save grids consisting of one picture": "Non salvare le griglie composte da una sola immagine",
- "Prevent empty spots in grid (when set to autodetect)": "Previeni spazi vuoti nella griglia (se impostato su rilevamento automatico)",
- "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Numero di righe della griglia; utilizzare -1 per il rilevamento automatico e 0 per essere uguale alla dimensione del lotto",
- "Save text information about generation parameters as chunks to png files": "Salva le informazioni di testo dei parametri di generazione come blocchi nel file png",
- "Create a text file next to every image with generation parameters.": "Crea un file di testo assieme a ogni immagine con i parametri di generazione.",
- "Save a copy of image before doing face restoration.": "Salva una copia dell'immagine prima di eseguire il restauro dei volti.",
- "Save a copy of image before applying highres fix.": "Salva una copia dell'immagine prima di applicare la correzione ad alta risoluzione.",
- "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la correzione del colore ai risultati di img2img",
- "Quality for saved jpeg images": "Qualità delle immagini salvate in formato JPEG",
- "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Se l'immagine PNG è più grande di 4 MB o qualsiasi dimensione è maggiore di 4000, ridimensiona e salva la copia come JPG",
- "Use original name for output filename during batch process in extras tab": "Usa il nome originale per il nome del file di output durante l'elaborazione a lotti nella scheda 'Extra'",
- "When using 'Save' button, only save a single selected image": "Usando il pulsante 'Salva', verrà salvata solo la singola immagine selezionata",
- "Do not add watermark to images": "Non aggiungere la filigrana alle immagini",
- "Paths for saving": "Percorsi di salvataggio",
- "Output directory for images; if empty, defaults to three directories below": "Cartella di output per le immagini; se vuoto, per impostazione predefinita verranno usate le cartelle seguenti",
- "Output directory for txt2img images": "Cartella di output per le immagini txt2img",
- "Output directory for img2img images": "Cartella di output per le immagini img2img",
- "Output directory for images from extras tab": "Cartella di output per le immagini dalla scheda 'Extra'",
- "Output directory for grids; if empty, defaults to two directories below": "Cartella di output per le griglie; se vuoto, per impostazione predefinita veranno usate cartelle seguenti",
- "Output directory for txt2img grids": "Cartella di output per le griglie txt2img",
- "Output directory for img2img grids": "Cartella di output per le griglie img2img",
- "Directory for saving images using the Save button": "Cartella dove salvare le immagini usando il pulsante 'Salva'",
- "Saving to a directory": "Salva in una cartella",
- "Save images to a subdirectory": "Salva le immagini in una sotto cartella",
- "Save grids to a subdirectory": "Salva le griglie in una sotto cartella",
- "When using \"Save\" button, save images to a subdirectory": "Usando il pulsante \"Salva\", le immagini verranno salvate in una sotto cartella",
- "Directory name pattern": "Modello del nome della cartella",
- "Max prompt words for [prompt_words] pattern": "Numero massimo di parole del prompt per il modello [prompt_words]",
- "Upscaling": "Ampliamento",
- "Tile size for ESRGAN upscalers. 0 = no tiling.": "Dimensione piastrella per ampliamento ESRGAN. 0 = nessuna piastrellatura.",
- "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Sovrapposizione delle piastrelle, in pixel per gli ampliamenti ESRGAN. Valori bassi = cucitura visibile.",
- "Tile size for all SwinIR.": "Dimensione piastrella per tutti gli SwinIR.",
- "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Sovrapposizione delle piastrelle, in pixel per SwinIR. Valori bassi = cucitura visibile.",
- "LDSR processing steps. Lower = faster": "Fasi di elaborazione LDSR. Più basso = più veloce",
- "Upscaler for img2img": "Metodo di ampliamento per img2img",
- "Upscale latent space image when doing hires. fix": "Amplia l'immagine nello spazio latente durante la correzione in alta risoluzione",
- "Face restoration": "Restauro del viso",
- "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Peso di CodeFormer; 0 = effetto massimo; 1 = effetto minimo",
- "Move face restoration model from VRAM into RAM after processing": "Sposta il modello di restauro facciale dalla VRAM alla RAM dopo l'elaborazione",
- "System": "Sistema",
- "VRAM usage polls per second during generation. Set to 0 to disable.": "Verifiche al secondo sull'utilizzo della VRAM durante la generazione. Impostare a 0 per disabilitare.",
- "Always print all generation info to standard output": "Stampa sempre tutte le informazioni di generazione sul output standard",
- "Add a second progress bar to the console that shows progress for an entire job.": "Aggiungi una seconda barra di avanzamento alla console che mostra l'avanzamento complessivo del lavoro.",
- "Training": "Addestramento",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Sposta VAE e CLIP nella RAM durante l'addestramento di Iperreti. Risparmia VRAM.",
- "Filename word regex": "Espressione regolare per estrarre parole dal nome del file",
- "Filename join string": "Stringa per unire le parole estratte dal nome del file",
- "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca",
- "Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare",
- "Use cross attention optimizations while training": "Usa le ottimizzazioni di controllo dell'attenzione incrociato durante l'addestramento",
- "Stable Diffusion": "Stable Diffusion",
- "Checkpoints to cache in RAM": "Checkpoint da memorizzare nella RAM",
- "SD VAE": "SD VAE",
- "auto": "auto",
- "Hypernetwork strength": "Forza della Iperrete",
- "Inpainting conditioning mask strength": "Forza della maschera di condizionamento del Inpainting",
- "Apply color correction to img2img results to match original colors.": "Applica la correzione del colore ai risultati di img2img in modo che corrispondano ai colori originali.",
- "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Con img2img, esegue esattamente la quantità di passi specificata dalla barra di scorrimento (normalmente se ne effettuano di meno con meno riduzione del rumore).",
- "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Abilita la quantizzazione nei campionatori K per risultati più nitidi e puliti. Questo può cambiare i semi esistenti. Richiede il riavvio per applicare la modifica.",
- "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Enfasi: utilizzare (testo) per fare in modo che il modello presti maggiore attenzione al testo e [testo] per fargli prestare meno attenzione",
- "Use old emphasis implementation. Can be useful to reproduce old seeds.": "Usa la vecchia implementazione dell'enfasi. Può essere utile per riprodurre vecchi semi.",
- "Make K-diffusion samplers produce same images in a batch as when making a single image": "Fa sì che i campionatori di diffusione K producano le stesse immagini in un lotto come quando si genera una singola immagine",
- "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Aumenta la coerenza disattivando dall'ultima virgola all'indietro di n token quando si utilizzano più di 75 token",
- "Filter NSFW content": "Filtra i contenuti NSFW",
- "Stop At last layers of CLIP model": "Fermati agli ultimi livelli del modello CLIP",
- "Interrogate Options": "Opzioni di interrogazione",
- "Interrogate: keep models in VRAM": "Interroga: mantieni i modelli nella VRAM",
- "Interrogate: use artists from artists.csv": "Interroga: utilizza artisti dal file artisti.csv",
- "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interroga: include la classifica delle corrispondenze dei tag del modello nei risultati (non ha effetto sulle interrogazioni basate su didascalie).",
- "Interrogate: num_beams for BLIP": "Interroga: num_beams per BLIP",
- "Interrogate: minimum description length (excluding artists, etc..)": "Interroga: lunghezza minima della descrizione (esclusi artisti, ecc..)",
- "Interrogate: maximum description length": "Interroga: lunghezza massima della descrizione",
- "CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: numero massimo di righe nel file di testo (0 = Nessun limite)",
- "Interrogate: deepbooru score threshold": "Interroga: soglia del punteggio deepbooru",
- "Interrogate: deepbooru sort alphabetically": "Interroga: deepbooru ordinato alfabeticamente",
- "use spaces for tags in deepbooru": "usa gli spazi per i tag in deepbooru",
- "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "Effettua l'escape (\\) delle parentesi in deepbooru (così vengono usate come parentesi letterali e non per enfatizzare)",
- "User interface": "Interfaccia Utente",
- "Show progressbar": "Mostra la barra di avanzamento",
- "Show image creation progress every N sampling steps. Set 0 to disable.": "Mostra l'avanzamento della generazione dell'immagine ogni N passaggi di campionamento. Impostare a 0 per disabilitare.",
- "Show previews of all images generated in a batch as a grid": "Mostra le anteprime di tutte le immagini generate in un lotto come una griglia",
- "Show grid in results for web": "Mostra la griglia nei risultati per il web",
- "Do not show any images in results for web": "Non mostrare alcuna immagine nei risultati per il web",
- "Add model hash to generation information": "Aggiungi l'hash del modello alle informazioni sulla generazione",
- "Add model name to generation information": "Aggiungi il nome del modello alle informazioni sulla generazione",
- "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Durante la lettura dei parametri di generazione dal testo nell'interfaccia utente (da informazioni PNG o testo incollato), non modificare il modello/checkpoint selezionato.",
- "Send seed when sending prompt or image to other interface": "Invia il seme quando si invia un prompt o un'immagine a un'altra interfaccia",
- "Font for image grids that have text": "Font per griglie di immagini con testo",
- "Enable full page image viewer": "Abilita la visualizzazione delle immagini a pagina intera",
- "Show images zoomed in by default in full page image viewer": "Mostra le immagini ingrandite per impostazione predefinita nella visualizzazione a pagina intera",
- "Show generation progress in window title.": "Mostra l'avanzamento della generazione nel titolo della finestra.",
- "Quicksettings list": "Elenco delle impostazioni rapide",
- "Localization (requires restart)": "Localizzazione (richiede il riavvio)",
- "ar_AR": "ar_AR",
- "de_DE": "de_DE",
- "es_ES": "es_ES",
- "fr_FR": "fr_FR",
- "it_IT": "it_IT",
- "ja_JP": "ja_JP",
- "ko_KR": "ko_KR",
- "pt_BR": "pt_BR",
- "ru_RU": "ru_RU",
- "tr_TR": "tr_TR",
- "zh_CN": "zh_CN",
- "zh_TW": "zh_TW",
- "Sampler parameters": "Parametri del campionatore",
- "Hide samplers in user interface (requires restart)": "Nascondi campionatori nell'interfaccia utente (richiede il riavvio)",
- "eta (noise multiplier) for DDIM": "ETA (moltiplicatore di rumore) per DDIM",
- "eta (noise multiplier) for ancestral samplers": "ETA (moltiplicatore di rumore) per campionatori ancestrali",
- "img2img DDIM discretize": "discretizzazione DDIM per img2img",
- "uniform": "uniforme",
- "quad": "quad",
- "sigma churn": "sigma churn",
- "sigma tmin": "sigma tmin",
- "sigma noise": "sigma noise",
- "Eta noise seed delta": "ETA del delta del seme del rumore",
- "Number of columns on image gallery": "Numero di colonne nella galleria di immagini",
- "Aesthetic Image Scorer": "Punteggio delle immagini estetiche",
- "Save score as EXIF or PNG Info Chunk": "Salva il punteggio come info EXIF o PNG",
- "aesthetic_score": "Punteggio estetico",
- "cfg_scale": "Scala CFG",
- "sd_model_hash": "Hash del modello SD",
- "hash": "Hash",
- "Save tags (Windows only)": "Salva etichette (solo Windows)",
- "Save category (Windows only)": "Salva categoria (solo Windows)",
- "Save generation params text": "Salva testo parametri di generazione",
- "Force CPU (Requires Custom Script Reload)": "Forza CPU (richiede il ricaricamento dello script personalizzato)",
- "Images Browser": "Galleria immagini",
- "Preload images at startup": "Precarica le immagini all'avvio",
- "Number of columns on the page": "Numero di colonne nella pagina",
- "Number of rows on the page": "Numero di righe nella pagina",
- "Minimum number of pages per load": "Numero minimo di pagine da caricare",
- "Maximum number of samples, used to determine which folders to skip when continue running the create script": "Numero massimo di campioni, utilizzato per determinare quali cartelle ignorare quando si continua a eseguire lo script di creazione",
- "Use same seed for all images": "Usa lo stesso seme per tutte le immagini",
- "Request browser notifications": "Richiedi le notifiche del browser",
- "Download localization template": "Scarica il modello per la localizzazione",
- "Reload custom script bodies (No ui updates, No restart)": "Ricarica gli script personalizzati (nessun aggiornamento dell'interfaccia utente, nessun riavvio)",
- "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Riavvia Gradio e aggiorna i componenti (solo script personalizzati, ui.py, js e css)",
- "Installed": "Installato",
- "Available": "Disponibile",
- "Install from URL": "Installa da URL",
- "Apply and restart UI": "Applica e riavvia l'interfaccia utente",
- "Check for updates": "Controlla aggiornamenti",
- "Extension": "Estensione",
- "URL": "URL",
- "Update": "Aggiorna",
- "aesthetic-gradients": "Gradienti Estetici (CLIP)",
- "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients",
- "unknown": "sconosciuto",
- "dataset-tag-editor": "Dataset Tag Editor",
- "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git": "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git",
- "deforum-for-automatic1111-webui": "Deforum",
- "https://github.com/deforum-art/deforum-for-automatic1111-webui": "https://github.com/deforum-art/deforum-for-automatic1111-webui",
- "sd-dynamic-prompts": "Prompt dinamici",
- "https://github.com/adieyal/sd-dynamic-prompts": "https://github.com/adieyal/sd-dynamic-prompts",
- "stable-diffusion-webui-aesthetic-image-scorer": "Punteggio immagini estetiche",
- "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer",
- "stable-diffusion-webui-artists-to-study": "Artisti per studiare",
- "https://github.com/camenduru/stable-diffusion-webui-artists-to-study": "https://github.com/camenduru/stable-diffusion-webui-artists-to-study",
- "stable-diffusion-webui-images-browser": "Galleria immagini",
- "https://github.com/yfszzx/stable-diffusion-webui-images-browser": "https://github.com/yfszzx/stable-diffusion-webui-images-browser",
- "stable-diffusion-webui-inspiration": "Ispirazione",
- "https://github.com/yfszzx/stable-diffusion-webui-inspiration": "https://github.com/yfszzx/stable-diffusion-webui-inspiration",
- "tag-autocomplete": "Autocompletamento etichette",
- "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git",
- "wildcards": "Termini Jolly",
- "https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git",
- "Load from:": "Carica da:",
- "Extension index URL": "URL dell'indice delle Estensioni",
- "URL for extension's git repository": "URL del repository GIT dell'estensione",
- "Local directory name": "Nome cartella locale",
- "Install": "Installa",
- "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (premi Ctrl+Invio o Alt+Invio per generare)",
- "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (premere Ctrl+Invio o Alt+Invio per generare)",
- "Add a random artist to the prompt.": "Aggiungi un artista casuale al prompt.",
- "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Leggere i parametri di generazione dal prompt o dall'ultima generazione se il prompt è vuoto ed inserirli nell'interfaccia utente.",
- "Save style": "Salva stile",
- "Apply selected styles to current prompt": "Applica gli stili selezionati al prompt corrente",
- "Stop processing current image and continue processing.": "Interrompe l'elaborazione dell'immagine corrente e continua l'elaborazione.",
- "Stop processing images and return any results accumulated so far.": "Interrompe l'elaborazione delle immagini e restituisce tutti i risultati accumulati finora.",
- "Style to apply; styles have components for both positive and negative prompts and apply to both": "Stile da applicare; gli stili hanno componenti sia per i prompt positivi che per quelli negativi e si applicano a entrambi",
- "Do not do anything special": "Non fa nulla di speciale",
- "Which algorithm to use to produce the image": "Quale algoritmo utilizzare per produrre l'immagine",
- "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - molto creativo, si può ottenere un'immagine completamente diversa a seconda del numero di passi, impostare i passi su un valore superiore a 30-40 non aiuta",
- "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - il migliore per inpainting",
- "Produce an image that can be tiled.": "Produce un'immagine che può essere piastrellata.",
- "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Utilizza un processo in due fasi per creare parzialmente un'immagine con una risoluzione inferiore, aumentare la scala e quindi migliorarne i dettagli senza modificare la composizione",
- "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Determina quanto poco l'algoritmo dovrebbe rispettare dovrebbe il contenuto dell'immagine. A 0, non cambierà nulla e a 1 otterrai un'immagine non correlata. Con valori inferiori a 1.0 l'elaborazione richiederà meno passaggi di quelli specificati dalla barra di scorrimento dei passi di campionamento.",
- "How many batches of images to create": "Quanti lotti di immagini generare",
- "How many image to create in a single batch": "Quante immagini generare in un singolo lotto",
- "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - quanto fortemente l'immagine deve conformarsi al prompt: valori più bassi producono risultati più creativi",
- "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Un valore che determina l'output del generatore di numeri casuali: se create un'immagine con gli stessi parametri e seme di un'altra immagine, otterrete lo stesso risultato",
- "Set seed to -1, which will cause a new random number to be used every time": "Imposta il seme su -1, che farà sì che ogni volta venga utilizzato un nuovo numero casuale",
- "Reuse seed from last generation, mostly useful if it was randomed": "Riusa il seme dell'ultima generazione, utile soprattutto se casuale",
- "Seed of a different picture to be mixed into the generation.": "Seme di un'immagine diversa da miscelare nella generazione.",
- "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Quanto è forte la variazione da produrre. A 0, non ci sarà alcun effetto. A 1, otterrai l'intera immagine con il seme della variazione (tranne per i campionatori ancestrali, dove otterrai solo una leggera variazione).",
- "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Prova a produrre un'immagine simile a quella che sarebbe stata prodotta con lo stesso seme alla risoluzione specificata",
- "This text is used to rotate the feature space of the imgs embs": "Questo testo viene utilizzato per ruotare lo spazio delle funzioni delle immagini incorporate",
- "How many times to repeat processing an image and using it as input for the next iteration": "Quante volte ripetere l'elaborazione di un'immagine e utilizzarla come input per l'iterazione successiva",
- "Hello, StylePile here.\nUntil some weird bug gets fixed you will see this even if the script itself is not active. Meanwhile, some hints to take your artwork to new heights:\nUse the 'Focus on' dropdown to select complex presets. Toggle selections below (with or without Focus) to affect your results. Mix and match to get some interesting results. \nAnd some general Stable Diffusion tips that will take your designs to next level:\nYou can add parenthesis to make parts of the prompt stronger. So (((cute))) kitten will make it extra cute (try it out). This is alsow important if a style is affecting your original prompt too much. Make that prompt stronger by adding parenthesis around it, like this: ((promt)).\nYou can type promts like [A|B] to sequentially use terms one after another on each step. So, like [cat|dog] will produce a hybrid catdog. And [A:B:0.4] to switch to other terms after the first one has been active for a certain percentage of steps. So [cat:dog:0.4] will build a cat 40% of the time and then start turning it into a dog. This needs more steps to work properly.": "Salve, qui è StylePile.\nFinché qualche strano bug non verrà risolto, vedrai questo testo anche se lo script non è attivo. Nel frattempo, alcuni suggerimenti per portare la tua grafica a nuovi livelli:\nUtilizza il menu a discesa 'Focus on' per selezionare valori predefiniti complessi. Attiva o disattiva le selezioni seguenti (con o senza Focus) per influire sui risultati. Mescola e abbina per ottenere risultati interessanti. \nE alcuni suggerimenti generali su Stable Diffusion che porteranno i tuoi risultati a un livello superiore:\nPuoi aggiungere parentesi per aumentare l'influenza di certe parti del prompt. Quindi '(((cute))) kitten' lo renderà molto carino (fai delle prove). Questo è importante quando uno stile influisce troppo sul prompt originale. Rendi più forte quel prompt aggiungendo delle parentesi intorno ad esso, così: ((promt)).\nPuoi digitare prompt nel formato [A|B] per usare in sequenza i termini uno dopo l'altro in ogni passaggio. Quindi, come [cat|dog] produrrà un 'canegatto' ibrido. E [A:B:0.4] per passare ad altri termini dopo che il primo è stato attivo per una certa percentuale di passaggi. Quindi [cat:dog:0.4] genererà un gatto il 40% dei passaggi e poi inizierà a trasformarlo in un cane. Sono richiesti più passaggi perchè funzioni correttamente.",
- "Enter one prompt per line. Blank lines will be ignored.": "Immettere un prompt per riga. Le righe vuote verranno ignorate.",
- "Separate values for X axis using commas.": "Separare i valori per l'asse X usando le virgole.",
- "Separate values for Y axis using commas.": "Separare i valori per l'asse Y usando le virgole.",
- "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "Separa un elenco di parole con virgole e lo script eseguirà una variazione di prompt con quelle parole per ogni loro possibile ordine",
- "Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva l'immagine/i in una cartella (predefinita - log/images) ed i parametri di generazione in un file CSV.",
- "Open images output directory": "Apri la cartella di output delle immagini",
- "How much to blur the mask before processing, in pixels.": "Quanto sfocare la maschera prima dell'elaborazione, in pixel.",
- "What to put inside the masked area before processing it with Stable Diffusion.": "Cosa mettere all'interno dell'area mascherata prima di elaborarla con Stable Diffusion.",
- "fill it with colors of the image": "riempi con i colori dell'immagine",
- "keep whatever was there originally": "conserva tutto ciò che c'era in origine",
- "fill it with latent space noise": "riempi di rumore spaziale latente",
- "fill it with latent space zeroes": "riempi con zeri di spazio latente",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Ingrandisce la regione mascherata per raggiungere la risoluzione, esegue la pittura, riduce la scala e incolla nell'immagine originale",
- "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Ridimensiona l'immagine alla risoluzione di destinazione. A meno che altezza e larghezza non corrispondano, otterrai proporzioni errate.",
- "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Ridimensionare l'immagine in modo che l'intera risoluzione di destinazione sia riempita con l'immagine. Ritaglia le parti che sporgono.",
- "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Ridimensiona l'immagine in modo che l'intera immagine rientri nella risoluzione di destinazione. Riempi lo spazio vuoto con i colori dell'immagine.",
- "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Per l'upscaling SD, quanta sovrapposizione in pixel dovrebbe esserci tra le piastrelle. Le piastrelle si sovrappongono in modo che quando vengono unite nuovamente in un'immagine, non ci siano giunture chiaramente visibili.",
- "Process an image, use it as an input, repeat.": "Elabora un'immagine, usala come input, ripeti.",
- "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "In modalità rielaborazione ricorsiva, su ogni ciclo la forza di denoising viene moltiplicata per questo valore. <1 significa varietà decrescente in modo che la sequenza converga su un'immagine fissa. >1 significa aumentare la varietà in modo che la tua sequenza diventi sempre più caotica.",
- "A directory on the same machine where the server is running.": "Una cartella sulla stessa macchina su cui è in esecuzione il server.",
- "Leave blank to save images to the default path.": "Lascia vuoto per salvare le immagini nel percorso predefinito.",
- "Result = A * (1 - M) + B * M": "Risultato = A * (1 - M) + B * M",
- "Result = A + (B - C) * M": "Risultato = A + (B - C) * M",
- "1st and last digit must be 1. ex:'1, 2, 1'": "La prima e l'ultima cifra devono essere 1. Es.:'1, 2, 1'",
- "Path to directory with input images": "Percorso della cartella con immagini di input",
- "Path to directory where to write outputs": "Percorso della cartella in cui scrivere i risultati",
- "C:\\directory\\of\\datasets": "C:\\cartella\\del\\dataset",
- "Input images directory": "Cartella di input delle immagini",
- "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Usa i seguenti tag per definire come vengono scelti i nomi dei file per le immagini: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed ], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; lasciare vuoto per usare l'impostazione predefinita.",
- "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Se questa opzione è abilitata, la filigrana non verrà aggiunta alle immagini create. Attenzione: se non aggiungi la filigrana, potresti comportarti in modo non etico.",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Utilizzare i seguenti tag per definire come vengono scelte le sottodirectory per le immagini e le griglie: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; lasciare vuoto per usare l'impostazione predefinita.",
- "Restore low quality faces using GFPGAN neural network": "Ripristina volti di bassa qualità utilizzando la rete neurale GFPGAN",
- "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Questa espressione regolare verrà utilizzata per estrarre le parole dal nome del file e verranno unite utilizzando l'opzione seguente nel testo dell'etichetta utilizzato per l'addestramento. Lascia vuoto per mantenere il testo del nome del file così com'è.",
- "This string will be used to join split words into a single line if the option above is enabled.": "Questa stringa verrà utilizzata per unire le parole divise in un'unica riga se l'opzione sopra è abilitata.",
- "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Si applica solo ai modelli di pittura. Determina con quale forza mascherare l'immagine originale per inpainting e img2img. 1.0 significa completamente mascherato, che è il comportamento predefinito. 0.0 significa un condizionamento completamente non mascherato. Valori più bassi aiuteranno a preservare la composizione generale dell'immagine, ma avranno difficoltà con grandi cambiamenti.",
- "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Elenco dei nomi delle impostazioni, separati da virgole, per le impostazioni che dovrebbero essere visualizzate nella barra di accesso rapido in alto, anziché nella normale scheda delle impostazioni. Vedi modules/shared.py per impostare i nomi. Richiede il riavvio per applicare.",
- "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se questo valore è diverso da zero, verrà aggiunto al seed e utilizzato per inizializzare il generatore di numeri casuali per il rumore quando si utilizzano campionatori con ETA. Puoi usarlo per produrre ancora più variazioni di immagini, oppure puoi usarlo per abbinare le immagini di altri software se sai cosa stai facendo.",
- "Leave empty for auto": "Lasciare vuoto per automatico",
- "Autocomplete options": "Opzioni di autocompletamento",
- "Enable Autocomplete": "Abilita autocompletamento",
- "Append commas": "Aggiungi virgole",
- "AlphaCanvas": "AlphaCanvas",
- "Close": "Chiudi",
- "Grab Results": "Ottieni risultati",
- "Apply Patch": "Applica Patch",
- "Hue:0": "Hue:0",
- "S:0": "S:0",
- "L:0": "L:0",
- "Load Canvas": "Carica Canvas",
- "Save Canvas": "Salva Canvas",
- "latest": "aggiornato",
- "behind": "da aggiornare",
- "Description": "Descrizione",
- "Action": "Azione",
- "Aesthetic Gradients": "Gradienti estetici",
- "Create an embedding from one or few pictures and use it to apply their style to generated images.": "Crea un incorporamento da una o poche immagini e usalo per applicare il loro stile alle immagini generate.",
- "Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "Estensione del campione. Consente di utilizzare la sintassi __name__ nel prompt per ottenere una riga casuale da un file denominato name.txt nella cartella dei termini jolly. Vedi anche 'Prompt dinamici' per funzionalità simili.",
- "Dynamic Prompts": "Prompt dinamici",
- "Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "Implementa un modello di linguaggio espressivo per la generazione di prompt casuale o combinatoria insieme a funzionalità per supportare cartelle strutturate contenenti termini jolly.",
- "Image browser": "Galleria immagini",
- "Provides an interface to browse created images in the web browser.": "Fornisce un'interfaccia nel browser web per sfogliare le immagini create.",
- "Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "Visualizza in modo casuale le immagini dello stile tipico dell'artista o dei generi artistici, dopo la selezione vengono visualizzate più immagini di questo artista o genere. Così non dovete preoccuparvi della difficoltà di scegliere lo stile artistico giusto quando create.",
- "The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Il porting ufficiale di Deforum, uno script completo per animazioni 2D e 3D, che supporta sequenze di fotogrammi chiave, parametri matematici dinamici (anche all'interno dei prompt), mascheramento dinamico, stima della profondità e warping.",
- "Artists to study": "Artisti per studiare",
- "Shows a gallery of generated pictures by artists separated into categories.": "Mostra una galleria di immagini generate dagli artisti suddivise in categorie.",
- "Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Calcola il punteggio estetico per le immagini generate utilizzando il predittore del punteggio estetico CLIP+MLP basato su Chad Scorer",
- "Lets you edit captions in training datasets.": "Consente di modificare i sottotitoli nei set di dati di addestramento.",
- "Time taken:": "Tempo impiegato:"
-}
\ No newline at end of file
From b81fad071d08fad9ea9fab87b77e22b7f04b4062 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Thu, 3 Nov 2022 20:28:06 +0800
Subject: [PATCH 097/147] Workaround for some strings that cannot be localized
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
localizations/zh_CN.json | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 6d6720c9..bacf905a 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -198,7 +198,6 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
- "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
@@ -469,7 +468,6 @@
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt",
"Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个",
"Stop processing images and return any results accumulated so far.": "停止处理图像,并返回迄今为止累积的任何结果",
- "Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者",
"Do not do anything special": "什么都不做",
"Which algorithm to use to produce the image": "使用哪种算法生成图像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有创意,可以根据迭代步数获得完全不同的图像,将迭代步数设置为高于 30-40 不会有正面作用",
@@ -480,9 +478,6 @@
"How many batches of images to create": "创建多少批次的图像",
"How many image to create in a single batch": "每批创建多少图像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 图像应在多大程度上服从提示词 - 较低的值会产生更有创意的结果",
- "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果",
- "Set seed to -1, which will cause a new random number to be used every time": "将随机种子设置为-1,则每次都会使用一个新的随机数",
- "Reuse seed from last generation, mostly useful if it was randomed": "重用上一次使用的随机种子,如果想要固定结果就会很有用",
"Seed of a different picture to be mixed into the generation.": "将要参与生成的另一张图的随机种子",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "想要产生多强烈的变化。设为 0 时,将没有效果。设为 1 时,你将获得完全产自差异随机种子的图像(ancestral 采样器除外,你只是单纯地生成了一些东西)",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "尝试生成与在指定分辨率下使用相同随机种子生成的图像相似的图片",
@@ -490,7 +485,6 @@
"Separate values for X axis using commas.": "使用逗号分隔 X 轴的值",
"Separate values for Y axis using commas.": "使用逗号分隔 Y 轴的值",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 表格文件",
- "Open images output directory": "打开图像输出目录",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
"fill it with colors of the image": "用图像的颜色(高强度模糊)填充它",
@@ -547,13 +541,16 @@
"Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
"Upscaler": "放大算法",
"Start drawing": "开始绘制",
-
+
"----无效----": "----以下内容无法被翻译,Bug----",
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
"Apply selected styles to current prompt": "将所选模板风格,应用于当前提示词",
+ "Set seed to -1, which will cause a new random number to be used every time": "将随机种子设置为-1,则每次都会使用一个新的随机数",
+ "Reuse seed from last generation, mostly useful if it was randomed": "重用上一次使用的随机种子,如果想要固定结果就会很有用",
+ "Open images output directory": "打开图像输出目录",
"Upscaler 1": "放大算法 1",
"Upscaler 2": "放大算法 2",
"Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
@@ -561,6 +558,10 @@
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Face restoration model": "面部修复模型",
+ "Style to apply; styles have components for both positive and negative prompts and apply to both": "要使用的模版风格; 模版风格包含正向和反向提示词,并应用于两者\n\ud83c\udfa8 随机添加一个艺术家到提示词中\n \u2199\ufe0f 从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面\n\ud83d\udcbe 将当前的提示词保存为模版风格(保存在styles.csv)\n\ud83d\udccb 将所选模板风格,应用于当前提示词\n如果你在文本中添加{prompt}标记,并保存为模版风格\n那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
+ "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果\n\ud83c\udfb2 将随机种子设置为-1,则每次都会使用一个新的随机数\n\u267b\ufe0f 重用上一次使用的随机种子,如果想要固定输出结果就会很有用",
+ "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
+
"----已移除----": "----以下内容在webui新版本已移除----",
"▼": "▼",
From 8bc003c9bb7ba3f90a4d075196a2231915a3fa06 Mon Sep 17 00:00:00 2001
From: Eugenio Buffo <58123757+EugenioBuffo@users.noreply.github.com>
Date: Thu, 3 Nov 2022 13:28:56 +0100
Subject: [PATCH 098/147] Fixed misspelled word
---
localizations/it_IT.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/localizations/it_IT.json b/localizations/it_IT.json
index de599a85..83d0ccce 100644
--- a/localizations/it_IT.json
+++ b/localizations/it_IT.json
@@ -196,7 +196,7 @@
"Beta distribution (VP only)": "Distribuzione Beta (Solo CV)",
"Beta min (VP only)": "Beta min (Solo CV)",
"Epsilon (VP only)": "Epsilon (Solo CV)",
- "Running in txt2img mode:": "Esecusione in modalità txt2img:",
+ "Running in txt2img mode:": "Esecuzione in modalità txt2img:",
"Render these video formats:": "Renderizza in questi formati:",
"GIF": "GIF",
"MP4": "MP4",
@@ -1230,4 +1230,4 @@
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Calcola il punteggio estetico per le immagini generate utilizzando il predittore del punteggio estetico CLIP+MLP basato su Chad Scorer",
"Lets you edit captions in training datasets.": "Consente di modificare i sottotitoli nei set di dati di addestramento.",
"Time taken:": "Tempo impiegato:"
-}
\ No newline at end of file
+}
From 17bd3f4ea730436599849eddbaa78e2879b793d2 Mon Sep 17 00:00:00 2001
From: Bruno Seoane
Date: Thu, 3 Nov 2022 10:08:18 -0300
Subject: [PATCH 099/147] Add tests
---
test/utils_test.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
create mode 100644 test/utils_test.py
diff --git a/test/utils_test.py b/test/utils_test.py
new file mode 100644
index 00000000..65d3d177
--- /dev/null
+++ b/test/utils_test.py
@@ -0,0 +1,63 @@
+import unittest
+import requests
+
+class UtilsTests(unittest.TestCase):
+ def setUp(self):
+ self.url_options = "http://localhost:7860/sdapi/v1/options"
+ self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags"
+ self.url_samplers = "http://localhost:7860/sdapi/v1/samplers"
+ self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers"
+ self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models"
+ self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks"
+ self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers"
+ self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models"
+ self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles"
+ self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories"
+ self.url_artists = "http://localhost:7860/sdapi/v1/artists"
+
+ def test_options_get(self):
+ self.assertEqual(requests.get(self.url_options).status_code, 200)
+
+ def test_options_write(self):
+ response = requests.get(self.url_options)
+ self.assertEqual(response.status_code, 200)
+
+ pre_value = response.json()["send_seed"]
+
+ self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200)
+
+ response = requests.get(self.url_options)
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.json()["send_seed"], not pre_value)
+
+ requests.post(self.url_options, json={"send_seed": pre_value})
+
+ def test_cmd_flags(self):
+ self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200)
+
+ def test_samplers(self):
+ self.assertEqual(requests.get(self.url_samplers).status_code, 200)
+
+ def test_upscalers(self):
+ self.assertEqual(requests.get(self.url_upscalers).status_code, 200)
+
+ def test_sd_models(self):
+ self.assertEqual(requests.get(self.url_sd_models).status_code, 200)
+
+ def test_hypernetworks(self):
+ self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200)
+
+ def test_face_restorers(self):
+ self.assertEqual(requests.get(self.url_face_restorers).status_code, 200)
+
+ def test_realesrgan_models(self):
+ self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200)
+
+ def test_prompt_styles(self):
+ self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200)
+
+ def test_artist_categories(self):
+ self.assertEqual(requests.get(self.url_artist_categories).status_code, 200)
+
+ def test_artists(self):
+ self.assertEqual(requests.get(self.url_artists).status_code, 200)
\ No newline at end of file
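These endpoint checks assume a webui instance is already serving its API at http://localhost:7860; how that server is launched is outside this patch and left as an assumption here. With the server up, the standard-library runner is enough to execute the suite, for example:

    import unittest

    if __name__ == "__main__":
        # Equivalent to: python -m unittest discover -s test -p "utils_test.py" -v
        # Requires the API server to be reachable at http://localhost:7860 first.
        suite = unittest.defaultTestLoader.discover("test", pattern="utils_test.py")
        unittest.TextTestRunner(verbosity=2).run(suite)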
From 3bf8da465946126efbe66becb31c055ee7f70ea6 Mon Sep 17 00:00:00 2001
From: batvbs
Date: Thu, 3 Nov 2022 21:58:46 +0800
Subject: [PATCH 100/147] Update zh_CN.json
---
localizations/zh_CN.json | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index bacf905a..8a00c11c 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -444,13 +444,11 @@
"Number of columns on the page": "每页列数",
"Number of rows on the page": "每页行数",
"Minimum number of pages per load": "每次加载的最小页数",
- "Wildcards": "通配符",
"Use same seed for all images": "为所有图像使用同一个随机种子",
"Request browser notifications": "请求浏览器通知",
"Download localization template": "下载本地化模板",
"Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无用户界面更新,无重启)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件(仅限自定义脚本、ui.py、js 和 css)",
- "Installed": "已安装",
"Available": "可用",
"Install from URL": "从网址安装",
"Apply and restart UI": "应用并重启用户界面",
@@ -463,7 +461,6 @@
"Extension index URL": "扩展列表链接",
"URL for extension's git repository": "扩展的 git 仓库链接",
"Local directory name": "本地路径名",
- "Install": "安装",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nPrompt",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt",
"Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个",
@@ -541,6 +538,20 @@
"Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
"Upscaler": "放大算法",
"Start drawing": "开始绘制",
+ "Description": "描述",
+ "Action": "行动",
+ "Aesthetic Gradients": "美术风格",
+ "aesthetic-gradients": "美术风格",
+ "Wildcards": "通配符",
+ "stable-diffusion-webui-wildcards": "通配符",
+ "Dynamic Prompts": "动态提示",
+ "Image browser": "图库浏览器",
+ "images-browser": "图库浏览器",
+ "Inspiration": "灵感",
+ "Deforum": "Deforum",
+ "Artists to study": "艺术家图库",
+ "Aesthetic Image Scorer": "美术风格评分",
+ "Dataset Tag Editor": "数据集标签编辑器",
"----无效----": "----以下内容无法被翻译,Bug----",
@@ -557,6 +568,9 @@
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些 Real-ESRGAN 模型显示在网页用户界面。(需要重新启动)",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Face restoration model": "面部修复模型",
+ "Install": "安装",
+ "Installing...": "安装中...",
+ "Installed": "已安装",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要使用的模版风格; 模版风格包含正向和反向提示词,并应用于两者\n\ud83c\udfa8 随机添加一个艺术家到提示词中\n \u2199\ufe0f 从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面\n\ud83d\udcbe 将当前的提示词保存为模版风格(保存在styles.csv)\n\ud83d\udccb 将所选模板风格,应用于当前提示词\n如果你在文本中添加{prompt}标记,并保存为模版风格\n那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果\n\ud83c\udfb2 将随机种子设置为-1,则每次都会使用一个新的随机数\n\u267b\ufe0f 重用上一次使用的随机种子,如果想要固定输出结果就会很有用",
From 8b913ea03a21d49278609b20e77183559372817c Mon Sep 17 00:00:00 2001
From: benlisquare
Date: Fri, 4 Nov 2022 04:30:29 +1100
Subject: [PATCH 101/147] Update Traditional Chinese (zh_TW) localisation JSON
---
localizations/zh_TW.json | 280 +++++++++++++++++++++++++++------------
1 file changed, 194 insertions(+), 86 deletions(-)
diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json
index 724df1ac..7467db88 100644
--- a/localizations/zh_TW.json
+++ b/localizations/zh_TW.json
@@ -12,14 +12,15 @@
"Stable Diffusion checkpoint": "Stable Diffusion 模型權重存檔點",
"txt2img": "文生圖",
"img2img": "圖生圖",
- "Extras": "後處理",
- "PNG Info": "PNG 資訊",
- "Checkpoint Merger": "模型權重存檔點合併工具",
+ "Extras": "更多",
+ "PNG Info": "圖片資訊",
+ "Checkpoint Merger": "模型權重存檔點合併",
"Train": "訓練",
- "Create aesthetic embedding": "生成美術風格 embedding",
+ "Create aesthetic embedding": "生成美術風格",
"Image Browser": "圖庫瀏覽器",
"History": "歷史記錄",
"Settings": "設定",
+ "Extensions": "擴充",
"Prompt": "提示詞",
"Negative prompt": "反向提示詞",
"Run": "執行",
@@ -28,7 +29,7 @@
"Generate": "生成",
"Style 1": "模版風格 1",
"Style 2": "模版風格 2",
- "Label": "標籤",
+ "Label": "標記",
"File": "檔案",
"Drop File Here": "拖曳檔案到此",
"-": "-",
@@ -59,12 +60,12 @@
"Highres. fix": "高解析度修復",
"Firstpass width": "第一遍的寬度",
"Firstpass height": "第一遍的高度",
- "Denoising strength": "去噪強度",
- "Batch count": "批次",
- "Batch size": "批量",
+ "Denoising strength": "重繪幅度",
+ "Batch count": "生成批次",
+ "Batch size": "每批數量",
"CFG Scale": "提示詞相關性(CFG)",
"Seed": "隨機種子",
- "Extra": "額外參數",
+ "Extra": "▼",
"Variation seed": "差異隨機種子",
"Variation strength": "差異強度",
"Resize seed from width": "自寬度縮放隨機種子",
@@ -81,7 +82,7 @@
"Slerp angle": "Slerp 角度",
"Is negative text": "是反向提示詞",
"Script": "指令碼",
- "Embedding to Shareable PNG": "將 Embedding 轉換為可分享的 PNG",
+ "Embedding to Shareable PNG": "將 Embedding 轉換為可分享的 PNG 圖片檔案",
"Prompt matrix": "提示詞矩陣",
"Prompts from file or textbox": "從文字方塊或檔案載入提示詞",
"X/Y plot": "X/Y 圖表",
@@ -91,6 +92,10 @@
"Show Textbox": "顯示文字方塊",
"File with inputs": "含輸入內容的檔案",
"Prompts": "提示詞",
+ "Iterate seed every line": "每行輸入都換一個種子",
+ "Use same random seed for all lines": "每行輸入都使用同一個隨機種子",
+ "List of prompt inputs": "提示詞輸入列表",
+ "Upload prompt inputs": "上傳提示詞輸入檔案",
"X type": "X軸類型",
"Nothing": "無",
"Var. seed": "差異隨機種子",
@@ -100,8 +105,8 @@
"Prompt order": "提示詞順序",
"Sampler": "採樣器",
"Checkpoint name": "模型權重存檔點的名稱",
- "Hypernetwork": "Hypernetwork",
- "Hypernet str.": "Hypernetwork 強度",
+ "Hypernetwork": "超網路",
+ "Hypernet str.": "超網路強度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
@@ -109,6 +114,7 @@
"Eta": "Eta",
"Clip skip": "Clip 跳過",
"Denoising": "去噪",
+ "Cond. Image Mask Weight": "圖像調節屏蔽度",
"X values": "X軸數值",
"Y type": "Y軸類型",
"Y values": "Y軸數值",
@@ -118,44 +124,44 @@
"Drop Image Here": "拖曳圖像到此",
"Save": "儲存",
"Send to img2img": ">> 圖生圖",
- "Send to inpaint": ">> 內補繪製",
- "Send to extras": ">> 後處理",
+ "Send to inpaint": ">> 局部重繪",
+ "Send to extras": ">> 更多",
"Make Zip when Save?": "儲存時生成ZIP壓縮檔案?",
"Textbox": "文字方塊",
"Interrogate\nCLIP": "CLIP\n反推提示詞",
"Interrogate\nDeepBooru": "DeepBooru\n反推提示詞",
- "Inpaint": "內補繪製",
+ "Inpaint": "局部重繪",
"Batch img2img": "批量圖生圖",
"Image for img2img": "圖生圖的圖像",
- "Image for inpainting with mask": "用於內補繪製蒙版內容的圖像",
+ "Image for inpainting with mask": "用於局部重繪並手動畫蒙版的圖像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
"Draw mask": "繪製蒙版",
"Upload mask": "上傳蒙版",
"Masking mode": "蒙版模式",
- "Inpaint masked": "內補繪製蒙版內容",
- "Inpaint not masked": "內補繪製非蒙版內容",
+ "Inpaint masked": "重繪蒙版內容",
+ "Inpaint not masked": "重繪非蒙版內容",
"Masked content": "蒙版蒙住的內容",
"fill": "填充",
"original": "原圖",
"latent noise": "潛空間噪聲",
"latent nothing": "潛空間數值零",
- "Inpaint at full resolution": "以完整解析度進行內補繪製",
- "Inpaint at full resolution padding, pixels": "以完整解析度進行內補繪製 — 填補畫素",
- "Process images in a directory on the same machine where the server is running.": "在伺服器主機上的目錄中處理圖像",
- "Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一個空的資料夾為輸出目錄而非預設的 output 資料夾為輸出目錄",
+ "Inpaint at full resolution": "全解析度局部重繪",
+ "Inpaint at full resolution padding, pixels": "預留畫素",
+ "Process images in a directory on the same machine where the server is running.": "使用伺服器主機上的一個目錄,作為輸入目錄處理圖像",
+ "Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一個空的資料夾作為輸出目錄,而不是使用預設的 output 資料夾作為輸出目錄",
"Disabled when launched with --hide-ui-dir-config.": "啟動 --hide-ui-dir-config 時禁用",
"Input directory": "輸入目錄",
"Output directory": "輸出目錄",
"Resize mode": "縮放模式",
- "Just resize": "只縮放",
- "Crop and resize": "縮放並剪裁",
- "Resize and fill": "縮放並填充",
+ "Just resize": "拉伸",
+ "Crop and resize": "裁剪",
+ "Resize and fill": "填充",
"img2img alternative test": "圖生圖的另一種測試",
"Loopback": "回送",
- "Outpainting mk2": "外補繪製第二版",
- "Poor man's outpainting": "效果稍差的外補繪製",
+ "Outpainting mk2": "向外繪製第二版",
+ "Poor man's outpainting": "效果稍差的向外繪製",
"SD upscale": "使用 SD 放大",
"should be 2 or lower.": "必須小於等於2",
"Override `Sampling method` to Euler?(this method is built for it)": "覆寫「採樣方法」為 Euler?(這個方法就是為這樣做設計的)",
@@ -164,15 +170,15 @@
"Original negative prompt": "初始反向提示詞",
"Override `Sampling Steps` to the same value as `Decode steps`?": "覆寫「採樣疊代步數」為「解碼疊代步數」?",
"Decode steps": "解碼疊代步數",
- "Override `Denoising strength` to 1?": "覆寫「去噪強度」為1?",
+ "Override `Denoising strength` to 1?": "覆寫「重繪幅度」為1?",
"Decode CFG scale": "解碼提示詞相關性(CFG)",
"Randomness": "隨機度",
"Sigma adjustment for finding noise for image": "為尋找圖中噪點的 Sigma 調整",
"Loops": "疊代次數",
- "Denoising strength change factor": "去噪強度的調整係數",
- "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推薦設定:採樣疊代步數:80-100,採樣器:Euler a,去噪強度:0.8",
+ "Denoising strength change factor": "重繪幅度的調整係數",
+ "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推薦設定:採樣疊代步數:80-100,採樣器:Euler a,重繪幅度:0.8",
"Pixels to expand": "拓展的畫素數",
- "Outpainting direction": "外補繪製的方向",
+ "Outpainting direction": "向外繪製的方向",
"left": "左",
"right": "右",
"up": "上",
@@ -205,6 +211,7 @@
"CodeFormer visibility": "CodeFormer 可見度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 權重 (0 = 最大效果, 1 = 最小效果)",
"Open output directory": "打開輸出目錄",
+ "Upscale Before Restoring Faces": "放大後再進行面部修復",
"Send to txt2img": ">> 文生圖",
"A merger of the two checkpoints will be generated in your": "合併後的模型權重存檔點會生成在你的",
"checkpoint": "模型權重存檔點",
@@ -219,7 +226,7 @@
"Add difference": "加入差分",
"Save as float16": "以 float16 儲存",
"See": "檢視",
- "wiki": "wiki",
+ "wiki": "wiki文件",
"for detailed explanation.": "以了解詳細說明",
"Create embedding": "生成 embedding",
"Create aesthetic images embedding": "生成美術風格圖集 embedding",
@@ -237,6 +244,36 @@
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
+ "tanh": "tanh",
+ "sigmoid": "sigmoid",
+ "celu": "celu",
+ "gelu": "gelu",
+ "glu": "glu",
+ "hardshrink": "hardshrink",
+ "hardsigmoid": "hardsigmoid",
+ "hardtanh": "hardtanh",
+ "logsigmoid": "logsigmoid",
+ "logsoftmax": "logsoftmax",
+ "mish": "mish",
+ "prelu": "prelu",
+ "rrelu": "rrelu",
+ "relu6": "relu6",
+ "selu": "selu",
+ "silu": "silu",
+ "softmax": "softmax",
+ "softmax2d": "softmax2d",
+ "softmin": "softmin",
+ "softplus": "softplus",
+ "softshrink": "softshrink",
+ "softsign": "softsign",
+ "tanhshrink": "tanhshrink",
+ "threshold": "閾值",
+ "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "挑選初始化層權重的方案. 類relu - Kaiming, 類sigmoid - Xavier 都是比較推薦的選項",
+ "Normal": "正態",
+ "KaimingUniform": "Kaiming 均勻",
+ "KaimingNormal": "Kaiming 正態",
+ "XavierUniform": "Xavier 均勻",
+ "XavierNormal": "Xavier 正態",
"Add layer normalization": "加入層標準化",
"Use dropout": "採用 dropout 防止過擬合",
"Overwrite Old Hypernetwork": "覆寫舊的 Hypernetwork",
@@ -250,10 +287,15 @@
"Create flipped copies": "生成鏡像副本",
"Split oversized images into two": "將過大的圖像分為兩份",
"Split oversized images": "分割過大的圖像",
+ "Auto focal point crop": "自動焦點裁切",
"Use BLIP for caption": "使用 BLIP 生成說明文字(自然語言描述)",
- "Use deepbooru for caption": "使用 deepbooru 生成說明文字(標籤)",
+ "Use deepbooru for caption": "使用 deepbooru 生成說明文字(標記)",
"Split image threshold": "圖像分割閾值",
"Split image overlap ratio": "分割圖像重疊的比率",
+ "Focal point face weight": "焦點面部權重",
+ "Focal point entropy weight": "焦點熵權重",
+ "Focal point edges weight": "焦點線條權重",
+ "Create debug image": "生成除錯圖片",
"Preprocess": "預處理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "訓練 embedding; 必須指定一組具有 1:1 比例圖像的目錄",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "訓練 embedding 或者 hypernetwork; 必須指定一組具有 1:1 比例圖像的目錄",
@@ -268,8 +310,8 @@
"Max steps": "最大疊代步數",
"Save an image to log directory every N steps, 0 to disable": "每 N 步儲存一個圖像到日誌目錄,0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步將 embedding 的副本儲存到日誌目錄,0 表示禁用",
- "Save images with embedding in PNG chunks": "儲存圖像並在 PNG 檔案中嵌入 embedding 檔案",
- "Read parameters (prompt, etc...) from txt2img tab when making previews": "進行預覽時從文生圖頁籤中讀取參數(提示詞等)",
+ "Save images with embedding in PNG chunks": "儲存圖像,並在 PNG 圖片檔案中嵌入 embedding 檔案",
+ "Read parameters (prompt, etc...) from txt2img tab when making previews": "進行預覽時,從文生圖頁籤中讀取參數(提示詞等)",
"Train Hypernetwork": "訓練 Hypernetwork",
"Train Embedding": "訓練 Embedding",
"Create an aesthetic embedding out of any number of images": "從任意數量的圖像中建立美術風格 embedding",
@@ -277,60 +319,74 @@
"txt2img history": "文生圖歷史記錄",
"img2img history": "圖生圖歷史記錄",
"extras history": "後處理歷史記錄",
- "Renew Page": "刷新頁面",
"extras": "後處理",
"favorites": "收藏夾",
+ "Favorites": "收藏夾",
+ "Others": "其他",
"custom fold": "自訂資料夾",
"Load": "載入",
"Images directory": "圖像目錄",
"Prev batch": "上一批",
"Next batch": "下一批",
+ "Dropdown": "下拉式清單",
"First Page": "首頁",
"Prev Page": "上一頁",
"Page Index": "頁數",
"Next Page": "下一頁",
"End Page": "尾頁",
"number of images to delete consecutively next": "接下來要連續刪除的圖像數",
+ "delete next": "刪除下一張",
"Delete": "刪除",
+ "sort by": "排序方式",
+ "path name": "路徑名",
+ "date": "日期",
+ "keyword": "搜尋",
"Generate Info": "生成資訊",
"File Name": "檔案名",
"Collect": "收藏",
"Refresh page": "刷新頁面",
"Date to": "日期至",
+ "Move to favorites": "移動到收藏夾",
+ "Renew Page": "刷新頁面",
"Number": "數量",
"set_index": "設定索引",
+ "load_switch": "載入開關",
+ "turn_page_switch": "翻頁開關",
"Checkbox": "核取方塊",
"Apply settings": "儲存設定",
- "Saving images/grids": "儲存圖像/概覽圖",
+ "Saving images/grids": "儲存圖像/宮格圖",
"Always save all generated images": "始終儲存所有生成的圖像",
"File format for images": "圖像的檔案格式",
"Images filename pattern": "圖像檔案名格式",
- "Always save all generated image grids": "始終儲存所有生成的概覽圖",
- "File format for grids": "概覽圖的檔案格式",
- "Add extended info (seed, prompt) to filename when saving grid": "儲存概覽時將擴展資訊(隨機種子,提示詞)加入到檔案名",
- "Do not save grids consisting of one picture": "只有一張圖片時不要儲存概覽圖",
- "Prevent empty spots in grid (when set to autodetect)": "(在自動檢測時)防止概覽圖中出現空位",
- "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概覽行數; 使用 -1 進行自動檢測,使用 0 使其與批量大小相同",
- "Save text information about generation parameters as chunks to png files": "將有關生成參數的文本資訊作為塊儲存到PNG檔案中",
- "Create a text file next to every image with generation parameters.": "儲存圖像時在每個圖像旁邊建立一個文本檔案儲存生成參數",
+ "Add number to filename when saving": "儲存的時候在檔案名里加入數字",
+ "Always save all generated image grids": "始終儲存所有生成的宮格圖",
+ "File format for grids": "宮格圖的檔案格式",
+ "Add extended info (seed, prompt) to filename when saving grid": "儲存宮格圖時,將擴展資訊(隨機種子,提示詞)加入到檔案名",
+ "Do not save grids consisting of one picture": "只有一張圖片時不要儲存宮格圖",
+ "Prevent empty spots in grid (when set to autodetect)": "(啟用自動偵測時)防止宮格圖中出現空位",
+ "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宮格圖行數; 使用 -1 進行自動檢測,使用 0 使其與每批數量相同",
+ "Save text information about generation parameters as chunks to png files": "將有關生成參數的文本資訊,作為塊儲存到PNG圖片檔案中",
+ "Create a text file next to every image with generation parameters.": "儲存圖像時,在每個圖像旁邊建立一個文本檔案儲存生成參數",
"Save a copy of image before doing face restoration.": "在進行面部修復之前儲存圖像副本",
+ "Save a copy of image before applying highres fix.": "在做高解析度修復之前儲存初始圖像副本",
+ "Save a copy of image before applying color correction to img2img results": "在對圖生圖結果套用顏色校正之前儲存圖像副本",
"Quality for saved jpeg images": "儲存的JPEG圖像的品質",
- "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 圖像大於 4MB 或寬高大於 4000,則縮小並儲存副本為 JPG",
- "Use original name for output filename during batch process in extras tab": "在後處理頁籤中的批量處理過程中使用原始名稱作為輸出檔案名",
+ "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 圖像大於 4MB 或寬高大於 4000,則縮小並儲存副本為 JPG 圖片",
+ "Use original name for output filename during batch process in extras tab": "在更多頁籤中的批量處理過程中,使用原始名稱作為輸出檔案名",
"When using 'Save' button, only save a single selected image": "使用「儲存」按鈕時,只儲存一個選定的圖像",
"Do not add watermark to images": "不要給圖像加浮水印",
"Paths for saving": "儲存路徑",
"Output directory for images; if empty, defaults to three directories below": "圖像的輸出目錄; 如果為空,則預設為以下三個目錄",
"Output directory for txt2img images": "文生圖的輸出目錄",
"Output directory for img2img images": "圖生圖的輸出目錄",
- "Output directory for images from extras tab": "後處理的輸出目錄",
- "Output directory for grids; if empty, defaults to two directories below": "概覽圖的輸出目錄; 如果為空,則預設為以下兩個目錄",
- "Output directory for txt2img grids": "文生圖概覽的輸出目錄",
- "Output directory for img2img grids": "圖生圖概覽的輸出目錄",
+ "Output directory for images from extras tab": "更多頁籤的輸出目錄",
+ "Output directory for grids; if empty, defaults to two directories below": "宮格圖的輸出目錄; 如果為空,則預設為以下兩個目錄",
+ "Output directory for txt2img grids": "文生圖宮格的輸出目錄",
+ "Output directory for img2img grids": "圖生圖宮格的輸出目錄",
"Directory for saving images using the Save button": "使用「儲存」按鈕儲存圖像的目錄",
"Saving to a directory": "儲存到目錄",
"Save images to a subdirectory": "將圖像儲存到子目錄",
- "Save grids to a subdirectory": "將概覽圖儲存到子目錄",
+ "Save grids to a subdirectory": "將宮格圖儲存到子目錄",
"When using \"Save\" button, save images to a subdirectory": "使用「儲存」按鈕時,將圖像儲存到子目錄",
"Directory name pattern": "目錄名稱格式",
"Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示詞數量",
@@ -341,10 +397,10 @@
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的圖塊重疊畫素。低值 = 可見接縫",
"LDSR processing steps. Lower = faster": "LDSR 處理疊代步數。更低 = 更快",
"Upscaler for img2img": "圖生圖的放大演算法",
- "Upscale latent space image when doing hires. fix": "做高解析度修復時也放大潛空間圖像",
+ "Upscale latent space image when doing hires. fix": "做高解析度修復時,也放大潛空間圖像",
"Face restoration": "面部修復",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 權重參數; 0 = 最大效果; 1 = 最小效果",
- "Move face restoration model from VRAM into RAM after processing": "面部修復處理完成後將面部修復模型從顯存(VRAM)移至內存(RAM)",
+ "Move face restoration model from VRAM into RAM after processing": "面部修復處理完成後,將面部修復模型從顯存(VRAM)移至內存(RAM)",
"System": "系統",
"VRAM usage polls per second during generation. Set to 0 to disable.": "生成圖像時每秒輪詢顯存(VRAM)使用情況的次數。設定為 0 以禁用",
"Always print all generation info to standard output": "始終將所有生成資訊輸出到 standard output (一般為控制台)",
@@ -355,47 +411,51 @@
"Filename word regex": "檔案名用詞的正則表達式",
"Filename join string": "檔案名連接用字串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每個 epoch 中單個輸入圖像的重複次數; 僅用於顯示 epoch 數",
- "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步儲存一個包含 loss 的CSV到日誌目錄,0 表示禁用",
+ "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步儲存一個包含 loss 的CSV表格到日誌目錄,0 表示禁用",
+ "Use cross attention optimizations while training": "訓練時開啟 cross attention 最佳化",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "快取在內存(RAM)中的模型權重存檔點",
+ "SD VAE": "模型的VAE",
+ "auto": "自動",
"Hypernetwork strength": "Hypernetwork 強度",
+ "Inpainting conditioning mask strength": "局部重繪時圖像調節的蒙版屏蔽強度",
"Apply color correction to img2img results to match original colors.": "對圖生圖結果套用顏色校正以匹配原始顏色",
- "Save a copy of image before applying color correction to img2img results": "在對圖生圖結果套用顏色校正之前儲存圖像副本",
- "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在進行圖生圖的時候,確切地執行滑塊指定的疊代步數(正常情況下更弱的去噪需要更少的疊代步數)",
+ "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在進行圖生圖的時候,確切地執行滑塊指定的疊代步數(正常情況下更弱的重繪幅度需要更少的疊代步數)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 採樣器中啟用量化以獲得更清晰,更清晰的結果。這可能會改變現有的隨機種子。需要重新啟動才能套用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "強調符:使用 (文字) 使模型更關注該文本,使用 [文字] 使其減少關注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用舊的強調符實作。可用於復現舊隨機種子",
- "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 採樣器批量生成與生成單個圖像時產出相同的圖像",
+ "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 採樣器批量生成與生成單個圖像時,產出相同的圖像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "當使用超過 75 個 token 時,通過從 n 個 token 中的最後一個逗號填補來提高一致性",
"Filter NSFW content": "過濾成人內容",
"Stop At last layers of CLIP model": "在 CLIP 模型的最後哪一層停下",
"Interrogate Options": "反推提示詞選項",
"Interrogate: keep models in VRAM": "反推: 將模型儲存在顯存(VRAM)中",
"Interrogate: use artists from artists.csv": "反推: 使用 artists.csv 中的藝術家",
- "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "反推: 在生成結果中包含與模型標籤相匹配的等級(對基於生成自然語言描述的反推沒有影響)",
+ "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "反推: 在生成結果中包含與模型標記相匹配的等級(對基於生成自然語言描述的反推沒有影響)",
"Interrogate: num_beams for BLIP": "反推: BLIP 的 num_beams",
- "Interrogate: minimum description length (excluding artists, etc..)": "反推: 最小描述長度(不包括藝術家, 等…)",
+ "Interrogate: minimum description length (excluding artists, etc..)": "反推: 最小描述長度(不包括藝術家,等…)",
"Interrogate: maximum description length": "反推: 最大描述長度",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: 文本檔案中的最大行數(0 = 無限制)",
"Interrogate: deepbooru score threshold": "反推: deepbooru 分數閾值",
"Interrogate: deepbooru sort alphabetically": "反推: deepbooru 按字母順序排序",
- "use spaces for tags in deepbooru": "在 deepbooru 中為標籤使用空格",
+ "use spaces for tags in deepbooru": "在 deepbooru 中為標記使用空格",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "在 deepbooru 中使用轉義 (\\) 括號(因此它們用作文字括號而不是強調符號)",
"User interface": "使用者介面",
"Show progressbar": "顯示進度列",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 個採樣疊代步數顯示圖像生成進度。設定 0 禁用",
- "Show previews of all images generated in a batch as a grid": "以網格的形式預覽所有批量生成出來的圖像",
- "Show grid in results for web": "在網頁的結果中顯示概覽圖",
+ "Show previews of all images generated in a batch as a grid": "以網格的形式,預覽批量生成的所有圖像",
+ "Show grid in results for web": "在網頁的結果中顯示宮格圖",
"Do not show any images in results for web": "不在網頁的結果中顯示任何圖像",
"Add model hash to generation information": "將模型的雜湊值加入到生成資訊",
"Add model name to generation information": "將模型名稱加入到生成資訊",
- "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "當從文本讀取生成參數到 UI(從 PNG 資訊或粘貼文本)時,不要更改選定的模型權重存檔點",
- "Font for image grids that have text": "有文字的概覽圖使用的字體",
+ "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "從文本讀取生成參數到使用者介面(從 PNG 圖片資訊或粘貼文本)時,不要更改選定的模型權重存檔點",
+ "Send seed when sending prompt or image to other interface": "將提示詞或者圖片發送到 >> 其他界面時,把隨機種子也傳送過去",
+ "Font for image grids that have text": "有文字的宮格圖使用的字體",
"Enable full page image viewer": "啟用整頁圖像檢視器",
- "Show images zoomed in by default in full page image viewer": "在整頁圖像檢視器中預設放大顯示圖像",
+ "Show images zoomed in by default in full page image viewer": "在整頁圖像檢視器中,預設放大顯示圖像",
"Show generation progress in window title.": "在視窗標題中顯示生成進度",
"Quicksettings list": "快速設定列表",
- "Localization (requires restart)": "本地化(需要重新啟動)",
+ "Localization (requires restart)": "本地化翻譯(需要儲存設定,並重啟Gradio)",
"Sampler parameters": "採樣器參數",
"Hide samplers in user interface (requires restart)": "在使用者介面中隱藏採樣器(需要重新啟動)",
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪聲乘數)",
@@ -406,9 +466,9 @@
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
"sigma noise": "sigma 噪聲",
- "Eta noise seed delta": "Eta 噪聲種子偏移(noise seed delta)",
+ "Eta noise seed delta": "Eta 噪聲種子偏移(ENSD)",
"Images Browser": "圖庫瀏覽器",
- "Preload images at startup": "在啟動時預載圖像",
+ "Preload images at startup": "在啟動時預加載圖像",
"Number of columns on the page": "每頁列數",
"Number of rows on the page": "每頁行數",
"Number of pictures displayed on each page": "每頁顯示的圖像數量",
@@ -418,24 +478,36 @@
"Use same seed for all images": "為所有圖像使用同一個隨機種子",
"Request browser notifications": "請求瀏覽器通知",
"Download localization template": "下載本地化模板",
- "Reload custom script bodies (No ui updates, No restart)": "重新載入自訂指令碼主體(無UI更新,無重啟)",
+ "Reload custom script bodies (No ui updates, No restart)": "重新載入自訂指令碼主體(無使用者介面更新,無重啟)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重啟 Gradio 及刷新組件(僅限自訂指令碼,ui.py,JS 和 CSS)",
+ "Available": "可用",
+ "Install from URL": "從網址安裝",
+ "Apply and restart UI": "應用並重啟使用者介面",
+ "Check for updates": "檢查更新",
+ "Extension": "擴充",
+ "URL": "網址",
+ "Update": "更新",
+ "unknown": "未知",
+ "Load from:": "載入自",
+ "Extension index URL": "擴充清單連結",
+ "URL for extension's git repository": "擴充的 git 倉庫連結",
+ "Local directory name": "本地路徑名",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示詞(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示詞(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Add a random artist to the prompt.": "隨機加入一個藝術家到提示詞中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "從提示詞中讀取生成參數,如果提示詞為空,則讀取上一次的生成參數到使用者介面",
"Save style": "存儲為模板風格",
"Apply selected styles to current prompt": "將所選樣式套用於當前提示",
- "Stop processing current image and continue processing.": "停止處理當前圖像並繼續處理下一個",
- "Stop processing images and return any results accumulated so far.": "停止處理圖像並返回迄今為止累積的任何結果",
+ "Stop processing current image and continue processing.": "停止處理當前圖像,並繼續處理下一個",
+ "Stop processing images and return any results accumulated so far.": "停止處理圖像,並返回迄今為止累積的任何結果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要套用的模版風格; 模版風格包含正向和反向提示詞,並套用於兩者",
"Do not do anything special": "什麼都不做",
"Which algorithm to use to produce the image": "使用哪種演算法生成圖像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有創意,可以根據疊代步數獲得完全不同的圖像,將疊代步數設定為高於 30-40 不會有正面作用",
- "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅長內補繪製",
+ "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅長局部重繪",
"Produce an image that can be tiled.": "生成可用於平舖的圖像",
- "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用兩步處理的時候以較小的解析度生成初步圖像,接著放大圖像,然後在不更改構圖的情況下改進其中的細節",
- "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "決定演算法對圖像內容的影響程度。設定 0 時,什麼都不會改變,而在 1 時,你將獲得不相關的圖像。值低於 1.0 時,處理的疊代步數將少於「採樣疊代步數」滑塊指定的步數",
+ "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用兩步處理的時候,以較小的解析度生成初步圖像,接著放大圖像,然後在不更改構圖的情況下改進其中的細節",
+ "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "決定演算法對圖像內容的影響程度。設定 0 時,什麼都不會改變,而在 1 時,你將獲得不相關的圖像。\n值低於 1.0 時,處理的疊代步數將少於「採樣疊代步數」滑塊指定的步數",
"How many batches of images to create": "建立多少批次的圖像",
"How many image to create in a single batch": "每批建立多少圖像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 圖像應在多大程度上服從提示詞 - 較低的值會產生更有創意的結果",
@@ -448,20 +520,20 @@
"This text is used to rotate the feature space of the imgs embs": "此文本用於旋轉圖集 embeddings 的特徵空間",
"Separate values for X axis using commas.": "使用逗號分隔 X 軸的值",
"Separate values for Y axis using commas.": "使用逗號分隔 Y 軸的值",
- "Write image to a directory (default - log/images) and generation parameters into csv file.": "將圖像寫入目錄(預設 — log/images)並將生成參數寫入CSV檔案",
+ "Write image to a directory (default - log/images) and generation parameters into csv file.": "將圖像寫入目錄(預設 — log/images)並將生成參數寫入CSV表格檔案",
"Open images output directory": "打開圖像輸出目錄",
"How much to blur the mask before processing, in pixels.": "處理前要對蒙版進行多強的模糊,以畫素為單位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 處理蒙版區域之前要在蒙版區域內放置什麼",
- "fill it with colors of the image": "用圖像的顏色填充它",
- "keep whatever was there originally": "保留原來的内容",
+ "fill it with colors of the image": "用圖像的顏色(高強度模糊)填充它",
+ "keep whatever was there originally": "保留原來的圖像,不進行預處理",
"fill it with latent space noise": "用潛空間的噪聲填充它",
"fill it with latent space zeroes": "用潛空間的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "將蒙版區域放大到目標解析度,做內補繪製,縮小後粘貼到原始圖像中",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "將蒙版區域(包括預留畫素長度的緩衝區域)放大到目標解析度,進行局部重繪。\n然後縮小並粘貼回原始圖像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "將圖像大小調整為目標解析度。除非高度和寬度匹配,否則你將獲得不正確的縱橫比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "調整圖像大小,使整個目標解析度都被圖像填充。裁剪多出來的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "調整圖像大小,使整個圖像在目標解析度內。用圖像的顏色填充空白區域",
"How many times to repeat processing an image and using it as input for the next iteration": "重複處理圖像並用作下次疊代輸入的次數",
- "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每個循環中,去噪強度都會乘以該值。<1 表示減少多樣性,因此你的這一組圖將集中在固定的圖像上。>1 意味著增加多樣性,因此你的這一組圖將變得越來越混亂",
+ "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每個循環中,重繪幅度都會乘以該值。<1 表示減少多樣性,因此你的這一組圖將集中在固定的圖像上。>1 意味著增加多樣性,因此你的這一組圖將變得越來越混亂",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "使用 SD 放大時,圖塊之間應該有多少畫素重疊。圖塊之間需要重疊才可以讓它們在合併回一張圖像時,沒有清晰可見的接縫",
"A directory on the same machine where the server is running.": "與伺服器主機上的目錄",
"Leave blank to save images to the default path.": "留空以將圖像儲存到預設路徑",
@@ -472,17 +544,53 @@
"Path to directory with input images": "帶有輸入圖像的路徑",
"Path to directory where to write outputs": "進行輸出的路徑",
"Input images directory": "輸入圖像目錄",
- "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下標籤定義如何選擇圖像的檔案名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 預設請留空",
+ "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下標記定義如何選擇圖像的檔案名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; 預設請留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果啟用此選項,浮水印將不會加入到生成出來的圖像中。警告:如果你不加入浮水印,你的行為可能是不符合道德操守的",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下標籤定義如何選擇圖像和概覽圖的子目錄: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 預設請留空",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; leave empty for default.": "使用以下標記定義如何選擇圖像和宮格圖的子目錄: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime], [job_timestamp]; 預設請留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神經網路修復低品質面部",
- "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正則表達式將用於從檔案名中提取單詞,並將使用以下選項將它們接合到用於訓練的標籤文本中。留空以保持檔案名文本不變",
+ "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正則表達式將用於從檔案名中提取單詞,並將使用以下選項將它們接合到用於訓練的標記文本中。留空以保持檔案名文本不變",
"This string will be used to join split words into a single line if the option above is enabled.": "如果啟用了上述選項,則此處的字元會用於將拆分的單詞接合為同一行",
- "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "設定名稱列表,以逗號分隔,設定應轉到頂部的快速存取列,而不是通常的設定頁籤。有關設定名稱,請參見 modules/shared.py。需要重新啟動才能套用",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "僅適用於局部重繪專用的模型(模型後綴為 inpainting.ckpt 的模型)。決定了蒙版在局部重繪以及圖生圖中屏蔽原圖內容的強度。 1.0 表示完全屏蔽原圖,這是預設行為。 0.0 表示完全不屏蔽讓原圖進行圖像調節。較低的值將有助於保持原圖的整體構圖,但很難遇到較大的變化",
+ "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "設定項名稱列表,以逗號分隔,該設定會移動到頂部的快速存取列,而不是預設的設定頁籤。有關設定名稱,請參見 modules/shared.py。需要重新啟動才能套用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果這個值不為零,它將被加入到隨機種子中,並在使用帶有 Eta 的採樣器時用於初始化隨機噪聲。你可以使用它來產生更多的圖像變化,或者你可以使用它來模仿其他軟體生成的圖像,如果你知道你在做什麼",
+ "Leave empty for auto": "留空時自動生成",
+ "Autocomplete options": "自動補全選項",
"Enable Autocomplete": "開啟Tag補全",
+ "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "選擇哪些 Real-ESRGAN 模型顯示在網頁使用者介面。(需要重新啟動)",
"Allowed categories for random artists selection when using the Roll button": "使用抽選藝術家按鈕時將會隨機的藝術家類別",
+ "Append commas": "附加逗號",
"Roll three": "抽三位出來",
- "Generate forever": "不停地生成",
- "Cancel generate forever": "取消不停地生成"
+ "Generate forever": "無限生成",
+ "Cancel generate forever": "停止無限生成",
+ "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "疊代改進生成的圖像多少次;更高的值需要更長的時間;非常低的值會產生不好的結果",
+ "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在圖像上畫一個蒙版,指令碼會根據提示重新生成蒙版區域的內容",
+ "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常放大圖像,將結果分割成圖塊,用圖生圖改進每個圖塊,最後將整個圖像合併回來",
+ "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "創建一個網格,圖像將有不同的參數。使用下面的輸入來指定哪些參數將由列和行共享",
+ "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "執行 Python 程式碼。僅限老手使用。必須以 --allow-code 來開啟程式,才能使其執行",
+ "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "以逗號分割的單詞列表,第一個單詞將被用作關鍵詞:指令碼將在提示詞中搜尋這個單詞,並用其他單詞替換它",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "以逗號分割的單詞列表,指令碼會排列出這些單詞的所有排列方式,並加入提示詞各生成一次",
+ "Reconstruct prompt from existing image and put it into the prompt field.": "從現有的圖像中重構出提示詞,並將其放入提示詞的輸入文字方塊",
+ "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "設定在[prompt_words]選項中要使用的最大字數;注意:如果字數太長,可能會超過系統可處理的檔案路徑的最大長度",
+ "Process an image, use it as an input, repeat.": "處理一張圖像,將其作為輸入,並重複",
+ "Insert selected styles into prompt fields": "在提示詞中插入選定的模版風格",
+ "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "將當前的提示詞儲存為模版風格。如果你在文本中加入{prompt}標記,那麼將來你使用該模版風格時,你現有的提示詞會替換模版風格中的{prompt}",
+ "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成圖像之前從模型權重存檔點中載入權重。你可以使用哈希值或檔案名的一部分(如設定中所示)作為模型權重存檔點名稱。建議用在Y軸上以減少過程中模型的切換",
+ "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成過程中,Torch使用的顯存(VRAM)峰值,不包括快取的數據。\nTorch reserved: Torch 分配的顯存(VRAM)的峰值量,包括所有活動和快取數據。\nSys VRAM: 所有應用程式分配的顯存(VRAM)的峰值量 / GPU 的總顯存(VRAM)(峰值利用率%)",
+ "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潛空間中的圖像。而另一種方法是,從潛變量表達中直接解碼並生成完整的圖像,接著放大它,然後再將其編碼回潛空間",
+ "Start drawing": "開始繪製",
+ "Description": "描述",
+ "Action": "行動",
+ "Aesthetic Gradients": "美術風格",
+ "aesthetic-gradients": "美術風格",
+ "stable-diffusion-webui-wildcards": "萬用字元",
+ "Dynamic Prompts": "動態提示",
+ "images-browser": "圖庫瀏覽器",
+ "Inspiration": "靈感",
+ "Deforum": "Deforum",
+ "Artists to study": "藝術家圖庫",
+ "Aesthetic Image Scorer": "美術風格評分",
+ "Dataset Tag Editor": "數據集標記編輯器",
+ "Install": "安裝",
+ "Installing...": "安裝中…",
+ "Installed": "已安裝"
}
From 459e05c2bdaf9e4e9babc49875eaf11510a898d1 Mon Sep 17 00:00:00 2001
From: dtlnor
Date: Fri, 4 Nov 2022 07:25:12 +0900
Subject: [PATCH 102/147] Update zh_CN.json
- update with new content
- polish some translations
---
localizations/zh_CN.json | 42 ++++++++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 8a00c11c..56c8980e 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -7,7 +7,7 @@
"Loading...": "载入中...",
"view": "查看",
"api": "api",
- "•": "•",
+ "•": " • ",
"built with gradio": "基于 Gradio 构建",
"Stable Diffusion checkpoint": "Stable Diffusion 模型(ckpt)",
"txt2img": "文生图",
@@ -69,15 +69,15 @@
"Variation strength": "差异强度",
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
- "Open for Clip Aesthetic!": "打开美术风格 Clip!",
+ "Open for Clip Aesthetic!": "打开以调整 Clip 的美术风格!",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
- "Slerp interpolation": "Slerp 插值",
+ "Slerp interpolation": "球面线性插值",
"Aesthetic imgs embedding": "美术风格图集 embedding",
"None": "无",
"Aesthetic text for imgs": "该图集的美术风格描述",
- "Slerp angle": "Slerp 角度",
+ "Slerp angle": "球面线性插值角度",
"Is negative text": "是反向提示词",
"Script": "脚本",
"Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG 图片文件",
@@ -181,7 +181,9 @@
"Color variation": "色彩变化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)",
"Tile overlap": "图块重叠的像素(Tile overlap)",
+ "Upscaler": "放大算法",
"Lanczos": "Lanczos",
+ "Nearest": "最邻近(整数缩放)",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
@@ -198,6 +200,7 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
+ "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
@@ -286,7 +289,7 @@
"Create debug image": "生成调试(debug)图片",
"Preprocess": "预处理",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录",
- "[wiki]": "[帮助]",
+ "[wiki]": "[wiki文档]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 学习率",
"Hypernetwork Learning rate": "Hypernetwork 学习率",
@@ -456,7 +459,17 @@
"Extension": "扩展",
"URL": "网址",
"Update": "更新",
+ "a1111-sd-webui-tagcomplete": "Tag自动补全",
"unknown": "未知",
+ "deforum-for-automatic1111-webui": "Deforum",
+ "sd-dynamic-prompting": "动态提示词",
+ "stable-diffusion-webui-aesthetic-gradients": "美术风格梯度",
+ "stable-diffusion-webui-aesthetic-image-scorer": "美术风格评分",
+ "stable-diffusion-webui-artists-to-study": "艺术家图库",
+ "stable-diffusion-webui-dataset-tag-editor": "数据集标签编辑器",
+ "stable-diffusion-webui-images-browser": "图库浏览器",
+ "stable-diffusion-webui-inspiration": "灵感",
+ "stable-diffusion-webui-wildcards": "通配符",
"Load from:": "加载自",
"Extension index URL": "扩展列表链接",
"URL for extension's git repository": "扩展的 git 仓库链接",
@@ -486,8 +499,8 @@
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
"fill it with colors of the image": "用图像的颜色(高强度模糊)填充它",
"keep whatever was there originally": "保留原来的图像,不进行预处理",
- "fill it with latent space noise": "用潜空间的噪声填充它",
- "fill it with latent space zeroes": "用潜空间的零填充它",
+ "fill it with latent space noise": "于潜空间填充噪声",
+ "fill it with latent space zeroes": "于潜空间填零",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域(包括预留像素长度的缓冲区域)放大到目标分辨率,进行局部重绘。\n然后缩小并粘贴回原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
@@ -516,6 +529,8 @@
"Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
"Append commas": "附加逗号",
+ "latest": "最新",
+ "behind": "落后",
"Roll three": "抽三位出来",
"Generate forever": "无限生成",
"Cancel generate forever": "停止无限生成",
@@ -536,15 +551,13 @@
"Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成图像之前从模型(ckpt)中加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为模型(ckpt)名称。建议用在Y轴上以减少过程中模型的切换",
"Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch 分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序分配的显存(VRAM)的峰值量 / GPU 的总显存(VRAM)(峰值利用率%)",
"Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
- "Upscaler": "放大算法",
"Start drawing": "开始绘制",
"Description": "描述",
"Action": "行动",
- "Aesthetic Gradients": "美术风格",
- "aesthetic-gradients": "美术风格",
+ "Aesthetic Gradients": "美术风格梯度",
+ "aesthetic-gradients": "美术风格梯度",
"Wildcards": "通配符",
- "stable-diffusion-webui-wildcards": "通配符",
- "Dynamic Prompts": "动态提示",
+ "Dynamic Prompts": "动态提示词",
"Image browser": "图库浏览器",
"images-browser": "图库浏览器",
"Inspiration": "灵感",
@@ -554,7 +567,7 @@
"Dataset Tag Editor": "数据集标签编辑器",
- "----无效----": "----以下内容无法被翻译,Bug----",
+ "----not work----": "----以下内容无法被翻译,Bug----",
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
@@ -574,10 +587,9 @@
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要使用的模版风格; 模版风格包含正向和反向提示词,并应用于两者\n\ud83c\udfa8 随机添加一个艺术家到提示词中\n \u2199\ufe0f 从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面\n\ud83d\udcbe 将当前的提示词保存为模版风格(保存在styles.csv)\n\ud83d\udccb 将所选模板风格,应用于当前提示词\n如果你在文本中添加{prompt}标记,并保存为模版风格\n那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果\n\ud83c\udfb2 将随机种子设置为-1,则每次都会使用一个新的随机数\n\u267b\ufe0f 重用上一次使用的随机种子,如果想要固定输出结果就会很有用",
- "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
- "----已移除----": "----以下内容在webui新版本已移除----",
+ "----deprecated----": "----以下内容在webui新版本已移除----",
"▼": "▼",
"History": "历史记录",
"Show Textbox": "显示文本框",
From b2c48091db394c2b7d375a33f18d90c924cd4363 Mon Sep 17 00:00:00 2001
From: Gur
Date: Fri, 4 Nov 2022 06:55:03 +0800
Subject: [PATCH 103/147] fixed api compatibility with python 3.8
---
modules/api/models.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/modules/api/models.py b/modules/api/models.py
index 9ee42a17..29a934ba 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -6,6 +6,7 @@ from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
from modules.shared import sd_upscalers
+from typing import List
API_NOT_ALLOWED = [
"self",
@@ -109,12 +110,12 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
).generate_model()
class TextToImageResponse(BaseModel):
- images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+ images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
class ImageToImageResponse(BaseModel):
- images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+ images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
@@ -146,10 +147,10 @@ class FileData(BaseModel):
name: str = Field(title="File name")
class ExtrasBatchImagesRequest(ExtrasBaseRequest):
- imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
+ imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
class ExtrasBatchImagesResponse(ExtraBaseResponse):
- images: list[str] = Field(title="Images", description="The generated images in base64 format.")
+ images: List[str] = Field(title="Images", description="The generated images in base64 format.")
class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")
From 8eb64dab3e9e40531f6a3fa606a1c23a62987249 Mon Sep 17 00:00:00 2001
From: digburn <115176097+digburn@users.noreply.github.com>
Date: Fri, 4 Nov 2022 00:35:18 +0000
Subject: [PATCH 104/147] fix: correct default val of upscale_first to False
---
modules/api/models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/api/models.py b/modules/api/models.py
index 9069c0ac..68fb45c6 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -131,7 +131,7 @@ class ExtrasBaseRequest(BaseModel):
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
- upscale_first: bool = Field(default=True, title="Upscale first", description="Should the upscaler run before restoring faces?")
+ upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
class ExtraBaseResponse(BaseModel):
html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
From f59855dce3dbcceb676da5e1f6d2c9fdbc344198 Mon Sep 17 00:00:00 2001
From: benlisquare <116663807+benlisquare@users.noreply.github.com>
Date: Fri, 4 Nov 2022 11:42:53 +1100
Subject: [PATCH 105/147] Apply missing translations to Traditional Chinese
(zh_TW) localisation JSON
---
localizations/zh_TW.json | 2 ++
1 file changed, 2 insertions(+)
diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json
index 7467db88..4e6dac44 100644
--- a/localizations/zh_TW.json
+++ b/localizations/zh_TW.json
@@ -408,6 +408,7 @@
"Training": "訓練",
"Unload VAE and CLIP from VRAM when training": "訓練時從顯存(VRAM)中取消 VAE 和 CLIP 的載入",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存(VRAM)移放到內存(RAM),節省顯存(VRAM)",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存(VRAM)移放到內存(RAM)如果可行的話,節省顯存(VRAM)",
"Filename word regex": "檔案名用詞的正則表達式",
"Filename join string": "檔案名連接用字串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每個 epoch 中單個輸入圖像的重複次數; 僅用於顯示 epoch 數",
@@ -590,6 +591,7 @@
"Artists to study": "藝術家圖庫",
"Aesthetic Image Scorer": "美術風格評分",
"Dataset Tag Editor": "數據集標記編輯器",
+ "Face restoration model": "面部修復模型",
"Install": "安裝",
"Installing...": "安裝中…",
"Installed": "已安裝"
From 3780ad3ad837dd406da39eebd5d91009b5a58445 Mon Sep 17 00:00:00 2001
From: digburn
Date: Fri, 4 Nov 2022 00:40:21 +0000
Subject: [PATCH 106/147] fix: loading models without vae from cache
---
modules/sd_models.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 5075fadb..ae427a5c 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -204,8 +204,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoints_loaded.popitem(last=False) # LRU
else:
- vae_name = sd_vae.get_filename(vae_file)
- print(f"Loading weights [{sd_model_hash}] with {vae_name} VAE from cache")
+ vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+ vae_message = f" with {vae_name} VAE" if vae_name else ""
+ print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
checkpoints_loaded.move_to_end(checkpoint_key)
model.load_state_dict(checkpoints_loaded[checkpoint_key])
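Spelled out on its own, the fix simply drops the VAE name from the log line when no separate VAE file is involved. A standalone sketch of the same pattern, with os.path.basename standing in for sd_vae.get_filename and the function name chosen only for illustration:

    import os
    from typing import Optional


    def describe_cached_load(sd_model_hash: str, vae_file: Optional[str]) -> str:
        # Mention the VAE only when a file was actually supplied; otherwise the
        # suffix collapses to an empty string and the message stays well-formed.
        vae_name = os.path.basename(vae_file) if vae_file else None
        vae_message = f" with {vae_name} VAE" if vae_name else ""
        return f"Loading weights [{sd_model_hash}]{vae_message} from cache"


    # describe_cached_load("abc1234", None) == "Loading weights [abc1234] from cache"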
From e533ff61c1baa4ad047f9c8dc05c17b64ee89ddf Mon Sep 17 00:00:00 2001
From: timntorres
Date: Thu, 3 Nov 2022 22:28:22 -0700
Subject: [PATCH 107/147] Lift extras generate button a la #4246.
---
modules/ui.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 2609857e..6461002a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1052,6 +1052,8 @@ def create_ui(wrap_gradio_gpu_call):
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.")
show_extras_results = gr.Checkbox(label='Show result images', value=True)
+ submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
+
with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by'):
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
@@ -1079,8 +1081,6 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Group():
upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False)
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples)
submit.click(
From 4dd898b8c15e342f817d3fb1c8dc9f2d5d111022 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 08:38:11 +0300
Subject: [PATCH 108/147] do not mess with components' visibility for scripts;
instead create group components and show/hide those; this will break scripts
that create invisible components and rely on UI, but the earlier I make this
change the better
---
modules/scripts.py | 34 +++++++++++++++++---------------
scripts/custom_code.py | 2 +-
scripts/outpainting_mk_2.py | 2 +-
scripts/poor_mans_outpainting.py | 4 ++--
scripts/prompts_from_file.py | 10 +++++-----
scripts/sd_upscale.py | 4 ++--
scripts/xy_grid.py | 8 ++++----
7 files changed, 33 insertions(+), 31 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 533db45c..28ce07f4 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -18,6 +18,9 @@ class Script:
args_to = None
alwayson = False
+ """A gr.Group component that has all script's UI inside it"""
+ group = None
+
infotext_fields = None
"""if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
@@ -218,8 +221,6 @@ class ScriptRunner:
for control in controls:
control.custom_script_source = os.path.basename(script.filename)
- if not script.alwayson:
- control.visible = False
if script.infotext_fields is not None:
self.infotext_fields += script.infotext_fields
@@ -229,40 +230,41 @@ class ScriptRunner:
script.args_to = len(inputs)
for script in self.alwayson_scripts:
- with gr.Group():
+ with gr.Group() as group:
create_script_ui(script, inputs, inputs_alwayson)
+ script.group = group
+
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
dropdown.save_to_config = True
inputs[0] = dropdown
for script in self.selectable_scripts:
- create_script_ui(script, inputs, inputs_alwayson)
+ with gr.Group(visible=False) as group:
+ create_script_ui(script, inputs, inputs_alwayson)
+
+ script.group = group
def select_script(script_index):
- if 0 < script_index <= len(self.selectable_scripts):
- script = self.selectable_scripts[script_index-1]
- args_from = script.args_from
- args_to = script.args_to
- else:
- args_from = 0
- args_to = 0
+ selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None
- return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)]
+ return [gr.update(visible=selected_script == s) for s in self.selectable_scripts]
def init_field(title):
+ """called when an initial value is set from ui-config.json to show script's UI components"""
+
if title == 'None':
return
+
script_index = self.titles.index(title)
- script = self.selectable_scripts[script_index]
- for i in range(script.args_from, script.args_to):
- inputs[i].visible = True
+ self.selectable_scripts[script_index].group.visible = True
dropdown.init_field = init_field
+
dropdown.change(
fn=select_script,
inputs=[dropdown],
- outputs=inputs
+ outputs=[script.group for script in self.selectable_scripts]
)
return inputs
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index a9b10c09..22e7b77a 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -14,7 +14,7 @@ class Script(scripts.Script):
return cmd_opts.allow_code
def ui(self, is_img2img):
- code = gr.Textbox(label="Python code", visible=False, lines=1)
+ code = gr.Textbox(label="Python code", lines=1)
return [code]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 2afd4aa5..cf71cb92 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -132,7 +132,7 @@ class Script(scripts.Script):
info = gr.HTML("Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
")
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index b0469110..ea45beb0 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -22,8 +22,8 @@ class Script(scripts.Script):
return None
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index d187cd9c..3388bc77 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -83,13 +83,14 @@ def cmdargs(line):
def load_prompt_file(file):
- if (file is None):
+ if file is None:
lines = []
else:
lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]
return None, "\n".join(lines), gr.update(lines=7)
+
class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
@@ -107,9 +108,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
- return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
+ return [checkbox_iterate, checkbox_iterate_batch, prompt_txt]
- def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
+ def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0]
@@ -157,5 +158,4 @@ class Script(scripts.Script):
if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter)
-
- return Processed(p, images, p.seed, "")
\ No newline at end of file
+ return Processed(p, images, p.seed, "")
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index cb37ff7e..01074291 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -18,8 +18,8 @@ class Script(scripts.Script):
def ui(self, is_img2img):
info = gr.HTML("Will upscale the image to twice the dimensions; use width and height sliders to set tile size
")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
+ overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
return [info, overlap, upscaler_index]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index f5255786..417ed0d4 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -263,12 +263,12 @@ class Script(scripts.Script):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
- x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
- x_values = gr.Textbox(label="X values", visible=False, lines=1)
+ x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
+ x_values = gr.Textbox(label="X values", lines=1)
with gr.Row():
- y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, visible=False, type="index", elem_id="y_type")
- y_values = gr.Textbox(label="Y values", visible=False, lines=1)
+ y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
+ y_values = gr.Textbox(label="Y values", lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
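For readers following the visibility change above, here is a minimal, self-contained sketch of the same pattern outside the repo's ScriptRunner: each script's controls live inside an initially hidden gr.Group, and the dropdown callback toggles the groups rather than individual components. It assumes gradio's gr.Blocks/gr.Group/gr.update API and is not the webui's actual code.

import gradio as gr

script_titles = ["Script A", "Script B"]   # hypothetical script names

with gr.Blocks() as demo:
    dropdown = gr.Dropdown(label="Script", choices=["None"] + script_titles, value="None", type="index")

    groups = []
    for title in script_titles:
        # each script's UI sits inside its own group, hidden until selected
        with gr.Group(visible=False) as group:
            gr.Textbox(label=f"{title} option")
        groups.append(group)

    def select_script(script_index):
        selected = groups[script_index - 1] if script_index > 0 else None
        # show only the selected group; hide the rest
        return [gr.update(visible=(g is selected)) for g in groups]

    dropdown.change(fn=select_script, inputs=[dropdown], outputs=groups)

# demo.launch()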
From f2b69709eaff88fc3a2bd49585556ec0883bf5ea Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 09:42:25 +0300
Subject: [PATCH 109/147] move option access checking to options class out of
various places scattered through code
---
modules/processing.py | 4 ++--
modules/shared.py | 11 +++++++++++
modules/ui.py | 20 +++++---------------
3 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 2168208c..a46e592d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -418,13 +418,13 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
try:
for k, v in p.override_settings.items():
- opts.data[k] = v # we don't call onchange for simplicity which makes changing model, hypernet impossible
+ setattr(opts, k, v) # we don't call onchange for simplicity which makes changing model, hypernet impossible
res = process_images_inner(p)
finally:
for k, v in stored_opts.items():
- opts.data[k] = v
+ setattr(opts, k, v)
return res
diff --git a/modules/shared.py b/modules/shared.py
index d8e99f85..024c771a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -396,6 +396,15 @@ class Options:
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data or key in self.data_labels:
+ assert not cmd_opts.freeze_settings, "changing settings is disabled"
+
+ comp_args = opts.data_labels[key].component_args
+ if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ if cmd_opts.hide_ui_dir_config and key in restricted_opts:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
self.data[key] = value
return
@@ -412,6 +421,8 @@ class Options:
return super(Options, self).__getattribute__(item)
def save(self, filename):
+ assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file, indent=4)
diff --git a/modules/ui.py b/modules/ui.py
index b2b1c854..633b56ef 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1438,8 +1438,6 @@ def create_ui(wrap_gradio_gpu_call):
def run_settings(*args):
changed = 0
- assert not shared.cmd_opts.freeze_settings, "changing settings is disabled"
-
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
@@ -1448,15 +1446,9 @@ def create_ui(wrap_gradio_gpu_call):
if comp == dummy_component:
continue
- comp_args = opts.data_labels[key].component_args
- if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
- continue
-
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
- continue
-
oldval = opts.data.get(key, None)
- opts.data[key] = value
+
+ setattr(opts, key, value)
if oldval != value:
if opts.data_labels[key].onchange is not None:
@@ -1469,17 +1461,15 @@ def create_ui(wrap_gradio_gpu_call):
return f'{changed} settings changed.', opts.dumpjson()
def run_settings_single(value, key):
- assert not shared.cmd_opts.freeze_settings, "changing settings is disabled"
-
if not opts.same_type(value, opts.data_labels[key].default):
return gr.update(visible=True), opts.dumpjson()
oldval = opts.data.get(key, None)
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
+ try:
+ setattr(opts, key, value)
+ except Exception:
return gr.update(value=oldval), opts.dumpjson()
- opts.data[key] = value
-
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
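A stripped-down illustration of the idea behind this patch: route every settings write through one __setattr__ guard, so override_settings, the settings screen, and quicksettings all hit the same checks. The class below is a simplified stand-in, not the repo's Options; the freeze and restriction rules are reduced to two constructor flags.

class Options:
    def __init__(self, defaults, frozen=False, restricted=()):
        # bypass __setattr__ for internal attributes
        object.__setattr__(self, "data", dict(defaults))
        object.__setattr__(self, "frozen", frozen)
        object.__setattr__(self, "restricted", set(restricted))

    def __getattr__(self, key):
        try:
            return self.data[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        if key in self.data:
            assert not self.frozen, "changing settings is disabled"
            if key in self.restricted:
                raise RuntimeError(f"not possible to set {key} because it is restricted")
            self.data[key] = value
            return
        object.__setattr__(self, key, value)

opts = Options({"sd_vae": "auto", "outdir_samples": "outputs"}, restricted={"outdir_samples"})
setattr(opts, "sd_vae", "example.vae.pt")     # allowed
# setattr(opts, "outdir_samples", "/etc")     # would raise: restricted
print(opts.sd_vae)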
From 0abb39f461baa343ae7c23abffb261e57c3168d4 Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 15:47:19 +0900
Subject: [PATCH 110/147] resolve conflict - first revert
---
modules/hypernetworks/hypernetwork.py | 123 +++++++++++---------------
1 file changed, 52 insertions(+), 71 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 4230b8cf..674fcedd 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -21,7 +21,6 @@ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_norm
from collections import defaultdict, deque
from statistics import stdev, mean
-optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
@@ -34,9 +33,12 @@ class HypernetworkModule(torch.nn.Module):
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
}
- activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
+ activation_dict.update(
+ {cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if
+ inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
- def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
+ add_layer_norm=False, use_dropout=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
@@ -47,7 +49,7 @@ class HypernetworkModule(torch.nn.Module):
for i in range(len(layer_structure) - 1):
# Add a fully-connected layer
- linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
+ linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i + 1])))
# Add an activation func
if activation_func == "linear" or activation_func is None:
@@ -59,7 +61,7 @@ class HypernetworkModule(torch.nn.Module):
# Add layer normalization
if add_layer_norm:
- linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
+ linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i + 1])))
# Add dropout expect last layer
if use_dropout and i < len(layer_structure) - 3:
@@ -128,7 +130,8 @@ class Hypernetwork:
filename = None
name = None
- def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None,
+ add_layer_norm=False, use_dropout=False):
self.filename = None
self.name = name
self.layers = {}
@@ -140,13 +143,13 @@ class Hypernetwork:
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
- self.optimizer_name = None
- self.optimizer_state_dict = None
for size in enable_sizes or []:
self.layers[size] = (
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout),
)
def weights(self):
@@ -161,7 +164,6 @@ class Hypernetwork:
def save(self, filename):
state_dict = {}
- optimizer_saved_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
@@ -175,14 +177,8 @@ class Hypernetwork:
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
- if self.optimizer_name is not None:
- optimizer_saved_dict['optimizer_name'] = self.optimizer_name
torch.save(state_dict, filename)
- if self.optimizer_state_dict:
- optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
- optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
- torch.save(optimizer_saved_dict, filename + '.optim')
def load(self, filename):
self.filename = filename
@@ -202,23 +198,13 @@ class Hypernetwork:
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}")
- optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
- self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
- print(f"Optimizer name is {self.optimizer_name}")
- if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
- self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
- else:
- self.optimizer_state_dict = None
- if self.optimizer_state_dict:
- print("Loaded existing optimizer from checkpoint")
- else:
- print("No saved optimizer exists in checkpoint")
-
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
- HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout),
)
self.name = state_dict.get('name', self.name)
@@ -233,7 +219,7 @@ def list_hypernetworks(path):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
- res[name + f"({sd_models.model_hash(filename)})"] = filename
+ res[name] = filename
return res
@@ -330,7 +316,7 @@ def statistics(data):
std = 0
else:
std = stdev(data)
- total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
+ total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std / (len(data) ** 0.5):.3f})"
recent_data = data[-32:]
if len(recent_data) < 2:
std = 0
@@ -340,7 +326,7 @@ def statistics(data):
return total_information, recent_information
-def report_statistics(loss_info:dict):
+def report_statistics(loss_info: dict):
keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
for key in keys:
try:
@@ -352,14 +338,18 @@ def report_statistics(loss_info:dict):
print(e)
-
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width,
+ training_height, steps, create_image_every, save_hypernetwork_every, template_file,
+ preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps,
+ preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
- textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
+ textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps,
+ save_hypernetwork_every, create_image_every, log_directory,
+ name="hypernetwork")
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
@@ -379,7 +369,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
else:
hypernetwork_dir = None
- hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
@@ -395,39 +384,34 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
-
+
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
- ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width,
+ height=training_height,
+ repeats=shared.opts.training_image_repeats_per_epoch,
+ placeholder_token=hypernetwork_name,
+ model=shared.sd_model, device=devices.device,
+ template_file=template_file, include_cond=True,
+ batch_size=batch_size)
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
size = len(ds.indexes)
- loss_dict = defaultdict(lambda : deque(maxlen = 1024))
+ loss_dict = defaultdict(lambda: deque(maxlen=1024))
losses = torch.zeros((size,))
previous_mean_losses = [0]
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
-
+
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
- # Here we use optimizer from saved HN, or we can specify as UI option.
- if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
- optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
- else:
- print(f"Optimizer type {optimizer_name} is not defined!")
- optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
- optimizer_name = 'AdamW'
- if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
- try:
- optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
- except RuntimeError as e:
- print("Cannot resume from saved optimizer!")
- print(e)
+ # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
+ optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
steps_without_grad = 0
@@ -441,7 +425,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if len(loss_dict) > 0:
previous_mean_losses = [i[-1] for i in loss_dict.values()]
previous_mean_loss = mean(previous_mean_losses)
-
+
scheduler.apply(optimizer, hypernetwork.step)
if scheduler.finished:
break
@@ -460,7 +444,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
losses[hypernetwork.step % losses.shape[0]] = loss.item()
for entry in entries:
loss_dict[entry.filename].append(loss.item())
-
+
optimizer.zero_grad()
weights[0].grad = None
loss.backward()
@@ -475,9 +459,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
steps_done = hypernetwork.step + 1
- if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
+ if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
raise RuntimeError("Loss diverged.")
-
+
if len(previous_mean_losses) > 1:
std = stdev(previous_mean_losses)
else:
@@ -489,11 +473,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
# Before saving, change name to match current checkpoint.
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
- hypernetwork.optimizer_name = optimizer_name
- if shared.opts.save_optimizer_state:
- hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
- hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
+
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
"loss": f"{previous_mean_loss:.7f}",
"learn_rate": scheduler.learn_rate
@@ -529,7 +510,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
preview_text = p.prompt
processed = processing.process_images(p)
- image = processed.images[0] if len(processed.images)>0 else None
+ image = processed.images[0] if len(processed.images) > 0 else None
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
@@ -537,7 +518,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if image is not None:
shared.state.current_image = image
- last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt,
+ shared.opts.samples_format, processed.infotexts[0],
+ p=p, forced_filename=forced_filename,
+ save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step
@@ -551,15 +535,12 @@ Last saved hypernetwork: {html.escape(last_saved_file)}
Last saved image: {html.escape(last_saved_image)}
"""
+
report_statistics(loss_dict)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
- hypernetwork.optimizer_name = optimizer_name
- if shared.opts.save_optimizer_state:
- hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
- del optimizer
- hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
+
return hypernetwork, filename
@@ -576,4 +557,4 @@ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
hypernetwork.sd_checkpoint = old_sd_checkpoint
hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
hypernetwork.name = old_hypernetwork_name
- raise
+ raise
\ No newline at end of file
From 0d07cbfa15d34294a4fa22d74359cdd6fe2f799c Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 15:50:54 +0900
Subject: [PATCH 111/147] I blame code autocomplete
---
modules/hypernetworks/hypernetwork.py | 76 ++++++++++-----------------
1 file changed, 27 insertions(+), 49 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 674fcedd..a11e01d6 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -33,12 +33,9 @@ class HypernetworkModule(torch.nn.Module):
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
}
- activation_dict.update(
- {cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if
- inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
+ activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
- def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
- add_layer_norm=False, use_dropout=False):
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
@@ -49,7 +46,7 @@ class HypernetworkModule(torch.nn.Module):
for i in range(len(layer_structure) - 1):
# Add a fully-connected layer
- linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i + 1])))
+ linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func
if activation_func == "linear" or activation_func is None:
@@ -61,7 +58,7 @@ class HypernetworkModule(torch.nn.Module):
# Add layer normalization
if add_layer_norm:
- linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i + 1])))
+ linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout expect last layer
if use_dropout and i < len(layer_structure) - 3:
@@ -130,8 +127,7 @@ class Hypernetwork:
filename = None
name = None
- def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None,
- add_layer_norm=False, use_dropout=False):
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
self.filename = None
self.name = name
self.layers = {}
@@ -146,10 +142,8 @@ class Hypernetwork:
for size in enable_sizes or []:
self.layers[size] = (
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
)
def weights(self):
@@ -196,15 +190,13 @@ class Hypernetwork:
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
- print(f"Dropout usage is set to {self.use_dropout}")
+ print(f"Dropout usage is set to {self.use_dropout}" )
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
- HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
)
self.name = state_dict.get('name', self.name)
@@ -316,7 +308,7 @@ def statistics(data):
std = 0
else:
std = stdev(data)
- total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std / (len(data) ** 0.5):.3f})"
+ total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
recent_data = data[-32:]
if len(recent_data) < 2:
std = 0
@@ -326,7 +318,7 @@ def statistics(data):
return total_information, recent_information
-def report_statistics(loss_info: dict):
+def report_statistics(loss_info:dict):
keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
for key in keys:
try:
@@ -338,18 +330,14 @@ def report_statistics(loss_info: dict):
print(e)
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width,
- training_height, steps, create_image_every, save_hypernetwork_every, template_file,
- preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps,
- preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+
+def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
- textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps,
- save_hypernetwork_every, create_image_every, log_directory,
- name="hypernetwork")
+ textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
@@ -384,29 +372,23 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
-
+
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
- ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width,
- height=training_height,
- repeats=shared.opts.training_image_repeats_per_epoch,
- placeholder_token=hypernetwork_name,
- model=shared.sd_model, device=devices.device,
- template_file=template_file, include_cond=True,
- batch_size=batch_size)
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
size = len(ds.indexes)
- loss_dict = defaultdict(lambda: deque(maxlen=1024))
+ loss_dict = defaultdict(lambda : deque(maxlen = 1024))
losses = torch.zeros((size,))
previous_mean_losses = [0]
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
-
+
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
@@ -425,7 +407,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if len(loss_dict) > 0:
previous_mean_losses = [i[-1] for i in loss_dict.values()]
previous_mean_loss = mean(previous_mean_losses)
-
+
scheduler.apply(optimizer, hypernetwork.step)
if scheduler.finished:
break
@@ -444,7 +426,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
losses[hypernetwork.step % losses.shape[0]] = loss.item()
for entry in entries:
loss_dict[entry.filename].append(loss.item())
-
+
optimizer.zero_grad()
weights[0].grad = None
loss.backward()
@@ -459,9 +441,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
steps_done = hypernetwork.step + 1
- if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
+ if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
raise RuntimeError("Loss diverged.")
-
+
if len(previous_mean_losses) > 1:
std = stdev(previous_mean_losses)
else:
@@ -510,7 +492,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
preview_text = p.prompt
processed = processing.process_images(p)
- image = processed.images[0] if len(processed.images) > 0 else None
+ image = processed.images[0] if len(processed.images)>0 else None
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
@@ -518,10 +500,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if image is not None:
shared.state.current_image = image
- last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt,
- shared.opts.samples_format, processed.infotexts[0],
- p=p, forced_filename=forced_filename,
- save_to_dirs=False)
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step
@@ -535,7 +514,7 @@ Last saved hypernetwork: {html.escape(last_saved_file)}
Last saved image: {html.escape(last_saved_image)}
"""
-
+
report_statistics(loss_dict)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
@@ -543,7 +522,6 @@ Last saved image: {html.escape(last_saved_image)}
return hypernetwork, filename
-
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
old_hypernetwork_name = hypernetwork.name
old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
@@ -557,4 +535,4 @@ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
hypernetwork.sd_checkpoint = old_sd_checkpoint
hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
hypernetwork.name = old_hypernetwork_name
- raise
\ No newline at end of file
+ raise
From 283249d2390f0f3a1c8a55d5d9aa551e3e9b2f9c Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 15:57:17 +0900
Subject: [PATCH 112/147] apply
---
modules/hypernetworks/hypernetwork.py | 54 ++++++++++++++++++++++++---
1 file changed, 49 insertions(+), 5 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 6e1a10cf..de8688a9 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -22,6 +22,8 @@ from collections import defaultdict, deque
from statistics import stdev, mean
+optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
+
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
activation_dict = {
@@ -142,6 +144,8 @@ class Hypernetwork:
self.use_dropout = use_dropout
self.activate_output = activate_output
self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
+ self.optimizer_name = None
+ self.optimizer_state_dict = None
for size in enable_sizes or []:
self.layers[size] = (
@@ -163,6 +167,7 @@ class Hypernetwork:
def save(self, filename):
state_dict = {}
+ optimizer_saved_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
@@ -178,8 +183,15 @@ class Hypernetwork:
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
state_dict['activate_output'] = self.activate_output
state_dict['last_layer_dropout'] = self.last_layer_dropout
-
+
+ if self.optimizer_name is not None:
+ optimizer_saved_dict['optimizer_name'] = self.optimizer_name
+
torch.save(state_dict, filename)
+ if self.optimizer_state_dict:
+ optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
+ optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+ torch.save(optimizer_saved_dict, filename + '.optim')
def load(self, filename):
self.filename = filename
@@ -202,6 +214,18 @@ class Hypernetwork:
print(f"Activate last layer is set to {self.activate_output}")
self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
+ optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
+ self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
+ print(f"Optimizer name is {self.optimizer_name}")
+ if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
+ self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+ else:
+ self.optimizer_state_dict = None
+ if self.optimizer_state_dict:
+ print("Loaded existing optimizer from checkpoint")
+ else:
+ print("No saved optimizer exists in checkpoint")
+
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
@@ -223,7 +247,7 @@ def list_hypernetworks(path):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
- res[name] = filename
+ res[name + f"({sd_models.model_hash(filename)})"] = filename
return res
@@ -369,6 +393,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
else:
hypernetwork_dir = None
+ hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
@@ -404,8 +429,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
- # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
- optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
+ # Here we use optimizer from saved HN, or we can specify as UI option.
+ if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+ optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+ else:
+ print(f"Optimizer type {optimizer_name} is not defined!")
+ optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+ optimizer_name = 'AdamW'
+ if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
+ try:
+ optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+ except RuntimeError as e:
+ print("Cannot resume from saved optimizer!")
+ print(e)
steps_without_grad = 0
@@ -467,7 +503,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
# Before saving, change name to match current checkpoint.
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+ hypernetwork.optimizer_name = optimizer_name
+ if shared.opts.save_optimizer_state:
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
"loss": f"{previous_mean_loss:.7f}",
@@ -530,8 +570,12 @@ Last saved image: {html.escape(last_saved_image)}
report_statistics(loss_dict)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+ hypernetwork.optimizer_name = optimizer_name
+ if shared.opts.save_optimizer_state:
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
-
+ del optimizer
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
return hypernetwork, filename
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
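Two ideas carried by this patch, shown as a hedged, self-contained sketch rather than the repo's exact code: available optimizers are discovered from torch.optim via inspect, and the optimizer state is written to a side-car .optim file keyed to a hash of the checkpoint so stale state is never resumed. file_hash below is a plain SHA-256 stand-in for sd_models.model_hash.

import hashlib
import inspect
import os
import torch

# every optimizer class torch.optim exposes, except the abstract base class
optimizer_dict = {name: cls for name, cls in inspect.getmembers(torch.optim, inspect.isclass)
                  if name != "Optimizer"}

def file_hash(path):
    # stand-in for sd_models.model_hash
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()[:8]

def save_with_optimizer(state_dict, optimizer, optimizer_name, filename):
    torch.save(state_dict, filename)
    torch.save({
        "optimizer_name": optimizer_name,
        "hash": file_hash(filename),                 # ties the .optim file to this exact checkpoint
        "optimizer_state_dict": optimizer.state_dict(),
    }, filename + ".optim")

def create_optimizer(params, lr, filename):
    saved = torch.load(filename + ".optim", map_location="cpu") if os.path.exists(filename + ".optim") else {}
    name = saved.get("optimizer_name", "AdamW")
    optimizer = optimizer_dict.get(name, torch.optim.AdamW)(params=params, lr=lr)
    # resume only if the side-car file matches the checkpoint it was saved alongside
    if saved.get("optimizer_state_dict") and os.path.exists(filename) and saved.get("hash") == file_hash(filename):
        optimizer.load_state_dict(saved["optimizer_state_dict"])
    return optimizer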
From f5d394214d6ee74a682d0a1016bcbebc4b43c13a Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 16:04:03 +0900
Subject: [PATCH 113/147] split before declaring file name
---
modules/hypernetworks/hypernetwork.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index de8688a9..9b6a3e62 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -382,6 +382,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
+ hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
@@ -393,7 +394,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
else:
hypernetwork_dir = None
- hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
From 5f0117154382eb0e2547c72630256681673e353b Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 10:07:29 +0300
Subject: [PATCH 114/147] shut down gradio's "everything allowed" CORS policy;
I checked that the main functionality works with this, but if this breaks some
exotic workflow, I'm sorry.
---
README.md | 7 ++++---
webui.py | 6 ++++++
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 55c050d5..33508f31 100644
--- a/README.md
+++ b/README.md
@@ -155,14 +155,15 @@ The documentation was moved from this README over to the project's [wiki](https:
- Swin2SR - https://github.com/mv-lab/swin2sr
- LDSR - https://github.com/Hafiidz/latent-diffusion
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
-- Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
-- InvokeAI, lstein - Cross Attention layer optimization - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
-- Rinon Gal - Textual Inversion - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
+- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
+- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
+- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
- xformers - https://github.com/facebookresearch/xformers
- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
+- Security advice - RyotaK
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
diff --git a/webui.py b/webui.py
index 3b21c071..81df09dd 100644
--- a/webui.py
+++ b/webui.py
@@ -141,6 +141,12 @@ def webui():
# after initial launch, disable --autolaunch for subsequent restarts
cmd_opts.autolaunch = False
+ # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
+ # an attacker to trick the user into opening a malicious HTML page, which makes a request to the
+ # running web ui and does whatever the attacker wants, including installing an extension and
+ # running its code. We disable this here. Suggested by RyotaK.
+ app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
+
app.add_middleware(GZipMiddleware, minimum_size=1000)
if launch_api:
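For context, a minimal sketch of the same middleware-stripping step on a bare FastAPI app (the webui applies it to the app object gradio returns): the permissive CORSMiddleware is filtered out of user_middleware before the server starts handling requests. This is an illustration, not the webui's launch code.

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware

app = FastAPI()
# what an overly permissive setup looks like (gradio adds something equivalent itself)
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])

# drop the wide-open CORS policy so a malicious page cannot script the local UI;
# this must happen before the app starts serving requests
app.user_middleware = [m for m in app.user_middleware if m.cls.__name__ != "CORSMiddleware"]

app.add_middleware(GZipMiddleware, minimum_size=1000)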
From 1ca0bcd3a7003dd2c1324de7d97fd2a6fc5ddc53 Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 16:09:19 +0900
Subject: [PATCH 115/147] only save if option is enabled
---
modules/hypernetworks/hypernetwork.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 9b6a3e62..b1f308e2 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -188,7 +188,7 @@ class Hypernetwork:
optimizer_saved_dict['optimizer_name'] = self.optimizer_name
torch.save(state_dict, filename)
- if self.optimizer_state_dict:
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict:
optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
torch.save(optimizer_saved_dict, filename + '.optim')
From ccf1a15412ef6b518f9f54cc26a0ee5edf458108 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 10:16:19 +0300
Subject: [PATCH 116/147] add an option to enable installing extensions with
--listen or --share
---
modules/shared.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 024c771a..0a39cdf2 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -44,6 +44,7 @@ parser.add_argument("--precision", type=str, help="evaluate at this precision",
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
+parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
@@ -99,7 +100,7 @@ restricted_opts = {
"outdir_save",
}
-cmd_opts.disable_extension_access = cmd_opts.share or cmd_opts.listen
+cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
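To make the resulting rule concrete, a small illustrative snippet of how the three flags combine: extensions are blocked whenever the UI is exposed with --share or --listen, unless the new opt-in flag is passed. The argparse setup mirrors the options above but is trimmed to just these flags.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--share", action="store_true")
parser.add_argument("--listen", action="store_true")
parser.add_argument("--enable-insecure-extension-access", action="store_true")

for argv in ([], ["--listen"], ["--listen", "--enable-insecure-extension-access"]):
    cmd_opts = parser.parse_args(argv)
    disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
    print(argv, "-> extensions tab disabled:", disable_extension_access)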
From 321e13ca176b256177c4a752d1f2bbee79b5532e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 10:35:30 +0300
Subject: [PATCH 117/147] produce a readable error message when setting an
option fails on the settings screen
---
modules/ui.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 633b56ef..3ac7540c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1439,8 +1439,7 @@ def create_ui(wrap_gradio_gpu_call):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
- if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
- return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
+ assert comp == dummy_component or opts.same_type(value, opts.data_labels[key].default), f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp == dummy_component:
@@ -1458,7 +1457,7 @@ def create_ui(wrap_gradio_gpu_call):
opts.save(shared.config_filename)
- return f'{changed} settings changed.', opts.dumpjson()
+ return opts.dumpjson(), f'{changed} settings changed.'
def run_settings_single(value, key):
if not opts.same_type(value, opts.data_labels[key].default):
@@ -1622,9 +1621,9 @@ def create_ui(wrap_gradio_gpu_call):
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
- fn=run_settings,
+ fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
inputs=components,
- outputs=[result, text_settings],
+ outputs=[text_settings, result],
)
for i, k, item in quicksettings_list:
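The shape of the change above: the handler now raises (via assert) instead of returning an error string, and the click handler is wrapped so the exception becomes a readable message. A simplified wrapper in the spirit of wrap_gradio_call (whose exact behavior is not reproduced here) might look like this:

import traceback

def wrap_call(func, extra_outputs):
    # on failure, return placeholder outputs plus a readable error string
    def wrapped(*args):
        try:
            return func(*args)
        except Exception as e:
            traceback.print_exc()
            return extra_outputs + [f"Error: {e}"]
    return wrapped

def run_settings(value):
    assert isinstance(value, int), f"Bad value for setting: {value!r}; expecting int"
    return ["{...json dump...}", "1 settings changed."]

safe_run_settings = wrap_call(run_settings, extra_outputs=["{}"])
print(safe_run_settings(5))        # normal result
print(safe_run_settings("oops"))   # readable error instead of an unhandled exception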
From f674c488d9701e577e2aaf25e331fb44ada4f1ef Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 10:45:34 +0300
Subject: [PATCH 118/147] bugfix: save image for hires fix BEFORE upscaling
latent space
---
modules/processing.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index a46e592d..7a2fc218 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -665,17 +665,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
if opts.use_scale_latent_for_hires_fix:
+ for i in range(samples.shape[0]):
+ save_intermediate(samples, i)
+
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
-
+
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
else:
image_conditioning = self.txt2img_image_conditioning(samples)
-
- for i in range(samples.shape[0]):
- save_intermediate(samples, i)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
From 7278897982bfb640ee95f144c97ed25fb3f77ea3 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 17:12:28 +0900
Subject: [PATCH 119/147] Update shared.py
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 4d6e1c8b..6e7a02e0 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -309,7 +309,7 @@ options_templates.update(options_section(('system', "System"), {
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
From 99043f33606d3057f83ea52a403e10cd29d1f7e7 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 11:20:42 +0300
Subject: [PATCH 120/147] fix one of previous merges breaking the program
---
modules/sd_models.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 63e07a12..34c57bfa 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -167,6 +167,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
sd_vae.restore_base_vae(model)
checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
+ vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
+
if checkpoint_info not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
From eeb07330131012c0294afb79165b90270679b9c7 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 11:21:40 +0300
Subject: [PATCH 121/147] change process_one virtual function for script to
process_batch, add extra args and docs
---
modules/processing.py | 2 +-
modules/scripts.py | 16 +++++++++++-----
2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index e20d8fc4..03c9143d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -502,7 +502,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
break
if p.scripts is not None:
- p.scripts.process_one(p, n)
+ p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
diff --git a/modules/scripts.py b/modules/scripts.py
index 75e47cd2..366c90d7 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -73,9 +73,15 @@ class Script:
pass
- def process_one(self, p, n, *args):
+ def process_batch(self, p, *args, **kwargs):
"""
- Same as process(), but called for every iteration
+ Same as process(), but called for every batch.
+
+ **kwargs will have those items:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ - prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things
+ - seeds - list of seeds for current batch
+ - subseeds - list of subseeds for current batch
"""
pass
@@ -303,13 +309,13 @@ class ScriptRunner:
print(f"Error running process: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
- def process_one(self, p, n):
+ def process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
try:
script_args = p.script_args[script.args_from:script.args_to]
- script.process_one(p, n, *script_args)
+ script.process_batch(p, *script_args, **kwargs)
except Exception:
- print(f"Error running process_one: {script.filename}", file=sys.stderr)
+ print(f"Error running process_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def postprocess(self, p, processed):
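A minimal sketch of an always-on script using the new hook, assuming only the modules.scripts API shown above; the class name and the tag it appends are made up for illustration. It edits the prompts list in place without changing its length, per the docstring.
```
import modules.scripts as scripts

class ExampleBatchScript(scripts.Script):
    def title(self):
        return "Example batch script"

    def show(self, is_img2img):
        return scripts.AlwaysVisible  # run as an always-on script

    def process_batch(self, p, *args, **kwargs):
        batch_number = kwargs["batch_number"]   # 0-based batch index
        prompts = kwargs["prompts"]             # may be edited in place; keep the length unchanged
        for i in range(len(prompts)):
            prompts[i] = f"{prompts[i]}, batch {batch_number}"
```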
From db50a9ab6cac5d66513e828db96e4270a33f7803 Mon Sep 17 00:00:00 2001
From: Dynamic
Date: Fri, 4 Nov 2022 18:12:27 +0900
Subject: [PATCH 122/147] Update ko_KR.json
Added new strings and revamped edited strings
---
localizations/ko_KR.json | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json
index 874771f9..29e10075 100644
--- a/localizations/ko_KR.json
+++ b/localizations/ko_KR.json
@@ -24,6 +24,7 @@
"Add model hash to generation information": "생성 정보에 모델 해시 추가",
"Add model name to generation information": "생성 정보에 모델 이름 추가",
"Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기",
+ "Adds a tab to the webui that allows the user to automatically extract keyframes from video, and manually extract 512x512 crops of those frames for use in model training.": "WebUI에 비디오로부터 자동으로 키프레임을 추출하고, 그 키프레임으로부터 모델 훈련에 사용될 512x512 이미지를 잘라낼 수 있는 탭을 추가합니다.",
"Aesthetic Gradients": "스타일 그라디언트",
"Aesthetic Image Scorer": "스타일 이미지 스코어러",
"Aesthetic imgs embedding": "스타일 이미지 임베딩",
@@ -260,6 +261,7 @@
"Keep -1 for seeds": "시드값 -1로 유지",
"keep whatever was there originally": "이미지 원본 유지",
"keyword": "프롬프트",
+ "Krita Plugin.": "Kirta 플러그인입니다.",
"Label": "라벨",
"Lanczos": "Lanczos",
"Last prompt:": "마지막 프롬프트 : ",
@@ -441,8 +443,8 @@
"See": "자세한 설명은",
"Seed": "시드",
"Seed of a different picture to be mixed into the generation.": "결과물에 섞일 다른 그림의 시드",
- "Select activation function of hypernetwork": "하이퍼네트워크 활성화 함수 선택",
- "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "레이어 가중치 초기화 방식 선택 - relu류 : Kaiming 추천, sigmoid류 : Xavier 추천",
+ "Select activation function of hypernetwork. Recommended : Swish / Linear(none)": "하이퍼네트워크 활성화 함수 선택 - 추천 : Swish / Linear(None)",
+ "Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise": "레이어 가중치 초기화 방식 선택 - relu류 : Kaiming 추천, sigmoid류 : Xavier 추천, 그 외 : Normal",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "WebUI에 표시할 Real-ESRGAN 모델을 선택하십시오. (재시작 필요)",
"Send seed when sending prompt or image to other interface": "다른 화면으로 프롬프트나 이미지를 보낼 때 시드도 함께 보내기",
"Send to extras": "부가기능으로 전송",
@@ -458,7 +460,7 @@
"should be 2 or lower.": "이 2 이하여야 합니다.",
"Show generation progress in window title.": "창 타이틀에 생성 진행도 보여주기",
"Show grid in results for web": "웹에서 결과창에 그리드 보여주기",
- "Show image creation progress every N sampling steps. Set 0 to disable.": "N번째 샘플링 스텝마다 이미지 생성 과정 보이기 - 비활성화하려면 0으로 설정",
+ "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.": "N번째 샘플링 스텝마다 이미지 생성 과정 보이기 - 비활성화하려면 0으로 설정, 배치 생성 완료시 보이려면 -1로 설정",
"Show images zoomed in by default in full page image viewer": "전체 페이지 이미지 뷰어에서 기본값으로 이미지 확대해서 보여주기",
"Show previews of all images generated in a batch as a grid": "배치에서 생성된 모든 이미지의 미리보기를 그리드 형식으로 보여주기",
"Show progressbar": "프로그레스 바 보이기",
@@ -520,6 +522,7 @@
"Train Embedding": "임베딩 훈련",
"Train Hypernetwork": "하이퍼네트워크 훈련",
"Training": "훈련",
+ "training-picker": "훈련용 선택기",
"txt2img": "텍스트→이미지",
"txt2img history": "텍스트→이미지 기록",
"uniform": "uniform",
From 5359fd30f2c4f6a3d9c42a6b837f8188d459a1ec Mon Sep 17 00:00:00 2001
From: Sihan Wang <31711261+shwang95@users.noreply.github.com>
Date: Fri, 4 Nov 2022 17:52:46 +0800
Subject: [PATCH 123/147] Rename confusing translation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
"Denoising strength" in UI was translated as "重绘幅度" while "denoising" in the X/Y plot is translated as "去噪", totally confusing.
---
localizations/zh_CN.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 56c8980e..ff785fc0 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -109,7 +109,7 @@
"Sigma noise": "Sigma noise",
"Eta": "Eta",
"Clip skip": "Clip 跳过",
- "Denoising": "去噪",
+ "Denoising": "重绘幅度",
"Cond. Image Mask Weight": "图像调节屏蔽度",
"X values": "X轴数值",
"Y type": "Y轴类型",
From fd62727893f9face287b0a9620251afaa38a627d Mon Sep 17 00:00:00 2001
From: Isaac Poulton
Date: Fri, 4 Nov 2022 18:34:35 +0700
Subject: [PATCH 124/147] Sort hypernetworks
---
modules/hypernetworks/hypernetwork.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 6e1a10cf..f1f04a70 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -224,7 +224,7 @@ def list_hypernetworks(path):
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
res[name] = filename
- return res
+ return dict(sorted(res.items()))
def load_hypernetwork(filename):
From c3cd0d7a86f35a5bfc58fdc3ecfaf203c0aee06f Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 4 Nov 2022 12:19:16 +0000
Subject: [PATCH 125/147] Should be one underscore for module privates, not two
---
modules/script_callbacks.py | 37 ++++++++++++++++++-------------------
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 4a7fb944..83da7ca4 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -46,7 +46,7 @@ class CFGDenoiserParams:
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
-__callback_map = dict(
+_callback_map = dict(
callbacks_app_started=[],
callbacks_model_loaded=[],
callbacks_ui_tabs=[],
@@ -58,11 +58,11 @@ __callback_map = dict(
def clear_callbacks():
- for callback_list in __callback_map.values():
+ for callback_list in _callback_map.values():
callback_list.clear()
def app_started_callback(demo: Optional[Blocks], app: FastAPI):
- for c in __callback_map['callbacks_app_started']:
+ for c in _callback_map['callbacks_app_started']:
try:
c.callback(demo, app)
except Exception:
@@ -70,7 +70,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI):
def model_loaded_callback(sd_model):
- for c in __callback_map['callbacks_model_loaded']:
+ for c in _callback_map['callbacks_model_loaded']:
try:
c.callback(sd_model)
except Exception:
@@ -80,7 +80,7 @@ def model_loaded_callback(sd_model):
def ui_tabs_callback():
res = []
- for c in __callback_map['callbacks_ui_tabs']:
+ for c in _callback_map['callbacks_ui_tabs']:
try:
res += c.callback() or []
except Exception:
@@ -90,7 +90,7 @@ def ui_tabs_callback():
def ui_settings_callback():
- for c in __callback_map['callbacks_ui_settings']:
+ for c in _callback_map['callbacks_ui_settings']:
try:
c.callback()
except Exception:
@@ -98,7 +98,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams):
- for c in __callback_map['callbacks_before_image_saved']:
+ for c in _callback_map['callbacks_before_image_saved']:
try:
c.callback(params)
except Exception:
@@ -106,7 +106,7 @@ def before_image_saved_callback(params: ImageSaveParams):
def image_saved_callback(params: ImageSaveParams):
- for c in __callback_map['callbacks_image_saved']:
+ for c in _callback_map['callbacks_image_saved']:
try:
c.callback(params)
except Exception:
@@ -114,7 +114,7 @@ def image_saved_callback(params: ImageSaveParams):
def cfg_denoiser_callback(params: CFGDenoiserParams):
- for c in __callback_map['callbacks_cfg_denoiser']:
+ for c in _callback_map['callbacks_cfg_denoiser']:
try:
c.callback(params)
except Exception:
@@ -133,13 +133,13 @@ def remove_current_script_callbacks():
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
if filename == 'unknown file':
return
- for callback_list in __callback_map.values():
+ for callback_list in _callback_map.values():
for callback_to_remove in [cb for cb in callback_list if cb.script == filename]:
callback_list.remove(callback_to_remove)
def remove_callbacks_for_function(callback_func):
- for callback_list in __callback_map.values():
+ for callback_list in _callback_map.values():
for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]:
callback_list.remove(callback_to_remove)
@@ -147,13 +147,13 @@ def remove_callbacks_for_function(callback_func):
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
- add_callback(__callback_map['callbacks_app_started'], callback)
+ add_callback(_callback_map['callbacks_app_started'], callback)
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
passed as an argument"""
- add_callback(__callback_map['callbacks_model_loaded'], callback)
+ add_callback(_callback_map['callbacks_model_loaded'], callback)
def on_ui_tabs(callback):
@@ -166,13 +166,13 @@ def on_ui_tabs(callback):
title is tab text displayed to user in the UI
elem_id is HTML id for the tab
"""
- add_callback(__callback_map['callbacks_ui_tabs'], callback)
+ add_callback(_callback_map['callbacks_ui_tabs'], callback)
def on_ui_settings(callback):
"""register a function to be called before UI settings are populated; add your settings
by using shared.opts.add_option(shared.OptionInfo(...)) """
- add_callback(__callback_map['callbacks_ui_settings'], callback)
+ add_callback(_callback_map['callbacks_ui_settings'], callback)
def on_before_image_saved(callback):
@@ -180,7 +180,7 @@ def on_before_image_saved(callback):
The callback is called with one argument:
- params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
"""
- add_callback(__callback_map['callbacks_before_image_saved'], callback)
+ add_callback(_callback_map['callbacks_before_image_saved'], callback)
def on_image_saved(callback):
@@ -188,7 +188,7 @@ def on_image_saved(callback):
The callback is called with one argument:
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
"""
- add_callback(__callback_map['callbacks_image_saved'], callback)
+ add_callback(_callback_map['callbacks_image_saved'], callback)
def on_cfg_denoiser(callback):
@@ -196,5 +196,4 @@ def on_cfg_denoiser(callback):
The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
"""
- add_callback(__callback_map['callbacks_cfg_denoiser'], callback)
-
+ add_callback(_callback_map['callbacks_cfg_denoiser'], callback)
From f316280ad3634a2343b086a6de0bfcd473e18599 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 16:48:40 +0300
Subject: [PATCH 126/147] fix the error that prevents from setting some options
---
modules/shared.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index a9e28b9c..962115f6 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -406,7 +406,8 @@ class Options:
if key in self.data or key in self.data_labels:
assert not cmd_opts.freeze_settings, "changing settings is disabled"
- comp_args = opts.data_labels[key].component_args
+ info = opts.data_labels.get(key, None)
+ comp_args = info.component_args if info else None
if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
raise RuntimeError(f"not possible to set {key} because it is restricted")
From 116bcf730ade8d3ac5d76d04c5887b6bba000970 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 16:48:46 +0300
Subject: [PATCH 127/147] disable setting options via API until it is fixed by
the author
---
modules/api/api.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/modules/api/api.py b/modules/api/api.py
index a49f3755..8a7ab2f5 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -218,6 +218,10 @@ class Api:
return options
def set_config(self, req: OptionsModel):
+ # currently req has all options fields even if you send a dict like { "send_seed": false }, which means it will
+ # overwrite all options with default values.
+ raise RuntimeError('Setting options via API is not supported')
+
reqDict = vars(req)
for o in reqDict:
setattr(shared.opts, o, reqDict[o])
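One possible direction for re-enabling this safely, sketched here rather than taken from the repo: pydantic tracks which fields the client actually sent, so a partial update could write back only those (exclude_unset is standard pydantic; the rest mirrors the code above).
```
def set_config(self, req: OptionsModel):
    # only touch options that were present in the request body
    for key, value in req.dict(exclude_unset=True).items():
        setattr(shared.opts, key, value)
    shared.opts.save(shared.config_filename)
```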
From 08feb4c364e8b2aed929fd7d22dfa21a93d78b2c Mon Sep 17 00:00:00 2001
From: Isaac Poulton
Date: Fri, 4 Nov 2022 20:53:11 +0700
Subject: [PATCH 128/147] Sort straight out of the glob
---
modules/hypernetworks/hypernetwork.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index f1f04a70..a441ab10 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -219,12 +219,12 @@ class Hypernetwork:
def list_hypernetworks(path):
res = {}
- for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
+ for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
res[name] = filename
- return dict(sorted(res.items()))
+ return res
def load_hypernetwork(filename):
From 4a73d433b19741cacebac82987f49a15bea9913e Mon Sep 17 00:00:00 2001
From: benlisquare <116663807+benlisquare@users.noreply.github.com>
Date: Sat, 5 Nov 2022 02:48:36 +1100
Subject: [PATCH 129/147] General fixes to Traditional Chinese (zh_TW)
localisation JSON
---
localizations/zh_TW.json | 54 ++++++++++++++++++++++++----------------
1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json
index 4e6dac44..b818936b 100644
--- a/localizations/zh_TW.json
+++ b/localizations/zh_TW.json
@@ -7,7 +7,7 @@
"Loading...": "載入中…",
"view": "檢視",
"api": "api",
- "•": "•",
+ "•": " • ",
"built with gradio": "基於 Gradio 構建",
"Stable Diffusion checkpoint": "Stable Diffusion 模型權重存檔點",
"txt2img": "文生圖",
@@ -70,12 +70,12 @@
"Variation strength": "差異強度",
"Resize seed from width": "自寬度縮放隨機種子",
"Resize seed from height": "自高度縮放隨機種子",
- "Open for Clip Aesthetic!": "打開美術風格 Clip!",
+ "Open for Clip Aesthetic!": "打開以調整 Clip 的美術風格!",
"▼": "▼",
"Aesthetic weight": "美術風格權重",
"Aesthetic steps": "美術風格疊代步數",
"Aesthetic learning rate": "美術風格學習率",
- "Slerp interpolation": "Slerp 插值",
+ "Slerp interpolation": "球麵線性插值角度",
"Aesthetic imgs embedding": "美術風格圖集 embedding",
"None": "無",
"Aesthetic text for imgs": "該圖集的美術風格描述",
@@ -105,15 +105,15 @@
"Prompt order": "提示詞順序",
"Sampler": "採樣器",
"Checkpoint name": "模型權重存檔點的名稱",
- "Hypernetwork": "超網路",
- "Hypernet str.": "超網路強度",
+ "Hypernetwork": "超網路(Hypernetwork)",
+ "Hypernet str.": "超網路(Hypernetwork)強度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
"Sigma noise": "Sigma noise",
"Eta": "Eta",
"Clip skip": "Clip 跳過",
- "Denoising": "去噪",
+ "Denoising": "重繪幅度",
"Cond. Image Mask Weight": "圖像調節屏蔽度",
"X values": "X軸數值",
"Y type": "Y軸類型",
@@ -189,6 +189,7 @@
"Tile overlap": "圖塊重疊的畫素",
"Upscaler": "放大演算法",
"Lanczos": "Lanczos",
+ "Nearest": "最鄰近(整數縮放)",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
@@ -230,15 +231,15 @@
"for detailed explanation.": "以了解詳細說明",
"Create embedding": "生成 embedding",
"Create aesthetic images embedding": "生成美術風格圖集 embedding",
- "Create hypernetwork": "生成 hypernetwork",
+ "Create hypernetwork": "生成超網路(Hypernetwork)",
"Preprocess images": "圖像預處理",
"Name": "名稱",
"Initialization text": "初始化文字",
"Number of vectors per token": "每個 token 的向量數",
"Overwrite Old Embedding": "覆寫舊的 Embedding",
"Modules": "模組",
- "Enter hypernetwork layer structure": "輸入 hypernetwork 層結構",
- "Select activation function of hypernetwork": "選擇 hypernetwork 的激活函數",
+ "Enter hypernetwork layer structure": "輸入超網路(Hypernetwork)層結構",
+ "Select activation function of hypernetwork": "選擇超網路(Hypernetwork)的激活函數",
"linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
@@ -276,7 +277,7 @@
"XavierNormal": "Xavier 正態",
"Add layer normalization": "加入層標準化",
"Use dropout": "採用 dropout 防止過擬合",
- "Overwrite Old Hypernetwork": "覆寫舊的 Hypernetwork",
+ "Overwrite Old Hypernetwork": "覆寫舊的超網路(Hypernetwork)",
"Source directory": "來源目錄",
"Destination directory": "目標目錄",
"Existing Caption txt Action": "對已有的TXT說明文字的行為",
@@ -298,11 +299,11 @@
"Create debug image": "生成除錯圖片",
"Preprocess": "預處理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "訓練 embedding; 必須指定一組具有 1:1 比例圖像的目錄",
- "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "訓練 embedding 或者 hypernetwork; 必須指定一組具有 1:1 比例圖像的目錄",
- "[wiki]": "[wiki]",
+ "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "訓練 embedding 或者超網路(Hypernetwork); 必須指定一組具有 1:1 比例圖像的目錄",
+ "[wiki]": "[wiki文件]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 學習率",
- "Hypernetwork Learning rate": "Hypernetwork 學習率",
+ "Hypernetwork Learning rate": "超網路(Hypernetwork)學習率",
"Learning rate": "學習率",
"Dataset directory": "資料集目錄",
"Log directory": "日誌目錄",
@@ -312,7 +313,7 @@
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步將 embedding 的副本儲存到日誌目錄,0 表示禁用",
"Save images with embedding in PNG chunks": "儲存圖像,並在 PNG 圖片檔案中嵌入 embedding 檔案",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "進行預覽時,從文生圖頁籤中讀取參數(提示詞等)",
- "Train Hypernetwork": "訓練 Hypernetwork",
+ "Train Hypernetwork": "訓練超網路(Hypernetwork)",
"Train Embedding": "訓練 Embedding",
"Create an aesthetic embedding out of any number of images": "從任意數量的圖像中建立美術風格 embedding",
"Create images embedding": "生成圖集 embedding",
@@ -418,7 +419,7 @@
"Checkpoints to cache in RAM": "快取在內存(RAM)中的模型權重存檔點",
"SD VAE": "模型的VAE",
"auto": "自動",
- "Hypernetwork strength": "Hypernetwork 強度",
+ "Hypernetwork strength": "超網路(Hypernetwork)強度",
"Inpainting conditioning mask strength": "局部重繪時圖像調節的蒙版屏蔽強度",
"Apply color correction to img2img results to match original colors.": "對圖生圖結果套用顏色校正以匹配原始顏色",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在進行圖生圖的時候,確切地執行滑塊指定的疊代步數(正常情況下更弱的重繪幅度需要更少的疊代步數)",
@@ -488,7 +489,17 @@
"Extension": "擴充",
"URL": "網址",
"Update": "更新",
+ "a1111-sd-webui-tagcomplete": "標記自動補全",
"unknown": "未知",
+ "deforum-for-automatic1111-webui": "Deforum",
+ "sd-dynamic-prompting": "動態提示詞",
+ "stable-diffusion-webui-aesthetic-gradients": "美術風格梯度",
+ "stable-diffusion-webui-aesthetic-image-scorer": "美術風格評等",
+ "stable-diffusion-webui-artists-to-study": "藝術家圖庫",
+ "stable-diffusion-webui-dataset-tag-editor": "資料集標記編輯器",
+ "stable-diffusion-webui-images-browser": "圖庫瀏覽器",
+ "stable-diffusion-webui-inspiration": "靈感",
+ "stable-diffusion-webui-wildcards": "萬用字元",
"Load from:": "載入自",
"Extension index URL": "擴充清單連結",
"URL for extension's git repository": "擴充的 git 倉庫連結",
@@ -527,8 +538,8 @@
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 處理蒙版區域之前要在蒙版區域內放置什麼",
"fill it with colors of the image": "用圖像的顏色(高強度模糊)填充它",
"keep whatever was there originally": "保留原來的圖像,不進行預處理",
- "fill it with latent space noise": "用潛空間的噪聲填充它",
- "fill it with latent space zeroes": "用潛空間的零填充它",
+ "fill it with latent space noise": "於潛空間填充噪聲",
+ "fill it with latent space zeroes": "於潛空間填零",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "將蒙版區域(包括預留畫素長度的緩衝區域)放大到目標解析度,進行局部重繪。\n然後縮小並粘貼回原始圖像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "將圖像大小調整為目標解析度。除非高度和寬度匹配,否則你將獲得不正確的縱橫比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "調整圖像大小,使整個目標解析度都被圖像填充。裁剪多出來的部分",
@@ -560,6 +571,8 @@
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "選擇哪些 Real-ESRGAN 模型顯示在網頁使用者介面。(需要重新啟動)",
"Allowed categories for random artists selection when using the Roll button": "使用抽選藝術家按鈕時將會隨機的藝術家類別",
"Append commas": "附加逗號",
+ "latest": "最新",
+ "behind": "落後",
"Roll three": "抽三位出來",
"Generate forever": "無限生成",
"Cancel generate forever": "停止無限生成",
@@ -581,10 +594,9 @@
"Start drawing": "開始繪製",
"Description": "描述",
"Action": "行動",
- "Aesthetic Gradients": "美術風格",
- "aesthetic-gradients": "美術風格",
- "stable-diffusion-webui-wildcards": "萬用字元",
- "Dynamic Prompts": "動態提示",
+ "Aesthetic Gradients": "美術風格梯度",
+ "aesthetic-gradients": "美術風格梯度",
+ "Dynamic Prompts": "動態提示詞",
"images-browser": "圖庫瀏覽器",
"Inspiration": "靈感",
"Deforum": "Deforum",
From 5844ef8a9a165e0f456a4658bda830282cf5a55e Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 4 Nov 2022 16:02:25 +0000
Subject: [PATCH 130/147] remove private underscore indicator
---
modules/script_callbacks.py | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 83da7ca4..74dfb880 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -46,7 +46,7 @@ class CFGDenoiserParams:
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
-_callback_map = dict(
+callback_map = dict(
callbacks_app_started=[],
callbacks_model_loaded=[],
callbacks_ui_tabs=[],
@@ -58,11 +58,11 @@ _callback_map = dict(
def clear_callbacks():
- for callback_list in _callback_map.values():
+ for callback_list in callback_map.values():
callback_list.clear()
def app_started_callback(demo: Optional[Blocks], app: FastAPI):
- for c in _callback_map['callbacks_app_started']:
+ for c in callback_map['callbacks_app_started']:
try:
c.callback(demo, app)
except Exception:
@@ -70,7 +70,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI):
def model_loaded_callback(sd_model):
- for c in _callback_map['callbacks_model_loaded']:
+ for c in callback_map['callbacks_model_loaded']:
try:
c.callback(sd_model)
except Exception:
@@ -80,7 +80,7 @@ def model_loaded_callback(sd_model):
def ui_tabs_callback():
res = []
- for c in _callback_map['callbacks_ui_tabs']:
+ for c in callback_map['callbacks_ui_tabs']:
try:
res += c.callback() or []
except Exception:
@@ -90,7 +90,7 @@ def ui_tabs_callback():
def ui_settings_callback():
- for c in _callback_map['callbacks_ui_settings']:
+ for c in callback_map['callbacks_ui_settings']:
try:
c.callback()
except Exception:
@@ -98,7 +98,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams):
- for c in _callback_map['callbacks_before_image_saved']:
+ for c in callback_map['callbacks_before_image_saved']:
try:
c.callback(params)
except Exception:
@@ -106,7 +106,7 @@ def before_image_saved_callback(params: ImageSaveParams):
def image_saved_callback(params: ImageSaveParams):
- for c in _callback_map['callbacks_image_saved']:
+ for c in callback_map['callbacks_image_saved']:
try:
c.callback(params)
except Exception:
@@ -114,7 +114,7 @@ def image_saved_callback(params: ImageSaveParams):
def cfg_denoiser_callback(params: CFGDenoiserParams):
- for c in _callback_map['callbacks_cfg_denoiser']:
+ for c in callback_map['callbacks_cfg_denoiser']:
try:
c.callback(params)
except Exception:
@@ -133,13 +133,13 @@ def remove_current_script_callbacks():
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
if filename == 'unknown file':
return
- for callback_list in _callback_map.values():
+ for callback_list in callback_map.values():
for callback_to_remove in [cb for cb in callback_list if cb.script == filename]:
callback_list.remove(callback_to_remove)
def remove_callbacks_for_function(callback_func):
- for callback_list in _callback_map.values():
+ for callback_list in callback_map.values():
for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]:
callback_list.remove(callback_to_remove)
@@ -147,13 +147,13 @@ def remove_callbacks_for_function(callback_func):
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
- add_callback(_callback_map['callbacks_app_started'], callback)
+ add_callback(callback_map['callbacks_app_started'], callback)
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
passed as an argument"""
- add_callback(_callback_map['callbacks_model_loaded'], callback)
+ add_callback(callback_map['callbacks_model_loaded'], callback)
def on_ui_tabs(callback):
@@ -166,13 +166,13 @@ def on_ui_tabs(callback):
title is tab text displayed to user in the UI
elem_id is HTML id for the tab
"""
- add_callback(_callback_map['callbacks_ui_tabs'], callback)
+ add_callback(callback_map['callbacks_ui_tabs'], callback)
def on_ui_settings(callback):
"""register a function to be called before UI settings are populated; add your settings
by using shared.opts.add_option(shared.OptionInfo(...)) """
- add_callback(_callback_map['callbacks_ui_settings'], callback)
+ add_callback(callback_map['callbacks_ui_settings'], callback)
def on_before_image_saved(callback):
@@ -180,7 +180,7 @@ def on_before_image_saved(callback):
The callback is called with one argument:
- params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
"""
- add_callback(_callback_map['callbacks_before_image_saved'], callback)
+ add_callback(callback_map['callbacks_before_image_saved'], callback)
def on_image_saved(callback):
@@ -188,7 +188,7 @@ def on_image_saved(callback):
The callback is called with one argument:
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
"""
- add_callback(_callback_map['callbacks_image_saved'], callback)
+ add_callback(callback_map['callbacks_image_saved'], callback)
def on_cfg_denoiser(callback):
@@ -196,4 +196,4 @@ def on_cfg_denoiser(callback):
The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
"""
- add_callback(_callback_map['callbacks_cfg_denoiser'], callback)
+ add_callback(callback_map['callbacks_cfg_denoiser'], callback)
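For reference, extension code never touches callback_map directly; it goes through the on_* helpers, as in this small sketch (the callback body and print format are illustrative):
```
from modules import script_callbacks

def log_saved_image(params):  # params: script_callbacks.ImageSaveParams
    print(f"saved {params.filename} for prompt: {params.p.prompt}")

script_callbacks.on_image_saved(log_saved_image)

# later, e.g. when the extension is reloaded, the same function can be deregistered
script_callbacks.remove_callbacks_for_function(log_saved_image)
```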
From d469186fb051aa8ae0c868d1b1e3e11b0c846312 Mon Sep 17 00:00:00 2001
From: benlisquare <116663807+benlisquare@users.noreply.github.com>
Date: Sat, 5 Nov 2022 03:42:50 +1100
Subject: [PATCH 131/147] Minor fix to Traditional Chinese (zh_TW) JSON
---
localizations/zh_TW.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json
index b818936b..04bde864 100644
--- a/localizations/zh_TW.json
+++ b/localizations/zh_TW.json
@@ -75,7 +75,7 @@
"Aesthetic weight": "美術風格權重",
"Aesthetic steps": "美術風格疊代步數",
"Aesthetic learning rate": "美術風格學習率",
- "Slerp interpolation": "球麵線性插值角度",
+ "Slerp interpolation": "球面線性插值角度",
"Aesthetic imgs embedding": "美術風格圖集 embedding",
"None": "無",
"Aesthetic text for imgs": "該圖集的美術風格描述",
From 0d7e01d9950e013784c4b77c05aa7583ea69edc8 Mon Sep 17 00:00:00 2001
From: innovaciones
Date: Fri, 4 Nov 2022 12:14:32 -0600
Subject: [PATCH 132/147] Open extensions links in new tab
Fixed for "Available" tab
---
modules/ui_extensions.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index a81de9a7..8e0d41d5 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -188,7 +188,7 @@ def refresh_available_extensions_from_data():
code += f"""
- {html.escape(name)}
+ {html.escape(name)}
{html.escape(description)}
{install_code}
From b8435e632f7ba0da12a2c8e9c788dda519279d24 Mon Sep 17 00:00:00 2001
From: evshiron
Date: Sat, 5 Nov 2022 02:36:47 +0800
Subject: [PATCH 133/147] add --cors-allow-origins cmd opt
---
modules/shared.py | 7 ++++---
webui.py | 9 +++++++++
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index a9e28b9c..e83cbcdf 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -86,6 +86,7 @@ parser.add_argument("--nowebui", action='store_true', help="use api=True to laun
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
+parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origins", default=None)
cmd_opts = parser.parse_args()
restricted_opts = {
@@ -147,9 +148,9 @@ class State:
self.interrupted = True
def nextjob(self):
- if opts.show_progress_every_n_steps == -1:
+ if opts.show_progress_every_n_steps == -1:
self.do_set_current_image()
-
+
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
@@ -198,7 +199,7 @@ class State:
return
if self.current_latent is None:
return
-
+
if opts.show_progress_grid:
self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
else:
diff --git a/webui.py b/webui.py
index 81df09dd..3788af0b 100644
--- a/webui.py
+++ b/webui.py
@@ -5,6 +5,7 @@ import importlib
import signal
import threading
from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from modules.paths import script_path
@@ -93,6 +94,11 @@ def initialize():
signal.signal(signal.SIGINT, sigint_handler)
+def setup_cors(app):
+ if cmd_opts.cors_allow_origins:
+ app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'])
+
+
def create_api(app):
from modules.api.api import Api
api = Api(app, queue_lock)
@@ -114,6 +120,7 @@ def api_only():
initialize()
app = FastAPI()
+ setup_cors(app)
app.add_middleware(GZipMiddleware, minimum_size=1000)
api = create_api(app)
@@ -147,6 +154,8 @@ def webui():
# running its code. We disable this here. Suggested by RyotaK.
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
+ setup_cors(app)
+
app.add_middleware(GZipMiddleware, minimum_size=1000)
if launch_api:
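A self-contained sketch of the same middleware setup, handy for checking the comma-separated origin parsing outside the webui (the origins string and the /ping route are placeholders):
```
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

cors_allow_origins = "http://localhost:3000,https://example.com"  # stand-in for cmd_opts.cors_allow_origins

app = FastAPI()
if cors_allow_origins:
    app.add_middleware(
        CORSMiddleware,
        allow_origins=cors_allow_origins.split(','),  # exact origins only, no wildcard
        allow_methods=['*'],
    )

@app.get("/ping")
def ping():
    return {"ok": True}
```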
From 467d8b967b5d1b1984ab113bec3fff217736e7ac Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Sat, 5 Nov 2022 04:24:42 +0900
Subject: [PATCH 134/147] Fix errors from commit f2b697 with
--hide-ui-dir-config
https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/f2b69709eaff88fc3a2bd49585556ec0883bf5ea
---
modules/ui.py | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 4c2829af..76ca9b07 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1446,17 +1446,19 @@ def create_ui(wrap_gradio_gpu_call):
continue
oldval = opts.data.get(key, None)
-
- setattr(opts, key, value)
-
+ try:
+ setattr(opts, key, value)
+ except RuntimeError:
+ continue
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
-
- opts.save(shared.config_filename)
-
+ try:
+ opts.save(shared.config_filename)
+ except RuntimeError:
+ return opts.dumpjson(), f'{changed} settings changed without save.'
return opts.dumpjson(), f'{changed} settings changed.'
def run_settings_single(value, key):
From 30b1bcc64e67ad50c5d3af3a6fe1bd1e9553f34e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 22:56:18 +0300
Subject: [PATCH 135/147] fix upscale loop erroneously applied multiple times
---
modules/upscaler.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 83fde7ca..c4e6e6bd 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -57,10 +57,18 @@ class Upscaler:
self.scale = scale
dest_w = img.width * scale
dest_h = img.height * scale
+
for i in range(3):
- if img.width > dest_w and img.height > dest_h:
- break
+ shape = (img.width, img.height)
+
img = self.do_upscale(img, selected_model)
+
+ if shape == (img.width, img.height):
+ break
+
+ if img.width >= dest_w and img.height >= dest_h:
+ break
+
if img.width != dest_w or img.height != dest_h:
img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS)
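The fixed loop, restated as a standalone sketch (do_upscale stands in for the model call): stop once a pass reaches the target size, or once the upscaler stops changing the image, instead of checking sizes before the first pass ever runs.
```
def upscale(img, scale, do_upscale):
    dest_w, dest_h = img.width * scale, img.height * scale

    for _ in range(3):                       # at most three passes, as in the patch
        before = (img.width, img.height)
        img = do_upscale(img)

        if (img.width, img.height) == before:             # 1x / no-op upscaler: give up
            break
        if img.width >= dest_w and img.height >= dest_h:  # target reached
            break

    return img
```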
From c0f7dbda3361daaa3e315e747fe5bebb75ea55d0 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:01:58 +0000
Subject: [PATCH 136/147] Update k-diffusion to release 0.0.10
---
launch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/launch.py b/launch.py
index 2a51f20e..5fa11560 100644
--- a/launch.py
+++ b/launch.py
@@ -142,7 +142,7 @@ def prepare_enviroment():
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
- k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "60e5042ca0da89c14d1dd59d73883280f8fce991")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
From 6008c0773ea575353f9b87da8a58454e20cc7857 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:03:05 +0000
Subject: [PATCH 137/147] Add support for new DPM-Solver++ samplers
---
modules/sd_samplers.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index c7c414ef..7ece6556 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -29,6 +29,10 @@ samplers_k_diffusion = [
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
+ ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+ ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+ ('DPM-Solver++(2S) Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+ ('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
From f92dc505a013af9e385c7edbdf97539be62503d6 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:12:48 +0000
Subject: [PATCH 138/147] Fix name
---
modules/sd_samplers.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 7ece6556..b28a2e4c 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -31,7 +31,7 @@ samplers_k_diffusion = [
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
- ('DPM-Solver++(2S) Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+ ('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
]
From 1b6c2fc749e12f12bbee4705e65f217d23fa9072 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:28:13 +0000
Subject: [PATCH 139/147] Reorder samplers
---
modules/sd_samplers.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index b28a2e4c..1e88f7ee 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -24,13 +24,13 @@ samplers_k_diffusion = [
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
+ ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+ ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
- ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
- ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
]
From ebce0c57c78a3f22178e3a38938d19ec0dfb703d Mon Sep 17 00:00:00 2001
From: Billy Cao
Date: Sat, 5 Nov 2022 11:38:24 +0800
Subject: [PATCH 140/147] Use typing.Optional instead of | to add support for
Python 3.9 and below.
---
modules/api/models.py | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/modules/api/models.py b/modules/api/models.py
index 2ae75f43..a44c5ddd 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,6 +1,6 @@
import inspect
from pydantic import BaseModel, Field, create_model
-from typing import Any, Optional, Union
+from typing import Any, Optional
from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
@@ -185,22 +185,22 @@ _options = vars(parser)['_option_string_actions']
for key in _options:
if(_options[key].dest != 'help'):
flag = _options[key]
- _type = str
- if(_options[key].default != None): _type = type(_options[key].default)
+ _type = str
+ if _options[key].default is not None: _type = type(_options[key].default)
flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
FlagsModel = create_model("Flags", **flags)
class SamplerItem(BaseModel):
name: str = Field(title="Name")
- aliases: list[str] = Field(title="Aliases")
+ aliases: list[str] = Field(title="Aliases")
options: dict[str, str] = Field(title="Options")
class UpscalerItem(BaseModel):
name: str = Field(title="Name")
- model_name: str | None = Field(title="Model Name")
- model_path: str | None = Field(title="Path")
- model_url: str | None = Field(title="URL")
+ model_name: Optional[str] = Field(title="Model Name")
+ model_path: Optional[str] = Field(title="Path")
+ model_url: Optional[str] = Field(title="URL")
class SDModelItem(BaseModel):
title: str = Field(title="Title")
@@ -211,21 +211,21 @@ class SDModelItem(BaseModel):
class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
- path: str | None = Field(title="Path")
+ path: Optional[str] = Field(title="Path")
class FaceRestorerItem(BaseModel):
name: str = Field(title="Name")
- cmd_dir: str | None = Field(title="Path")
+ cmd_dir: Optional[str] = Field(title="Path")
class RealesrganItem(BaseModel):
name: str = Field(title="Name")
- path: str | None = Field(title="Path")
- scale: int | None = Field(title="Scale")
+ path: Optional[str] = Field(title="Path")
+ scale: Optional[int] = Field(title="Scale")
class PromptStyleItem(BaseModel):
name: str = Field(title="Name")
- prompt: str | None = Field(title="Prompt")
- negative_prompt: str | None = Field(title="Negative Prompt")
+ prompt: Optional[str] = Field(title="Prompt")
+ negative_prompt: Optional[str] = Field(title="Negative Prompt")
class ArtistItem(BaseModel):
name: str = Field(title="Name")
From e9a5562b9b27a1a4f9c282637b111cefd9727a41 Mon Sep 17 00:00:00 2001
From: papuSpartan
Date: Sat, 5 Nov 2022 04:06:51 -0500
Subject: [PATCH 141/147] add support for tls (gradio tls options)
---
modules/shared.py | 3 +++
webui.py | 22 ++++++++++++++++++++--
2 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 962115f6..7a20c3af 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -86,6 +86,9 @@ parser.add_argument("--nowebui", action='store_true', help="use api=True to laun
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
+parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
+parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
+parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
cmd_opts = parser.parse_args()
restricted_opts = {
diff --git a/webui.py b/webui.py
index 81df09dd..d366f4ca 100644
--- a/webui.py
+++ b/webui.py
@@ -34,7 +34,7 @@ from modules.shared import cmd_opts
import modules.hypernetworks.hypernetwork
queue_lock = threading.Lock()
-
+server_name = "0.0.0.0" if cmd_opts.listen else cmd_opts.server_name
def wrap_queued_call(func):
def f(*args, **kwargs):
@@ -85,6 +85,22 @@ def initialize():
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
+ if cmd_opts.tls_keyfile is not None and cmd_opts.tls_certfile is not None:
+
+ try:
+ if not os.path.exists(cmd_opts.tls_keyfile):
+ print("Invalid path to TLS keyfile given")
+ if not os.path.exists(cmd_opts.tls_certfile):
+ print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
+ except TypeError:
+ cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
+ print(f"path: '{cmd_opts.tls_keyfile}' {type(cmd_opts.tls_keyfile)}")
+ print(f"path: '{cmd_opts.tls_certfile}' {type(cmd_opts.tls_certfile)}")
+ print("TLS setup invalid, running webui without TLS")
+ else:
+ print("Running with TLS")
+
+
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
@@ -131,8 +147,10 @@ def webui():
app, local_url, share_url = demo.launch(
share=cmd_opts.share,
- server_name="0.0.0.0" if cmd_opts.listen else None,
+ server_name=server_name,
server_port=cmd_opts.port,
+ ssl_keyfile=cmd_opts.tls_keyfile,
+ ssl_certfile=cmd_opts.tls_certfile,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
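Isolated from the webui, the launch call the patch ends up making looks roughly like this (paths are placeholders; gradio's launch() accepts ssl_keyfile/ssl_certfile and serves HTTPS when both are valid):
```
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("TLS smoke test")

demo.launch(
    server_name="0.0.0.0",
    ssl_keyfile="/path/to/key.pem",      # placeholder path
    ssl_certfile="/path/to/cert.pem",    # placeholder path
)
```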
From a02bad570ef7718436369bb4e4aa5b8e0f1f5689 Mon Sep 17 00:00:00 2001
From: papuSpartan
Date: Sat, 5 Nov 2022 04:14:21 -0500
Subject: [PATCH 142/147] rm dbg
---
webui.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/webui.py b/webui.py
index d366f4ca..222dbeee 100644
--- a/webui.py
+++ b/webui.py
@@ -94,8 +94,6 @@ def initialize():
print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
except TypeError:
cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
- print(f"path: '{cmd_opts.tls_keyfile}' {type(cmd_opts.tls_keyfile)}")
- print(f"path: '{cmd_opts.tls_certfile}' {type(cmd_opts.tls_certfile)}")
print("TLS setup invalid, running webui without TLS")
else:
print("Running with TLS")
From 03b08c4a6b0609f24ec789d40100529b92ef0612 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 5 Nov 2022 15:04:48 +0300
Subject: [PATCH 143/147] do not die when an extension's repo has no remote
---
modules/extensions.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/modules/extensions.py b/modules/extensions.py
index 897af96e..8e0977fd 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -34,8 +34,11 @@ class Extension:
if repo is None or repo.bare:
self.remote = None
else:
- self.remote = next(repo.remote().urls, None)
- self.status = 'unknown'
+ try:
+ self.remote = next(repo.remote().urls, None)
+ self.status = 'unknown'
+ except Exception:
+ self.remote = None
def list_files(self, subdir, extension):
from modules import scripts
From a170e3d22231e145f42bb878a76ae5f76fdca230 Mon Sep 17 00:00:00 2001
From: Evgeniy
Date: Sat, 5 Nov 2022 17:06:56 +0300
Subject: [PATCH 144/147] Python 3.8 typing compatibility
Solves problems with
```Traceback (most recent call last):
File "webui.py", line 201, in
webui()
File "webui.py", line 178, in webui
create_api(app)
File "webui.py", line 117, in create_api
from modules.api.api import Api
File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\api.py", line 9, in
from modules.api.models import *
File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 194, in
class SamplerItem(BaseModel):
File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 196, in SamplerItem
aliases: list[str] = Field(title="Aliases")
TypeError: 'type' object is not subscriptable```
and
```Traceback (most recent call last):
File "webui.py", line 201, in
webui()
File "webui.py", line 178, in webui
create_api(app)
File "webui.py", line 117, in create_api
from modules.api.api import Api
File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\api.py", line 9, in
from modules.api.models import *
File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 194, in
class SamplerItem(BaseModel):
File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 197, in SamplerItem
options: dict[str, str] = Field(title="Options")
TypeError: 'type' object is not subscriptable```
---
modules/api/models.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/api/models.py b/modules/api/models.py
index a44c5ddd..f89da1ff 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -5,7 +5,7 @@ from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
from modules.shared import sd_upscalers, opts, parser
-from typing import List
+from typing import Dict, List
API_NOT_ALLOWED = [
"self",
@@ -193,8 +193,8 @@ FlagsModel = create_model("Flags", **flags)
class SamplerItem(BaseModel):
name: str = Field(title="Name")
- aliases: list[str] = Field(title="Aliases")
- options: dict[str, str] = Field(title="Options")
+ aliases: List[str] = Field(title="Aliases")
+ options: Dict[str, str] = Field(title="Options")
class UpscalerItem(BaseModel):
name: str = Field(title="Name")
@@ -230,4 +230,4 @@ class PromptStyleItem(BaseModel):
class ArtistItem(BaseModel):
name: str = Field(title="Name")
score: float = Field(title="Score")
- category: str = Field(title="Category")
\ No newline at end of file
+ category: str = Field(title="Category")
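Quick reference for the substitutions made in this patch and in PATCH 140: built-in generics (list[str], dict[str, str]) require Python 3.9+, and the X | None union syntax requires 3.10+, so the typing-module spellings keep the models importable on 3.7/3.8.
```
from typing import Dict, List, Optional

aliases_38: List[str] = ["k_lms"]       # instead of list[str]
options_38: Dict[str, str] = {}         # instead of dict[str, str]
model_name_38: Optional[str] = None     # instead of str | None
```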
From 62e3d71aa778928d63cab81d9d8cde33e55bebb3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 5 Nov 2022 17:09:42 +0300
Subject: [PATCH 145/147] rework the code to not use the walrus operator
because colab's 3.7 does not support it
---
modules/hypernetworks/hypernetwork.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 5ceed6ee..7f182712 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -429,13 +429,16 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
+
# Here we use optimizer from saved HN, or we can specify as UI option.
- if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+ if hypernetwork.optimizer_name in optimizer_dict:
optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+ optimizer_name = hypernetwork.optimizer_name
else:
- print(f"Optimizer type {optimizer_name} is not defined!")
+ print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
optimizer_name = 'AdamW'
+
if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
try:
optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
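The two forms side by side, as a small illustrative sketch (names are made up): the walrus operator binds and tests in one expression but needs Python 3.8+, while the rewritten form reads the attribute twice and works on 3.7.
```
optimizer_dict = {"AdamW": "adamw-ctor", "SGD": "sgd-ctor"}

class DummyHN:
    optimizer_name = "AdamW"

hn = DummyHN()

# Python 3.8+ only
if (optimizer_name := hn.optimizer_name) in optimizer_dict:
    print(f"using {optimizer_name}")

# 3.7-compatible equivalent, matching the patch
if hn.optimizer_name in optimizer_dict:
    optimizer_name = hn.optimizer_name
    print(f"using {optimizer_name}")
```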
From 159475e072f2ed3db8235aab9c3fa18640b93b80 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 5 Nov 2022 18:32:22 +0300
Subject: [PATCH 146/147] tweak names a bit for new samplers
---
modules/sd_samplers.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 1e88f7ee..783992d2 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -24,15 +24,15 @@ samplers_k_diffusion = [
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
- ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
- ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+ ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+ ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
- ('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
- ('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+ ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+ ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
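How one of these tuples ends up bound to k-diffusion, as a hedged sketch: the webui wraps this in its KDiffusionSampler class, and the direct getattr below only shows the name-to-function mapping (it requires the k-diffusion release updated in PATCH 136).
```
import k_diffusion.sampling

label, funcname, aliases, options = (
    'DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}
)

sampler_fn = getattr(k_diffusion.sampling, funcname)   # -> k_diffusion.sampling.sample_dpmpp_2m
print(label, aliases, options.get('scheduler'), sampler_fn.__name__)
```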
From 29f48b7803dd0890cb5328fa290ab12045706316 Mon Sep 17 00:00:00 2001
From: Dynamic
Date: Sun, 6 Nov 2022 00:37:37 +0900
Subject: [PATCH 147/147] Update ko_KR.json
New setting option and some additional extension index strings
---
localizations/ko_KR.json | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json
index 29e10075..cf302aaf 100644
--- a/localizations/ko_KR.json
+++ b/localizations/ko_KR.json
@@ -16,6 +16,7 @@
"A merger of the two checkpoints will be generated in your": "체크포인트들이 병합된 결과물이 당신의",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "난수 생성기의 결과물을 지정하는 값 - 동일한 설정값과 동일한 시드를 적용 시, 완전히 똑같은 결과물을 얻게 됩니다.",
"Action": "작업",
+ "Add a button to convert the prompts used in NovelAI for use in the WebUI. In addition, add a button that allows you to recall a previously used prompt.": "NovelAI에서 사용되는 프롬프트를 WebUI에서 사용할 수 있게 변환하는 버튼을 추가합니다. 덤으로 이전에 사용한 프롬프트를 불러오는 버튼도 추가됩니다.",
"Add a random artist to the prompt.": "프롬프트에 랜덤한 작가 추가",
"Add a second progress bar to the console that shows progress for an entire job.": "콘솔에 전체 작업의 진행도를 보여주는 2번째 프로그레스 바 추가하기",
"Add difference": "차이점 추가",
@@ -24,6 +25,7 @@
"Add model hash to generation information": "생성 정보에 모델 해시 추가",
"Add model name to generation information": "생성 정보에 모델 이름 추가",
"Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기",
+ "Adds a tab that lets you preview how CLIP model would tokenize your text.": "CLIP 모델이 텍스트를 어떻게 토큰화할지 미리 보여주는 탭을 추가합니다.",
"Adds a tab to the webui that allows the user to automatically extract keyframes from video, and manually extract 512x512 crops of those frames for use in model training.": "WebUI에 비디오로부터 자동으로 키프레임을 추출하고, 그 키프레임으로부터 모델 훈련에 사용될 512x512 이미지를 잘라낼 수 있는 탭을 추가합니다.",
"Aesthetic Gradients": "스타일 그라디언트",
"Aesthetic Image Scorer": "스타일 이미지 스코어러",
@@ -33,6 +35,7 @@
"Aesthetic text for imgs": "스타일 텍스트",
"Aesthetic weight": "스타일 가중치",
"Allowed categories for random artists selection when using the Roll button": "랜덤 버튼을 눌러 무작위 작가를 선택할 때 허용된 카테고리",
+ "Allows you to include various shortcodes in your prompts. You can pull text from files, set up your own variables, process text through conditional functions, and so much more - it's like wildcards on steroids.": "프롬프트에 다양한 숏코드를 추가할 수 있게 해줍니다. 파일로부터 텍스트 추출, 변수 설정, 조건 함수로 텍스트 처리 등등 - 스테로이드를 맞은 와일드카드라 할 수 있죠.",
"Always print all generation info to standard output": "기본 아웃풋에 모든 생성 정보 항상 출력하기",
"Always save all generated image grids": "생성된 이미지 그리드 항상 저장하기",
"Always save all generated images": "생성된 이미지 항상 저장하기",
@@ -54,6 +57,7 @@
"Batch Process": "이미지 여러장 처리",
"Batch size": "배치 크기",
"behind": "최신 아님",
+ "Booru tag autocompletion": "Booru 태그 자동완성",
"BSRGAN 4x": "BSRGAN 4x",
"built with gradio": "gradio로 제작되었습니다",
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Chad 스코어러를 기반으로 한 CLIP+MLP 스타일 점수 예측기를 이용해 생성된 이미지의 스타일 점수를 계산합니다.",
@@ -114,6 +118,7 @@
"Directory for saving images using the Save button": "저장 버튼을 이용해 저장하는 이미지들의 저장 경로",
"Directory name pattern": "디렉토리명 패턴",
"directory.": "저장 경로에 저장됩니다.",
+ "Displays autocompletion hints for tags from image booru boards such as Danbooru. Uses local tag CSV files and includes a config for customization.": "Danbooru 같은 이미지 booru 보드의 태그에 대한 자동완성 힌트를 보여줍니다. 로컬 환경에 저장된 CSV 파일을 사용하고 조정 가능한 설정 파일이 포함되어 있습니다.",
"Do not add watermark to images": "이미지에 워터마크 추가하지 않기",
"Do not do anything special": "아무것도 하지 않기",
"Do not save grids consisting of one picture": "이미지가 1개뿐인 그리드는 저장하지 않기",
@@ -317,6 +322,7 @@
"None": "없음",
"Nothing": "없음",
"Nothing found in the image.": "Nothing found in the image.",
+ "novelai-2-local-prompt": "NovelAI 프롬프트 변환기",
"Number of columns on the page": "각 페이지마다 표시할 가로줄 수",
"Number of grids in each row": "각 세로줄마다 표시될 그리드 수",
"number of images to delete consecutively next": "연속적으로 삭제할 이미지 수",
@@ -431,6 +437,7 @@
"Save images with embedding in PNG chunks": "PNG 청크로 이미지에 임베딩을 포함시켜 저장",
"Save style": "스타일 저장",
"Save text information about generation parameters as chunks to png files": "이미지 생성 설정값을 PNG 청크에 텍스트로 저장",
+ "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file.": "옵티마이저 상태를 별개의 *.optim 파일로 저장하기. 하이퍼네트워크 파일과 일치하는 optim 파일로부터 훈련을 재개할 수 있습니다.",
"Saving images/grids": "이미지/그리드 저장",
"Saving to a directory": "디렉토리에 저장",
"Scale by": "스케일링 배수 지정",
@@ -515,6 +522,7 @@
"Tile size for ESRGAN upscalers. 0 = no tiling.": "ESRGAN 업스케일러들의 타일 사이즈. 0 = 타일링 없음.",
"Tiling": "타일링",
"Time taken:": "소요 시간 : ",
+ "tokenizer": "토크나이저",
"Torch active/reserved:": "활성화/예약된 Torch 양 : ",
"Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "활성화된 Torch : 생성 도중 캐시된 데이터를 포함해 사용된 VRAM의 최대량\n예약된 Torch : 활성화되고 캐시된 모든 데이터를 포함해 Torch에게 할당된 VRAM의 최대량\n시스템 VRAM : 모든 어플리케이션에 할당된 VRAM 최대량 / 총 GPU VRAM (최고 이용도%)",
"Train": "훈련",