From bdaa36c84470adbdce3e98c01a69af5e95adfb02 Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 30 Sep 2022 23:53:25 -0400 Subject: [PATCH 01/33] When device is MPS, use CPU for GFPGAN instead GFPGAN will not work if the device is MPS, so default to CPU instead. --- modules/devices.py | 2 +- modules/gfpgan_model.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 07bb2339..08bb26d6 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -34,7 +34,7 @@ errors.run(enable_tf32, "Enabling TF32") device = get_optimal_device() -device_codeformer = cpu if has_mps else device +device_gfpgan = device_codeformer = cpu if device.type == 'mps' else device def randn(seed, shape): diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index bb30d733..fcd8544a 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -21,7 +21,7 @@ def gfpgann(): global loaded_gfpgan_model global model_path if loaded_gfpgan_model is not None: - loaded_gfpgan_model.gfpgan.to(shared.device) + loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan) return loaded_gfpgan_model if gfpgan_constructor is None: @@ -36,8 +36,8 @@ def gfpgann(): else: print("Unable to load gfpgan model!") return None - model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None) - model.gfpgan.to(shared.device) + model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan) + model.gfpgan.to(devices.device_gfpgan) loaded_gfpgan_model = model return model From 3cf1a96006daffedb8ecd0ae142eca4c4da06105 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 21:11:03 -0700 Subject: [PATCH 02/33] added safety for blank directory naming patterns --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index f1aed5d6..e7894b4c 100644 --- a/modules/images.py +++ b/modules/images.py @@ -311,7 +311,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[cfg]", str(p.cfg_scale)) x = x.replace("[width]", str(p.width)) x = x.replace("[height]", str(p.height)) - x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]), replace_spaces=False)) + x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "No styles", replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) @@ -374,7 +374,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt) + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ ') path = os.path.join(path, dirname) os.makedirs(path, exist_ok=True) From 70f526704721a303ae045f6406439dcceee4302e Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 21:18:15 -0700 Subject: [PATCH 03/33] use os.path.normpath for better safety checking --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index 
e7894b4c..5ef7eb92 100644 --- a/modules/images.py +++ b/modules/images.py @@ -374,8 +374,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ ') - path = os.path.join(path, dirname) + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt) + path = os.path.normpath(os.path.join(path, dirname)) os.makedirs(path, exist_ok=True) From 32edf1732f27a1fad5133667c22b948adda1b070 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 21:37:14 -0700 Subject: [PATCH 04/33] os.path.normpath wasn't working, reverting to manual strip --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index 5ef7eb92..4998e92c 100644 --- a/modules/images.py +++ b/modules/images.py @@ -374,8 +374,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt) - path = os.path.normpath(os.path.join(path, dirname)) + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ /') + path = os.path.join(path, dirname) os.makedirs(path, exist_ok=True) From 138662734c25dab4e73e632b7eaff9ad9c0ce2b4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 07:57:59 +0300 Subject: [PATCH 05/33] use dropdown instead of radio for img2img upscaler selection --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 7246eadc..2a599e9c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -183,7 +183,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}), "SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), "ldsr_steps": OptionInfo(100, "LDSR processing steps. 
Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { From e615d4f9d101e2712c7c2d0e3e8feb19cb430c74 Mon Sep 17 00:00:00 2001 From: Hanusz Leszek Date: Sun, 2 Oct 2022 21:08:23 +0200 Subject: [PATCH 06/33] Convert folder icon surrogate pair to valid utf8 --- javascript/hints.js | 2 +- modules/ui.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 84694eeb..e72e9338 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -15,7 +15,7 @@ titles = { "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed", "\u{1f3a8}": "Add a random artist to the prompt.", "\u2199\ufe0f": "Read generation parameters from prompt into user interface.", - "\uD83D\uDCC2": "Open images output directory", + "\u{1f4c2}": "Open images output directory", "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt", "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back", diff --git a/modules/ui.py b/modules/ui.py index d9d02ece..16432151 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -69,7 +69,7 @@ random_symbol = '\U0001f3b2\ufe0f' # 🎲️ reuse_symbol = '\u267b\ufe0f' # ♻️ art_symbol = '\U0001f3a8' # 🎨 paste_symbol = '\u2199\ufe0f' # ↙ -folder_symbol = '\uD83D\uDCC2' +folder_symbol = '\U0001f4c2' # 📂 def plaintext_to_html(text): text = "
<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>
" From 34c638142eaa57f89b86545ba3c72085036398bb Mon Sep 17 00:00:00 2001 From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com> Date: Fri, 30 Sep 2022 22:38:14 +0100 Subject: [PATCH 07/33] Fixed when eta = 0 Unexpected behavior when using eta = 0 in something like XY, but your default eta was set to something not 0. --- modules/sd_samplers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 9316875a..dbf570d2 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -127,7 +127,7 @@ class VanillaStableDiffusionSampler: return res def initialize(self, p): - self.eta = p.eta or opts.eta_ddim + self.eta = p.eta if p.eta is not None else opts.eta_ddim for fieldname in ['p_sample_ddim', 'p_sample_plms']: if hasattr(self.sampler, fieldname): From 36ea4ac0f5844e5c8dec124edbdb714ccdd6013c Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sun, 2 Oct 2022 22:21:16 -0700 Subject: [PATCH 08/33] moved no-style return outside join function --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index bba55158..1a046aca 100644 --- a/modules/images.py +++ b/modules/images.py @@ -315,7 +315,7 @@ def apply_filename_pattern(x, p, seed, prompt): #currently disabled if using the save button, will work otherwise # if enabled it will cause a bug because styles is not included in the save_files data dictionary if hasattr(p, "styles"): - x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"] or "None"), replace_spaces=False)) + x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) From 6491b09c24ea77f1f69990ea80a216f9ce319589 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 08:53:52 +0300 Subject: [PATCH 09/33] use existing function for gfpgan --- modules/gfpgan_model.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index bb30d733..dd3fbcab 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -97,11 +97,7 @@ def setup_model(dirname): return "GFPGAN" def restore(self, np_image): - np_image_bgr = np_image[:, :, ::-1] - cropped_faces, restored_faces, gfpgan_output_bgr = gfpgann().enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) - np_image = gfpgan_output_bgr[:, :, ::-1] - - return np_image + return gfpgan_fix_faces(np_image) shared.face_restorers.append(FaceRestorerGFPGAN()) except Exception: From 43a74fa595003321200a40bd2431e56c245e75ed Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 11:48:19 +0300 Subject: [PATCH 10/33] batch processing for img2img with an empty output directory, by request --- modules/img2img.py | 7 +++++-- modules/ui.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index f4455c90..2ff8e261 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -23,8 +23,10 @@ def process_batch(p, input_dir, output_dir, args): print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.") + save_normally = output_dir == '' + p.do_not_save_grid = True - p.do_not_save_samples = True + p.do_not_save_samples = not 
save_normally state.job_count = len(images) * p.n_iter @@ -48,7 +50,8 @@ def process_batch(p, input_dir, output_dir, args): left, right = os.path.splitext(filename) filename = f"{left}-{n}{right}" - processed_image.save(os.path.join(output_dir, filename)) + if not save_normally: + processed_image.save(os.path.join(output_dir, filename)) def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args): diff --git a/modules/ui.py b/modules/ui.py index 16432151..55f7aa95 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -658,7 +658,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.TabItem('Batch img2img', id='batch'): hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>") + gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>
") img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs) img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs) From 2865ef4b9ab16d56326cc805541bebcf01d099bc Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 13:10:03 +0300 Subject: [PATCH 11/33] fix broken date in TI --- modules/textual_inversion/textual_inversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 8686f534..cd9f3498 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -164,7 +164,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') - log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%d-%m"), embedding_name) + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name) if save_embedding_every > 0: embedding_dir = os.path.join(log_directory, "embeddings") From 5ef0baf5eaec7f21a1666af424405cbee19f3764 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 08:52:11 +0300 Subject: [PATCH 12/33] add support for gelbooru tags in filenames for textual inversion --- modules/textual_inversion/dataset.py | 7 +++++-- modules/textual_inversion/preprocess.py | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index e8394ff6..7c44ea5b 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -9,6 +9,9 @@ from torchvision import transforms import random import tqdm from modules import devices +import re + +re_tag = re.compile(r"[a-zA-Z][_\w\d()]+") class PersonalizedBase(Dataset): @@ -38,8 +41,8 @@ class PersonalizedBase(Dataset): image = image.resize((self.width, self.height), PIL.Image.BICUBIC) filename = os.path.basename(path) - filename_tokens = os.path.splitext(filename)[0].replace('_', '-').replace(' ', '-').split('-') - filename_tokens = [token for token in filename_tokens if token.isalpha()] + filename_tokens = os.path.splitext(filename)[0] + filename_tokens = re_tag.findall(filename_tokens) npimage = np.array(image).astype(np.uint8) npimage = (npimage / 127.5 - 1.0).astype(np.float32) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 209e928f..f545a993 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -26,7 +26,9 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca if process_caption: caption = "-" + shared.interrogator.generate_caption(image) else: - caption = "" + caption = filename + caption = os.path.splitext(caption)[0] + caption = os.path.basename(caption) image.save(os.path.join(dst, f"{index:05}-{subindex[0]}{caption}.png")) subindex[0] += 1 From 1c5604791da7e57f40880698666b6617a1754c65 Mon Sep 17 00:00:00 2001 From: DoTheSneedful Date: Mon, 3 Oct 2022 22:20:09 -0400 Subject: [PATCH 13/33] Add a prompt order option to XY plot script --- scripts/xy_grid.py | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 146663b0..044c30e6 100644 --- a/scripts/xy_grid.py +++ 
b/scripts/xy_grid.py @@ -1,5 +1,6 @@ from collections import namedtuple from copy import copy +from itertools import permutations import random from PIL import Image @@ -28,6 +29,27 @@ def apply_prompt(p, x, xs): p.prompt = p.prompt.replace(xs[0], x) p.negative_prompt = p.negative_prompt.replace(xs[0], x) +def apply_order(p, x, xs): + token_order = [] + + # Initally grab the tokens from the prompt so they can be later be replaced in order of earliest seen in the prompt + for token in x: + token_order.append((p.prompt.find(token), token)) + + token_order.sort(key=lambda t: t[0]) + + search_from_pos = 0 + for idx, token in enumerate(x): + original_pos, old_token = token_order[idx] + + # Get position of the token again as it will likely change as tokens are being replaced + pos = p.prompt.find(old_token) + if original_pos >= 0: + # Avoid trying to replace what was just replaced by searching later in the prompt string + p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, token, 1) + + search_from_pos = pos + len(token) + samplers_dict = {} for i, sampler in enumerate(modules.sd_samplers.samplers): @@ -60,7 +82,8 @@ def format_value_add_label(p, opt, x): def format_value(p, opt, x): if type(x) == float: x = round(x, 8) - + if type(x) == type(list()): + x = str(x) return x def do_nothing(p, x, xs): @@ -89,6 +112,7 @@ axis_options = [ AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label), AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label), AxisOption("Eta", float, apply_field("eta"), format_value_add_label), + AxisOption("Prompt order", type(list()), apply_order, format_value), AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones ] @@ -159,7 +183,11 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - valslist = [x.strip() for x in vals.split(",")] + if opt.type == type(list()): + valslist = [x for x in vals] + else: + valslist = [x.strip() for x in vals.split(",")] + if opt.type == int: valslist_ext = [] @@ -212,9 +240,17 @@ class Script(scripts.Script): return valslist x_opt = axis_options[x_type] + + if x_opt.label == "Prompt order": + x_values = list(permutations([x.strip() for x in x_values.split(",")])) + xs = process_axis(x_opt, x_values) y_opt = axis_options[y_type] + + if y_opt.label == "Prompt order": + y_values = list(permutations([y.strip() for y in y_values.split(",")])) + ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): From 1a6d40db35656083d5bf9d3a3430b45fda4e85eb Mon Sep 17 00:00:00 2001 From: DoTheSneedful Date: Tue, 4 Oct 2022 00:18:15 -0400 Subject: [PATCH 14/33] Fix token ordering in prompt order XY plot --- scripts/xy_grid.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 044c30e6..5bcd3921 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -32,24 +32,21 @@ def apply_prompt(p, x, xs): def apply_order(p, x, xs): token_order = [] - # Initally grab the tokens from the prompt so they can be later be replaced in order of earliest seen in the prompt + # Initally grab the tokens from the prompt so they can be be replaced in order of earliest seen for token in x: token_order.append((p.prompt.find(token), token)) token_order.sort(key=lambda t: t[0]) search_from_pos = 0 - for idx, token in enumerate(x): - original_pos, old_token = token_order[idx] - 
+ for idx, (original_pos, old_token) in enumerate(token_order): # Get position of the token again as it will likely change as tokens are being replaced - pos = p.prompt.find(old_token) + pos = search_from_pos + p.prompt[search_from_pos:].find(old_token) if original_pos >= 0: # Avoid trying to replace what was just replaced by searching later in the prompt string - p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, token, 1) - - search_from_pos = pos + len(token) + p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, x[idx], 1) + search_from_pos = pos + len(x[idx]) samplers_dict = {} for i, sampler in enumerate(modules.sd_samplers.samplers): From 56371153b545e3a43c3a5f206264019af361f3af Mon Sep 17 00:00:00 2001 From: DoTheSneedful Date: Tue, 4 Oct 2022 01:07:36 -0400 Subject: [PATCH 15/33] XY plot prompt order simplify logic --- scripts/xy_grid.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 5bcd3921..7def47f5 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -38,15 +38,21 @@ def apply_order(p, x, xs): token_order.sort(key=lambda t: t[0]) - search_from_pos = 0 - for idx, (original_pos, old_token) in enumerate(token_order): - # Get position of the token again as it will likely change as tokens are being replaced - pos = search_from_pos + p.prompt[search_from_pos:].find(old_token) - if original_pos >= 0: - # Avoid trying to replace what was just replaced by searching later in the prompt string - p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, x[idx], 1) + prompt_parts = [] - search_from_pos = pos + len(x[idx]) + # Split the prompt up, taking out the tokens + for _, token in token_order: + n = p.prompt.find(token) + prompt_parts.append(p.prompt[0:n]) + p.prompt = p.prompt[n + len(token):] + + # Rebuild the prompt with the tokens in the order we want + prompt_tmp = "" + for idx, part in enumerate(prompt_parts): + prompt_tmp += part + prompt_tmp += x[idx] + p.prompt = prompt_tmp + p.prompt + samplers_dict = {} for i, sampler in enumerate(modules.sd_samplers.samplers): From 556c36b9607e3f4eacdddc85f8e7a78b29476ea7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 09:18:00 +0300 Subject: [PATCH 16/33] add hint, refactor code for #1607 --- javascript/hints.js | 1 + scripts/xy_grid.py | 35 ++++++++++++++++++----------------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index e72e9338..8adcd983 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -47,6 +47,7 @@ titles = { "Custom code": "Run Python code. Advanced user only. Must run program with --allow-code for this to work", "Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others", + "Prompt order": "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order", "Tiling": "Produce an image that can be tiled.", "Tile overlap": "For SD upscale, how much overlap in pixels should there be between tiles. 
Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.", diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 7def47f5..1237e754 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -29,10 +29,11 @@ def apply_prompt(p, x, xs): p.prompt = p.prompt.replace(xs[0], x) p.negative_prompt = p.negative_prompt.replace(xs[0], x) + def apply_order(p, x, xs): token_order = [] - # Initally grab the tokens from the prompt so they can be be replaced in order of earliest seen + # Initally grab the tokens from the prompt, so they can be replaced in order of earliest seen for token in x: token_order.append((p.prompt.find(token), token)) @@ -85,17 +86,26 @@ def format_value_add_label(p, opt, x): def format_value(p, opt, x): if type(x) == float: x = round(x, 8) - if type(x) == type(list()): - x = str(x) return x + +def format_value_join_list(p, opt, x): + return ", ".join(x) + + def do_nothing(p, x, xs): pass + def format_nothing(p, opt, x): return "" +def str_permutations(x): + """dummy function for specifying it in AxisOption's type when you want to get a list of permutations""" + return x + + AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"]) AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"]) @@ -108,6 +118,7 @@ axis_options = [ AxisOption("Steps", int, apply_field("steps"), format_value_add_label), AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label), AxisOption("Prompt S/R", str, apply_prompt, format_value), + AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list), AxisOption("Sampler", str, apply_sampler, format_value), AxisOption("Checkpoint name", str, apply_checkpoint, format_value), AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label), @@ -115,7 +126,6 @@ axis_options = [ AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label), AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label), AxisOption("Eta", float, apply_field("eta"), format_value_add_label), - AxisOption("Prompt order", type(list()), apply_order, format_value), AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones ] @@ -158,6 +168,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*") re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*") + class Script(scripts.Script): def title(self): return "X/Y plot" @@ -186,11 +197,7 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - if opt.type == type(list()): - valslist = [x for x in vals] - else: - valslist = [x.strip() for x in vals.split(",")] - + valslist = [x.strip() for x in vals.split(",")] if opt.type == int: valslist_ext = [] @@ -237,23 +244,17 @@ class Script(scripts.Script): valslist_ext.append(val) valslist = valslist_ext + elif opt.type == str_permutations: + valslist = list(permutations(valslist)) valslist = [opt.type(x) for x in valslist] return valslist x_opt = axis_options[x_type] - - if x_opt.label == "Prompt order": - x_values = list(permutations([x.strip() for x in x_values.split(",")])) - xs = process_axis(x_opt, x_values) y_opt = axis_options[y_type] - - if y_opt.label == "Prompt order": - 
y_values = list(permutations([y.strip() for y in y_values.split(",")])) - ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): From eeab7aedf532680a6ae9058ee272450bb07e41eb Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 4 Oct 2022 04:24:35 -0400 Subject: [PATCH 17/33] Add --use-cpu command line option Remove MPS detection to use CPU for GFPGAN / CodeFormer and add a --use-cpu command line option. --- modules/devices.py | 5 ++--- modules/esrgan_model.py | 9 ++++----- modules/scunet_model.py | 8 ++++---- modules/shared.py | 9 +++++++-- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 5d9c7a07..b5a0cd29 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,8 +1,8 @@ import torch -# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility from modules import errors +# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility has_mps = getattr(torch, 'has_mps', False) cpu = torch.device("cpu") @@ -32,8 +32,7 @@ def enable_tf32(): errors.run(enable_tf32, "Enabling TF32") -device = get_optimal_device() -device_gfpgan = device_codeformer = cpu if device.type == 'mps' else device +device = device_gfpgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() dtype = torch.float16 def randn(seed, shape): diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 4aed9283..d17e730f 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,8 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgam_model_arch as arch -from modules import shared, modelloader, images -from modules.devices import has_mps +from modules import shared, modelloader, images, devices from modules.paths import models_path from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts @@ -97,7 +96,7 @@ class UpscalerESRGAN(Upscaler): model = self.load_model(selected_model) if model is None: return img - model.to(shared.device) + model.to(devices.device_esrgan) img = esrgan_upscale(model, img) return img @@ -112,7 +111,7 @@ class UpscalerESRGAN(Upscaler): print("Unable to load %s from %s" % (self.model_path, filename)) return None - pretrained_net = torch.load(filename, map_location='cpu' if has_mps else None) + pretrained_net = torch.load(filename, map_location='cpu' if shared.device.type == 'mps' else None) crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32) pretrained_net = fix_model_layers(crt_model, pretrained_net) @@ -127,7 +126,7 @@ def upscale_without_tiling(model, img): img = img[:, :, ::-1] img = np.moveaxis(img, 2, 0) / 255 img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(shared.device) + img = img.unsqueeze(0).to(devices.device_esrgan) with torch.no_grad(): output = model(img) output = output.squeeze().float().cpu().clamp_(0, 1).numpy() diff --git a/modules/scunet_model.py b/modules/scunet_model.py index 7987ac14..fb64b740 100644 --- a/modules/scunet_model.py +++ b/modules/scunet_model.py @@ -8,7 +8,7 @@ import torch from basicsr.utils.download_util import load_file_from_url import modules.upscaler -from modules import shared, modelloader +from modules import devices, modelloader from modules.paths import models_path from modules.scunet_model_arch import SCUNet as net @@ -51,12 +51,12 @@ class UpscalerScuNET(modules.upscaler.Upscaler): if model is None: return img - device = shared.device + device = devices.device_scunet img = np.array(img) img = 
img[:, :, ::-1] img = np.moveaxis(img, 2, 0) / 255 img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(shared.device) + img = img.unsqueeze(0).to(device) img = img.to(device) with torch.no_grad(): @@ -69,7 +69,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): return PIL.Image.fromarray(output, 'RGB') def load_model(self, path: str): - device = shared.device + device = devices.device_scunet if "http" in path: filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name, progress=True) diff --git a/modules/shared.py b/modules/shared.py index 2a599e9c..7899ab8d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,7 +12,7 @@ import modules.interrogate import modules.memmon import modules.sd_models import modules.styles -from modules.devices import get_optimal_device +import modules.devices as devices from modules.paths import script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') @@ -46,6 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") +parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) @@ -63,7 +64,11 @@ parser.add_argument("--enable-console-prompts", action='store_true', help="print cmd_opts = parser.parse_args() -device = get_optimal_device() + +devices.device, devices.device_gfpgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ +(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer']) + +device = devices.device batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram From 27ddc24fdee1fbe709054a43235ab7f9c51b3e9f Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 4 Oct 2022 05:18:17 -0400 Subject: [PATCH 18/33] Add BSRGAN to --add-cpu --- modules/bsrgan_model.py | 6 +++--- modules/devices.py | 2 +- modules/shared.py | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py index e62c6657..3bd80791 100644 --- a/modules/bsrgan_model.py +++ b/modules/bsrgan_model.py @@ -8,7 +8,7 @@ import torch from basicsr.utils.download_util import load_file_from_url import modules.upscaler -from modules import shared, modelloader +from modules import devices, modelloader from modules.bsrgan_model_arch import RRDBNet from modules.paths import models_path @@ -44,13 +44,13 @@ 
class UpscalerBSRGAN(modules.upscaler.Upscaler): model = self.load_model(selected_file) if model is None: return img - model.to(shared.device) + model.to(devices.device_bsrgan) torch.cuda.empty_cache() img = np.array(img) img = img[:, :, ::-1] img = np.moveaxis(img, 2, 0) / 255 img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(shared.device) + img = img.unsqueeze(0).to(devices.device_bsrgan) with torch.no_grad(): output = model(img) output = output.squeeze().float().cpu().clamp_(0, 1).numpy() diff --git a/modules/devices.py b/modules/devices.py index b5a0cd29..b7899632 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -32,7 +32,7 @@ def enable_tf32(): errors.run(enable_tf32, "Enabling TF32") -device = device_gfpgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() +device = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() dtype = torch.float16 def randn(seed, shape): diff --git a/modules/shared.py b/modules/shared.py index 7899ab8d..95b98a06 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -46,7 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") -parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) +parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) @@ -65,8 +65,8 @@ parser.add_argument("--enable-console-prompts", action='store_true', help="print cmd_opts = parser.parse_args() -devices.device, devices.device_gfpgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ -(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer']) +devices.device, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ +(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer']) device = devices.device From dc9c5a97742e3a34d37da7108642d8adc0dc5858 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 4 Oct 2022 05:22:50 -0400 Subject: [PATCH 19/33] Modify --add-cpu description --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 95b98a06..25aff5b0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -46,7 +46,7 @@ 
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") -parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) +parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU as torch device for specified modules", default=[]) parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) From 6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 12:32:22 +0300 Subject: [PATCH 20/33] send all three of GFPGAN's and codeformer's models to CPU memory instead of just one for #1283 --- modules/codeformer_model.py | 12 ++++++++++-- modules/devices.py | 10 ++++++++++ modules/gfpgan_model.py | 14 ++++++++++++-- modules/processing.py | 16 +++++++++------- 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index a29f3855..e6d9fa4f 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -69,10 +69,14 @@ def setup_model(dirname): self.net = net self.face_helper = face_helper - self.net.to(devices.device_codeformer) return net, face_helper + def send_model_to(self, device): + self.net.to(device) + self.face_helper.face_det.to(device) + self.face_helper.face_parse.to(device) + def restore(self, np_image, w=None): np_image = np_image[:, :, ::-1] @@ -82,6 +86,8 @@ def setup_model(dirname): if self.net is None or self.face_helper is None: return np_image + self.send_model_to(devices.device_codeformer) + self.face_helper.clean_all() self.face_helper.read_image(np_image) self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) @@ -113,8 +119,10 @@ def setup_model(dirname): if original_resolution != restored_img.shape[0:2]: restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR) + self.face_helper.clean_all() + if shared.opts.face_restoration_unload: - self.net.to(devices.cpu) + self.send_model_to(devices.cpu) return restored_img diff --git a/modules/devices.py b/modules/devices.py index ff82f2f6..12aab665 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,3 +1,5 @@ +import contextlib + import torch # has_mps is only available in nightly pytorch (for now), `getattr` for compatibility @@ -57,3 +59,11 @@ def randn_without_seed(shape): return torch.randn(shape, device=device) + +def autocast(): + from modules import shared + + if dtype 
== torch.float32 or shared.cmd_opts.precision == "full": + return contextlib.nullcontext() + + return torch.autocast("cuda") diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index dd3fbcab..5586b554 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -37,22 +37,32 @@ def gfpgann(): print("Unable to load gfpgan model!") return None model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None) - model.gfpgan.to(shared.device) loaded_gfpgan_model = model return model +def send_model_to(model, device): + model.gfpgan.to(device) + model.face_helper.face_det.to(device) + model.face_helper.face_parse.to(device) + + def gfpgan_fix_faces(np_image): model = gfpgann() if model is None: return np_image + + send_model_to(model, devices.device) + np_image_bgr = np_image[:, :, ::-1] cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) np_image = gfpgan_output_bgr[:, :, ::-1] + model.face_helper.clean_all() + if shared.opts.face_restoration_unload: - model.gfpgan.to(devices.cpu) + send_model_to(model, devices.cpu) return np_image diff --git a/modules/processing.py b/modules/processing.py index 0a4b6198..9cbecdd8 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,4 +1,3 @@ -import contextlib import json import math import os @@ -330,9 +329,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] - precision_scope = torch.autocast if cmd_opts.precision == "autocast" else contextlib.nullcontext - ema_scope = (contextlib.nullcontext if cmd_opts.lowvram else p.sd_model.ema_scope) - with torch.no_grad(), precision_scope("cuda"), ema_scope(): + + with torch.no_grad(): p.init(all_prompts, all_seeds, all_subseeds) if state.job_count == -1: @@ -351,8 +349,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed: #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt]) #c = p.sd_model.get_learned_conditioning(prompts) - uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) - c = prompt_parser.get_learned_conditioning(prompts, p.steps) + with devices.autocast(): + uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) + c = prompt_parser.get_learned_conditioning(prompts, p.steps) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -361,7 +360,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) + with devices.autocast(): + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype) + if state.interrupted: # if we are interruped, sample returns just noise @@ -386,6 +387,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() x_sample = modules.face_restoration.restore_faces(x_sample) + devices.torch_gc() image = Image.fromarray(x_sample) From 2f1b61d97987ae0a52a7dfc6bc99c68928bdb594 Mon Sep 17 00:00:00 2001 From: dan Date: Mon, 3 Oct 2022 19:25:36 +0800 Subject: [PATCH 21/33] Allow nested structures inside schedules --- modules/prompt_parser.py | 119 +++++++++++++++++--------------------- requirements.txt | 1 + 
requirements_versions.txt | 1 + 3 files changed, 55 insertions(+), 66 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index e811eb9e..99c8ed99 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -1,20 +1,11 @@ import re from collections import namedtuple import torch +from lark import Lark, Transformer, Visitor +import functools import modules.shared as shared -re_prompt = re.compile(r''' -(.*?) -\[ - ([^]:]+): - (?:([^]:]*):)? - ([0-9]*\.?[0-9]+) -] -| -(.+) -''', re.X) - # a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): # [25, 'fantasy landscape with a mountain and an oak in foreground shoddy'] @@ -25,61 +16,57 @@ re_prompt = re.compile(r''' def get_learned_conditioning_prompt_schedules(prompts, steps): - res = [] - cache = {} - - for prompt in prompts: - prompt_schedule: list[list[str | int]] = [[steps, ""]] - - cached = cache.get(prompt, None) - if cached is not None: - res.append(cached) - continue - - for m in re_prompt.finditer(prompt): - plaintext = m.group(1) if m.group(5) is None else m.group(5) - concept_from = m.group(2) - concept_to = m.group(3) - if concept_to is None: - concept_to = concept_from - concept_from = "" - swap_position = float(m.group(4)) if m.group(4) is not None else None - - if swap_position is not None: - if swap_position < 1: - swap_position = swap_position * steps - swap_position = int(min(swap_position, steps)) - - swap_index = None - found_exact_index = False - for i in range(len(prompt_schedule)): - end_step = prompt_schedule[i][0] - prompt_schedule[i][1] += plaintext - - if swap_position is not None and swap_index is None: - if swap_position == end_step: - swap_index = i - found_exact_index = True - - if swap_position < end_step: - swap_index = i - - if swap_index is not None: - if not found_exact_index: - prompt_schedule.insert(swap_index, [swap_position, prompt_schedule[swap_index][1]]) - - for i in range(len(prompt_schedule)): - end_step = prompt_schedule[i][0] - must_replace = swap_position < end_step - - prompt_schedule[i][1] += concept_to if must_replace else concept_from - - res.append(prompt_schedule) - cache[prompt] = prompt_schedule - #for t in prompt_schedule: - # print(t) - - return res + grammar = r""" + start: prompt + prompt: (emphasized | scheduled | weighted | plain)* + !emphasized: "(" prompt ")" + | "(" prompt ":" prompt ")" + | "[" prompt "]" + scheduled: "[" (prompt ":")? prompt ":" NUMBER "]" + !weighted: "{" weighted_item ("|" weighted_item)* "}" + !weighted_item: prompt (":" prompt)? 
+ plain: /([^\\\[\](){}:|]|\\.)+/ + %import common.SIGNED_NUMBER -> NUMBER + """ + parser = Lark(grammar, parser='lalr') + def collect_steps(steps, tree): + l = [steps] + class CollectSteps(Visitor): + def scheduled(self, tree): + tree.children[-1] = float(tree.children[-1]) + if tree.children[-1] < 1: + tree.children[-1] *= steps + tree.children[-1] = min(steps, int(tree.children[-1])) + l.append(tree.children[-1]) + CollectSteps().visit(tree) + return sorted(set(l)) + def at_step(step, tree): + class AtStep(Transformer): + def scheduled(self, args): + if len(args) == 2: + before, after, when = (), *args + else: + before, after, when = args + yield before if step <= when else after + def start(self, args): + def flatten(x): + if type(x) == str: + yield x + else: + for gen in x: + yield from flatten(gen) + return ''.join(flatten(args[0])) + def plain(self, args): + yield args[0].value + def __default__(self, data, children, meta): + for child in children: + yield from child + return AtStep().transform(tree) + @functools.cache + def get_schedule(prompt): + tree = parser.parse(prompt) + return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] + return [get_schedule(prompt) for prompt in prompts] ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"]) diff --git a/requirements.txt b/requirements.txt index d4b337fc..631fe616 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,3 +22,4 @@ clean-fid resize-right torchdiffeq kornia +lark diff --git a/requirements_versions.txt b/requirements_versions.txt index 8a9acf20..fdff2687 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -21,3 +21,4 @@ clean-fid==0.1.29 resize-right==0.0.2 torchdiffeq==0.2.3 kornia==0.6.7 +lark==1.1.2 From 61652461242951966e5b4cee83ce359cefa91c17 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 14:23:22 +0300 Subject: [PATCH 22/33] support interrupting after the previous change --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 9cbecdd8..6f5599c7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -361,7 +361,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.autocast(): - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype) + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) if state.interrupted: @@ -369,6 +369,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: # use the image collected previously in sampler loop samples_ddim = shared.state.current_latent + samples_ddim = samples_ddim.to(devices.dtype) + x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) From d5bba20a58f43a9f984bb67b4e17f48661f6b818 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 14:35:12 +0300 Subject: [PATCH 23/33] ignore errors in parse for purposes of token counting for #1564 --- modules/ui.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 55f7aa95..20dc8c37 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -386,14 +386,22 @@ def 
connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: outputs=[seed, dummy_component] ) + def update_token_counter(text, steps): - prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps) + try: + prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps) + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step,prompt_text in flat_prompts] + prompts = [prompt_text for step, prompt_text in flat_prompts] tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1]) style_class = ' class="red"' if (token_count > max_length) else "" return f"<span {style_class}>{token_count}/{max_length}</span>" + def create_toprow(is_img2img): id_part = "img2img" if is_img2img else "txt2img" From accd00d6b8258c12b5168918a4c546b02357924a Mon Sep 17 00:00:00 2001 From: Justin Riddiough Date: Tue, 4 Oct 2022 01:14:28 -0500 Subject: [PATCH 24/33] Explain how to use second progress bar in pycharm --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 25aff5b0..11bdf01a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -200,7 +200,7 @@ options_templates.update(options_section(('system', "System"), { "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. 
In PyCharm select 'emulate terminal in console output'."), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), })) options_templates.update(options_section(('sd', "Stable Diffusion"), { @@ -209,7 +209,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), - "enable_emphasis": OptionInfo(True, "Eemphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), From eec1b39bd54711ca31e43022d2d6ac8c6d7281da Mon Sep 17 00:00:00 2001 From: Milly Date: Tue, 4 Oct 2022 20:16:52 +0900 Subject: [PATCH 26/33] Apply prompt pattern last --- modules/images.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/modules/images.py b/modules/images.py index bba55158..5b56c7e3 100644 --- a/modules/images.py +++ b/modules/images.py @@ -287,32 +287,13 @@ def apply_filename_pattern(x, p, seed, prompt): if seed is not None: x = x.replace("[seed]", str(seed)) - if prompt is not None: - x = x.replace("[prompt]", sanitize_filename_part(prompt)) - if "[prompt_no_styles]" in x: - prompt_no_style = prompt - for style in shared.prompt_styles.get_style_prompts(p.styles): - if len(style) > 0: - style_parts = [y for y in style.split("{prompt}")] - for part in style_parts: - prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',') - prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip() - x = x.replace("[prompt_no_styles]", sanitize_filename_part(prompt_no_style, replace_spaces=False)) - - x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False)) - if "[prompt_words]" in x: - words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0] - if len(words) == 0: - words = ["empty"] - x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False)) - if p is not None: x = x.replace("[steps]", str(p.steps)) x = x.replace("[cfg]", str(p.cfg_scale)) x = x.replace("[width]", str(p.width)) x = x.replace("[height]", str(p.height)) - - #currently disabled if using the save button, will work otherwise + + #currently disabled if using the save button, will work otherwise # if enabled it will cause a bug because styles is not included in the save_files data dictionary if hasattr(p, "styles"): x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"] or "None"), replace_spaces=False)) @@ -324,6 +305,26 @@ def apply_filename_pattern(x, p, seed, prompt): x = 
x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S")) x = x.replace("[job_timestamp]", shared.state.job_timestamp) + # Apply [prompt] last, because it may contain any replacement word. + if prompt is not None: + x = x.replace("[prompt]", sanitize_filename_part(prompt)) + if "[prompt_no_styles]" in x: + prompt_no_style = prompt + for style in shared.prompt_styles.get_style_prompts(p.styles): + if len(style) > 0: + style_parts = [y for y in style.split("{prompt}")] + for part in style_parts: + prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',') + prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip() + x = x.replace("[prompt_no_styles]", sanitize_filename_part(prompt_no_style, replace_spaces=False)) + + x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False)) + if "[prompt_words]" in x: + words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0] + if len(words) == 0: + words = ["empty"] + x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False)) + if cmd_opts.hide_ui_dir_config: x = re.sub(r'^[\\/]+|\.{2,}[\\/]+|[\\/]+\.{2,}', '', x) From 52cef36f6ba169a8e606ecdcaed73d47378f0e8e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 16:54:31 +0300 Subject: [PATCH 27/33] emergency fix for img2img --- modules/processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6f5599c7..e9c45394 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -331,7 +331,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: output_images = [] with torch.no_grad(): - p.init(all_prompts, all_seeds, all_subseeds) + with devices.autocast(): + p.init(all_prompts, all_seeds, all_subseeds) if state.job_count == -1: state.job_count = p.n_iter From 957e29a8e9cb8ca069799ec69263e188c89ed6a6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 17:23:48 +0300 Subject: [PATCH 28/33] option to not show images in web ui --- modules/img2img.py | 3 +++ modules/shared.py | 1 + modules/txt2img.py | 3 +++ 3 files changed, 7 insertions(+) diff --git a/modules/img2img.py b/modules/img2img.py index 2ff8e261..da212d72 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -129,4 +129,7 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro if opts.samples_log_stdout: print(generation_info_js) + if opts.do_not_show_images: + processed.images = [] + return processed.images, generation_info_js, plaintext_to_html(processed.info) diff --git a/modules/shared.py b/modules/shared.py index a7d13b2d..ff4e5fa3 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -229,6 +229,7 @@ options_templates.update(options_section(('ui', "User interface"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), "return_grid": OptionInfo(True, "Show grid in results for web"), + "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), diff --git a/modules/txt2img.py b/modules/txt2img.py index d4406c3c..e985242b 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -48,5 +48,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: if opts.samples_log_stdout: print(generation_info_js) + if opts.do_not_show_images: + processed.images = [] + return processed.images, generation_info_js, plaintext_to_html(processed.info)
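
A note on the next patch: the old fix_seed() resolved a seed of -1 (the "randomize" sentinel) by writing a concrete value back into p.seed, so reusing the same processing object for another run silently stopped randomizing. The patch resolves seeds into local variables instead. A minimal sketch of the idea; get_fixed_seed is taken from the patch, while Params is a hypothetical stand-in for the real StableDiffusionProcessing class:

    import random

    def get_fixed_seed(seed):
        # resolve the sentinel to a concrete seed without touching the caller's object
        if seed is None or seed == '' or seed == -1:
            return int(random.randrange(4294967294))
        return seed

    class Params:  # hypothetical stand-in for StableDiffusionProcessing
        seed = -1  # -1 means "pick a random seed for this run"

    p = Params()
    resolved = get_fixed_seed(p.seed)  # concrete seed used for this run only
    assert p.seed == -1                # the sentinel survives, so the next run randomizes again
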
From e1b128d8e46bddb9c0b2fd3ee0eefd57e0527ee0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 17:36:39 +0300 Subject: [PATCH 29/33] do not touch p.seed/p.subseed during processing #1181 --- modules/processing.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index e9c45394..8180c63d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -248,9 +248,16 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see return x +def get_fixed_seed(seed): + if seed is None or seed == '' or seed == -1: + return int(random.randrange(4294967294)) + + return seed + + def fix_seed(p): - p.seed = int(random.randrange(4294967294)) if p.seed is None or p.seed == '' or p.seed == -1 else p.seed - p.subseed = int(random.randrange(4294967294)) if p.subseed is None or p.subseed == '' or p.subseed == -1 else p.subseed + p.seed = get_fixed_seed(p.seed) + p.subseed = get_fixed_seed(p.subseed) def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0): @@ -292,7 +299,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - fix_seed(p) + seed = get_fixed_seed(p.seed) + subseed = get_fixed_seed(p.subseed) if p.outpath_samples is not None: os.makedirs(p.outpath_samples, exist_ok=True) @@ -311,15 +319,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed: else: all_prompts = p.batch_size * p.n_iter * [p.prompt] - if type(p.seed) == list: - all_seeds = p.seed + if type(seed) == list: - all_seeds = seed else: - all_seeds = [int(p.seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))] + all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))] - if type(p.subseed) == list: - all_subseeds = p.subseed + if type(subseed) == list: + all_subseeds = subseed else: - all_subseeds = [int(p.subseed) + x for x in range(len(all_prompts))] + all_subseeds = [int(subseed) + x for x in range(len(all_prompts))] def infotext(iteration=0, position_in_batch=0): return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch) From 1eb588cbf19924333b88beaa1ac0041904966640 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 18:02:01 +0300 Subject: [PATCH 30/33] remove functools.cache as some people are having issues with it --- modules/prompt_parser.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 99c8ed99..5d58c4ed 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -29,6 +29,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): %import common.SIGNED_NUMBER -> NUMBER """ parser = Lark(grammar, parser='lalr') + def collect_steps(steps, tree): l = [steps] class CollectSteps(Visitor): @@ -40,6 +41,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): l.append(tree.children[-1]) CollectSteps().visit(tree) return sorted(set(l)) + def at_step(step, tree): class AtStep(Transformer): def scheduled(self, args): @@ -62,11 +64,13 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): for child in children: yield from child return AtStep().transform(tree) - @functools.cache + def get_schedule(prompt): tree = parser.parse(prompt) return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] - return [get_schedule(prompt) for prompt in prompts] + + promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)} + return [promptdict[prompt] for prompt in prompts] ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
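
A note on the patch above: the dictionary comprehension over set(prompts) replaces functools.cache with per-call memoization, so each distinct prompt is parsed once and repeated prompts (a batch typically repeats the negative prompt, for example) reuse the parsed schedule. A minimal self-contained sketch of the pattern, with str.upper standing in for the real get_schedule:

    def get_schedules(prompts, get_schedule):
        # compute each unique prompt once, then fan results back out in input order
        promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)}
        return [promptdict[prompt] for prompt in prompts]

    assert get_schedules(["a", "b", "a"], str.upper) == ["A", "B", "A"]
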
From 90e911fd546e76f879b38a764473569911a0f845 Mon Sep 17 00:00:00 2001 From: Rae Fu Date: Tue, 4 Oct 2022 09:49:51 -0600 Subject: [PATCH 31/33] prompt_parser: allow spaces in schedules, add test, log/ignore errors Only build the parser once (at import time) instead of for each step. doctest is run by simply executing modules/prompt_parser.py --- modules/processing.py | 10 +-- modules/prompt_parser.py | 139 +++++++++++++++++++++++++-------------- 2 files changed, 95 insertions(+), 54 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 8180c63d..bb94033b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -84,7 +84,7 @@ class StableDiffusionProcessing: self.s_tmin = opts.s_tmin self.s_tmax = float('inf') # not representable as a standard ui option self.s_noise = opts.s_noise - + if not seed_enable_extras: self.subseed = -1 self.subseed_strength = 0 @@ -296,7 +296,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: assert(len(p.prompt) > 0) else: assert p.prompt is not None - + devices.torch_gc() seed = get_fixed_seed(p.seed) subseed = get_fixed_seed(p.subseed) @@ -359,8 +359,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt]) #c = p.sd_model.get_learned_conditioning(prompts) with devices.autocast(): - uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) - c = prompt_parser.get_learned_conditioning(prompts, p.steps) + uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) + c = prompt_parser.get_learned_conditioning(shared.sd_model, prompts, p.steps) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -527,7 +527,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # GC now before running the next img2img to prevent running out of memory x = None devices.torch_gc() - + samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps) return samples diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 5d58c4ed..a3b12421 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -1,10 +1,7 @@ import re from collections import namedtuple -import torch -from lark import Lark, Transformer, Visitor -import functools -import modules.shared as shared +import lark # a prompt like 
this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): @@ -14,25 +11,48 @@ import modules.shared as shared # [75, 'fantasy landscape with a lake and an oak in background masterful'] # [100, 'fantasy landscape with a lake and a christmas tree in background masterful'] +schedule_parser = lark.Lark(r""" +!start: (prompt | /[][():]/+)* +prompt: (emphasized | scheduled | plain | WHITESPACE)* +!emphasized: "(" prompt ")" + | "(" prompt ":" prompt ")" + | "[" prompt "]" +scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]" +WHITESPACE: /\s+/ +plain: /([^\\\[\]():]|\\.)+/ +%import common.SIGNED_NUMBER -> NUMBER +""") def get_learned_conditioning_prompt_schedules(prompts, steps): - grammar = r""" - start: prompt - prompt: (emphasized | scheduled | weighted | plain)* - !emphasized: "(" prompt ")" - | "(" prompt ":" prompt ")" - | "[" prompt "]" - scheduled: "[" (prompt ":")? prompt ":" NUMBER "]" - !weighted: "{" weighted_item ("|" weighted_item)* "}" - !weighted_item: prompt (":" prompt)? - plain: /([^\\\[\](){}:|]|\\.)+/ - %import common.SIGNED_NUMBER -> NUMBER """ - parser = Lark(grammar, parser='lalr') + >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0] + >>> g("test") + [[10, 'test']] + >>> g("a [b:3]") + [[3, 'a '], [10, 'a b']] + >>> g("a [b: 3]") + [[3, 'a '], [10, 'a b']] + >>> g("a [[[b]]:2]") + [[2, 'a '], [10, 'a [[b]]']] + >>> g("[(a:2):3]") + [[3, ''], [10, '(a:2)']] + >>> g("a [b : c : 1] d") + [[1, 'a b d'], [10, 'a c d']] + >>> g("a[b:[c:d:2]:1]e") + [[1, 'abe'], [2, 'ace'], [10, 'ade']] + >>> g("a [unbalanced") + [[10, 'a [unbalanced']] + >>> g("a [b:.5] c") + [[5, 'a c'], [10, 'a b c']] + >>> g("a [{b|d{:.5] c") # not handling this right now + [[5, 'a c'], [10, 'a {b|d{ c']] + >>> g("((a][:b:c [d:3]") + [[3, '((a][:b:c '], [10, '((a][:b:c d']] + """ def collect_steps(steps, tree): l = [steps] - class CollectSteps(Visitor): + class CollectSteps(lark.Visitor): def scheduled(self, tree): tree.children[-1] = float(tree.children[-1]) if tree.children[-1] < 1: @@ -43,13 +63,10 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): return sorted(set(l)) def at_step(step, tree): - class AtStep(Transformer): + class AtStep(lark.Transformer): def scheduled(self, args): - if len(args) == 2: - before, after, when = (), *args - else: - before, after, when = args - yield before if step <= when else after + before, after, _, when = args + yield before or () if step <= when else after def start(self, args): def flatten(x): if type(x) == str: @@ -57,16 +74,22 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): else: for gen in x: yield from flatten(gen) - return ''.join(flatten(args[0])) + return ''.join(flatten(args)) def plain(self, args): yield args[0].value def __default__(self, data, children, meta): for child in children: yield from child return AtStep().transform(tree) - + def get_schedule(prompt): - tree = parser.parse(prompt) + try: + tree = schedule_parser.parse(prompt) + except lark.exceptions.LarkError as e: + if 0: + import traceback + traceback.print_exc() + return [[steps, prompt]] return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)} @@ -77,8 +100,7 @@ ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at ScheduledPromptBatch = 
namedtuple("ScheduledPromptBatch", ["shape", "schedules"]) -def get_learned_conditioning(prompts, steps): - +def get_learned_conditioning(model, prompts, steps): res = [] prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps) @@ -92,7 +114,7 @@ def get_learned_conditioning(prompts, steps): continue texts = [x[1] for x in prompt_schedule] - conds = shared.sd_model.get_learned_conditioning(texts) + conds = model.get_learned_conditioning(texts) cond_schedule = [] for i, (end_at_step, text) in enumerate(prompt_schedule): @@ -105,12 +127,13 @@ def get_learned_conditioning(prompts, steps): def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step): - res = torch.zeros(c.shape, device=shared.device, dtype=next(shared.sd_model.parameters()).dtype) + param = c.schedules[0][0].cond + res = torch.zeros(c.shape, device=param.device, dtype=param.dtype) for i, cond_schedule in enumerate(c.schedules): target_index = 0 - for curret_index, (end_at, cond) in enumerate(cond_schedule): + for current, (end_at, cond) in enumerate(cond_schedule): if current_step <= end_at: - target_index = curret_index + target_index = current break res[i] = cond_schedule[target_index].cond @@ -148,23 +171,26 @@ def parse_prompt_attention(text): \\ - literal character '\' anything else - just text - Example: - - 'a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).' - - produces: - - [ - ['a ', 1.0], - ['house', 1.5730000000000004], - [' ', 1.1], - ['on', 1.0], - [' a ', 1.1], - ['hill', 0.55], - [', sun, ', 1.1], - ['sky', 1.4641000000000006], - ['.', 1.1] - ] + >>> parse_prompt_attention('normal text') + [['normal text', 1.0]] + >>> parse_prompt_attention('an (important) word') + [['an ', 1.0], ['important', 1.1], [' word', 1.0]] + >>> parse_prompt_attention('(unbalanced') + [['unbalanced', 1.1]] + >>> parse_prompt_attention('\(literal\]') + [['(literal]', 1.0]] + >>> parse_prompt_attention('(unnecessary)(parens)') + [['unnecessaryparens', 1.1]] + >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') + [['a ', 1.0], + ['house', 1.5730000000000004], + [' ', 1.1], + ['on', 1.0], + [' a ', 1.1], + ['hill', 0.55], + [', sun, ', 1.1], + ['sky', 1.4641000000000006], + ['.', 1.1]] """ res = [] @@ -206,4 +232,19 @@ def parse_prompt_attention(text): if len(res) == 0: res = [["", 1.0]] + # merge runs of identical weights + i = 0 + while i + 1 < len(res): + if res[i][1] == res[i + 1][1]: + res[i][0] += res[i + 1][0] + res.pop(i + 1) + else: + i += 1 + return res + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) +else: + import torch # doctest faster From b32852ef037251eb3d846af76e2965594e1ac7a5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 20:49:54 +0300 Subject: [PATCH 32/33] add editor to img2img --- modules/shared.py | 1 + modules/ui.py | 2 +- style.css | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index ff4e5fa3..e52c9b1d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -55,6 +55,7 @@ parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide dire parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or 
comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for cropping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="color-sketch") parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) diff --git a/modules/ui.py b/modules/ui.py index 20dc8c37..6cd6761b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -644,7 +644,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode: with gr.TabItem('img2img', id='img2img'): - init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil") + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool) with gr.TabItem('Inpaint', id='inpaint'): init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA") diff --git a/style.css b/style.css index 39586bf1..e8f4cb75 100644 --- a/style.css +++ b/style.css @@ -403,3 +403,7 @@ input[type="range"]{ .red { color: red; } + +#img2img_image div.h-60{ + height: 480px; +} \ No newline at end of file
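
A note on the patch above: the new flag is an ordinary argparse option constrained by choices, and its value is simply threaded through to the gr.Image constructor as the tool argument. A minimal sketch of the flag handling on its own (argparse only, no gradio):

    import argparse

    parser = argparse.ArgumentParser()
    # same pattern as the patch: restrict the flag to the two tools the UI supports
    parser.add_argument("--gradio-img2img-tool", type=str,
                        choices=["color-sketch", "editor"], default="color-sketch",
                        help="gradio image uploader tool: 'editor' for cropping, 'color-sketch' for drawing")

    args = parser.parse_args(["--gradio-img2img-tool", "editor"])
    assert args.gradio_img2img_tool == "editor"  # argparse maps dashes to underscores
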
From ef40e4cd4d383a3405e03f1da3f5b5a1820a8f53 Mon Sep 17 00:00:00 2001 From: xpscyho Date: Tue, 4 Oct 2022 15:12:38 -0400 Subject: [PATCH 33/33] Display time taken in mins, secs when relevant Fixes #1656 --- modules/ui.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 6cd6761b..de6342a4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -196,6 +196,11 @@ def wrap_gradio_call(func, extra_outputs=None): res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"] elapsed = time.perf_counter() - t + elapsed_m = int(elapsed // 60) + elapsed_s = elapsed % 60 + elapsed_text = f"{elapsed_s:.2f}s" + if (elapsed_m > 0): + elapsed_text = f"{elapsed_m}m "+elapsed_text if run_memmon: mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()} @@ -210,7 +215,7 @@ def wrap_gradio_call(func, extra_outputs=None): vram_html = '' # last item is always HTML - res[-1] += f"<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>{vram_html}</div>" + res[-1] += f"<div class='performance'><p class='time'>Time taken: {elapsed_text}</p>{vram_html}</div>" shared.state.interrupted = False shared.state.job_count = 0