From b64994b973ad8f4268bf785f25f92b66c8dced40 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 16 Sep 2022 19:24:48 +0300
Subject: [PATCH 01/20] added original negative prompt to img2img alt
---
scripts/img2imgalt.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index dbda3255..7f1f53a7 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -59,7 +59,7 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
return x / x.std()
-Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt"])
+Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt"])
class Script(scripts.Script):
@@ -74,19 +74,20 @@ class Script(scripts.Script):
def ui(self, is_img2img):
original_prompt = gr.Textbox(label="Original prompt", lines=1)
+ original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
randomness = gr.Slider(label="randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
- return [original_prompt, cfg, st, randomness]
+ return [original_prompt, original_negative_prompt, cfg, st, randomness]
- def run(self, p, original_prompt, cfg, st, randomness):
+ def run(self, p, original_prompt, original_negative_prompt, cfg, st, randomness):
p.batch_size = 1
p.batch_count = 1
def sample_extra(x, conditioning, unconditional_conditioning):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
- same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st and self.cache.original_prompt == original_prompt
+ same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st and self.cache.original_prompt == original_prompt and self.cache.original_negative_prompt == original_negative_prompt
same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
if same_everything:
@@ -94,9 +95,9 @@ class Script(scripts.Script):
else:
shared.state.job_count += 1
cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
- uncond = p.sd_model.get_learned_conditioning(p.batch_size * [""])
+ uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
- self.cache = Cached(rec_noise, cfg, st, lat, original_prompt)
+ self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt)
rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], [p.seed + x + 1 for x in range(p.init_latent.shape[0])])
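A note on why the cache key must grow with this patch: find_noise_for_image() is expensive, so the script only reuses the cached noise when every input that influenced it is unchanged. Below is a minimal standalone sketch of that gate; the recompute callback and the get_recovered_noise name are illustrative stand-ins, not part of the patch.

    from collections import namedtuple

    import numpy as np

    Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt"])
    cache = None

    def get_recovered_noise(lat, cfg, st, prompt, negative_prompt, recompute):
        global cache
        # every field that feeds find_noise_for_image() must take part in the comparison,
        # otherwise stale noise would be reused after the user edits that field
        same_params = cache is not None and cache.cfg_scale == cfg and cache.steps == st \
            and cache.original_prompt == prompt and cache.original_negative_prompt == negative_prompt
        same_everything = same_params and cache.latent.shape == lat.shape and np.abs(cache.latent - lat).sum() < 100
        if same_everything:
            return cache.noise
        noise = recompute()
        cache = Cached(noise, cfg, st, lat, prompt, negative_prompt)
        return noise

    lat = np.zeros((4, 64, 64), dtype=int)
    n1 = get_recovered_noise(lat, 1.0, 50, "cat", "blurry", lambda: np.random.randn(*lat.shape))
    n2 = get_recovered_noise(lat, 1.0, 50, "cat", "blurry", lambda: np.random.randn(*lat.shape))
    assert n1 is n2  # second call hits the cache; changing the negative prompt would recompute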
From 2ee9fc8eb84d5e1864dbabd8a8c6b279a6ae21ac Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 16 Sep 2022 22:18:30 +0300
Subject: [PATCH 02/20] new outpainting script
---
scripts/outpainting_mk_2.py | 290 ++++++++++++++++++++++++++++++++++++
1 file changed, 290 insertions(+)
create mode 100644 scripts/outpainting_mk_2.py
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
new file mode 100644
index 00000000..a42c1aed
--- /dev/null
+++ b/scripts/outpainting_mk_2.py
@@ -0,0 +1,290 @@
+import math
+
+import numpy as np
+import skimage
+
+import modules.scripts as scripts
+import gradio as gr
+from PIL import Image, ImageDraw
+
+from modules import images, processing, devices
+from modules.processing import Processed, process_images
+from modules.shared import opts, cmd_opts, state
+
+
+def expand(x, dir, amount, power=0.75):
+ is_left = dir == 3
+ is_right = dir == 1
+ is_up = dir == 0
+ is_down = dir == 2
+
+ if is_left or is_right:
+ noise = np.zeros((x.shape[0], amount, 3), dtype=float)
+ indexes = np.random.random((x.shape[0], amount)) ** power * (1 - np.arange(amount) / amount)
+ if is_right:
+ indexes = 1 - indexes
+ indexes = (indexes * (x.shape[1] - 1)).astype(int)
+
+ for row in range(x.shape[0]):
+ if is_left:
+ noise[row] = x[row][indexes[row]]
+ else:
+ noise[row] = np.flip(x[row][indexes[row]], axis=0)
+
+ x = np.concatenate([noise, x] if is_left else [x, noise], axis=1)
+ return x
+
+ if is_up or is_down:
+ noise = np.zeros((amount, x.shape[1], 3), dtype=float)
+ indexes = np.random.random((x.shape[1], amount)) ** power * (1 - np.arange(amount) / amount)
+ if is_down:
+ indexes = 1 - indexes
+        indexes = (indexes * (x.shape[0] - 1)).astype(int)
+
+ for row in range(x.shape[1]):
+ if is_up:
+ noise[:, row] = x[:, row][indexes[row]]
+ else:
+ noise[:, row] = np.flip(x[:, row][indexes[row]], axis=0)
+
+ x = np.concatenate([noise, x] if is_up else [x, noise], axis=0)
+ return x
+
+
+def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
+ # helper fft routines that keep ortho normalization and auto-shift before and after fft
+ def _fft2(data):
+ if data.ndim > 2: # has channels
+ out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
+ for c in range(data.shape[2]):
+ c_data = data[:, :, c]
+ out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho")
+ out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c])
+ else: # one channel
+ out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
+ out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho")
+ out_fft[:, :] = np.fft.ifftshift(out_fft[:, :])
+
+ return out_fft
+
+ def _ifft2(data):
+ if data.ndim > 2: # has channels
+ out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
+ for c in range(data.shape[2]):
+ c_data = data[:, :, c]
+ out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho")
+ out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c])
+ else: # one channel
+ out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
+ out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho")
+ out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :])
+
+ return out_ifft
+
+ def _get_gaussian_window(width, height, std=3.14, mode=0):
+ window_scale_x = float(width / min(width, height))
+ window_scale_y = float(height / min(width, height))
+
+ window = np.zeros((width, height))
+ x = (np.arange(width) / width * 2. - 1.) * window_scale_x
+ for y in range(height):
+ fy = (y / height * 2. - 1.) * window_scale_y
+ if mode == 0:
+ window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std)
+ else:
+ window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14) # hey wait a minute that's not gaussian
+
+ return window
+
+ def _get_masked_window_rgb(np_mask_grey, hardness=1.):
+ np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3))
+ if hardness != 1.:
+ hardened = np_mask_grey[:] ** hardness
+ else:
+ hardened = np_mask_grey[:]
+ for c in range(3):
+ np_mask_rgb[:, :, c] = hardened[:]
+ return np_mask_rgb
+
+ width = _np_src_image.shape[0]
+ height = _np_src_image.shape[1]
+ num_channels = _np_src_image.shape[2]
+
+ np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
+ np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
+ img_mask = np_mask_grey > 1e-6
+ ref_mask = np_mask_grey < 1e-3
+
+ windowed_image = _np_src_image * (1. - _get_masked_window_rgb(np_mask_grey))
+ windowed_image /= np.max(windowed_image)
+ windowed_image += np.average(_np_src_image) * np_mask_rgb # / (1.-np.average(np_mask_rgb)) # rather than leave the masked area black, we get better results from fft by filling the average unmasked color
+
+ src_fft = _fft2(windowed_image) # get feature statistics from masked src img
+ src_dist = np.absolute(src_fft)
+ src_phase = src_fft / src_dist
+
+ noise_window = _get_gaussian_window(width, height, mode=1) # start with simple gaussian noise
+ noise_rgb = np.random.random_sample((width, height, num_channels))
+ noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
+ noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter
+ for c in range(num_channels):
+ noise_rgb[:, :, c] += (1. - color_variation) * noise_grey
+
+ noise_fft = _fft2(noise_rgb)
+ for c in range(num_channels):
+ noise_fft[:, :, c] *= noise_window
+ noise_rgb = np.real(_ifft2(noise_fft))
+ shaped_noise_fft = _fft2(noise_rgb)
+ shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase # perform the actual shaping
+
+    brightness_variation = 0.  # color_variation # todo: temporarily tying brightness variation to color variation for now
+ contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2.
+
+ # scikit-image is used for histogram matching, very convenient!
+ shaped_noise = np.real(_ifft2(shaped_noise_fft))
+ shaped_noise -= np.min(shaped_noise)
+ shaped_noise /= np.max(shaped_noise)
+ shaped_noise[img_mask, :] = skimage.exposure.match_histograms(shaped_noise[img_mask, :] ** 1., contrast_adjusted_np_src[ref_mask, :], channel_axis=1)
+ shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb
+
+ matched_noise = shaped_noise[:]
+
+ return np.clip(matched_noise, 0., 1.)
+
+
+
+class Script(scripts.Script):
+ def title(self):
+ return "Outpainting mk2"
+
+ def show(self, is_img2img):
+ return is_img2img
+
+ def ui(self, is_img2img):
+ if not is_img2img:
+ return None
+
+ info = gr.HTML("
Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
")
+
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False)
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
+ noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
+ color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
+
+ return [info, pixels, mask_blur, direction, noise_q, color_variation]
+
+ def run(self, p, _, pixels, mask_blur, direction, noise_q, color_variation):
+ initial_seed_and_info = [None, None]
+
+ process_width = p.width
+ process_height = p.height
+
+ p.mask_blur = mask_blur*4
+ p.inpaint_full_res = False
+ p.inpainting_fill = 1
+ p.do_not_save_samples = True
+ p.do_not_save_grid = True
+
+ left = pixels if "left" in direction else 0
+ right = pixels if "right" in direction else 0
+ up = pixels if "up" in direction else 0
+ down = pixels if "down" in direction else 0
+
+ init_img = p.init_images[0]
+ target_w = math.ceil((init_img.width + left + right) / 64) * 64
+ target_h = math.ceil((init_img.height + up + down) / 64) * 64
+
+ if left > 0:
+ left = left * (target_w - init_img.width) // (left + right)
+ if right > 0:
+ right = target_w - init_img.width - left
+
+ if up > 0:
+ up = up * (target_h - init_img.height) // (up + down)
+
+ if down > 0:
+ down = target_h - init_img.height - up
+
+ init_image = p.init_images[0]
+
+ state.job_count = (1 if left > 0 else 0) + (1 if right > 0 else 0)+ (1 if up > 0 else 0)+ (1 if down > 0 else 0)
+
+ def expand(init, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False):
+ is_horiz = is_left or is_right
+ is_vert = is_top or is_bottom
+ pixels_horiz = expand_pixels if is_horiz else 0
+ pixels_vert = expand_pixels if is_vert else 0
+
+ img = Image.new("RGB", (init.width + pixels_horiz, init.height + pixels_vert))
+ img.paste(init, (pixels_horiz if is_left else 0, pixels_vert if is_top else 0))
+ mask = Image.new("RGB", (init.width + pixels_horiz, init.height + pixels_vert), "white")
+ draw = ImageDraw.Draw(mask)
+ draw.rectangle((
+ expand_pixels + mask_blur if is_left else 0,
+ expand_pixels + mask_blur if is_top else 0,
+ mask.width - expand_pixels - mask_blur if is_right else mask.width,
+ mask.height - expand_pixels - mask_blur if is_bottom else mask.height,
+ ), fill="black")
+
+ np_image = (np.asarray(img) / 255.0).astype(np.float64)
+ np_mask = (np.asarray(mask) / 255.0).astype(np.float64)
+ noised = get_matched_noise(np_image, np_mask, noise_q, color_variation)
+ out = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB")
+
+ target_width = min(process_width, init.width + pixels_horiz) if is_horiz else img.width
+ target_height = min(process_height, init.height + pixels_vert) if is_vert else img.height
+
+ crop_region = (
+ 0 if is_left else out.width - target_width,
+ 0 if is_top else out.height - target_height,
+ target_width if is_left else out.width,
+ target_height if is_top else out.height,
+ )
+
+ image_to_process = out.crop(crop_region)
+ mask = mask.crop(crop_region)
+
+ p.width = target_width if is_horiz else img.width
+ p.height = target_height if is_vert else img.height
+ p.init_images = [image_to_process]
+ p.image_mask = mask
+
+ latent_mask = Image.new("RGB", (p.width, p.height), "white")
+ draw = ImageDraw.Draw(latent_mask)
+ draw.rectangle((
+ expand_pixels + mask_blur * 2 if is_left else 0,
+ expand_pixels + mask_blur * 2 if is_top else 0,
+ mask.width - expand_pixels - mask_blur * 2 if is_right else mask.width,
+ mask.height - expand_pixels - mask_blur * 2 if is_bottom else mask.height,
+ ), fill="black")
+ p.latent_mask = latent_mask
+
+ proc = process_images(p)
+ proc_img = proc.images[0]
+
+ if initial_seed_and_info[0] is None:
+ initial_seed_and_info[0] = proc.seed
+ initial_seed_and_info[1] = proc.info
+
+ out.paste(proc_img, (0 if is_left else out.width - proc_img.width, 0 if is_top else out.height - proc_img.height))
+ return out
+
+ img = init_image
+
+ if left > 0:
+ img = expand(img, left, is_left=True)
+ if right > 0:
+ img = expand(img, right, is_right=True)
+ if up > 0:
+ img = expand(img, up, is_top=True)
+ if down > 0:
+ img = expand(img, down, is_bottom=True)
+
+ res = Processed(p, [img], initial_seed_and_info[0], initial_seed_and_info[1])
+
+ if opts.samples_save:
+ images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p)
+
+ return res
+
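The heart of the new script is get_matched_noise(): it fills the masked region with noise whose frequency spectrum (shaped by src_dist ** noise_q and the source phase) and histogram match the unmasked pixels. A minimal sketch of calling it standalone, assuming the function above is in scope and scikit-image is installed; the random input stands in for a real photo.

    import numpy as np

    src = np.random.random_sample((128, 128, 3))   # float RGB image in 0..1
    mask = np.zeros_like(src)
    mask[:, 64:, :] = 1.0                          # 1.0 marks the half to outpaint

    filled = get_matched_noise(src, mask, noise_q=1.0, color_variation=0.05)
    assert filled.shape == src.shape
    assert 0.0 <= filled.min() and filled.max() <= 1.0   # output is clipped to 0..1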
From 449719b2fc9e8fd2e61f219e0979deb83c05177c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 16 Sep 2022 23:17:10 +0300
Subject: [PATCH 03/20] added Noise generation for outpainting mk2 to credits
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index 84a78da5..6c232e38 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,7 @@ The documentation was moved from this README over to the project's [wiki](https:
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
+- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
From 6168d09218073c369fd08b7208f93805f624ec05 Mon Sep 17 00:00:00 2001
From: uservar <63248296+uservar@users.noreply.github.com>
Date: Fri, 16 Sep 2022 19:07:14 +0000
Subject: [PATCH 04/20] Prevent uploading previous output from javascript
As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image. This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate.
---
script.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/script.js b/script.js
index 0852e421..8b10b53d 100644
--- a/script.js
+++ b/script.js
@@ -177,7 +177,7 @@ function submit(){
window.setTimeout(requestProgress, 500)
res = []
-    for(var i=0;i<arguments.length;i++){
+    for(var i=0;i<arguments.length - 3;i++){
         res.push(arguments[i])
     }
     return res
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 08:03:47 +0300
Subject: [PATCH 05/20] add a comment and some checks for the functionality of
the last PR
---
script.js | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/script.js b/script.js
index 8b10b53d..4a70e51d 100644
--- a/script.js
+++ b/script.js
@@ -177,9 +177,18 @@ function submit(){
window.setTimeout(requestProgress, 500)
res = []
-    for(var i=0;i<arguments.length - 3;i++){
+    for(var i=0;i<arguments.length;i++){
         res.push(arguments[i])
     }
+
+    // As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info)
+    // whenever you generate a new image. This can lead to uploading a huge gallery of previously generated images, which leads
+    // to an unnecessary delay between submitting and beginning to generate.
+    // If gradio at some point stops sending outputs, this may break something
+    if(Array.isArray(res[res.length - 3])){
+        res[res.length - 3] = null
+    }
+
     return res
Date: Fri, 16 Sep 2022 07:28:57 +1000
Subject: [PATCH 06/20] image.save parameter fix
* image.save takes exif as a parameter
* piexif takes the bytes as a parameter, not the exif_bytes function itself
* reduce calls to create_exif_bytes
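To illustrate the API point the message makes, a small sketch (not part of the patch): Pillow's Image.save() takes the already-serialized EXIF blob through the exif keyword, while an unrecognized keyword such as exif_bytes is simply ignored, so the metadata is silently dropped.

    import io

    import piexif
    import piexif.helper
    from PIL import Image

    exif = piexif.dump({
        "Exif": {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump("parameters: test", encoding="unicode")}
    })

    buf = io.BytesIO()
    # correct: pass the bytes via `exif`, not the function that produces them
    Image.new("RGB", (64, 64)).save(buf, format="JPEG", exif=exif)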
---
modules/images.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index f37f5f08..8cd7fe37 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -345,7 +345,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if not os.path.exists(fullfn):
break
- def exif_bytes():
+ def create_exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
@@ -353,7 +353,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
})
if extension.lower() in ("jpg", "jpeg", "webp"):
- image.save(fullfn, quality=opts.jpeg_quality, exif_bytes=exif_bytes())
+ exif_bytes = create_exif_bytes()
+ image.save(fullfn, quality=opts.jpeg_quality, exif=exif_bytes)
else:
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
@@ -370,7 +371,11 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
- image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif_bytes=exif_bytes())
+ if exif_bytes in locals():
+ pass
+ else:
+ exif_bytes = create_exif_bytes()
+ image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif=exif_bytes)
if opts.save_txt and info is not None:
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
From 3c665b8dd6da07c60af7783f0e0dd1dec714a9b4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 08:32:15 +0300
Subject: [PATCH 07/20] the last PR broke saving EXIF completely for me. I
don't know if it was broken already or some condition changed, but it seems
like the person who originally added EXIF said saving it with PIL may not
work. I switched to using piexif to add data after the file is written.
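For reference, a sketch of the piexif-after-write approach the patch adopts (the file path below is illustrative): the image is saved without metadata first, then piexif.insert() splices the EXIF segment into the finished JPEG.

    import piexif
    import piexif.helper
    from PIL import Image

    fn = "sample.jpg"
    Image.new("RGB", (64, 64)).save(fn, quality=80)   # write the file first, no metadata

    exif = piexif.dump({
        "Exif": {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump("parameters: test", encoding="unicode")}
    })
    piexif.insert(exif, fn)                           # splice EXIF into the already-written file

    assert piexif.load(fn)["Exif"][piexif.ExifIFD.UserComment]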
---
modules/images.py | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 8cd7fe37..7e1e506c 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -346,6 +346,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
break
def create_exif_bytes():
+ def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
@@ -353,14 +354,12 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
})
if extension.lower() in ("jpg", "jpeg", "webp"):
- exif_bytes = create_exif_bytes()
- image.save(fullfn, quality=opts.jpeg_quality, exif=exif_bytes)
+ image.save(fullfn, quality=opts.jpeg_quality)
+ if opts.enable_pnginfo and info is not None:
+ piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
- if extension.lower() == "webp":
- piexif.insert(exif_bytes, fullfn)
-
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
@@ -371,11 +370,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
- if exif_bytes in locals():
- pass
- else:
- exif_bytes = create_exif_bytes()
- image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif=exif_bytes)
+ image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
+ if opts.enable_pnginfo and info is not None:
+ piexif.insert(exif_bytes(), fullfn)
if opts.save_txt and info is not None:
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
From 1fc1c537c7303be88e0da93c3a632c48acb101e9 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 09:01:10 +0300
Subject: [PATCH 08/20] fix
---
modules/images.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/modules/images.py b/modules/images.py
index 7e1e506c..b62c48f8 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -345,7 +345,6 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if not os.path.exists(fullfn):
break
- def create_exif_bytes():
def exif_bytes():
return piexif.dump({
"Exif": {
From ed6787ca2fe950f633a925ccb0467eafd4ec0f43 Mon Sep 17 00:00:00 2001
From: EyeDeck
Date: Sat, 17 Sep 2022 00:49:31 -0400
Subject: [PATCH 09/20] Add VRAM monitoring
---
modules/memmon.py | 77 +++++++++++++++++++++++++++++++++++++++++++++++
modules/shared.py | 5 +++
modules/ui.py | 14 ++++++++-
style.css | 18 ++++++++++-
4 files changed, 112 insertions(+), 2 deletions(-)
create mode 100644 modules/memmon.py
diff --git a/modules/memmon.py b/modules/memmon.py
new file mode 100644
index 00000000..f2cac841
--- /dev/null
+++ b/modules/memmon.py
@@ -0,0 +1,77 @@
+import threading
+import time
+from collections import defaultdict
+
+import torch
+
+
+class MemUsageMonitor(threading.Thread):
+ run_flag = None
+ device = None
+ disabled = False
+ opts = None
+ data = None
+
+ def __init__(self, name, device, opts):
+ threading.Thread.__init__(self)
+ self.name = name
+ self.device = device
+ self.opts = opts
+
+ self.daemon = True
+ self.run_flag = threading.Event()
+ self.data = defaultdict(int)
+
+ def run(self):
+ if self.disabled:
+ return
+
+ while True:
+ self.run_flag.wait()
+
+ torch.cuda.reset_peak_memory_stats()
+ self.data.clear()
+
+ if self.opts.memmon_poll_rate <= 0:
+ self.run_flag.clear()
+ continue
+
+ self.data["min_free"] = torch.cuda.mem_get_info()[0]
+
+ while self.run_flag.is_set():
+ free, total = torch.cuda.mem_get_info() # calling with self.device errors, torch bug?
+ self.data["min_free"] = min(self.data["min_free"], free)
+
+ time.sleep(1 / self.opts.memmon_poll_rate)
+
+ def dump_debug(self):
+ print(self, 'recorded data:')
+ for k, v in self.read().items():
+ print(k, -(v // -(1024 ** 2)))
+
+ print(self, 'raw torch memory stats:')
+ tm = torch.cuda.memory_stats(self.device)
+ for k, v in tm.items():
+ if 'bytes' not in k:
+ continue
+ print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2)))
+
+ print(torch.cuda.memory_summary())
+
+ def monitor(self):
+ self.run_flag.set()
+
+ def read(self):
+ free, total = torch.cuda.mem_get_info()
+ self.data["total"] = total
+
+ torch_stats = torch.cuda.memory_stats(self.device)
+ self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
+ self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
+ self.data["system_peak"] = total - self.data["min_free"]
+
+ return self.data
+
+ def stop(self):
+ self.run_flag.clear()
+ return self.read()
diff --git a/modules/shared.py b/modules/shared.py
index da56b6ae..4f877036 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -12,6 +12,7 @@ from modules.paths import script_path, sd_path
from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
+import modules.memmon
sd_model_file = os.path.join(script_path, 'model.ckpt')
if not os.path.exists(sd_model_file):
@@ -138,6 +139,7 @@ class Options:
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
+ "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}),
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
@@ -217,3 +219,6 @@ class TotalTQDM:
total_tqdm = TotalTQDM()
+
+mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
+mem_mon.start()
diff --git a/modules/ui.py b/modules/ui.py
index 738ac945..01b2ba85 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -119,6 +119,7 @@ def save_files(js_data, images, index):
def wrap_gradio_call(func):
def f(*args, **kwargs):
+ shared.mem_mon.monitor()
t = time.perf_counter()
try:
@@ -135,8 +136,19 @@ def wrap_gradio_call(func):
elapsed = time.perf_counter() - t
+ mem_stats = {k:-(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()}
+ active_peak = mem_stats['active_peak']
+ reserved_peak = mem_stats['reserved_peak']
+ sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak']
+ sys_total = mem_stats['total']
+ sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak/sys_total * 100, 2)
+ vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.
" \
+ "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.
" \
+ "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
+
# last item is always HTML
- res[-1] = res[-1] + f"Time taken: {elapsed:.2f}s
"
+ res[-1] += f""
shared.state.interrupted = False
diff --git a/style.css b/style.css
index d41c098c..67ce8550 100644
--- a/style.css
+++ b/style.css
@@ -1,5 +1,21 @@
.output-html p {margin: 0 0.5em;}
-.performance { font-size: 0.85em; color: #444; }
+
+.performance {
+ font-size: 0.85em;
+ color: #444;
+ display: flex;
+ justify-content: space-between;
+ white-space: nowrap;
+}
+
+.performance .time {
+ margin-right: 0;
+}
+
+.performance .vram {
+ margin-left: 0;
+ text-align: right;
+}
#generate{
min-height: 4.5em;
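How the pieces above fit together at runtime, as a rough sketch (requires a CUDA device; FakeOpts is an illustrative stand-in for the real shared.opts):

    import torch

    from modules import memmon

    class FakeOpts:
        memmon_poll_rate = 8   # VRAM polls per second; 0 disables sampling

    mon = memmon.MemUsageMonitor("MemMon", torch.device("cuda"), FakeOpts())
    mon.start()     # daemon thread; idles until monitor() sets the run flag

    mon.monitor()   # wrap_gradio_call() does this right before generation
    # ... run a generation here ...
    stats = mon.stop()   # clears the flag and returns the collected byte counts
    print({k: -(v // -(1024 ** 2)) for k, v in stats.items()})   # ceil-divide to MiB, as the UI does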
From b8be33dad13d4937c6ef8fbb49715d843c3dd586 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 09:23:31 +0300
Subject: [PATCH 10/20] hide VRAM text if polling is disabled
---
modules/ui.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 01b2ba85..437bce66 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -136,7 +136,7 @@ def wrap_gradio_call(func):
elapsed = time.perf_counter() - t
- mem_stats = {k:-(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()}
+ mem_stats = {k: -(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak']
@@ -146,9 +146,10 @@ def wrap_gradio_call(func):
"Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.
" \
"Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
+        vram_html = '' if opts.memmon_poll_rate == 0 else f"<span class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</span>"
+
         # last item is always HTML
-        res[-1] += f"<p class='performance'><span class='time'>Time taken: {elapsed:.2f}s</span><span class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</span></p>"
+        res[-1] += f"<p class='performance'><span class='time'>Time taken: {elapsed:.2f}s</span>{vram_html}</p>"
shared.state.interrupted = False
From 247f58a5e740a7bd3980815961425b778d77ec28 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 12:05:04 +0300
Subject: [PATCH 11/20] add support for switching model checkpoints at runtime
---
modules/images.py | 2 +-
modules/processing.py | 2 +-
modules/sd_models.py | 148 ++++++++++++++++++++++++++++++++++++++++++
modules/shared.py | 19 ++++--
modules/ui.py | 5 ++
webui.py | 67 ++++---------------
6 files changed, 182 insertions(+), 61 deletions(-)
create mode 100644 modules/sd_models.py
diff --git a/modules/images.py b/modules/images.py
index b62c48f8..a3064333 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -274,7 +274,7 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[height]", str(p.height))
x = x.replace("[sampler]", sd_samplers.samplers[p.sampler_index].name)
- x = x.replace("[model_hash]", shared.sd_model_hash)
+ x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
if cmd_opts.hide_ui_dir_config:
diff --git a/modules/processing.py b/modules/processing.py
index 81c83f06..3a4ff224 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -227,7 +227,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
- "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model_hash else shared.sd_model_hash),
+ "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
diff --git a/modules/sd_models.py b/modules/sd_models.py
new file mode 100644
index 00000000..036af0e4
--- /dev/null
+++ b/modules/sd_models.py
@@ -0,0 +1,148 @@
+import glob
+import os.path
+import sys
+from collections import namedtuple
+import torch
+from omegaconf import OmegaConf
+
+
+from ldm.util import instantiate_from_config
+
+from modules import shared
+
+CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash'])
+checkpoints_list = {}
+
+try:
+ # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
+
+ from transformers import logging
+
+ logging.set_verbosity_error()
+except Exception:
+ pass
+
+
+def list_models():
+ checkpoints_list.clear()
+
+ model_dir = os.path.abspath(shared.cmd_opts.ckpt_dir)
+
+ def modeltitle(path, h):
+ abspath = os.path.abspath(path)
+
+ if abspath.startswith(model_dir):
+ name = abspath.replace(model_dir, '')
+ else:
+ name = os.path.basename(path)
+
+ if name.startswith("\\") or name.startswith("/"):
+ name = name[1:]
+
+ return f'{name} [{h}]'
+
+ cmd_ckpt = shared.cmd_opts.ckpt
+ if os.path.exists(cmd_ckpt):
+ h = model_hash(cmd_ckpt)
+ title = modeltitle(cmd_ckpt, h)
+ checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h)
+ elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
+ print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr)
+
+ if os.path.exists(model_dir):
+ for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True):
+ h = model_hash(filename)
+ title = modeltitle(filename, h)
+ checkpoints_list[title] = CheckpointInfo(filename, title, h)
+
+
+def model_hash(filename):
+ try:
+ with open(filename, "rb") as file:
+ import hashlib
+ m = hashlib.sha256()
+
+ file.seek(0x100000)
+ m.update(file.read(0x10000))
+ return m.hexdigest()[0:8]
+ except FileNotFoundError:
+ return 'NOFILE'
+
+
+def select_checkpoint():
+ model_checkpoint = shared.opts.sd_model_checkpoint
+ checkpoint_info = checkpoints_list.get(model_checkpoint, None)
+ if checkpoint_info is not None:
+ return checkpoint_info
+
+ if len(checkpoints_list) == 0:
+ print(f"Checkpoint {model_checkpoint} not found and no other checkpoints found", file=sys.stderr)
+ return None
+
+ checkpoint_info = next(iter(checkpoints_list.values()))
+ if model_checkpoint is not None:
+ print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
+
+ return checkpoint_info
+
+
+def load_model_weights(model, checkpoint_file, sd_model_hash):
+ print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
+
+ pl_sd = torch.load(checkpoint_file, map_location="cpu")
+ if "global_step" in pl_sd:
+ print(f"Global Step: {pl_sd['global_step']}")
+ sd = pl_sd["state_dict"]
+
+ model.load_state_dict(sd, strict=False)
+
+ if shared.cmd_opts.opt_channelslast:
+ model.to(memory_format=torch.channels_last)
+
+ if not shared.cmd_opts.no_half:
+ model.half()
+
+ model.sd_model_hash = sd_model_hash
+ model.sd_model_checkpint = checkpoint_file
+
+
+def load_model():
+ from modules import lowvram, sd_hijack
+ checkpoint_info = select_checkpoint()
+
+ sd_config = OmegaConf.load(shared.cmd_opts.config)
+ sd_model = instantiate_from_config(sd_config.model)
+ load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
+
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
+ else:
+ sd_model.to(shared.device)
+
+ sd_hijack.model_hijack.hijack(sd_model)
+
+ sd_model.eval()
+
+ print(f"Model loaded.")
+ return sd_model
+
+
+def reload_model_weights(sd_model):
+ from modules import lowvram, devices
+ checkpoint_info = select_checkpoint()
+
+ if sd_model.sd_model_checkpint == checkpoint_info.filename:
+ return
+
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ sd_model.to(devices.cpu)
+
+ load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
+
+ if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+ sd_model.to(devices.device)
+
+ print(f"Weights loaded.")
+ return sd_model
diff --git a/modules/shared.py b/modules/shared.py
index 4f877036..3c3aa9b6 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -13,14 +13,15 @@ from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
import modules.memmon
+import modules.sd_models
sd_model_file = os.path.join(script_path, 'model.ckpt')
-if not os.path.exists(sd_model_file):
- sd_model_file = "models/ldm/stable-diffusion-v1/model.ckpt"
+default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
-parser.add_argument("--ckpt", type=str, default=os.path.join(sd_path, sd_model_file), help="path to checkpoint of model",)
+parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
+parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth')
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
@@ -88,13 +89,17 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
+modules.sd_models.list_models()
+
+
class Options:
class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None):
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
+ self.onchange = onchange
data = None
hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None
@@ -150,6 +155,7 @@ class Options:
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
}
def __init__(self):
@@ -180,6 +186,10 @@ class Options:
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
+ def onchange(self, key, func):
+ item = self.data_labels.get(key)
+ item.onchange = func
+
opts = Options()
if os.path.exists(config_filename):
@@ -188,7 +198,6 @@ if os.path.exists(config_filename):
sd_upscalers = []
sd_model = None
-sd_model_hash = ''
progress_print_out = sys.stdout
diff --git a/modules/ui.py b/modules/ui.py
index 437bce66..36e3c664 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -758,7 +758,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
+ oldval = opts.data.get(key, None)
opts.data[key] = value
+
+ if oldval != value and opts.data_labels[key].onchange is not None:
+ opts.data_labels[key].onchange()
+
up.append(comp.update(value=value))
opts.save(shared.config_filename)
diff --git a/webui.py b/webui.py
index add72123..ff8997db 100644
--- a/webui.py
+++ b/webui.py
@@ -3,13 +3,8 @@ import threading
from modules.paths import script_path
-import torch
-from omegaconf import OmegaConf
-
import signal
-from ldm.util import instantiate_from_config
-
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.ui
@@ -24,6 +19,7 @@ import modules.extras
import modules.lowvram
import modules.txt2img
import modules.img2img
+import modules.sd_models
modules.codeformer_model.setup_codeformer()
@@ -33,31 +29,19 @@ shared.face_restorers.append(modules.face_restoration.FaceRestoration())
esrgan.load_models(cmd_opts.esrgan_models_path)
realesrgan.setup_realesrgan()
-
-def load_model_from_config(config, ckpt, verbose=False):
- print(f"Loading model [{shared.sd_model_hash}] from {ckpt}")
- pl_sd = torch.load(ckpt, map_location="cpu")
- if "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
-
- model = instantiate_from_config(config.model)
- m, u = model.load_state_dict(sd, strict=False)
- if len(m) > 0 and verbose:
- print("missing keys:")
- print(m)
- if len(u) > 0 and verbose:
- print("unexpected keys:")
- print(u)
- if cmd_opts.opt_channelslast:
- model = model.to(memory_format=torch.channels_last)
- model.eval()
- return model
-
-
queue_lock = threading.Lock()
+def wrap_queued_call(func):
+ def f(*args, **kwargs):
+ with queue_lock:
+ res = func(*args, **kwargs)
+
+ return res
+
+ return f
+
+
def wrap_gradio_gpu_call(func):
def f(*args, **kwargs):
shared.state.sampling_step = 0
@@ -80,33 +64,8 @@ def wrap_gradio_gpu_call(func):
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
-try:
- # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
-
- from transformers import logging
-
- logging.set_verbosity_error()
-except Exception:
- pass
-
-with open(cmd_opts.ckpt, "rb") as file:
- import hashlib
- m = hashlib.sha256()
-
- file.seek(0x100000)
- m.update(file.read(0x10000))
- shared.sd_model_hash = m.hexdigest()[0:8]
-
-sd_config = OmegaConf.load(cmd_opts.config)
-shared.sd_model = load_model_from_config(sd_config, cmd_opts.ckpt)
-shared.sd_model = (shared.sd_model if cmd_opts.no_half else shared.sd_model.half())
-
-if cmd_opts.lowvram or cmd_opts.medvram:
- modules.lowvram.setup_for_low_vram(shared.sd_model, cmd_opts.medvram)
-else:
- shared.sd_model = shared.sd_model.to(shared.device)
-
-modules.sd_hijack.model_hijack.hijack(shared.sd_model)
+shared.sd_model = modules.sd_models.load_model()
+shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
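The onchange plumbing this patch adds is what makes runtime switching work: the settings handler in modules/ui.py compares the old and new value and fires the per-option callback, which webui.py points at reload_model_weights(). A self-contained sketch of that mechanism, with names reduced from the diff above:

    class OptionInfo:
        def __init__(self, default=None, label="", onchange=None):
            self.default = default
            self.label = label
            self.onchange = onchange

    data_labels = {"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint")}
    data = {"sd_model_checkpoint": None}

    def onchange(key, func):
        data_labels[key].onchange = func

    def set_value(key, value):
        # what the settings handler does: write the value, then fire the callback on a real change
        oldval = data.get(key, None)
        data[key] = value
        if oldval != value and data_labels[key].onchange is not None:
            data_labels[key].onchange()

    onchange("sd_model_checkpoint", lambda: print("reload model weights here"))
    set_value("sd_model_checkpoint", "model.ckpt [abcdef12]")   # prints; a repeated call would not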
From f8f17e3b9e61f238dd32b6d1bab5db040c531559 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 12:12:55 +0300
Subject: [PATCH 12/20] updated readme to reflect new model location
---
README.md | 2 +-
models/Put Stable Diffusion checkpoints here.txt | 0
2 files changed, 1 insertion(+), 1 deletion(-)
create mode 100644 models/Put Stable Diffusion checkpoints here.txt
diff --git a/README.md b/README.md
index 6c232e38..d97ebc3f 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ Alternatively, use [Google Colab](https://colab.research.google.com/drive/1Iy-xW
1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH"
2. Install [git](https://git-scm.com/download/win).
3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
-4. Place `model.ckpt` in the base directory, alongside `webui.py`.
+4. Place `model.ckpt` in the `models` directory.
5. _*(Optional)*_ Place `GFPGANv1.3.pth` in the base directory, alongside `webui.py`.
6. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
diff --git a/models/Put Stable Diffusion checkpoints here.txt b/models/Put Stable Diffusion checkpoints here.txt
new file mode 100644
index 00000000..e69de29b
From 99585b3514e2d7e987651d5c6a0806f933af012b Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 12:38:15 +0300
Subject: [PATCH 13/20] moved progressbar to top by request
---
modules/ui.py | 7 ++++---
style.css | 6 ++++++
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 36e3c664..960f1e36 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -337,6 +337,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
+ progressbar = gr.HTML(elem_id="progressbar")
+
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4)
@@ -349,8 +351,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
- progressbar = gr.HTML(elem_id="progressbar")
-
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
@@ -474,6 +474,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
+ progressbar = gr.HTML(elem_id="progressbar")
+
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4)
@@ -487,7 +489,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
interrupt = gr.Button('Interrupt')
img2img_save_style = gr.Button('Save prompt as style')
- progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
html_info = gr.HTML()
diff --git a/style.css b/style.css
index 67ce8550..752d2cf4 100644
--- a/style.css
+++ b/style.css
@@ -167,6 +167,12 @@ input[type="range"]{
#txt2img_negative_prompt, #img2img_negative_prompt{
}
+#progressbar{
+ position: absolute;
+ z-index: 1000;
+ right: 0;
+}
+
.progressDiv{
width: 100%;
height: 30px;
From 304222ef94d1c3c60fab466a96c448868f391bce Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 13:49:36 +0300
Subject: [PATCH 14/20] X/Y plot support for switching checkpoints.
---
modules/sd_models.py | 4 ++--
script.js | 2 ++
scripts/xy_grid.py | 15 +++++++++++++++
3 files changed, 19 insertions(+), 2 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 036af0e4..4bd70fc5 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -127,9 +127,9 @@ def load_model():
return sd_model
-def reload_model_weights(sd_model):
+def reload_model_weights(sd_model, info=None):
from modules import lowvram, devices
- checkpoint_info = select_checkpoint()
+ checkpoint_info = info or select_checkpoint()
if sd_model.sd_model_checkpint == checkpoint_info.filename:
return
diff --git a/script.js b/script.js
index 4a70e51d..e63e0695 100644
--- a/script.js
+++ b/script.js
@@ -66,6 +66,8 @@ titles = {
"Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both",
"Apply style": "Insert selected styles into prompt fields",
"Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.",
+
+ "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
}
function gradioApp(){
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index eccfda87..680dd702 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -10,7 +10,9 @@ import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
+import modules.shared as shared
import modules.sd_samplers
+import modules.sd_models
import re
@@ -41,6 +43,15 @@ def apply_sampler(p, x, xs):
p.sampler_index = sampler_index
+def apply_checkpoint(p, x, xs):
+ applicable = [info for info in modules.sd_models.checkpoints_list.values() if x in info.title]
+    assert len(applicable) > 0, f'Checkpoint {x} not found'
+
+ info = applicable[0]
+
+ modules.sd_models.reload_model_weights(shared.sd_model, info)
+
+
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
@@ -74,6 +85,7 @@ axis_options = [
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
AxisOption("Sampler", str, apply_sampler, format_value),
+ AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
@@ -215,4 +227,7 @@ class Script(scripts.Script):
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
+ # restore checkpoint in case it was changed by axes
+ modules.sd_models.reload_model_weights(shared.sd_model)
+
return processed
From 140f89315380dbcc541f6e18e3d355a06ea3e2f0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 17 Sep 2022 14:55:40 +0300
Subject: [PATCH 15/20] process all values for x/y plot right away to error out
if any are bad before any processing begins
---
scripts/xy_grid.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 680dd702..6a157722 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -90,11 +90,11 @@ axis_options = [
]
-def draw_xy_grid(p, xs, ys, x_label, y_label, cell, draw_legend):
+def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
res = []
- ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
- hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
+ ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
+ hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
first_pocessed = None
@@ -218,8 +218,8 @@ class Script(scripts.Script):
p,
xs=xs,
ys=ys,
- x_label=lambda x: x_opt.format_value(p, x_opt, x),
- y_label=lambda y: y_opt.format_value(p, y_opt, y),
+ x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
+ y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend
)
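The payoff of precomputing the labels, shown on a toy case (format_value and the values here are illustrative): with lambdas, the failure only surfaces when the grid loop reaches the bad cell, while the list comprehensions evaluate every value before any image is generated.

    def format_value(x):
        assert x in ("a", "b"), f"bad value {x}"
        return str(x)

    xs = ["a", "b", "oops"]

    try:
        x_labels = [format_value(x) for x in xs]   # eager: raises before any rendering
    except AssertionError as e:
        print("rejected before processing:", e)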
From ba295b32688629cf575d67f1750a7838b008858b Mon Sep 17 00:00:00 2001
From: Tony Beeman
Date: Sat, 17 Sep 2022 01:34:33 -0700
Subject: [PATCH 16/20] * Fix process_images where the number of images is not
a multiple of (batch_size * n_iter), which would cause us to throw an
exception. * Add a textbox option to Prompts from file (ease of use and it
makes it much easier to use on a mobile device) * Fix the fact that Prompts
from file was sometimes passing an empty batch.
---
modules/processing.py | 9 ++++++++-
scripts/prompts_from_file.py | 34 ++++++++++++++++++++++++----------
2 files changed, 32 insertions(+), 11 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 3a4ff224..6a99d383 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -188,7 +188,11 @@ def fix_seed(p):
def process_images(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
- assert p.prompt is not None
+ if type(p.prompt) == list:
+ assert(len(p.prompt) > 0)
+ else:
+ assert p.prompt is not None
+
devices.torch_gc()
fix_seed(p)
@@ -265,6 +269,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+ if (len(prompts) == 0):
+ break
+
#uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
#c = p.sd_model.get_learned_conditioning(prompts)
uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index d9b01c81..513d9a1c 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -13,28 +13,42 @@ from modules.shared import opts, cmd_opts, state
class Script(scripts.Script):
def title(self):
- return "Prompts from file"
+ return "Prompts from file or textbox"
def ui(self, is_img2img):
+ # This checkbox would look nicer as two tabs, but there are two problems:
+ # 1) There is a bug in Gradio 3.3 that prevents visibility from working on Tabs
+ # 2) Even with Gradio 3.3.1, returning a control (like Tabs) that can't be used as input
+        # causes an AttributeError: 'Tabs' object has no attribute 'preprocess' assert,
+ # due to the way Script assumes all controls returned can be used as inputs.
+ # Therefore, there's no good way to use grouping components right now,
+ # so we will use a checkbox! :)
+ checkbox_txt = gr.Checkbox(label="Show Textbox", value=False)
file = gr.File(label="File with inputs", type='bytes')
+ prompt_txt = gr.TextArea(label="Prompts")
+ checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt])
+ return [checkbox_txt, file, prompt_txt]
- return [file]
-
- def run(self, p, data: bytes):
- lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")]
+ def run(self, p, checkbox_txt, data: bytes, prompt_txt: str):
+ if (checkbox_txt):
+ lines = [x.strip() for x in prompt_txt.splitlines()]
+ else:
+ lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")]
lines = [x for x in lines if len(x) > 0]
- batch_count = math.ceil(len(lines) / p.batch_size)
- print(f"Will process {len(lines) * p.n_iter} images in {batch_count * p.n_iter} batches.")
+ img_count = len(lines) * p.n_iter
+ batch_count = math.ceil(img_count / p.batch_size)
+ loop_count = math.ceil(batch_count / p.n_iter)
+ print(f"Will process {img_count} images in {batch_count} batches.")
p.do_not_save_grid = True
state.job_count = batch_count
images = []
- for batch_no in range(batch_count):
- state.job = f"{batch_no + 1} out of {batch_count * p.n_iter}"
- p.prompt = lines[batch_no*p.batch_size:(batch_no+1)*p.batch_size] * p.n_iter
+ for loop_no in range(loop_count):
+ state.job = f"{loop_no + 1} out of {loop_count}"
+ p.prompt = lines[loop_no*p.batch_size:(loop_no+1)*p.batch_size] * p.n_iter
proc = process_images(p)
images += proc.images
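The reworked loop math, traced on a concrete case (values illustrative): 5 prompts with batch_size=2 and n_iter=2 give 10 images in 5 batches, processed in 3 passes, and the shorter final slice is exactly the shape of input that the new len(prompts) == 0 guard in process_images() protects against when a slice comes up empty.

    import math

    lines = ["p1", "p2", "p3", "p4", "p5"]
    batch_size, n_iter = 2, 2

    img_count = len(lines) * n_iter                    # 10 images
    batch_count = math.ceil(img_count / batch_size)    # 5 batches
    loop_count = math.ceil(batch_count / n_iter)       # 3 passes through process_images()

    for loop_no in range(loop_count):
        prompts = lines[loop_no * batch_size:(loop_no + 1) * batch_size] * n_iter
        print(loop_no, prompts)   # last pass gets ["p5", "p5"]: shorter, but not empty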
From 65be5312dc2b73e659299ea052d5484e6ae6c0ea Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 17 Sep 2022 01:00:45 +0100
Subject: [PATCH 17/20] Add modal css classes
---
style.css | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/style.css b/style.css
index 752d2cf4..2de83a94 100644
--- a/style.css
+++ b/style.css
@@ -196,3 +196,40 @@ input[type="range"]{
border-radius: 8px;
}
+#lightboxModal{
+ display: none;
+ position: fixed;
+ z-index: 900;
+ padding-top: 100px;
+ left: 0;
+ top: 0;
+ width: 100%;
+ height: 100%;
+ overflow: auto;
+ background-color: black;
+}
+
+.modalClose {
+ color: white;
+ position: absolute;
+ top: 10px;
+ right: 25px;
+ font-size: 35px;
+ font-weight: bold;
+}
+
+.modalClose:hover,
+.modalClose:focus {
+ color: #999;
+ text-decoration: none;
+ cursor: pointer;
+}
+
+#modalImage {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+ margin-top: auto;
+ width: auto;
+}
+
From 1a513370774ccb4cd9562f1b40048adc2ab7c896 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 17 Sep 2022 01:03:03 +0100
Subject: [PATCH 18/20] Add modal creation and functions
---
script.js | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/script.js b/script.js
index e63e0695..7c27da74 100644
--- a/script.js
+++ b/script.js
@@ -76,6 +76,35 @@ function gradioApp(){
global_progressbar = null
+function closeModal() {
+ gradioApp().getElementById("lightboxModal").style.display = "none";
+}
+
+function showModal(elem) {
+ gradioApp().getElementById("modalImage").src = elem.src
+ gradioApp().getElementById("lightboxModal").style.display = "block";
+}
+
+function showGalleryImage(){
+ setTimeout(function() {
+ fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
+
+ if(fullImg_preview != null){
+ fullImg_preview.forEach(function function_name(e) {
+ if(e && e.parentElement.tagName == 'DIV'){
+ e.style.cursor='pointer'
+
+ elemfunc = function(elem){
+ elem.onclick = function(){showModal(elem)};
+ }
+ elemfunc(e)
+ }
+ });
+ }
+
+ }, 100);
+}
+
function addTitles(root){
root.querySelectorAll('span, button, select').forEach(function(span){
tooltip = titles[span.textContent];
@@ -117,8 +146,18 @@ function addTitles(root){
img2img_preview.style.width = img2img_gallery.clientWidth + "px"
img2img_preview.style.height = img2img_gallery.clientHeight + "px"
}
+
+ fullImg_preview = gradioApp().querySelectorAll('img.w-full')
+ if(fullImg_preview != null){
+ fullImg_preview.forEach(function function_name(e) {
+ if(e && e.parentElement.tagName == 'BUTTON'){
+ e.onclick = showGalleryImage;
+ }
+ });
+ }
+
window.setTimeout(requestProgress, 500)
});
mutationObserver.observe( progressbar, { childList:true, subtree:true })
@@ -131,6 +170,27 @@ document.addEventListener("DOMContentLoaded", function() {
addTitles(gradioApp());
});
mutationObserver.observe( gradioApp(), { childList:true, subtree:true })
+
+ const modalFragment = document.createDocumentFragment();
+ const modal = document.createElement('div')
+ modal.onclick = closeModal;
+
+ const modalClose = document.createElement('span')
+ modalClose.className = 'modalClose cursor';
+ modalClose.innerHTML = '×'
+ modalClose.onclick = closeModal;
+ modal.id = "lightboxModal";
+ modal.appendChild(modalClose)
+
+ const modalImage = document.createElement('img')
+ modalImage.id = 'modalImage';
+ modalImage.onclick = closeModal;
+ modal.appendChild(modalImage)
+
+ gradioApp().getRootNode().appendChild(modal)
+
+ document.body.appendChild(modalFragment);
+
});
function selected_gallery_index(){
From a66d857345c090674430c21fba1256c76d769635 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 17 Sep 2022 01:13:28 +0100
Subject: [PATCH 19/20] make background semi-transparent not black;
---
style.css | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/style.css b/style.css
index 2de83a94..2bdd1e0e 100644
--- a/style.css
+++ b/style.css
@@ -206,7 +206,7 @@ input[type="range"]{
width: 100%;
height: 100%;
overflow: auto;
- background-color: black;
+ background-color: rgba(20, 20, 20, 0.95);
}
.modalClose {
From 1ef79f926e6314b3ef9308b12ff7ad482afd790a Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 17 Sep 2022 03:26:47 +0100
Subject: [PATCH 20/20] generalise to work on all non-masked images on all tabs
---
script.js | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/script.js b/script.js
index 7c27da74..a016eb4e 100644
--- a/script.js
+++ b/script.js
@@ -105,6 +105,12 @@ function showGalleryImage(){
}, 100);
}
+function galleryImageHandler(e){
+ if(e && e.parentElement.tagName == 'BUTTON'){
+ e.onclick = showGalleryImage;
+ }
+}
+
function addTitles(root){
root.querySelectorAll('span, button, select').forEach(function(span){
tooltip = titles[span.textContent];
@@ -147,22 +153,17 @@ function addTitles(root){
img2img_preview.style.height = img2img_gallery.clientHeight + "px"
}
- fullImg_preview = gradioApp().querySelectorAll('img.w-full')
-
- if(fullImg_preview != null){
-
- fullImg_preview.forEach(function function_name(e) {
- if(e && e.parentElement.tagName == 'BUTTON'){
- e.onclick = showGalleryImage;
- }
- });
- }
-
window.setTimeout(requestProgress, 500)
});
mutationObserver.observe( progressbar, { childList:true, subtree:true })
}
+
+ fullImg_preview = gradioApp().querySelectorAll('img.w-full')
+ if(fullImg_preview != null){
+ fullImg_preview.forEach(galleryImageHandler);
+ }
+
}
document.addEventListener("DOMContentLoaded", function() {