an option to do exactly the specified number of steps in img2img

AUTOMATIC 2022-09-16 13:38:02 +03:00
parent be0f82df12
commit e49b1c5d73
2 changed files with 20 additions and 7 deletions

modules/sd_samplers.py

@@ -38,6 +38,17 @@ samplers = [
 samplers_for_img2img = [x for x in samplers if x.name != 'PLMS']
 
+
+def setup_img2img_steps(p):
+    if opts.img2img_fix_steps:
+        steps = int(p.steps / min(p.denoising_strength, 0.999))
+        t_enc = p.steps - 1
+    else:
+        steps = p.steps
+        t_enc = int(min(p.denoising_strength, 0.999) * steps)
+
+    return steps, t_enc
+
 
 def sample_to_image(samples):
     x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
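
As an illustration only (not code from this commit), the arithmetic of setup_img2img_steps for a slider value of 20 steps and a denoising strength of 0.5 works out as follows; the helper name and plain-value parameters below are stand-ins for p and opts, made up for the sketch:

# Standalone sketch of the arithmetic above; plain values stand in for p and opts.
def setup_img2img_steps_sketch(slider_steps, denoising_strength, fix_steps):
    if fix_steps:
        # scale the schedule up so that roughly slider_steps denoising steps actually run
        steps = int(slider_steps / min(denoising_strength, 0.999))
        t_enc = slider_steps - 1
    else:
        steps = slider_steps
        t_enc = int(min(denoising_strength, 0.999) * steps)
    return steps, t_enc

print(setup_img2img_steps_sketch(20, 0.5, fix_steps=False))  # (20, 10): only ~10 denoising steps run
print(setup_img2img_steps_sketch(20, 0.5, fix_steps=True))   # (40, 19): ~20 denoising steps run, matching the slider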
@@ -105,13 +116,13 @@ class VanillaStableDiffusionSampler:
         return res
 
     def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning):
-        t_enc = int(min(p.denoising_strength, 0.999) * p.steps)
+        steps, t_enc = setup_img2img_steps(p)
 
         # existing code fails with certain step counts, like 9
         try:
-            self.sampler.make_schedule(ddim_num_steps=p.steps, verbose=False)
+            self.sampler.make_schedule(ddim_num_steps=steps, verbose=False)
         except Exception:
-            self.sampler.make_schedule(ddim_num_steps=p.steps+1, verbose=False)
+            self.sampler.make_schedule(ddim_num_steps=steps+1, verbose=False)
 
         x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
@@ -230,14 +241,15 @@ class KDiffusionSampler:
         return res
 
     def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning):
-        t_enc = int(min(p.denoising_strength, 0.999) * p.steps)
-        sigmas = self.model_wrap.get_sigmas(p.steps)
-        noise = noise * sigmas[p.steps - t_enc - 1]
+        steps, t_enc = setup_img2img_steps(p)
+
+        sigmas = self.model_wrap.get_sigmas(steps)
+        noise = noise * sigmas[steps - t_enc - 1]
 
         xi = x + noise
 
-        sigma_sched = sigmas[p.steps - t_enc - 1:]
+        sigma_sched = sigmas[steps - t_enc - 1:]
 
         self.model_wrap_cfg.mask = p.mask
         self.model_wrap_cfg.nmask = p.nmask
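
A rough arithmetic check of the slicing above (an illustration, not part of the commit), reusing the example numbers from the sketch after the first hunk:

# With img2img_fix_steps enabled and the slider at 20: steps=40, t_enc=19.
steps, t_enc = 40, 19
start = steps - t_enc - 1      # index 20: the first half of the schedule is skipped
print(steps - start)           # 20 -> roughly the slider's 20 denoising steps get sampled

With the option disabled (steps=20, t_enc=10), the same slice leaves only about 10 steps, which is the old behaviour the new option works around.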

modules/shared.py

@@ -125,6 +125,7 @@ class Options:
     "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
     "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
     "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
+    "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the number of steps the slider specifies (normally you'd do less with less denoising)."),
     "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
     "font": OptionInfo("", "Font for image grids that have text"),
     "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),