From dc6779f6f384a83ee2b96e60d841116bd4a878eb Mon Sep 17 00:00:00 2001 From: Dynamic Date: Wed, 26 Oct 2022 19:52:34 +0900 Subject: [PATCH 01/37] Update new strings Translated new strings in PFF UI --- localizations/ko_KR.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json index ff70f1ea..803b7fb9 100644 --- a/localizations/ko_KR.json +++ b/localizations/ko_KR.json @@ -218,6 +218,7 @@ "Interrogate: use artists from artists.csv": "분석 : artists.csv의 작가들 사용하기", "Interrupt": "중단", "Is negative text": "네거티브 텍스트일시 체크", + "Iterate seed every line": "줄마다 시드 반복하기", "Just resize": "리사이징", "Keep -1 for seeds": "시드값 -1로 유지", "keep whatever was there originally": "이미지 원본 유지", @@ -234,6 +235,7 @@ "Leave blank to save images to the default path.": "기존 저장 경로에 이미지들을 저장하려면 비워두세요.", "left": "왼쪽", "linear": "linear", + "List of prompt inputs": "프롬프트 입력 리스트", "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "설정 탭이 아니라 상단의 빠른 설정 바에 위치시킬 설정 이름을 쉼표로 분리해서 입력하십시오. 설정 이름은 modules/shared.py에서 찾을 수 있습니다. 재시작이 필요합니다.", "LMS": "LMS", "LMS Karras": "LMS Karras", @@ -464,6 +466,7 @@ "uniform": "uniform", "up": "위쪽", "Upload mask": "마스크 업로드하기", + "Upload prompt inputs": "입력할 프롬프트를 업로드하십시오", "Upscale latent space image when doing hires. fix": "고해상도 보정 사용시 잠재 공간 이미지 업스케일하기", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "마스크된 부분을 설정된 해상도로 업스케일하고, 인페인팅을 진행한 뒤, 다시 다운스케일 후 원본 이미지에 붙여넣습니다.", "Upscaler": "업스케일러", From fddb4883f4a408b3464076465e1b0949ebe0fc30 Mon Sep 17 00:00:00 2001 From: evshiron Date: Wed, 26 Oct 2022 22:33:45 +0800 Subject: [PATCH 02/37] prototype progress api --- modules/api/api.py | 89 ++++++++++++++++++++++++++++++++++++++-------- modules/shared.py | 13 +++++++ 2 files changed, 88 insertions(+), 14 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 6e9d6097..c038f674 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,8 +1,11 @@ +import time + from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers from modules.extras import run_pnginfo import modules.shared as shared +from modules import devices import uvicorn from fastapi import Body, APIRouter, HTTPException from fastapi.responses import JSONResponse @@ -25,6 +28,37 @@ class ImageToImageResponse(BaseModel): parameters: Json info: Json +class ProgressResponse(BaseModel): + progress: float + eta_relative: float + state: Json + +# copy from wrap_gradio_gpu_call of webui.py +# because queue lock will be acquired in api handlers +# and time start needs to be set +# the function has been modified into two parts + +def before_gpu_call(): + devices.torch_gc() + + shared.state.sampling_step = 0 + shared.state.job_count = -1 + shared.state.job_no = 0 + shared.state.job_timestamp = shared.state.get_job_timestamp() + shared.state.current_latent = None + shared.state.current_image = None + shared.state.current_image_sampling_step = 0 + shared.state.skipped = False + shared.state.interrupted = False + shared.state.textinfo = None + shared.state.time_start = time.time() + + +def after_gpu_call(): + shared.state.job = "" + shared.state.job_count = 0 + + 
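+    # free any cached GPU memory now that the job has finished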
devices.torch_gc() class Api: def __init__(self, app, queue_lock): @@ -33,6 +67,7 @@ class Api: self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"]) def __base64_to_image(self, base64_string): # if has a comma, deal with prefix @@ -44,12 +79,12 @@ class Api: def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) - + if sampler_index is None: - raise HTTPException(status_code=404, detail="Sampler not found") - + raise HTTPException(status_code=404, detail="Sampler not found") + populate = txt2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, + "sd_model": shared.sd_model, "sampler_index": sampler_index[0], "do_not_save_samples": True, "do_not_save_grid": True @@ -57,9 +92,11 @@ class Api: ) p = StableDiffusionProcessingTxt2Img(**vars(populate)) # Override object param + before_gpu_call() with self.queue_lock: processed = process_images(p) - + after_gpu_call() + b64images = [] for i in processed.images: buffer = io.BytesIO() @@ -67,30 +104,30 @@ class Api: b64images.append(base64.b64encode(buffer.getvalue())) return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.js()) - - + + def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): sampler_index = sampler_to_index(img2imgreq.sampler_index) - + if sampler_index is None: - raise HTTPException(status_code=404, detail="Sampler not found") + raise HTTPException(status_code=404, detail="Sampler not found") init_images = img2imgreq.init_images if init_images is None: - raise HTTPException(status_code=404, detail="Init image not found") + raise HTTPException(status_code=404, detail="Init image not found") mask = img2imgreq.mask if mask: mask = self.__base64_to_image(mask) - + populate = img2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, + "sd_model": shared.sd_model, "sampler_index": sampler_index[0], "do_not_save_samples": True, - "do_not_save_grid": True, + "do_not_save_grid": True, "mask": mask } ) @@ -103,9 +140,11 @@ class Api: p.init_images = imgs # Override object param + before_gpu_call() with self.queue_lock: processed = process_images(p) - + after_gpu_call() + b64images = [] for i in processed.images: buffer = io.BytesIO() @@ -118,6 +157,28 @@ class Api: return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js()) + def progressapi(self): + # copy from check_progress_call of ui.py + + if shared.state.job_count == 0: + return ProgressResponse(progress=0, eta_relative=0, state=shared.state.js()) + + # avoid dividing zero + progress = 0.01 + + if shared.state.job_count > 0: + progress += shared.state.job_no / shared.state.job_count + if shared.state.sampling_steps > 0: + progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps + + time_since_start = time.time() - shared.state.time_start + eta = (time_since_start/progress) + eta_relative = eta-time_since_start + + progress = min(progress, 1) + + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.js()) + def extrasapi(self): raise NotImplementedError diff --git a/modules/shared.py b/modules/shared.py index 1a9b8289..00f61898 100644 --- a/modules/shared.py +++ 
b/modules/shared.py @@ -146,6 +146,19 @@ class State: def get_job_timestamp(self): return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp? + def js(self): + obj = { + "skipped": self.skipped, + "interrupted": self.skipped, + "job": self.job, + "job_count": self.job_count, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return json.dumps(obj) + state = State() From 6e10078b2be8e690b5f85619b335e1dcd4fa8a3f Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 22:21:56 +0900 Subject: [PATCH 03/37] Attention editing with hotkeys should work with KR now Added the word "Prompt" in the placeholders to pass the check from edit-attention.js --- localizations/webui.bat | 62 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 localizations/webui.bat diff --git a/localizations/webui.bat b/localizations/webui.bat new file mode 100644 index 00000000..a38a28bb --- /dev/null +++ b/localizations/webui.bat @@ -0,0 +1,62 @@ +@echo off + +if not defined PYTHON (set PYTHON=python) +if not defined VENV_DIR (set VENV_DIR=venv) + +set ERROR_REPORTING=FALSE + +mkdir tmp 2>NUL + +%PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt +if %ERRORLEVEL% == 0 goto :start_venv +echo Couldn't launch python +goto :show_stdout_stderr + +:start_venv +if [%VENV_DIR%] == [-] goto :skip_venv + +dir %VENV_DIR%\Scripts\Python.exe >tmp/stdout.txt 2>tmp/stderr.txt +if %ERRORLEVEL% == 0 goto :activate_venv + +for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" +echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% +%PYTHON_FULLNAME% -m venv %VENV_DIR% >tmp/stdout.txt 2>tmp/stderr.txt +if %ERRORLEVEL% == 0 goto :activate_venv +echo Unable to create venv in directory %VENV_DIR% +goto :show_stdout_stderr + +:activate_venv +set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" +echo venv %PYTHON% +goto :launch + +:skip_venv + +:launch +%PYTHON% launch.py %* +pause +exit /b + +:show_stdout_stderr + +echo. +echo exit code: %errorlevel% + +for /f %%i in ("tmp\stdout.txt") do set size=%%~zi +if %size% equ 0 goto :show_stderr +echo. +echo stdout: +type tmp\stdout.txt + +:show_stderr +for /f %%i in ("tmp\stderr.txt") do set size=%%~zi +if %size% equ 0 goto :show_stderr +echo. +echo stderr: +type tmp\stderr.txt + +:endofscript + +echo. +echo Launch unsuccessful. Exiting. 
+pause From 9358a421cfa2ccd866825d8022f93a12268e7dc3 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 22:24:05 +0900 Subject: [PATCH 04/37] Remove files that shouldn't be here --- localizations/webui.bat | 62 ----------------------------------------- 1 file changed, 62 deletions(-) delete mode 100644 localizations/webui.bat diff --git a/localizations/webui.bat b/localizations/webui.bat deleted file mode 100644 index a38a28bb..00000000 --- a/localizations/webui.bat +++ /dev/null @@ -1,62 +0,0 @@ -@echo off - -if not defined PYTHON (set PYTHON=python) -if not defined VENV_DIR (set VENV_DIR=venv) - -set ERROR_REPORTING=FALSE - -mkdir tmp 2>NUL - -%PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :start_venv -echo Couldn't launch python -goto :show_stdout_stderr - -:start_venv -if [%VENV_DIR%] == [-] goto :skip_venv - -dir %VENV_DIR%\Scripts\Python.exe >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv - -for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" -echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% -%PYTHON_FULLNAME% -m venv %VENV_DIR% >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv -echo Unable to create venv in directory %VENV_DIR% -goto :show_stdout_stderr - -:activate_venv -set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" -echo venv %PYTHON% -goto :launch - -:skip_venv - -:launch -%PYTHON% launch.py %* -pause -exit /b - -:show_stdout_stderr - -echo. -echo exit code: %errorlevel% - -for /f %%i in ("tmp\stdout.txt") do set size=%%~zi -if %size% equ 0 goto :show_stderr -echo. -echo stdout: -type tmp\stdout.txt - -:show_stderr -for /f %%i in ("tmp\stderr.txt") do set size=%%~zi -if %size% equ 0 goto :show_stderr -echo. -echo stderr: -type tmp\stderr.txt - -:endofscript - -echo. -echo Launch unsuccessful. Exiting. -pause From a668444110743cd163474ec563b0e69025dea3d2 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 22:24:29 +0900 Subject: [PATCH 05/37] Attention editing hotkey fix part 2 --- localizations/ko_KR.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json index 803b7fb9..3d631066 100644 --- a/localizations/ko_KR.json +++ b/localizations/ko_KR.json @@ -263,7 +263,7 @@ "Multiplier (M) - set to 0 to get model A": "배율 (M) - 0으로 적용하면 모델 A를 얻게 됩니다", "Name": "이름", "Negative prompt": "네거티브 프롬프트", - "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "네거티브 프롬프트 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", + "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "네거티브 프롬프트(Prompt) 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", "Next batch": "다음 묶음", "Next Page": "다음 페이지", "None": "없음", @@ -321,7 +321,7 @@ "Process images in a directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리의 이미지들을 처리합니다.", "Produce an image that can be tiled.": "타일링 가능한 이미지를 생성합니다.", "Prompt": "프롬프트", - "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "프롬프트 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", + "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "프롬프트(Prompt) 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", "Prompt matrix": "프롬프트 매트릭스", "Prompt order": "프롬프트 순서", "Prompt S/R": "프롬프트 스타일 변경", From 9e465c8aa5616df4c6723bee007ffd3910404f12 Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 27 Oct 2022 23:03:34 -0700 Subject: [PATCH 06/37] Add strength to textinfo. 
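
Record the current hypernetwork strength in the generation parameters
text so that it can later be read back from PNG info. An illustrative
fragment of the resulting infotext (the hypernetwork name here is
hypothetical):

    Hypernet: my_hypernet, Hypernetwork strength: 0.5

The value is taken from opts.sd_hypernetwork_strength and is only
written when a hypernetwork is loaded.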
--- modules/processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/processing.py b/modules/processing.py index 4efba946..93066522 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -329,6 +329,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), + "Hypernetwork strength": (None if shared.loaded_hypernetwork is None else shared.opts.sd_hypernetwork_strength), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), From d4a069a23cb19104b4e58a33d0d1670fadaefb7a Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 27 Oct 2022 23:16:27 -0700 Subject: [PATCH 07/37] Read hypernet strength from PNG info. --- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index 0a63e357..62a2f4f3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1812,6 +1812,7 @@ Requested path was: {f} settings_map = { 'sd_hypernetwork': 'Hypernet', + 'sd_hypernetwork_strength': 'Hypernetwork strength', 'CLIP_stop_at_last_layers': 'Clip skip', 'sd_model_checkpoint': 'Model hash', } From c0677b33161f04c3ed1a7a78f4c7288fb95787b7 Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 27 Oct 2022 23:31:45 -0700 Subject: [PATCH 08/37] Explicitly state when Hypernet is none. --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 93066522..74a0cd64 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -328,7 +328,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), - "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), + "Hypernet": ("None" if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), "Hypernetwork strength": (None if shared.loaded_hypernetwork is None else shared.opts.sd_hypernetwork_strength), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), From db5a354c489bfd1c95e0bbf9af12ab8b5d6fe170 Mon Sep 17 00:00:00 2001 From: timntorres Date: Fri, 28 Oct 2022 01:41:57 -0700 Subject: [PATCH 09/37] Always ignore "None.pt" in the hypernet directory. 
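
The UI uses "None" as the no-hypernetwork choice in the dropdown, so an
actual file named None.pt would shadow it. Skip such a file when
building the hypernetwork list and refuse to load it by name; the guard
added below is simply:

    if name != "None":
        res[name] = filename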
--- modules/hypernetworks/hypernetwork.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 8113b35b..cd920df5 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -208,13 +208,16 @@ def list_hypernetworks(path): res = {} for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True): name = os.path.splitext(os.path.basename(filename))[0] - res[name] = filename + # Prevent a hypothetical "None.pt" from being listed. + if name != "None": + res[name] = filename return res def load_hypernetwork(filename): path = shared.hypernetworks.get(filename, None) - if path is not None: + # Prevent any file named "None.pt" from being loaded. + if path is not None and filename != "None": print(f"Loading hypernetwork {filename}") try: shared.loaded_hypernetwork = Hypernetwork() From 2c4d20388425a5e40b93eef3722e42e8d375fbb4 Mon Sep 17 00:00:00 2001 From: timntorres Date: Sat, 29 Oct 2022 00:36:51 -0700 Subject: [PATCH 10/37] Revert "Explicitly state when Hypernet is none." --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 377c0978..04fdda7c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -395,7 +395,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), - "Hypernet": ("None" if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), + "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), "Hypernetwork strength": (None if shared.loaded_hypernetwork is None else shared.opts.sd_hypernetwork_strength), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), From a5f3adbdd7d9b8245f7782216ac48913660e6bb5 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 15:37:24 +0700 Subject: [PATCH 11/37] Allow trailing comma in learning rate --- modules/textual_inversion/learn_schedule.py | 33 +++++++++++++-------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index 3a736065..76e611b6 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -11,23 +11,30 @@ class LearnScheduleIterator: self.rates = [] self.it = 0 self.maxit = 0 - for i, pair in enumerate(pairs): - tmp = pair.split(':') - if len(tmp) == 2: - step = int(tmp[1]) - if step > cur_step: - self.rates.append((float(tmp[0]), min(step, max_steps))) - self.maxit += 1 - if step > max_steps: + try: + for i, pair in enumerate(pairs): + if not pair.strip(): + continue + tmp = pair.split(':') + if len(tmp) == 2: + step = int(tmp[1]) + if step > cur_step: + self.rates.append((float(tmp[0]), min(step, max_steps))) + self.maxit += 1 + if step > max_steps: + return + elif step == -1: + self.rates.append((float(tmp[0]), max_steps)) + self.maxit += 1 return - elif step == -1: + else: 
self.rates.append((float(tmp[0]), max_steps)) self.maxit += 1 return - else: - self.rates.append((float(tmp[0]), max_steps)) - self.maxit += 1 - return + assert self.rates + except (ValueError, AssertionError): + raise Exception("Invalid learning rate schedule") + def __iter__(self): return self From ef4c94e1cfe66299227aa95a28c2380d21cb1600 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 15:42:51 +0700 Subject: [PATCH 12/37] Improve lr schedule error message --- modules/textual_inversion/learn_schedule.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index 76e611b6..dd0c0ad1 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -4,7 +4,7 @@ import tqdm class LearnScheduleIterator: def __init__(self, learn_rate, max_steps, cur_step=0): """ - specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, 1e-5:10000 until 10000 + specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000 """ pairs = learn_rate.split(',') @@ -33,7 +33,7 @@ class LearnScheduleIterator: return assert self.rates except (ValueError, AssertionError): - raise Exception("Invalid learning rate schedule") + raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') def __iter__(self): From ab27c111d06ec920791c73eea25ad9a61671852e Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 18:09:17 +0700 Subject: [PATCH 13/37] Add input validations before loading dataset for training --- modules/hypernetworks/hypernetwork.py | 38 ++++++++------ .../textual_inversion/textual_inversion.py | 50 ++++++++++++++----- 2 files changed, 59 insertions(+), 29 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 2e84583b..38f35c58 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -332,7 +332,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
from modules import images - assert hypernetwork_name, 'hypernetwork not selected' + save_hypernetwork_every = save_hypernetwork_every or 0 + create_image_every = create_image_every or 0 + textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork") path = shared.hypernetworks.get(hypernetwork_name, None) shared.loaded_hypernetwork = Hypernetwork() @@ -358,39 +360,43 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log else: images_dir = None + hypernetwork = shared.loaded_hypernetwork + + ititial_step = hypernetwork.step or 0 + if ititial_step > steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return hypernetwork, filename + + scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + + # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." with torch.autocast("cuda"): ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size) + if unload: shared.sd_model.cond_stage_model.to(devices.cpu) shared.sd_model.first_stage_model.to(devices.cpu) - hypernetwork = shared.loaded_hypernetwork - weights = hypernetwork.weights() - for weight in weights: - weight.requires_grad = True - size = len(ds.indexes) loss_dict = defaultdict(lambda : deque(maxlen = 1024)) losses = torch.zeros((size,)) previous_mean_losses = [0] previous_mean_loss = 0 print("Mean loss of {} elements".format(size)) - - last_saved_file = "" - last_saved_image = "" - forced_filename = "" - - ititial_step = hypernetwork.step or 0 - if ititial_step > steps: - return hypernetwork, filename - - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + + weights = hypernetwork.weights() + for weight in weights: + weight.requires_grad = True # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... 
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) steps_without_grad = 0 + last_saved_file = "" + last_saved_image = "" + forced_filename = "" + pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: hypernetwork.step = i + ititial_step diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 17dfb223..44f06443 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -204,9 +204,30 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) +def validate_train_inputs(model_name, learn_rate, batch_size, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"): + assert model_name, f"{name} not selected" + assert learn_rate, "Learning rate is empty or 0" + assert isinstance(batch_size, int), "Batch size must be integer" + assert batch_size > 0, "Batch size must be positive" + assert data_root, "Dataset directory is empty" + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" + assert template_file, "Prompt template file is empty" + assert os.path.isfile(template_file), "Prompt template file doesn't exist" + assert steps, "Max steps is empty or 0" + assert isinstance(steps, int), "Max steps must be integer" + assert steps > 0 , "Max steps must be positive" + assert isinstance(save_model_every, int), "Save {name} must be integer" + assert save_model_every >= 0 , "Save {name} must be positive or 0" + assert isinstance(create_image_every, int), "Create image must be integer" + assert create_image_every >= 0 , "Create image must be positive or 0" + if save_model_every or create_image_every: + assert log_directory, "Log directory is empty" def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - assert embedding_name, 'embedding not selected' + save_embedding_every = save_embedding_every or 0 + create_image_every = create_image_every or 0 + validate_train_inputs(embedding_name, learn_rate, batch_size, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding") shared.state.textinfo = "Initializing textual inversion training..." shared.state.job_count = steps @@ -232,17 +253,27 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc os.makedirs(images_embeds_dir, exist_ok=True) else: images_embeds_dir = None - - cond_model = shared.sd_model.cond_stage_model - shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
- with torch.autocast("cuda"): - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size) + cond_model = shared.sd_model.cond_stage_model hijack = sd_hijack.model_hijack embedding = hijack.embedding_db.word_embeddings[embedding_name] + + ititial_step = embedding.step or 0 + if ititial_step > steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return embedding, filename + + scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + + # dataset loading may take a while, so input validations and early returns should be done before this + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + with torch.autocast("cuda"): + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size) + embedding.vec.requires_grad = True + optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) losses = torch.zeros((32,)) @@ -251,13 +282,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc forced_filename = "" embedding_yet_to_be_embedded = False - ititial_step = embedding.step or 0 - if ititial_step > steps: - return embedding, filename - - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) - pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) for i, entries in pbar: embedding.step = i + ititial_step From 3ce2bfdf95bd5f26d0f6e250e67338ada91980d1 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 19:43:21 +0700 Subject: [PATCH 14/37] Add cleanup after training --- modules/hypernetworks/hypernetwork.py | 187 +++++++++--------- .../textual_inversion/textual_inversion.py | 163 +++++++-------- 2 files changed, 182 insertions(+), 168 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38f35c58..170d5ea4 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -398,110 +398,112 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log forced_filename = "" pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) - for i, entries in pbar: - hypernetwork.step = i + ititial_step - if len(loss_dict) > 0: - previous_mean_losses = [i[-1] for i in loss_dict.values()] - previous_mean_loss = mean(previous_mean_losses) - - scheduler.apply(optimizer, hypernetwork.step) - if scheduler.finished: - break - if shared.state.interrupted: - break - - with torch.autocast("cuda"): - c = stack_conds([entry.cond for entry in entries]).to(devices.device) - # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x - del c - - losses[hypernetwork.step % losses.shape[0]] = loss.item() - for entry in entries: - loss_dict[entry.filename].append(loss.item()) + try: + for i, entries in pbar: + hypernetwork.step = i + ititial_step + if len(loss_dict) > 0: + 
previous_mean_losses = [i[-1] for i in loss_dict.values()] + previous_mean_loss = mean(previous_mean_losses) - optimizer.zero_grad() - weights[0].grad = None - loss.backward() + scheduler.apply(optimizer, hypernetwork.step) + if scheduler.finished: + break - if weights[0].grad is None: - steps_without_grad += 1 + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + c = stack_conds([entry.cond for entry in entries]).to(devices.device) + # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x + del c + + losses[hypernetwork.step % losses.shape[0]] = loss.item() + for entry in entries: + loss_dict[entry.filename].append(loss.item()) + + optimizer.zero_grad() + weights[0].grad = None + loss.backward() + + if weights[0].grad is None: + steps_without_grad += 1 + else: + steps_without_grad = 0 + assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' + + optimizer.step() + + steps_done = hypernetwork.step + 1 + + if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): + raise RuntimeError("Loss diverged.") + + if len(previous_mean_losses) > 1: + std = stdev(previous_mean_losses) else: - steps_without_grad = 0 - assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' + std = 0 + dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" + pbar.set_description(dataset_loss_info) - optimizer.step() + if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: + # Before saving, change name to match current checkpoint. + hypernetwork.name = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') + hypernetwork.save(last_saved_file) - steps_done = hypernetwork.step + 1 + textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { + "loss": f"{previous_mean_loss:.7f}", + "learn_rate": scheduler.learn_rate + }) - if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): - raise RuntimeError("Loss diverged.") - - if len(previous_mean_losses) > 1: - std = stdev(previous_mean_losses) - else: - std = 0 - dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" - pbar.set_description(dataset_loss_info) + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) - if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: - # Before saving, change name to match current checkpoint. 
- hypernetwork.name = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(last_saved_file) + optimizer.zero_grad() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) - textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { - "loss": f"{previous_mean_loss:.7f}", - "learn_rate": scheduler.learn_rate - }) + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{hypernetwork_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 - optimizer.zero_grad() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) + preview_text = p.prompt - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images)>0 else None - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) - preview_text = p.prompt + if image is not None: + shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images)>0 else None + shared.state.job_no = hypernetwork.step - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) - - if image is not None: - shared.state.current_image = image - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" - - shared.state.job_no = hypernetwork.step - - shared.state.textinfo = f""" + shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
@@ -510,7 +512,14 @@ Last saved hypernetwork: {html.escape(last_saved_file)}
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - + finally: + if weights: + for weight in weights: + weight.requires_grad = False + if unload: + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + report_statistics(loss_dict) checkpoint = sd_models.select_checkpoint() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 44f06443..fd7f0897 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -283,111 +283,113 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = False pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) - for i, entries in pbar: - embedding.step = i + ititial_step - scheduler.apply(optimizer, embedding.step) - if scheduler.finished: - break + try: + for i, entries in pbar: + embedding.step = i + ititial_step - if shared.state.interrupted: - break + scheduler.apply(optimizer, embedding.step) + if scheduler.finished: + break - with torch.autocast("cuda"): - c = cond_model([entry.cond_text for entry in entries]) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x + if shared.state.interrupted: + break - losses[embedding.step % losses.shape[0]] = loss.item() + with torch.autocast("cuda"): + c = cond_model([entry.cond_text for entry in entries]) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x - optimizer.zero_grad() - loss.backward() - optimizer.step() + losses[embedding.step % losses.shape[0]] = loss.item() - steps_done = embedding.step + 1 + optimizer.zero_grad() + loss.backward() + optimizer.step() - epoch_num = embedding.step // len(ds) - epoch_step = embedding.step % len(ds) + steps_done = embedding.step + 1 - pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") + epoch_num = embedding.step // len(ds) + epoch_step = embedding.step % len(ds) - if embedding_dir is not None and steps_done % save_embedding_every == 0: - # Before saving, change name to match current checkpoint. - embedding.name = f'{embedding_name}-{steps_done}' - last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') - embedding.save(last_saved_file) - embedding_yet_to_be_embedded = True + pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") - write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { - "loss": f"{losses.mean():.7f}", - "learn_rate": scheduler.learn_rate - }) + if embedding_dir is not None and steps_done % save_embedding_every == 0: + # Before saving, change name to match current checkpoint. 
+ embedding.name = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') + embedding.save(last_saved_file) + embedding_yet_to_be_embedded = True - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{embedding_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - do_not_reload_embeddings=True, - ) + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate + }) - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 - p.width = training_width - p.height = training_height + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{embedding_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + do_not_reload_embeddings=True, + ) - preview_text = p.prompt + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 + p.width = training_width + p.height = training_height - processed = processing.process_images(p) - image = processed.images[0] + preview_text = p.prompt - shared.state.current_image = image + processed = processing.process_images(p) + image = processed.images[0] - if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: + shared.state.current_image = image - last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') + if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: - info = PngImagePlugin.PngInfo() - data = torch.load(last_saved_file) - info.add_text("sd-ti-embedding", embedding_to_b64(data)) + last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') - title = "<{}>".format(data.get('name', '???')) + info = PngImagePlugin.PngInfo() + data = torch.load(last_saved_file) + info.add_text("sd-ti-embedding", embedding_to_b64(data)) - try: - vectorSize = list(data['string_to_param'].values())[0].shape[0] - except Exception as e: - vectorSize = '?' + title = "<{}>".format(data.get('name', '???')) - checkpoint = sd_models.select_checkpoint() - footer_left = checkpoint.model_name - footer_mid = '[{}]'.format(checkpoint.hash) - footer_right = '{}v {}s'.format(vectorSize, steps_done) + try: + vectorSize = list(data['string_to_param'].values())[0].shape[0] + except Exception as e: + vectorSize = '?' 
- captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) - captioned_image = insert_image_data_embed(captioned_image, data) + checkpoint = sd_models.select_checkpoint() + footer_left = checkpoint.model_name + footer_mid = '[{}]'.format(checkpoint.hash) + footer_right = '{}v {}s'.format(vectorSize, steps_done) - captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) - embedding_yet_to_be_embedded = False + captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) + captioned_image = insert_image_data_embed(captioned_image, data) - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" + captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) + embedding_yet_to_be_embedded = False - shared.state.job_no = embedding.step + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" - shared.state.textinfo = f""" + shared.state.job_no = embedding.step + + shared.state.textinfo = f"""
<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
@@ -396,6 +398,9 @@ Last saved embedding: {html.escape(last_saved_file)}
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" + finally: + if embedding and embedding.vec is not None: + embedding.vec.requires_grad = False checkpoint = sd_models.select_checkpoint() From a27d19de2eff633b6a39f9f4a5c0f2d6abb81bb5 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 19:44:05 +0700 Subject: [PATCH 15/37] Additional assert on dataset --- modules/textual_inversion/dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 8bb00d27..ad726577 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -42,6 +42,8 @@ class PersonalizedBase(Dataset): self.lines = lines assert data_root, 'dataset directory not specified' + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" cond_model = shared.sd_model.cond_stage_model From cbdb5ced767d2f82296ecf22feb262870acba6f3 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Sat, 29 Oct 2022 22:33:51 +0900 Subject: [PATCH 16/37] Add new translations New settings option New extras tab option --- localizations/ko_KR.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json index 3d631066..8f5f155b 100644 --- a/localizations/ko_KR.json +++ b/localizations/ko_KR.json @@ -202,6 +202,7 @@ "Inpaint at full resolution padding, pixels": "전체 해상도로 인페인트시 패딩값(픽셀 단위)", "Inpaint masked": "마스크만 처리", "Inpaint not masked": "마스크 이외만 처리", + "Inpainting conditioning mask strength": "인페인팅 조절 마스크 강도", "Input directory": "인풋 이미지 경로", "Input images directory": "이미지 경로 입력", "Interpolation Method": "보간 방법", @@ -276,6 +277,7 @@ "Number of repeats for a single input image per epoch; used only for displaying epoch number": "세대(Epoch)당 단일 인풋 이미지의 반복 횟수 - 세대(Epoch) 숫자를 표시하는 데에만 사용됩니다. ", "Number of rows on the page": "각 페이지마다 표시할 세로줄 수", "Number of vectors per token": "토큰별 벡터 수", + "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "인페인팅 모델에만 적용됩니다. 인페인팅과 이미지→이미지에서 원본 이미지를 얼마나 마스킹 처리할지 결정하는 값입니다. 1.0은 완전히 마스킹함(기본 설정)을 의미하고, 0.0은 완전히 언마스킹된 이미지를 의미합니다. 낮은 값일수록 이미지의 전체적인 구성을 유지하는 데에 도움되겠지만, 변화량이 많을수록 불안정해집니다.", "Open for Clip Aesthetic!": "클립 스타일 기능을 활성화하려면 클릭!", "Open images output directory": "이미지 저장 경로 열기", "Open output directory": "저장 경로 열기", @@ -390,6 +392,7 @@ "Select activation function of hypernetwork": "하이퍼네트워크 활성화 함수 선택", "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "레이어 가중치 초기화 방식 선택 - relu류 : Kaiming 추천, sigmoid류 : Xavier 추천", "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "WebUI에 표시할 Real-ESRGAN 모델을 선택하십시오. (재시작 필요)", + "Send seed when sending prompt or image to other interface": "다른 화면으로 프롬프트나 이미지를 보낼 때 시드도 함께 보내기", "Send to extras": "부가기능으로 전송", "Send to img2img": "이미지→이미지로 전송", "Send to inpaint": "인페인트로 전송", @@ -467,6 +470,7 @@ "up": "위쪽", "Upload mask": "마스크 업로드하기", "Upload prompt inputs": "입력할 프롬프트를 업로드하십시오", + "Upscale Before Restoring Faces": "얼굴 보정을 진행하기 전에 업스케일링 먼저 진행하기", "Upscale latent space image when doing hires. 
fix": "고해상도 보정 사용시 잠재 공간 이미지 업스케일하기", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "마스크된 부분을 설정된 해상도로 업스케일하고, 인페인팅을 진행한 뒤, 다시 다운스케일 후 원본 이미지에 붙여넣습니다.", "Upscaler": "업스케일러", From af45b5a11a8a393ed404be3ce0ecac14338155c7 Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:26:28 +0300 Subject: [PATCH 17/37] Testing with API added --- run_tests.bat | 15 +++++++++ test/__init__.py | 0 test/server_poll.py | 17 ++++++++++ test/txt2img_test.py | 80 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 112 insertions(+) create mode 100644 run_tests.bat create mode 100644 test/__init__.py create mode 100644 test/server_poll.py create mode 100644 test/txt2img_test.py diff --git a/run_tests.bat b/run_tests.bat new file mode 100644 index 00000000..3a63f034 --- /dev/null +++ b/run_tests.bat @@ -0,0 +1,15 @@ +@echo off +set ERROR_REPORTING=FALSE +set COMMANDLINE_ARGS= --api +echo Launching SDWebUI... +start "SDWebUITest" webui.bat + +if not defined PYTHON (set PYTHON=python) +if not defined VENV_DIR (set VENV_DIR=venv) +set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" +%PYTHON% test/server_poll.py +for /f "tokens=2 delims=," %%a in ('tasklist /v /fo csv ^| findstr /i "SDWebUITest"') do set "$PID=%%a" + +taskkill /PID %$PID% >nul 2>&1 + +pause diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/server_poll.py b/test/server_poll.py new file mode 100644 index 00000000..8c0436f8 --- /dev/null +++ b/test/server_poll.py @@ -0,0 +1,17 @@ +import unittest +import requests +import time + +timeout_threshold = 240 +start_time = time.time() +while time.time()-start_time < timeout_threshold: + try: + requests.head("http://localhost:7860/") + break + except requests.exceptions.ConnectionError: + pass +if time.time()-start_time < timeout_threshold: + suite = unittest.TestLoader().discover('', pattern='*_test.py') + result = unittest.TextTestRunner(verbosity=2).run(suite) +else: + print("Launch unsuccessful") diff --git a/test/txt2img_test.py b/test/txt2img_test.py new file mode 100644 index 00000000..9484fd99 --- /dev/null +++ b/test/txt2img_test.py @@ -0,0 +1,80 @@ +import unittest +import requests +import time + +url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" +simple_txt2img = { + "enable_hr": False, + "denoising_strength": 0, + "firstphase_width": 0, + "firstphase_height": 0, + "prompt": "example prompt", + "styles": [ + "" + ], + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "batch_size": 1, + "n_iter": 1, + "steps": 5, + "cfg_scale": 7, + "width": 64, + "height": 64, + "restore_faces": False, + "tiling": False, + "negative_prompt": "", + "eta": 0, + "s_churn": 0, + "s_tmax": 0, + "s_tmin": 0, + "s_noise": 1, + "sampler_index": "Euler a" +} + +class TestTxt2ImgWorking(unittest.TestCase): + def test_txt2img_simple_performed(self): + self.assertEqual(requests.post(url_txt2img, json=simple_txt2img).status_code, 200) + + def test_txt2img_with_negative_prompt_performed(self): + params = simple_txt2img.copy() + params["negative_prompt"] = "example negative prompt" + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_not_square_image_performed(self): + params = simple_txt2img.copy() + params["height"] = 128 + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def 
test_txt2img_with_hrfix_performed(self): + params = simple_txt2img.copy() + params["enable_hr"] = True + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_with_restore_faces_performed(self): + params = simple_txt2img.copy() + params["restore_faces"] = True + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_with_tiling_faces_performed(self): + params = simple_txt2img.copy() + params["tiling"] = True + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_with_vanilla_sampler_performed(self): + params = simple_txt2img.copy() + params["sampler_index"] = "PLMS" + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_multiple_batches_performed(self): + params = simple_txt2img.copy() + params["n_iter"] = 2 + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + +class TestTxt2ImgCorrectness(unittest.TestCase): + pass + +if __name__ == "__main__": + unittest.main() From ab05a74ead9fabb45dd099990e34061c7eb02ca3 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sun, 30 Oct 2022 00:32:02 +0700 Subject: [PATCH 18/37] Revert "Add cleanup after training" This reverts commit 3ce2bfdf95bd5f26d0f6e250e67338ada91980d1. --- modules/hypernetworks/hypernetwork.py | 191 +++++++++--------- .../textual_inversion/textual_inversion.py | 163 ++++++++------- 2 files changed, 170 insertions(+), 184 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 170d5ea4..38f35c58 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -398,112 +398,110 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log forced_filename = "" pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) - - try: - for i, entries in pbar: - hypernetwork.step = i + ititial_step - if len(loss_dict) > 0: - previous_mean_losses = [i[-1] for i in loss_dict.values()] - previous_mean_loss = mean(previous_mean_losses) - - scheduler.apply(optimizer, hypernetwork.step) - if scheduler.finished: - break - - if shared.state.interrupted: - break - - with torch.autocast("cuda"): - c = stack_conds([entry.cond for entry in entries]).to(devices.device) - # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x - del c - - losses[hypernetwork.step % losses.shape[0]] = loss.item() - for entry in entries: - loss_dict[entry.filename].append(loss.item()) - - optimizer.zero_grad() - weights[0].grad = None - loss.backward() - - if weights[0].grad is None: - steps_without_grad += 1 - else: - steps_without_grad = 0 - assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' - - optimizer.step() - - steps_done = hypernetwork.step + 1 - - if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): - raise RuntimeError("Loss diverged.") + for i, entries in pbar: + hypernetwork.step = i + ititial_step + if len(loss_dict) > 0: + previous_mean_losses = [i[-1] for i in loss_dict.values()] + previous_mean_loss = mean(previous_mean_losses) - if len(previous_mean_losses) > 1: - std = stdev(previous_mean_losses) + scheduler.apply(optimizer, hypernetwork.step) + if scheduler.finished: + break + + if shared.state.interrupted: + 
break + + with torch.autocast("cuda"): + c = stack_conds([entry.cond for entry in entries]).to(devices.device) + # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x + del c + + losses[hypernetwork.step % losses.shape[0]] = loss.item() + for entry in entries: + loss_dict[entry.filename].append(loss.item()) + + optimizer.zero_grad() + weights[0].grad = None + loss.backward() + + if weights[0].grad is None: + steps_without_grad += 1 else: - std = 0 - dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" - pbar.set_description(dataset_loss_info) + steps_without_grad = 0 + assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' - if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: - # Before saving, change name to match current checkpoint. - hypernetwork.name = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(last_saved_file) + optimizer.step() - textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { - "loss": f"{previous_mean_loss:.7f}", - "learn_rate": scheduler.learn_rate - }) + steps_done = hypernetwork.step + 1 - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{hypernetwork_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) + if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): + raise RuntimeError("Loss diverged.") + + if len(previous_mean_losses) > 1: + std = stdev(previous_mean_losses) + else: + std = 0 + dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" + pbar.set_description(dataset_loss_info) - optimizer.zero_grad() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) + if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: + # Before saving, change name to match current checkpoint. 
+ hypernetwork.name = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') + hypernetwork.save(last_saved_file) - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) + textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { + "loss": f"{previous_mean_loss:.7f}", + "learn_rate": scheduler.learn_rate + }) - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) - preview_text = p.prompt + optimizer.zero_grad() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images)>0 else None + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 - if image is not None: - shared.state.current_image = image - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" + preview_text = p.prompt - shared.state.job_no = hypernetwork.step + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images)>0 else None - shared.state.textinfo = f""" + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + + if image is not None: + shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + + shared.state.job_no = hypernetwork.step + + shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
@@ -512,14 +510,7 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - finally: - if weights: - for weight in weights: - weight.requires_grad = False - if unload: - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - + report_statistics(loss_dict) checkpoint = sd_models.select_checkpoint() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fd7f0897..44f06443 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -283,113 +283,111 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = False pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) + for i, entries in pbar: + embedding.step = i + ititial_step - try: - for i, entries in pbar: - embedding.step = i + ititial_step + scheduler.apply(optimizer, embedding.step) + if scheduler.finished: + break - scheduler.apply(optimizer, embedding.step) - if scheduler.finished: - break + if shared.state.interrupted: + break - if shared.state.interrupted: - break + with torch.autocast("cuda"): + c = cond_model([entry.cond_text for entry in entries]) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x - with torch.autocast("cuda"): - c = cond_model([entry.cond_text for entry in entries]) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x + losses[embedding.step % losses.shape[0]] = loss.item() - losses[embedding.step % losses.shape[0]] = loss.item() + optimizer.zero_grad() + loss.backward() + optimizer.step() - optimizer.zero_grad() - loss.backward() - optimizer.step() + steps_done = embedding.step + 1 - steps_done = embedding.step + 1 + epoch_num = embedding.step // len(ds) + epoch_step = embedding.step % len(ds) - epoch_num = embedding.step // len(ds) - epoch_step = embedding.step % len(ds) + pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") - pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") + if embedding_dir is not None and steps_done % save_embedding_every == 0: + # Before saving, change name to match current checkpoint. + embedding.name = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') + embedding.save(last_saved_file) + embedding_yet_to_be_embedded = True - if embedding_dir is not None and steps_done % save_embedding_every == 0: - # Before saving, change name to match current checkpoint. 
- embedding.name = f'{embedding_name}-{steps_done}' - last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') - embedding.save(last_saved_file) - embedding_yet_to_be_embedded = True + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate + }) - write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { - "loss": f"{losses.mean():.7f}", - "learn_rate": scheduler.learn_rate - }) + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{embedding_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + do_not_reload_embeddings=True, + ) - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{embedding_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - do_not_reload_embeddings=True, - ) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 + p.width = training_width + p.height = training_height - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 - p.width = training_width - p.height = training_height + preview_text = p.prompt - preview_text = p.prompt + processed = processing.process_images(p) + image = processed.images[0] - processed = processing.process_images(p) - image = processed.images[0] + shared.state.current_image = image - shared.state.current_image = image + if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: - if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: + last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') - last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') + info = PngImagePlugin.PngInfo() + data = torch.load(last_saved_file) + info.add_text("sd-ti-embedding", embedding_to_b64(data)) - info = PngImagePlugin.PngInfo() - data = torch.load(last_saved_file) - info.add_text("sd-ti-embedding", embedding_to_b64(data)) + title = "<{}>".format(data.get('name', '???')) - title = "<{}>".format(data.get('name', '???')) + try: + vectorSize = list(data['string_to_param'].values())[0].shape[0] + except Exception as e: + vectorSize = '?' - try: - vectorSize = list(data['string_to_param'].values())[0].shape[0] - except Exception as e: - vectorSize = '?' 
+ checkpoint = sd_models.select_checkpoint() + footer_left = checkpoint.model_name + footer_mid = '[{}]'.format(checkpoint.hash) + footer_right = '{}v {}s'.format(vectorSize, steps_done) - checkpoint = sd_models.select_checkpoint() - footer_left = checkpoint.model_name - footer_mid = '[{}]'.format(checkpoint.hash) - footer_right = '{}v {}s'.format(vectorSize, steps_done) + captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) + captioned_image = insert_image_data_embed(captioned_image, data) - captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) - captioned_image = insert_image_data_embed(captioned_image, data) + captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) + embedding_yet_to_be_embedded = False - captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) - embedding_yet_to_be_embedded = False + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" + shared.state.job_no = embedding.step - shared.state.job_no = embedding.step - - shared.state.textinfo = f""" + shared.state.textinfo = f"""
<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
@@ -398,9 +396,6 @@ Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - finally: - if embedding and embedding.vec is not None: - embedding.vec.requires_grad = False checkpoint = sd_models.select_checkpoint() From a07f054c86f33360ff620d6a3fffdee366ab2d99 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sun, 30 Oct 2022 00:49:29 +0700 Subject: [PATCH 19/37] Add missing info on hypernetwork/embedding model log Mentioned here: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/1528#discussioncomment-3991513 Also group the saving into one --- modules/hypernetworks/hypernetwork.py | 31 ++++++++++----- .../textual_inversion/textual_inversion.py | 39 ++++++++++++------- 2 files changed, 47 insertions(+), 23 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38f35c58..86daf825 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -361,6 +361,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log images_dir = None hypernetwork = shared.loaded_hypernetwork + checkpoint = sd_models.select_checkpoint() ititial_step = hypernetwork.step or 0 if ititial_step > steps: @@ -449,9 +450,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: # Before saving, change name to match current checkpoint. - hypernetwork.name = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(last_saved_file) + hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { "loss": f"{previous_mean_loss:.7f}", @@ -512,13 +513,23 @@ Last saved image: {html.escape(last_saved_image)}
""" report_statistics(loss_dict) - checkpoint = sd_models.select_checkpoint() - hypernetwork.sd_checkpoint = checkpoint.hash - hypernetwork.sd_checkpoint_name = checkpoint.model_name - # Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention). - hypernetwork.name = hypernetwork_name - filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(filename) + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) return hypernetwork, filename + +def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename): + old_hypernetwork_name = hypernetwork.name + old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None + old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None + try: + hypernetwork.sd_checkpoint = checkpoint.hash + hypernetwork.sd_checkpoint_name = checkpoint.model_name + hypernetwork.name = hypernetwork_name + hypernetwork.save(filename) + except: + hypernetwork.sd_checkpoint = old_sd_checkpoint + hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name + hypernetwork.name = old_hypernetwork_name + raise diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 44f06443..ee9917ce 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -119,7 +119,7 @@ class EmbeddingDatabase: vec = emb.detach().to(devices.device, dtype=torch.float32) embedding = Embedding(vec, name) embedding.step = data.get('step', None) - embedding.sd_checkpoint = data.get('hash', None) + embedding.sd_checkpoint = data.get('sd_checkpoint', None) embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) self.register_embedding(embedding, shared.sd_model) @@ -259,6 +259,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc hijack = sd_hijack.model_hijack embedding = hijack.embedding_db.word_embeddings[embedding_name] + checkpoint = sd_models.select_checkpoint() ititial_step = embedding.step or 0 if ititial_step > steps: @@ -314,9 +315,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc if embedding_dir is not None and steps_done % save_embedding_every == 0: # Before saving, change name to match current checkpoint. - embedding.name = f'{embedding_name}-{steps_done}' - last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') - embedding.save(last_saved_file) + embedding_name_every = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt') + save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True) embedding_yet_to_be_embedded = True write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { @@ -397,14 +398,26 @@ Last saved image: {html.escape(last_saved_image)}
</p>
""" - checkpoint = sd_models.select_checkpoint() - - embedding.sd_checkpoint = checkpoint.hash - embedding.sd_checkpoint_name = checkpoint.model_name - embedding.cached_checksum = None - # Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention). - embedding.name = embedding_name - filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding.name}.pt') - embedding.save(filename) + filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') + save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True) return embedding, filename + +def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True): + old_embedding_name = embedding.name + old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None + old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None + old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None + try: + embedding.sd_checkpoint = checkpoint.hash + embedding.sd_checkpoint_name = checkpoint.model_name + if remove_cached_checksum: + embedding.cached_checksum = None + embedding.name = embedding_name + embedding.save(filename) + except: + embedding.sd_checkpoint = old_sd_checkpoint + embedding.sd_checkpoint_name = old_sd_checkpoint_name + embedding.name = old_embedding_name + embedding.cached_checksum = old_cached_checksum + raise From 3d58510f214c645ce5cdb261aa47df6573b239e9 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sun, 30 Oct 2022 00:54:59 +0700 Subject: [PATCH 20/37] Fix dataset still being loaded even when training will be skipped --- modules/hypernetworks/hypernetwork.py | 2 +- modules/textual_inversion/textual_inversion.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 86daf825..07acadc9 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -364,7 +364,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log checkpoint = sd_models.select_checkpoint() ititial_step = hypernetwork.step or 0 - if ititial_step > steps: + if ititial_step >= steps: shared.state.textinfo = f"Model has already been trained beyond specified max steps" return hypernetwork, filename diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index ee9917ce..e0babb46 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -262,7 +262,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc checkpoint = sd_models.select_checkpoint() ititial_step = embedding.step or 0 - if ititial_step > steps: + if ititial_step >= steps: shared.state.textinfo = f"Model has already been trained beyond specified max steps" return embedding, filename From 2f3d8172c3fe80ea64ebe5c797835ca15c2e595f Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sat, 29 Oct 2022 21:43:32 +0300 Subject: [PATCH 21/37] img2img test template and setUp added --- test/extras_test.py | 0 test/img2img_test.py | 59 +++++++++++++++++ test/test_files/img2img_basic.png | Bin 0 -> 9932 bytes test/test_files/mask_basic.png | Bin 0 -> 362 bytes test/txt2img_test.py | 102 
++++++++++++++----------
 5 files changed, 106 insertions(+), 55 deletions(-)
 create mode 100644 test/extras_test.py
 create mode 100644 test/img2img_test.py
 create mode 100644 test/test_files/img2img_basic.png
 create mode 100644 test/test_files/mask_basic.png

diff --git a/test/extras_test.py b/test/extras_test.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/img2img_test.py b/test/img2img_test.py
new file mode 100644
index 00000000..d8ed309d
--- /dev/null
+++ b/test/img2img_test.py
@@ -0,0 +1,59 @@
+import unittest
+import requests
+from gradio.processing_utils import encode_pil_to_base64
+from PIL import Image
+
+class Img2ImgWorking(unittest.TestCase):
+    def setUp(self):
+        self.url_img2img = "http://localhost:7860/sdapi/v1/img2img"
+        self.simple_img2img = {
+            "init_images": [
+                encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))
+            ],
+            "resize_mode": 0,
+            "denoising_strength": 0.75,
+            "mask": None,
+            "mask_blur": 4,
+            "inpainting_fill": 0,
+            "inpaint_full_res": False,
+            "inpaint_full_res_padding": 0,
+            "inpainting_mask_invert": 0,
+            "prompt": "example prompt",
+            "styles": [
+                ""
+            ],
+            "seed": -1,
+            "subseed": -1,
+            "subseed_strength": 0,
+            "seed_resize_from_h": -1,
+            "seed_resize_from_w": -1,
+            "batch_size": 1,
+            "n_iter": 1,
+            "steps": 3,
+            "cfg_scale": 7,
+            "width": 64,
+            "height": 64,
+            "restore_faces": False,
+            "tiling": False,
+            "negative_prompt": "",
+            "eta": 0,
+            "s_churn": 0,
+            "s_tmax": 0,
+            "s_tmin": 0,
+            "s_noise": 1,
+            "override_settings": {},
+            "sampler_index": "Euler a",
+            "include_init_images": False
+            }
+    def test_img2img_simple_performed(self):
+        self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
+
+    def test_inpainting_masked_performed(self):
+        self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png"))
+        self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
+
+class TestImg2ImgCorrectness(unittest.TestCase):
+    pass
+
+if __name__ == "__main__":
+    unittest.main()
\ No newline at end of file
diff --git a/test/test_files/img2img_basic.png b/test/test_files/img2img_basic.png
new file mode 100644
index 0000000000000000000000000000000000000000..49a420482d0a70b9f5986d776a66cb3ea39d1a97
GIT binary patch
literal 9932
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/test/test_files/mask_basic.png b/test/test_files/mask_basic.png
new file mode 100644
index 0000000000000000000000000000000000000000..0c2e9a6899e5c0381ce7c7364b31d684464ab423
GIT binary patch
literal 362
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

Date: Sat, 29 Oct 2022 21:50:06 +0300
Subject: [PATCH 22/37] extras test template added

---
 test/extras_test.py  | 29 +++++++++++++++++++++++++++++
 test/img2img_test.py |  4 ++--
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/test/extras_test.py b/test/extras_test.py
index e69de29b..2e1764d9 100644
--- a/test/extras_test.py
+++ b/test/extras_test.py
@@ -0,0 +1,29 @@
+import unittest
+import requests
+from gradio.processing_utils import encode_pil_to_base64
+from PIL import Image
+
+class TestExtrasWorking(unittest.TestCase):
+    def setUp(self):
+        self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image"
+        self.simple_extras = {
+            "resize_mode": 0,
+            "show_extras_results": True,
+            "gfpgan_visibility": 0,
+            "codeformer_visibility": 0,
+            "codeformer_weight": 0,
+            "upscaling_resize": 2,
+            "upscaling_resize_w": 512,
+            "upscaling_resize_h": 512,
+            "upscaling_crop": True,
+            "upscaler_1": "None",
+            "upscaler_2": "None",
+            "extras_upscaler_2_visibility": 0,
+            "image": ""
+        }
+
+class TestExtrasCorrectness(unittest.TestCase):
+    pass
+
+if __name__ == "__main__":
+    unittest.main()

diff --git a/test/img2img_test.py b/test/img2img_test.py
index d8ed309d..61e3e285 100644
--- a/test/img2img_test.py
+++ b/test/img2img_test.py
@@ -3,7 +3,7 @@ import requests
 from gradio.processing_utils import encode_pil_to_base64
 from PIL import Image
 
-class Img2ImgWorking(unittest.TestCase):
+class TestImg2ImgWorking(unittest.TestCase):
     def setUp(self):
         self.url_img2img = "http://localhost:7860/sdapi/v1/img2img"
         self.simple_img2img = {
@@ -56,4 +56,4 @@ class TestImg2ImgCorrectness(unittest.TestCase):
     pass
 
 if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
+    unittest.main()

From f62db4d5c753bc32d2ae166606ce41f4c5fa5c43 Mon Sep 17 00:00:00 2001
From: evshiron
Date: Sun, 30 Oct 2022 03:55:43 +0800
Subject: [PATCH 23/37] fix progress response model

---
 modules/api/api.py    | 30 ------------------------------
 modules/api/models.py |  8 ++++----
 2 files changed, 4 insertions(+), 34 deletions(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index e93cddcb..7e8522a2 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -1,33 +1,3 @@
-# import time
-
-# from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI
-# from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-# from modules.sd_samplers import all_samplers
-# from modules.extras import run_pnginfo
-# import modules.shared as shared
-# from modules import devices
-# import uvicorn
-# from fastapi import Body, APIRouter, HTTPException
-# from fastapi.responses import JSONResponse
-# from pydantic import BaseModel, Field, Json
-# from typing import List
-# 
import json -# import io -# import base64 -# from PIL import Image - -# sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) - -# class TextToImageResponse(BaseModel): -# images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") -# parameters: Json -# info: Json - -# class ImageToImageResponse(BaseModel): -# images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") -# parameters: Json -# info: Json - import time import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image diff --git a/modules/api/models.py b/modules/api/models.py index 8d4abc39..e1762fb9 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from click import prompt -from pydantic import BaseModel, Field, create_model +from pydantic import BaseModel, Field, Json, create_model from typing import Any, Optional from typing_extensions import Literal from inflection import underscore @@ -158,6 +158,6 @@ class PNGInfoResponse(BaseModel): info: str = Field(title="Image info", description="A string with all the info the image had") class ProgressResponse(BaseModel): - progress: float - eta_relative: float - state: dict + progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") + eta_relative: float = Field(title="ETA in secs") + state: Json From e9c6c2a51f972fd7cd88ea740ade4ac3d8108b67 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 04:02:56 +0800 Subject: [PATCH 24/37] add description for state field --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/models.py b/modules/api/models.py index e1762fb9..709ab5a6 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -160,4 +160,4 @@ class PNGInfoResponse(BaseModel): class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") - state: Json + state: Json = Field(title="State", description="The current state snapshot") From 88f46a5bec610cf03641f18becbe3deda541e982 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 05:04:29 +0800 Subject: [PATCH 25/37] update progress response model --- modules/api/api.py | 6 +++--- modules/api/models.py | 4 ++-- modules/shared.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 7e8522a2..5912d289 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -61,7 +61,7 @@ class Api: self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) - self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"]) + self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -171,7 +171,7 @@ class Api: # copy from check_progress_call of ui.py if 
shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.js()) + return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict()) # avoid dividing zero progress = 0.01 @@ -187,7 +187,7 @@ class Api: progress = min(progress, 1) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.js()) + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict()) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 709ab5a6..0ab85ec5 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from click import prompt -from pydantic import BaseModel, Field, Json, create_model +from pydantic import BaseModel, Field, create_model from typing import Any, Optional from typing_extensions import Literal from inflection import underscore @@ -160,4 +160,4 @@ class PNGInfoResponse(BaseModel): class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") - state: Json = Field(title="State", description="The current state snapshot") + state: dict = Field(title="State", description="The current state snapshot") diff --git a/modules/shared.py b/modules/shared.py index 0f4c035d..f7b0990c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -147,7 +147,7 @@ class State: def get_job_timestamp(self): return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp? - def js(self): + def dict(self): obj = { "skipped": self.skipped, "interrupted": self.skipped, @@ -158,7 +158,7 @@ class State: "sampling_steps": self.sampling_steps, } - return json.dumps(obj) + return obj state = State() From 9f104b53c425e248595e5b6481336d2a339e015e Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 05:19:17 +0800 Subject: [PATCH 26/37] preview current image when opts.show_progress_every_n_steps is enabled --- modules/api/api.py | 8 ++++++-- modules/api/models.py | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 5912d289..e960bb7b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,7 +1,7 @@ import time import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter, Depends, HTTPException import modules.shared as shared from modules import devices from modules.api.models import * @@ -187,7 +187,11 @@ class Api: progress = min(progress, 1) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict()) + current_image = None + if shared.state.current_image: + current_image = encode_pil_to_base64(shared.state.current_image) + + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 0ab85ec5..c8bc719a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -161,3 +161,4 @@ class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") state: dict = Field(title="State", 
description="The current state snapshot")
+    current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")

From 66d038f6a41507af2243ff1f6618a745a092c290 Mon Sep 17 00:00:00 2001
From: timntorres
Date: Sat, 29 Oct 2022 15:00:08 -0700
Subject: [PATCH 27/37] Read hypernet strength from PNG info.

---
 modules/generation_parameters_copypaste.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index bbaad42e..59c6d7da 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -66,6 +66,7 @@ def integrate_settings_paste_fields(component_dict):
     settings_map = {
         'sd_hypernetwork': 'Hypernet',
+        'sd_hypernetwork_strength': 'Hypernetwork strength',
         'CLIP_stop_at_last_layers': 'Clip skip',
         'sd_model_checkpoint': 'Model hash',
     }

From 9f4f894d74b57c3d02ebccaa59f9c22fca2b6c90 Mon Sep 17 00:00:00 2001
From: evshiron
Date: Sun, 30 Oct 2022 06:03:32 +0800
Subject: [PATCH 28/37] allow skip current image in progress api

---
 modules/api/api.py    | 4 ++--
 modules/api/models.py | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index e960bb7b..5c5b210f 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -167,7 +167,7 @@ class Api:
 
         return PNGInfoResponse(info=result[1])
 
-    def progressapi(self):
+    def progressapi(self, req: ProgressRequest = Depends()):
         # copy from check_progress_call of ui.py
 
         if shared.state.job_count == 0:
@@ -188,7 +188,7 @@ class Api:
         progress = min(progress, 1)
 
         current_image = None
-        if shared.state.current_image:
+        if shared.state.current_image and not req.skip_current_image:
             current_image = encode_pil_to_base64(shared.state.current_image)
 
         return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)

diff --git a/modules/api/models.py b/modules/api/models.py
index c8bc719a..9ee42a17 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -157,6 +157,9 @@ class PNGInfoRequest(BaseModel):
 class PNGInfoResponse(BaseModel):
     info: str = Field(title="Image info", description="A string with all the info the image had")
 
+class ProgressRequest(BaseModel):
+    skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
+
 class ProgressResponse(BaseModel):
     progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
     eta_relative: float = Field(title="ETA in secs")

From 35e95f574a5954e9cbe95265cbcf1386715c9e87 Mon Sep 17 00:00:00 2001
From: Riccardo Giovanetti <29801031+Harvester62@users.noreply.github.com>
Date: Sun, 30 Oct 2022 00:13:13 +0200
Subject: [PATCH 29/37] Italian localization (extended) [Requires Feedback]

This is my first version of an alternative localization into the Italian language. It is a follow-up to the current localization file by @EugenioBuffo (#3725), whom I thank, and to my discussion "Italian localization (git newbie)" (#3633). It covers the main user interface and all the current Extensions and Scripts, with the following exceptions:

txt2img2img (I got errors, so I removed it from my local installation of SD Web UI)
Parameter Sequencer (not installed locally)
Booru tag autocompletion (not installed locally)
Saving steps of the sampling process (not installed locally)

I do not plan to translate the above scripts in the short term, unless I install them locally on my machine.

I apologize for brutally overwriting the originally submitted file, but I find it quite exhausting to edit and append over a thousand lines to the original file. If this is mandatory, I will delete this commit and start a new one amending the original it_IT.json file.

The translation is certainly not perfect and some entries can still be improved, so I invite @EugenioBuffo and any other native Italian speaker to give advice and help review this extensive translation. I look forward to reading any feedback from the community and developers. Thank you.

---
 localizations/it_IT.json | 1560 ++++++++++++++++++++++++++------------
 1 file changed, 1069 insertions(+), 491 deletions(-)

diff --git a/localizations/it_IT.json b/localizations/it_IT.json
index d05035f9..9752f71c 100644
--- a/localizations/it_IT.json
+++ b/localizations/it_IT.json
@@ -1,492 +1,1070 @@
 {
-    "⤡": "⤡",
-    "⊞": "⊞",
-    "×": "×",
-    "❮": "❮",
-    "❯": "❯",
-    "Loading...": "Caricamento...",
-    "view": "mostra ",
-    "api": "API",
-    "•": "•",
-    "built with gradio": "Sviluppato con Gradio",
-    "Stable Diffusion checkpoint": "Stable Diffusion checkpoint",
-    "txt2img": "txt2img",
-    "img2img": "img2img",
-    "Extras": "Extra",
-    "PNG Info": "PNG Info",
-    "Checkpoint Merger": "Checkpoint Merger",
-    "Train": "Train",
-    "Image Browser": "Galleria Immagini",
-    "Settings": "Impostazioni",
-    "Prompt": "Prompt",
-    "Negative prompt": "Prompt negativo",
-    "Run": "Esegui",
-    "Skip": "Salta",
-    "Interrupt": "Interrompi",
-    "Generate": "Genera",
-    "Style 1": "Stile 1",
-    "Style 2": "Stile 2",
-    "Label": "Etichetta",
-    "File": "File",
-    "Drop File Here": "Trascina il file qui",
-    "-": "-",
-    "or": "o",
-    "Click to Upload": "Clicca per caricare",
-    "Image": "Immagine",
-    "Check progress": "Verifica progresso",
-    "Check progress (first)": "Verifica progresso (iniziale)",
-    "Sampling Steps": "Sampling Steps",
-    "Sampling method": "Sampling method",
-    "Euler a": "Euler a",
-    "Euler": "Euler",
-    "LMS": "LMS",
-    "Heun": "Heun",
-    "DPM2": "DPM2",
-    "DPM2 a": "DPM2 a",
-    "DPM fast": "DPM fast",
-    "DPM adaptive": "DPM adaptive",
-    "LMS Karras": "LMS Karras",
-    "DPM2 Karras": "DPM2 Karras",
-    "DPM2 a Karras": "DPM2 a Karras",
-    "DDIM": "DDIM",
-    "PLMS": "PLMS",
-    "Width": "Larghezza",
-    "Height": "Altezza",
-    "Restore faces": "Ripristina volti",
-    "Tiling": "Tiling",
-    "Highres. fix": "Highres. 
fix", - "Firstpass width": "Larghezza del primo step", - "Firstpass height": "Altezza del primo step", - "Denoising strength": "Denoising strength", - "Batch count": "Batch count", - "Batch size": "Batch size", - "CFG Scale": "CFG Scale", - "Seed": "Seed", - "Extra": "Extra", - "Variation seed": "Variation seed", - "Variation strength": "Variation strength", - "Resize seed from width": "Ridimensiona seed da larghezza", - "Resize seed from height": "Ridimensiona seed da altezza", - "Script": "Script", - "None": "Nessuno", - "Prompt matrix": "Prompt matrix", - "Prompts from file or textbox": "Prompts da file o casella di testo", - "X/Y plot": "X/Y plot", - "Put variable parts at start of prompt": "Inserisce una parte variabile all'inizio del prompt", - "Iterate seed every line": "Itera seed per ogni linea", - "List of prompt inputs": "Lista dei prompt in input", - "Upload prompt inputs": "Carica prompt di input", - "Show Textbox": "Mostra la casella di testo", - "File with inputs": "File con input", - "Prompts": "Prompts", - "X type": "X type", - "Nothing": "Nulla", - "Var. seed": "Var. seed", - "Var. strength": "Var. strength", - "Steps": "Steps", - "Prompt order": "Prompt order", - "Sampler": "Sampler", - "Checkpoint name": "Checkpoint name", - "Hypernetwork": "Hypernetwork", - "Hypernet str.": "Hypernet str.", - "Sigma Churn": "Sigma Churn", - "Sigma min": "Sigma min", - "Sigma max": "Sigma max", - "Sigma noise": "Sigma noise", - "Eta": "Eta", - "Clip skip": "Clip skip", - "Denoising": "Denoising", - "X values": "X values", - "Y type": "Y type", - "Y values": "Y values", - "Draw legend": "Disegna legenda", - "Include Separate Images": "Includi immagini separate", - "Keep -1 for seeds": "Mantieni il seed a -1", - "Drop Image Here": "Trascina l'immagine qui", - "Save": "Salva", - "Send to img2img": "Invia a img2img", - "Send to inpaint": "Invia a inpaint", - "Send to extras": "Invia a extra", - "Make Zip when Save?": "Creare Zip al salvataggio?", - "Textbox": "Casella di testo", - "Interrogate\nCLIP": "Interroga\nCLIP", - "Inpaint": "Inpaint", - "Batch img2img": "Batch img2img", - "Image for img2img": "Immagine per img2img", - "Image for inpainting with mask": "Immagine per inpainting con maschera", - "Mask": "Mask", - "Mask blur": "Maschera sfocatura", - "Mask mode": "Modalità maschera", - "Draw mask": "Disegna maschera", - "Upload mask": "Carica maschera", - "Masking mode": "Modalità mascheramento", - "Inpaint masked": "Inpaint mascherato", - "Inpaint not masked": "Inpaint non mascherato", - "Masked content": "Maschera contenuto", - "fill": "riempi", - "original": "originale", - "latent noise": "latent noise", - "latent nothing": "latent nothing", - "Inpaint at full resolution": "Inpaint alla massima risoluzione", - "Inpaint at full resolution padding, pixels": "Inpaint alla massima risoluzione padding, pixels", - "Process images in a directory on the same machine where the server is running.": "Processa le immagini in una cartella nella stessa macchina in cui il server è stato lanciato.", - "Use an empty output directory to save pictures normally instead of writing to the output directory.": "Usa una cartella di output vuota per salvare le immagini normalmente invece di scrivere nella cartella di output", - "Input directory": "Cartella di Input", - "Output directory": "Cartella di Output", - "Resize mode": "Modalità ridimensionamento", - "Just resize": "Solo ridimensionamento", - "Crop and resize": "Taglia e Ridimensiona", - "Resize and fill": "Ridimensiona e Riempi", - "img2img 
alternative test": "img2img alternative test", - "Loopback": "Loopback", - "Outpainting mk2": "Outpainting mk2", - "Poor man's outpainting": "Poor man's outpainting", - "SD upscale": "SD upscale", - "should be 2 or lower.": "deve essere 2 o inferiore.", - "Override `Sampling method` to Euler?(this method is built for it)": "Ripristinare il `Sampling method` in Euler?(metodo di default)", - "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Ripristinare il `prompt` al valore del `prompt originale`? (e `prompt negativo`)", - "Original prompt": "Prompt originale", - "Original negative prompt": "Promp negativo originale", - "Override `Sampling Steps` to the same value as `Decode steps`?": "Ripristinare il `Sampling Steps` al valore di `Decode steps`?", - "Decode steps": "Decode steps", - "Override `Denoising strength` to 1?": "Ripristinare `Denoising strength` a 1?", - "Decode CFG scale": "Decode CFG scale", - "Randomness": "Casualità", - "Sigma adjustment for finding noise for image": "Sigma adjustment for finding noise for image. ", - "Loops": "Loops", - "Denoising strength change factor": "Denoising strength change factor", - "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Impostazioni Raccomandate: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8", - "Pixels to expand": "Pixels to expand", - "Outpainting direction": "Direzione Outpainting", - "left": "sinistra", - "right": "destra", - "up": "su", - "down": "giù", - "Fall-off exponent (lower=higher detail)": "Fall-off exponent (inferiore=maggiori dettagli)", - "Color variation": "Variazione del colore", - "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Verrà effettuato l'upscale dell'immagine di due volte le sue dimensioni; usa gli sliders di larghezza e altezza per configurare la dimensione del tile", - "Tile overlap": "Sovrapposizione Tile", - "Upscaler": "Upscaler", - "Lanczos": "Lanczos", - "LDSR": "LDSR", - "SwinIR 4x": "SwinIR 4x", - "ScuNET GAN": "ScuNET GAN", - "ScuNET PSNR": "ScuNET PSNR", - "ESRGAN_4x": "ESRGAN_4x", - "Single Image": "Singola Immagine", - "Batch Process": "Batch Process", - "Batch from Directory": "Batch da Cartella", - "Source": "Sorgente", - "Show result images": "Mostra risultati", - "Scale by": "Scala di", - "Scale to": "Scala a", - "Resize": "Ridimensionamento", - "Crop to fit": "Taglia per adeguare", - "Upscaler 2 visibility": "Visibilità Upscaler 2", - "GFPGAN visibility": "Visibilità GFPGAN", - "CodeFormer visibility": "Visibilità CodeFormer", - "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso CodeFormer (0 = effetto massimo, 1 = effetto minimo)", - "Open output directory": "Apri cartella di output", - "Send to txt2img": "Invia a txt2img", - "A merger of the two checkpoints will be generated in your": "Una fusione dei due checkpoint sarà generata nella ", - "checkpoint": "checkpoint", - "directory.": " directory.", - "Primary model (A)": "Primo modello (A)", - "Secondary model (B)": "Secondo modello (B)", - "Tertiary model (C)": "Terzo modello (C)", - "Custom Name (Optional)": "Nome Personalizzato (opzionale)", - "Multiplier (M) - set to 0 to get model A": "Moltiplicatore (M) - Imposta 0 per ottenere il modello A", - "Interpolation Method": "Metodo di Interpolazione", - "Weighted sum": "Somma pesata", - "Add difference": "Add difference", - "Save as float16": "Salva come float16", - "See": "Verifica la ", - "wiki": "wiki ", - "for detailed 
explanation.": "per una spiegazione dettagliata.", - "Create embedding": "Crea embedding", - "Create hypernetwork": "Crea hypernetwork", - "Preprocess images": "Pre-processa immagini", - "Name": "Nome", - "Initialization text": "Testo di Inizializzazione", - "Number of vectors per token": "Numero di vettori per token", - "Overwrite Old Embedding": "Sovrascrivi Vecchio Embedding", - "Modules": "Moduli", - "Enter hypernetwork layer structure": "Inserisci la struttura livelli dell'hypernetwork", - "Select activation function of hypernetwork": "Seleziona la funzione di attivazione dell'hypernetwork", - "linear": "linear", - "relu": "relu", - "leakyrelu": "leakyrelu", - "elu": "elu", - "swish": "swish", - "tanh": "tanh", - "sigmoid": "sigmoid", - "celu": "celu", - "gelu": "gelu", - "glu": "glu", - "hardshrink": "hardshrink", - "hardsigmoid": "hardsigmoid", - "hardswish": "hardswish", - "hardtanh": "hardtanh", - "logsigmoid": "logsigmoid", - "logsoftmax": "logsoftmax", - "mish": "mish", - "multiheadattention": "multiheadattention", - "prelu": "prelu", - "rrelu": "rrelu", - "relu6": "relu6", - "selu": "selu", - "silu": "silu", - "softmax": "softmax", - "softmax2d": "softmax2d", - "softmin": "softmin", - "softplus": "softplus", - "softshrink": "softshrink", - "softsign": "softsign", - "tanhshrink": "tanhshrink", - "threshold": "threshold", - "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleziona Inizializzazione dei pesi dei livelli. (relu-like: Kaiming, sigmoid-like: Xavier). Xavier è raccomandato", - "Normal": "Normal", - "KaimingUniform": "KaimingUniform", - "KaimingNormal": "KaimingNormal", - "XavierUniform": "XavierUniform", - "XavierNormal": "XavierNormal", - "Add layer normalization": "Aggiungi normalizzazione dei livelli", - "Use dropout": "Usa dropout", - "Overwrite Old Hypernetwork": "Sovrascrivi Vecchio Hypernetwork", - "Source directory": "Cartella sorgente", - "Destination directory": "Cartella di destinazione", - "Existing Caption txt Action": "Azione in caso di didascalia già presente", - "ignore": "ignora", - "copy": "copia", - "prepend": "anteponi", - "append": "apponi", - "Create flipped copies": "Crea copie specchiate", - "Split oversized images": "Dividi immagini grandi", - "Use BLIP for caption": "Usa BLIP per la didascalia", - "Use deepbooru for caption": "Usa deepbooru per la didascalia", - "Split image threshold": "Dividi Threshold immagine", - "Split image overlap ratio": "Rapporto di sovrapposizione dell'immagine", - "Preprocess": "Preprocessa", - "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images", - "[wiki]": "[wiki]", - "Embedding": "Embedding", - "Embedding Learning rate": "Embedding Learning rate", - "Hypernetwork Learning rate": "Hypernetwork Learning rate", - "Dataset directory": "Cartella dataset", - "Log directory": "Cartella log", - "Prompt template file": "Prompt template file", - "Max steps": "Max steps", - "Save an image to log directory every N steps, 0 to disable": "Salva un'immagine ogni N step, 0 per disabilitare", - "Save a copy of embedding to log directory every N steps, 0 to disable": "Salva una copia dell'embedding nella cartella log ogni N step, 0 per disabilitare", - "Save images with embedding in PNG chunks": "Salva le immagini con embedding in PNG", - "Read parameters (prompt, etc...) 
from txt2img tab when making previews": "Usa i parametri (prompt, etc...) di txt2img per visualizzare le anteprime", - "Train Hypernetwork": "Train Hypernetwork", - "Train Embedding": "Train Embedding", - "extras": "Extra", - "favorites": "Preferiti", - "custom fold": "Cartella personalizzata", - "Load": "Carica", - "Images directory": "Cartella Immagini", - "Prev batch": "Batch Precedente", - "Next batch": "Batch Successivo", - "First Page": "Prima Pagina", - "Prev Page": "Pagina Precedente", - "Page Index": "Indice Pagina", - "Next Page": "Pagina Successiva", - "End Page": "Pagina Finale", - "number of images to delete consecutively next": "numero di immagini da eliminare consecutivamente a seguire", - "Delete": "Elimina", - "Generate Info": "Genera Info", - "File Name": "Nome File", - "Collect": "Ottieni", - "Refresh page": "Aggiorna Pagina", - "Date to": "Data fine", - "Number": "Numero", - "set_index": "set_index", - "Checkbox": "Checkbox", - "Apply settings": "Applica impostazioni", - "Saving images/grids": "Salvataggio immagini/griglie", - "Always save all generated images": "Salva sempre tutte le immagini generate", - "File format for images": "Formato file per le immagini", - "Images filename pattern": "Configura Pattern per il nome dei file immagine", - "Add number to filename when saving": "Aggiungi un numero al nome del file al salvataggio", - "Always save all generated image grids": "Salva sempre tutte le griglie delle immagini generate", - "File format for grids": "Formato file per le giglie", - "Add extended info (seed, prompt) to filename when saving grid": "Aggiungi informazioni estese (seed, prompt) al nome del file al salvataggio della griglia", - "Do not save grids consisting of one picture": "Non salvare griglie composte da una sola immagine", - "Prevent empty spots in grid (when set to autodetect)": "Previeni spazi vuoti nella griglia", - "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Numero righe griglia; usa -1 per trovarlo automaticamente e 0 per essere lo stesso del batch size", - "Save text information about generation parameters as chunks to png files": "Salva le informazioni dei parametri di generazione come chunks nei file PNG", - "Create a text file next to every image with generation parameters.": "Crea un file di testo per ogni immagine con i parametri di generazione.", - "Save a copy of image before doing face restoration.": "Salva una copia dell'immagine prima di fare la riparazione volti.", - "Quality for saved jpeg images": "Qualità per le immagini salvate in JPEG", - "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Se l'immagine PNG image è più grande di 4MB o qualsiasi dimensione maggiore di 4000, effettua il downscale e salva come JPG", - "Use original name for output filename during batch process in extras tab": "Usa il nome originale del file come nome del file di output durante il 'batch process' nella tab 'extras'", - "When using 'Save' button, only save a single selected image": "Usando il tasto 'Save', verrà salvata solo la singola immagine selezionata", - "Do not add watermark to images": "Non aggiungere watermark alle immagini", - "Paths for saving": "Percorsi per il salvataggio", - "Output directory for images; if empty, defaults to three directories below": "Cartella di Output globale per immagini; se vuota, verranno usate di default le cartelle indicate sotto", - "Output directory for txt2img images": "Cartella di Output per txt2img", - "Output 
directory for img2img images": "Cartella di Output per img2img", - "Output directory for images from extras tab": "Cartella di Output per immagini dalla tab 'extra'", - "Output directory for grids; if empty, defaults to two directories below": "Cartella di Output globale per le griglie; se vuota, verranno usate di default le cartelle indicate sotto", - "Output directory for txt2img grids": "Cartella di Output per txt2img grids", - "Output directory for img2img grids": "Cartella di Output per img2img grids", - "Directory for saving images using the Save button": "Cartella di Output per il salvataggion con il tasto 'Salva'", - "Saving to a directory": "Salvataggio in cartelle", - "Save images to a subdirectory": "Salva immagini in una sottocartella", - "Save grids to a subdirectory": "Salva griglie in una sottocartella", - "When using \"Save\" button, save images to a subdirectory": "Usando il tasto \"Salva\", le immagini verranno salvate in una sottocartella", - "Directory name pattern": "Pattern del nome della cartella", - "Max prompt words for [prompt_words] pattern": "Massimo numero di parole per il pattern [prompt_words]", - "Upscaling": "Upscaling", - "Tile size for ESRGAN upscalers. 0 = no tiling.": "Dimensione Tile per upscalers ESRGAN. 0 = no tiling.", - "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Sovrapposizione Tile, in pixels per upscalers ESRGAN. Valore basso = taglio visible.", - "Tile size for all SwinIR.": "Dimensione Tile per SwinIR.", - "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Sovrapposizione Tile, in pixels per upscalers SwinIR. Valore basso = taglio visible.", - "LDSR processing steps. Lower = faster": "LDSR processing steps. Basso = Veloce", - "Upscaler for img2img": "Upscaler per img2img", - "Upscale latent space image when doing hires. fix": "Upscale latent space image when doing hires. fix", - "Face restoration": "Ripristino volti", - "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Peso CodeFormer; 0 = effetto massimo; 1 = effetto minimo", - "Move face restoration model from VRAM into RAM after processing": "Sposta il modello di ripristino volti dalla VRAM alla RAM dopo averlo processato", - "System": "Sistema", - "VRAM usage polls per second during generation. Set to 0 to disable.": "Utilizzo VRAM al secondo durante la generazione. Imposta 0 per disabilitare.", - "Always print all generation info to standard output": "Mostra sempre tutte le informazioni della generazione nello standard output", - "Add a second progress bar to the console that shows progress for an entire job.": "Aggiungi una seconda progress bar alla console per mostrare il progresso complessivo della generazione.", - "Training": "Training", - "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Sposta VAE e CLIP in RAM durante il training di hypernetwork. 
Risparmia VRAM.", - "Filename word regex": "Filename word regex", - "Filename join string": "Filename join string", - "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per singola immagine in input per epoch; usato solo per mostrare il numero di epoch", - "Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un csv contenente il loss nella cartella log ogni N step, 0 per disabilitare", - "Stable Diffusion": "Stable Diffusion", - "Checkpoints to cache in RAM": "Checkpoints da memorizzare in RAM", - "Hypernetwork strength": "Hypernetwork strength", - "Apply color correction to img2img results to match original colors.": "Applica color correction ai risultati img2img per uguagliare i colori originali.", - "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la color correction ai risultati img2img", - "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Con img2img, effettua esattamente la quantità di step indicata (normalmente se ne effettuano di meno con meno riduzione del disturbo).", - "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Abilita la quantizzazione nei campionatori K per risultati più nitidi e puliti. Questo può cambiare seed esistenti. Richiede il riavvio per essere applicato.", - "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Enfasi: usa (testo) per far prestare maggiore attenzione al testo indicato e [testo] per renderlo meno importante", - "Use old emphasis implementation. Can be useful to reproduce old seeds.": "Usa la vecchia implementazione per l'enfasi. 
Può essere utile per riprodurre vecchi seeds.", - "Make K-diffusion samplers produce same images in a batch as when making a single image": "Fa sì che i K-Diffusion producano le stesse immagini in un batch come quando si effettuano una singola immagine", - "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Aumenta la coerenza aggiungendo dall'ultima virgola ulteriori N token quando si usano più di 75 token.", - "Filter NSFW content": "Filtra contenuti NSFW", - "Stop At last layers of CLIP model": "Fermati all'ultimo livello del modello CLIP", - "Interrogate Options": "Opzioni Interrogate", - "Interrogate: keep models in VRAM": "Interrogate: mantieni modelli nella VRAM", - "Interrogate: use artists from artists.csv": "Interrogate: usa artisti dal file artists.csv", - "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).", - "Interrogate: num_beams for BLIP": "Interrogate: num_beams per BLIP", - "Interrogate: minimum description length (excluding artists, etc..)": "Interrogate: minima lunghezza della descrizione (escludendo artisti, etc..)", - "Interrogate: maximum description length": "Interrogate: massima lunghezza descrizione", - "CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: massimo numero di linee di testo per file (0 = Nessun limite)", - "Interrogate: deepbooru score threshold": "Interrogate: soglia punteggio deepbooru", - "Interrogate: deepbooru sort alphabetically": "Interrogate: deepbooru ordina alfabeticamente", - "use spaces for tags in deepbooru": "usa spazi per tag su deepbooru", - "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "effettuta l'escape (\\) delle parentesi in deepbooru (così vengono usati come parentesi e non come enfasi)", - "User interface": "Interfaccia Utente", - "Show progressbar": "Mostra barre di caricamento", - "Show image creation progress every N sampling steps. Set 0 to disable.": "Mostra il progresso della generazione immagini ogni N step. 
Imposta 0 per disabilitare.", - "Show previews of all images generated in a batch as a grid": "Mostra l'anteprima di tutte le immagini di un batch come griglia", - "Show grid in results for web": "Mostra la griglia nei risultati per web", - "Do not show any images in results for web": "Non mostrare alcun risultato per web", - "Add model hash to generation information": "Aggiungi l'hash del modello alle informazioni di generazione", - "Add model name to generation information": "Aggiungi il nome del modello alle informazioni di generazione", - "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Alla lettura dei parametri di generazione da testo ad interfaccia (da PNG o testo copiato), non cambiare il modello/checkpoint.", - "Font for image grids that have text": "Font per griglie di immagini con testo", - "Enable full page image viewer": "Abilita la visualizzazione a pagina intera", - "Show images zoomed in by default in full page image viewer": "Mostra le immagini zoommate di default nella visualizzazione a pagina intera", - "Show generation progress in window title.": "Mostra il progresso di generazione nel titolo della finestra.", - "Quicksettings list": "Lista imporazioni rapide", - "Localization (requires restart)": "Localization (richiede riavvio)", - "ar_AR": "ar_AR", - "es_ES": "es_ES", - "fr-FR": "fr-FR", - "it_IT": "it_IT", - "ja_JP": "ja_JP", - "ko_KR": "ko_KR", - "ru_RU": "ru_RU", - "tr_TR": "tr_TR", - "zh_CN": "zh_CN", - "Sampler parameters": "Parametri del Sampler", - "Hide samplers in user interface (requires restart)": "Hide samplers in user interface (richiede riavvio)", - "eta (noise multiplier) for DDIM": "eta (noise multiplier) per DDIM", - "eta (noise multiplier) for ancestral samplers": "eta (noise multiplier) per ancestral samplers", - "img2img DDIM discretize": "img2img DDIM discretize", - "uniform": "uniform", - "quad": "quad", - "sigma churn": "sigma churn", - "sigma tmin": "sigma tmin", - "sigma noise": "sigma noise", - "Eta noise seed delta": "Eta noise seed delta", - "Images Browser": "Galleria Immagini", - "Preload images at startup": "Precarica immagini all'avvio", - "Number of columns on the page": "Numero di colonne per pagina", - "Number of rows on the page": "Numero di righe per pagina", - "Minimum number of pages per load": "Minimo numero di pagine da caricare", - "Request browser notifications": "Richiedi notifiche via browser", - "Download localization template": "Scarica template di Localization", - "Reload custom script bodies (No ui updates, No restart)": "Ricarica gli script custom (Nessun aggiornamento UI, Nessun riavvio)", - "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Riavvio Gradio and Aggiorna i componenti (solo Script Custom, ui.py, js e css)", - "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (premi Ctrl+Enter o Alt+Enter per generare)", - "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (premi Ctrl+Enter o Alt+Enter per generare)", - "Add a random artist to the prompt.": "Aggiungi un'artista casuale al prompt.", - "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Leggi i parametri di generazione dal prompt o dall'ultima generazione se il prompt è vuoto nell'interfaccia utente.", - "Save style": "Salva stile", - "Apply selected styles to current prompt": "Applica lo stile selezionato al prompt corrente", - "Stop processing 
current image and continue processing.": "Smette di processare l'immagine corrente e continua a processare.", - "Stop processing images and return any results accumulated so far.": "Smette di processare l'immagine corrente e restituisce tutti i risultati accumulati fin'ora.", - "Style to apply; styles have components for both positive and negative prompts and apply to both": "Stile da applicare; gli stili verranno applicati sia ai prompt positivi che ai prompt negativi", - "Do not do anything special": "Non fa nulla di speciale", - "Which algorithm to use to produce the image": "Algoritmo usato per produrre l'immagine", - "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - molto creativo, può generare immagini completamente diverse in base alla quantità di step, impostare valori maggiori di 30-40 non aiuta", - "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - il migliore su inpainting", - "Produce an image that can be tiled.": "Produce un'immagine che può essere usata come texture.", - "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Usa un processo a due step per creare parzialmente un'immagine a bassa risoluzione, eseguire l'upscale e dopo migliorare i dettagli senza cambiarne la composizione.", - "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.", - "How many batches of images to create": "Quanti batch di immagini creare in una sola generazione", - "How many image to create in a single batch": "Quante immagini creare per ogni singolo batch", - "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - quanto l'immagine sarà conforme al prompt - valori bassi generano risultati più creativi", - "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Un valore che determina l'output del generatore casuale di numeri - se crei un'immagine con gli stessi parametri e seed, otterrai lo stesso risultato", - "Set seed to -1, which will cause a new random number to be used every time": "Imposta il seed a -1, dunque verrà usato un numero diverso ogni volta", - "Reuse seed from last generation, mostly useful if it was randomed": "Riutilizza il seed dall'ultima generazione, utile nel caso fosse casuale", - "Seed of a different picture to be mixed into the generation.": "Seed di differenti generazioni, mescolati nella generazione corrente.", - "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "How strong of a variation to produce. At 0, there will be no effect. 
At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).", - "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Prova a generare immagini simili a quelle che sarebbero state prodotte con lo stesso seed ad una specifica risoluzione", - "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "Separa una lista di parole usando le virgole, la prima parola verrà usata come parola chiave: lo script cercherà la parola nel prompt e la rimpiazzerà con le altre", - "Separate values for X axis using commas.": "Separa i valori per l'asse X usando le virgole.", - "Separate values for Y axis using commas.": "Separa i valori per l'asse Y usando le virgole.", - "Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva l'immagine in una cartella (default - log/images) e i parametri di generazione in un file CSV.", - "Open images output directory": "Apri la cartella di output delle immagini", - "How much to blur the mask before processing, in pixels.": "Quanto sfocare le maschere prima di processarle, in pixel.", - "What to put inside the masked area before processing it with Stable Diffusion.": "Cosa inserire nell'area di mascheramento prima di processarla con Stable Diffusion.", - "fill it with colors of the image": "riempi con colori dell'immagine", - "keep whatever was there originally": "mantieni qualsiasi cosa ci fosse originariamente", - "fill it with latent space noise": "riempi con latent space noise", - "fill it with latent space zeroes": "riempi con latent space zeroes", - "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Effettua l'Upscale della regione mascherata alla risoluzione obiettivo, effettua l'inpainting, esegue il downscale e lo incolla nell'immagine originale", - "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Ridimensiona l'immagine alla risoluzione obiettivo. Se l'altezza e la larghezza non coincidono, il risultato avrà un'aspect ratio scorretto.", - "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Ridimensiona l'immagine affinché l'intera risoluzione obiettivo sia riempita con l'immagine. Taglia le parti che restano fuori.", - "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "RRidimensiona l'immagine affinché l'intera risoluzione obiettivo sia riempita con l'immagine. Riempie gli spazi vuoti con i colori dell'immagine.", - "How many times to repeat processing an image and using it as input for the next iteration": "Quante volte processare un immagine e usarla come input per la prossima iterazione", - "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "In modalità Loopback, per ogni loop la denoising strength sarà moltiplicata per il suo valore. <1 significa ridurre la varietà, dunque la sequenza convergerà in un'immagine più sistemata. 
>1 significa aumentare la varietà, quindi la sequenza sarà sempre più caotica.", - "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Per l'upscale in SD, quanta sovrapposizione vi sarà in pixel per ogni tile. I tile si sovrapporranno in modo da essere fusi insieme in una singola immagine. Non vi sarà alcuna linea visibile.", - "A directory on the same machine where the server is running.": "Una cartella nella stessa macchina in cui il server è in funzione.", - "Leave blank to save images to the default path.": "Lascia vuoto per salvare le immagini nel percorso di default.", - "Result = A * (1 - M) + B * M": "Risultato = A * (1 - M) + B * M", - "Result = A + (B - C) * M": "Risultato = A + (B - C) * M", - "1st and last digit must be 1. ex:'1, 2, 1'": "Il primo e l'ulitmo carattere devono essere 1. es:'1, 2, 1'", - "Path to directory with input images": "Percorso della cartella con le immagini di input", - "Path to directory where to write outputs": "Percorso della cartella dove posizione le immagini di output", - "Input images directory": "Cartella immagini di input", - "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime