Merge remote-tracking branch 'origin/master'
Commit 8a32a71ca3
3 changed files with 15 additions and 8 deletions
@@ -70,17 +70,28 @@ titles = {
     "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.",

     "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",

+    "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
 }


 onUiUpdate(function(){
-    gradioApp().querySelectorAll('span, button, select').forEach(function(span){
+    gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){
         tooltip = titles[span.textContent];

         if(!tooltip){
             tooltip = titles[span.value];
         }

+        if(!tooltip){
+            for (const c of span.classList) {
+                if (c in titles) {
+                    tooltip = titles[c];
+                    break;
+                }
+            }
+        }
+
         if(tooltip){
             span.title = tooltip;
         }
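The new "vram" hint describes three figures: Torch's peak active allocation, Torch's peak reservation (active plus cached data), and GPU usage across all applications. As a rough sketch of where such numbers could come from (not the repository's actual memory monitor; the function name below is purely illustrative), the public torch.cuda API exposes each of them:

    # Illustrative only: the project's real memory monitor may gather these values differently.
    import torch

    def vram_snapshot(device=None):
        mib = 1024 ** 2
        active_peak = torch.cuda.max_memory_allocated(device) // mib    # "Torch active": peak memory held by live tensors
        reserved_peak = torch.cuda.max_memory_reserved(device) // mib   # "Torch reserved": peak incl. the caching allocator
        free, total = torch.cuda.mem_get_info(device)                   # whole-GPU view, shared by every process
        # Current (not peak) usage across all applications; a real monitor would poll this during generation.
        sys_used = (total - free) // mib
        return active_peak, reserved_peak, sys_used, total // mib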
@@ -4,7 +4,6 @@ import json
 import os

 import gradio as gr
-import torch
 import tqdm

 import modules.artists
@@ -38,7 +37,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra
 parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
 parser.add_argument("--opt-split-attention", action='store_true', help="does not do anything")
 parser.add_argument("--disable-opt-split-attention", action='store_true', help="disable an optimization that reduces vram usage by a lot")
-parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consaumes all the VRAM it can find")
+parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
 parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
 parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
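The flags in this hunk are plain argparse switches; after parsing, each store_true flag appears as a boolean attribute with dashes turned into underscores. A minimal, self-contained sketch of that behaviour (the parsed namespace is named cmd_opts here only for illustration):

    # Minimal sketch of how store_true flags like the ones above behave once parsed.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--opt-split-attention-v1", action='store_true',
                        help="enable older version of split attention optimization that does not consume all the VRAM it can find")
    parser.add_argument("--disable-opt-split-attention", action='store_true',
                        help="disable an optimization that reduces vram usage by a lot")
    cmd_opts = parser.parse_args()

    # Dashes become underscores on the namespace object.
    if cmd_opts.opt_split_attention_v1 and not cmd_opts.disable_opt_split_attention:
        print("using the v1 split-attention optimization")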
@@ -135,7 +134,7 @@ class Options:
     "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
     "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
     "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
-    "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normaly you'd do less with less denoising)."),
+    "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
     "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
     "font": OptionInfo("", "Font for image grids that have text"),
     "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
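Each entry above pairs a default value with a UI label via OptionInfo. A minimal sketch of that pattern, consistent only with the usage shown here (the project's real OptionInfo class may carry additional fields, such as the UI component used to render the setting):

    # Sketch of the (default, label) pattern used above; not the project's full class.
    class OptionInfo:
        def __init__(self, default=None, label=""):
            self.default = default
            self.label = label

    options_templates = {
        "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
        "font": OptionInfo("", "Font for image grids that have text"),
    }

    # The defaults can then seed a plain settings mapping.
    settings = {name: info.default for name, info in options_templates.items()}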
@@ -151,11 +151,8 @@ def wrap_gradio_call(func):
         sys_peak = mem_stats['system_peak']
         sys_total = mem_stats['total']
         sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
-        vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\n" \
-                       "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\n" \
-                       "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."

-        vram_html = f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+        vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
     else:
         vram_html = ''
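With this change the inline vram_tooltip string is gone: the <p class='vram'> element no longer sets a title attribute, and the hover text instead comes from the new classList fallback in hints.js, which matches the 'vram' class against the titles dictionary. A small worked example of the percentage line, with made-up numbers:

    # Worked example of the sys_pct expression above; the values are invented for illustration.
    sys_peak, sys_total = 4096, 8192                         # peak and total VRAM in MiB
    sys_pct = round(sys_peak / max(sys_total, 1) * 100, 2)   # max(..., 1) guards against a zero total
    print(sys_pct)                                           # 50.0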