Merge pull request #146 from orionaskatu/orionaskatu-port-option

--port option for #131
AUTOMATIC1111 2022-09-08 11:46:51 +03:00 committed by GitHub
commit a196c45f15
3 changed files with 6 additions and 1 deletion


@@ -100,6 +100,10 @@ program in collabs.
Use `--listen` to make the server listen to network connections. This will allow computers on the local network
to access the UI, and if you configure port forwarding, also computers on the internet.
Use `--port xxxx` to make the server listen on a specific port, xxxx being the wanted port. Remember that
all ports below 1024 need root/admin rights; for this reason it is advised to use a port above 1024.
Defaults to port 7860 if available.
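To see this distinction in practice, here is a small, purely illustrative Python check (not part of webui.py) that tries to bind a few candidate ports; on most systems the privileged port fails unless the process runs with root/admin rights:

```python
import socket

# Illustrative helper (not part of webui.py): can this process bind the given port?
def port_is_bindable(port: int, host: str = "127.0.0.1") -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind((host, port))
            return True
        except OSError:
            return False

# 7860 is the usual Gradio default; ports below 1024 typically fail here without root/admin rights.
for candidate in (80, 7860, 8080):
    print(candidate, "bindable" if port_is_bindable(candidate) else "blocked or already in use")
```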
### Textual Inversion
To make use of pretrained embeddings, create an `embeddings` directory (in the same place as `webui.py`)
and put your embeddings into it. They must be .pt files, each with only one trained embedding,


@@ -34,6 +34,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN')) parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
parser.add_argument("--opt-split-attention", action='store_true', help="enable optimization that reduced vram usage by a lot for about 10%% decrease in performance") parser.add_argument("--opt-split-attention", action='store_true', help="enable optimization that reduced vram usage by a lot for about 10%% decrease in performance")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
cmd_opts = parser.parse_args()
if torch.has_cuda:
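For reference, a minimal standalone mirror of how the new flag parses (illustrative only, not the real webui.py parser; a value of `None` means "let Gradio choose the port"):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--listen", action='store_true')
parser.add_argument("--port", type=int, default=None)

print(parser.parse_args([]))                              # Namespace(listen=False, port=None)
print(parser.parse_args(["--listen", "--port", "8080"]))  # Namespace(listen=True, port=8080)
```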


@@ -191,4 +191,4 @@ if __name__ == "__main__":
run_pnginfo=run_pnginfo
)
demo.launch(share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None)
demo.launch(share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None, server_port=cmd_opts.port)
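A rough sketch of how the call site behaves with the new argument, assuming Gradio's usual semantics (`server_port=None` falls back to 7860 or the next free port, while a port that cannot be bound raises `OSError`); `gr.Interface` here is only a stand-in for the real UI:

```python
import argparse
import gradio as gr

parser = argparse.ArgumentParser()
parser.add_argument("--share", action='store_true')
parser.add_argument("--listen", action='store_true')
parser.add_argument("--port", type=int, default=None)
cmd_opts = parser.parse_args()

demo = gr.Interface(fn=str.upper, inputs="text", outputs="text")  # stand-in for the real UI

try:
    demo.launch(
        share=cmd_opts.share,
        server_name="0.0.0.0" if cmd_opts.listen else None,  # None keeps the localhost-only default
        server_port=cmd_opts.port,  # None lets Gradio pick 7860 if it is free
    )
except OSError as err:
    # Typically reached when the requested port is already in use, or is below 1024 without root/admin rights.
    print(f"Could not start the server on port {cmd_opts.port}: {err}")
```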