diff --git a/README.md b/README.md
index eba7597f..92aee158 100644
--- a/README.md
+++ b/README.md
@@ -100,6 +100,10 @@ program in collabs.
 Use `--listen` to make the server listen to network connections. This will allow computers on local
 newtork to access the UI, and if you configure port forwarding, also computers on the internet.
 
+Use `--port xxxx` to make the server listen on a specific port, xxxx being the wanted port. Remember that
+all ports below 1024 need root/admin rights; for this reason it is advised to use a port above 1024.
+Defaults to port 7860 if available.
+
 ### Textual Inversion
 To make use of pretrained embeddings, create `embeddings` directory (in the same palce as `webui.py`)
 and put your embeddings into it. They must be .pt files, each with only one trained embedding,
diff --git a/modules/shared.py b/modules/shared.py
index e529ec27..de7cbf02 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -34,6 +34,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra
 parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
 parser.add_argument("--opt-split-attention", action='store_true', help="enable optimization that reduced vram usage by a lot for about 10%% decrease in performance")
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
+parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
 cmd_opts = parser.parse_args()
 
 if torch.has_cuda:
diff --git a/webui.py b/webui.py
index 4f701fad..74c870a2 100644
--- a/webui.py
+++ b/webui.py
@@ -191,4 +191,4 @@ if __name__ == "__main__":
         run_pnginfo=run_pnginfo
     )
 
-    demo.launch(share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None)
+    demo.launch(share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None, server_port=cmd_opts.port)