|
@@ -42,7 +42,7 @@ parser.add_argument("--discovery-module", type=str, choices=["udp", "tailscale",
|
|
# CLI flags: peer discovery, the ChatGPT-compatible API server, and
# generation/inference settings.
# NOTE(review): this span was a mangled diff paste — duplicated statements,
# stray "|" separator lines, and both the old ("-", port 8000) and new
# ("+", port 52415) sides of the --chatgpt-api-port hunk. Reconstructed as
# clean source, keeping the "+" (post-change) default of 52415 and exactly
# one registration per flag (argparse raises on duplicate option strings).
parser.add_argument("--discovery-timeout", type=int, default=30, help="Discovery timeout in seconds")
parser.add_argument("--discovery-config-path", type=str, default=None, help="Path to discovery config json file")
parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait to connect to before starting")
parser.add_argument("--chatgpt-api-port", type=int, default=52415, help="ChatGPT API port")
parser.add_argument("--chatgpt-api-response-timeout", type=int, default=90, help="ChatGPT API response timeout in seconds")
parser.add_argument("--max-generate-tokens", type=int, default=10000, help="Max tokens to generate in each request")
parser.add_argument("--inference-engine", type=str, default=None, help="Inference engine to use (mlx, tinygrad, or dummy)")