main.py

import argparse
import asyncio
import signal
import json

from exo.orchestration.standard_node import StandardNode
from exo.networking.grpc.grpc_server import GRPCServer
from exo.networking.grpc.grpc_discovery import GRPCDiscovery
from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy
from exo.api import ChatGPTAPI
from exo.helpers import print_yellow_exo, find_available_port, DEBUG, get_inference_engine, get_system_info, get_or_create_node_id
# parse args
parser = argparse.ArgumentParser(description="Initialize GRPC Discovery")
parser.add_argument("--node-id", type=str, default=None, help="Node ID")
parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
parser.add_argument("--node-port", type=int, default=None, help="Node port")
parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
parser.add_argument("--prometheus-client-port", type=int, default=None, help="Prometheus client port")
parser.add_argument("--broadcast-port", type=int, default=5678, help="Broadcast port for discovery")
parser.add_argument("--discovery-timeout", type=int, default=30, help="Discovery timeout in seconds")
parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait for before starting")
parser.add_argument("--chatgpt-api-port", type=int, default=8000, help="ChatGPT API port")
parser.add_argument("--chatgpt-api-response-timeout-secs", type=int, default=90, help="ChatGPT API response timeout in seconds")
parser.add_argument("--max-generate-tokens", type=int, default=256, help="Max tokens to generate in each request")
parser.add_argument("--inference-engine", type=str, default=None, help="Inference engine to use")
parser.add_argument("--disable-tui", action=argparse.BooleanOptionalAction, help="Disable TUI")
args = parser.parse_args()
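
# Example invocations (illustrative; every flag is optional and defined above):
#   python main.py
#   python main.py --node-port 9000 --wait-for-peers 2 --disable-tui
#   python main.py --inference-engine tinygrad --chatgpt-api-port 8080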
print_yellow_exo()

system_info = get_system_info()
print(f"Detected system: {system_info}")

# Default to MLX on Apple Silicon, tinygrad elsewhere, unless overridden via --inference-engine
inference_engine_name = args.inference_engine or ("mlx" if system_info == "Apple Silicon Mac" else "tinygrad")
inference_engine = get_inference_engine(inference_engine_name)
print(f"Using inference engine: {inference_engine.__class__.__name__}")
if args.node_port is None:
    args.node_port = find_available_port(args.node_host)
    if DEBUG >= 1: print(f"Using available port: {args.node_port}")

args.node_id = args.node_id or get_or_create_node_id()
discovery = GRPCDiscovery(args.node_id, args.node_port, args.listen_port, args.broadcast_port, discovery_timeout=args.discovery_timeout)
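# Discovery (as the names above suggest): nodes announce themselves over UDP on
# broadcast-port, hear peers on listen-port, and then dial discovered peers over gRPC.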
node = StandardNode(
    args.node_id,
    None,  # server slot; the GRPCServer is attached just below, after construction
    inference_engine,
    discovery,
    partitioning_strategy=RingMemoryWeightedPartitioningStrategy(),
    chatgpt_api_endpoint=f"http://localhost:{args.chatgpt_api_port}/v1/chat/completions",
    web_chat_url=f"http://localhost:{args.chatgpt_api_port}",
    disable_tui=args.disable_tui,
    max_generate_tokens=args.max_generate_tokens,
)
server = GRPCServer(node, args.node_host, args.node_port)
node.server = server
api = ChatGPTAPI(node, inference_engine.__class__.__name__, response_timeout_secs=args.chatgpt_api_response_timeout_secs)
# Print each batch of generated tokens as it arrives (decoded when the engine exposes a tokenizer)
node.on_token.register("main_log").on_next(lambda _, tokens, __: print(inference_engine.tokenizer.decode(tokens) if hasattr(inference_engine, "tokenizer") else tokens))
if args.prometheus_client_port:
    from exo.stats.metrics import start_metrics_server
    start_metrics_server(node, args.prometheus_client_port)

inference_engine.set_progress_callback(lambda event: asyncio.create_task(node.broadcast_opaque_status("", json.dumps({"type": "download_progress", "node_id": node.id, "progress": event.to_dict()}))))
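# Model download progress is relayed to peers as an opaque status message,
# presumably so other nodes can surface cluster-wide download state (e.g. in the TUI).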

async def shutdown(sig, loop):
    """Gracefully shut down the server and stop the asyncio loop."""
    print(f"Received exit signal {sig.name}...")
    print("Thank you for using exo.")
    print_yellow_exo()
    server_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    print(f"Cancelling {len(server_tasks)} outstanding tasks")
    for task in server_tasks:
        task.cancel()
    await asyncio.gather(*server_tasks, return_exceptions=True)
    await server.stop()
    loop.stop()

async def main():
    loop = asyncio.get_running_loop()

    # Trigger a graceful shutdown on SIGINT/SIGTERM, reporting the signal actually received
    def handle_exit(s):
        asyncio.ensure_future(shutdown(s, loop))

    for s in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(s, handle_exit, s)
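    # NB: add_signal_handler is only implemented on Unix event loops; on Windows it
    # raises NotImplementedError, and Ctrl+C is instead caught as KeyboardInterrupt below.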
    await node.start(wait_for_peers=args.wait_for_peers)

    # Run the API server as a non-blocking task; keep a reference so it isn't garbage-collected
    api_task = asyncio.create_task(api.run(port=args.chatgpt_api_port))

    await asyncio.Event().wait()  # run forever until a signal stops the loop

if __name__ == "__main__":
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(main())
    except KeyboardInterrupt:
        print("Received keyboard interrupt. Shutting down...")
    finally:
        loop.run_until_complete(shutdown(signal.SIGTERM, loop))
        loop.close()
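
# Once the node is up, the ChatGPT-compatible API (default port 8000) accepts
# OpenAI-style chat completion requests. A hypothetical example; the model name
# is illustrative and depends on what your cluster serves:
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "llama-3-8b", "messages": [{"role": "user", "content": "hello"}]}'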