# main.py
  1. import argparse
  2. import asyncio
  3. import signal
  4. import json
  5. import time
  6. from exo.orchestration.standard_node import StandardNode
  7. from exo.networking.grpc.grpc_server import GRPCServer
  8. from exo.networking.grpc.grpc_discovery import GRPCDiscovery
  9. from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy
  10. from exo.api import ChatGPTAPI
  11. from exo.download.shard_download import ShardDownloader
  12. from exo.download.hf.hf_shard_download import HFShardDownloader
  13. from exo.helpers import print_yellow_exo, find_available_port, DEBUG, get_inference_engine, get_system_info, get_or_create_node_id
  14. # parse args
  15. parser = argparse.ArgumentParser(description="Initialize GRPC Discovery")
  16. parser.add_argument("--node-id", type=str, default=None, help="Node ID")
  17. parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
  18. parser.add_argument("--node-port", type=int, default=None, help="Node port")
  19. parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
  20. parser.add_argument("--prometheus-client-port", type=int, default=None, help="Prometheus client port")
  21. parser.add_argument("--broadcast-port", type=int, default=5678, help="Broadcast port for discovery")
  22. parser.add_argument("--discovery-timeout", type=int, default=30, help="Discovery timeout in seconds")
  23. parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait to connect to before starting")
  24. parser.add_argument("--chatgpt-api-port", type=int, default=8000, help="ChatGPT API port")
  25. parser.add_argument("--chatgpt-api-response-timeout-secs", type=int, default=90, help="ChatGPT API response timeout in seconds")
  26. parser.add_argument("--max-generate-tokens", type=int, default=1024, help="Max tokens to generate in each request")
  27. parser.add_argument("--inference-engine", type=str, default=None, help="Inference engine to use")
  28. parser.add_argument("--disable-tui", action=argparse.BooleanOptionalAction, help="Disable TUI")
  29. args = parser.parse_args()
print_yellow_exo()
system_info = get_system_info()
print(f"Detected system: {system_info}")
shard_downloader: ShardDownloader = HFShardDownloader()
# Engine choice: explicit flag wins; otherwise MLX on Apple Silicon, tinygrad elsewhere.
inference_engine_name = args.inference_engine or ("mlx" if system_info == "Apple Silicon Mac" else "tinygrad")
inference_engine = get_inference_engine(inference_engine_name, shard_downloader)
print(f"Using inference engine: {inference_engine.__class__.__name__} with shard downloader: {shard_downloader.__class__.__name__}")
# Pick a free port on the bind host when the user did not choose one.
if args.node_port is None:
  args.node_port = find_available_port(args.node_host)
  if DEBUG >= 1: print(f"Using available port: {args.node_port}")
# No --node-id given: fall back to a generated one (presumably persisted by
# get_or_create_node_id so the id is stable across runs — see exo.helpers).
args.node_id = args.node_id or get_or_create_node_id()
discovery = GRPCDiscovery(args.node_id, args.node_port, args.listen_port, args.broadcast_port, discovery_timeout=args.discovery_timeout)
# Wire the node together: discovery + inference engine + ring partitioning.
node = StandardNode(
  args.node_id,
  None,
  inference_engine,
  discovery,
  partitioning_strategy=RingMemoryWeightedPartitioningStrategy(),
  chatgpt_api_endpoint=f"http://localhost:{args.chatgpt_api_port}/v1/chat/completions",
  web_chat_url=f"http://localhost:{args.chatgpt_api_port}",
  disable_tui=args.disable_tui,
  max_generate_tokens=args.max_generate_tokens,
)
server = GRPCServer(node, args.node_host, args.node_port)
# The node holds a back-reference to its gRPC server (set after construction).
node.server = server
api = ChatGPTAPI(node, inference_engine.__class__.__name__, response_timeout_secs=args.chatgpt_api_response_timeout_secs)
# Print decoded tokens as they stream in; fall back to raw token ids when the
# engine exposes no tokenizer.
node.on_token.register("main_log").on_next(lambda _, tokens, __: print(inference_engine.tokenizer.decode(tokens) if hasattr(inference_engine, "tokenizer") else tokens))
# Metrics are opt-in: only start the Prometheus exporter when a port was given.
if args.prometheus_client_port:
  from exo.stats.metrics import start_metrics_server
  start_metrics_server(node, args.prometheus_client_port)
  60. last_broadcast_time = 0
  61. def throttled_broadcast(shard, event):
  62. global last_broadcast_time
  63. current_time = time.time()
  64. if current_time - last_broadcast_time >= 0.1:
  65. last_broadcast_time = current_time
  66. asyncio.create_task(node.broadcast_opaque_status("", json.dumps({"type": "download_progress", "node_id": node.id, "progress": event.to_dict()})))
  67. shard_downloader.on_progress.register("broadcast").on_next(throttled_broadcast)
  68. async def shutdown(signal, loop):
  69. """Gracefully shutdown the server and close the asyncio loop."""
  70. print(f"Received exit signal {signal.name}...")
  71. print("Thank you for using exo.")
  72. print_yellow_exo()
  73. server_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
  74. [task.cancel() for task in server_tasks]
  75. print(f"Cancelling {len(server_tasks)} outstanding tasks")
  76. await asyncio.gather(*server_tasks, return_exceptions=True)
  77. await server.stop()
  78. loop.stop()
  79. async def main():
  80. loop = asyncio.get_running_loop()
  81. # Use a more direct approach to handle signals
  82. def handle_exit():
  83. asyncio.ensure_future(shutdown(signal.SIGTERM, loop))
  84. for s in [signal.SIGINT, signal.SIGTERM]:
  85. loop.add_signal_handler(s, handle_exit)
  86. await node.start(wait_for_peers=args.wait_for_peers)
  87. asyncio.create_task(api.run(port=args.chatgpt_api_port)) # Start the API server as a non-blocking task
  88. await asyncio.Event().wait()
if __name__ == "__main__":
  # Create and install a fresh event loop explicitly (rather than asyncio.run)
  # so the same loop object can be reused for the final shutdown pass below.
  loop = asyncio.new_event_loop()
  asyncio.set_event_loop(loop)
  try:
    loop.run_until_complete(main())
  except KeyboardInterrupt:
    # Ctrl-C on platforms/paths where the SIGINT handler did not intercept it.
    print("Received keyboard interrupt. Shutting down...")
  finally:
    # Always run the graceful shutdown; shutdown() itself calls loop.stop(),
    # so the loop must still be open (closed only afterwards).
    loop.run_until_complete(shutdown(signal.SIGTERM, loop))
    loop.close()