# main.py
  1. import argparse
  2. import asyncio
  3. import signal
  4. import uuid
  5. import platform
  6. import psutil
  7. import os
  8. from typing import List
  9. from exo.orchestration.standard_node import StandardNode
  10. from exo.networking.grpc.grpc_server import GRPCServer
  11. from exo.networking.grpc.grpc_discovery import GRPCDiscovery
  12. from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy
  13. from exo.api import ChatGPTAPI
  14. from exo.helpers import print_yellow_exo, find_available_port, DEBUG
  15. # parse args
  16. parser = argparse.ArgumentParser(description="Initialize GRPC Discovery")
  17. parser.add_argument("--node-id", type=str, default=str(uuid.uuid4()), help="Node ID")
  18. parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
  19. parser.add_argument("--node-port", type=int, default=None, help="Node port")
  20. parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
  21. parser.add_argument("--broadcast-port", type=int, default=5678, help="Broadcast port for discovery")
  22. parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait to connect to before starting")
  23. parser.add_argument("--chatgpt-api-port", type=int, default=8000, help="ChatGPT API port")
  24. parser.add_argument("--inference-engine", type=str, default=None, help="Inference engine to use")
  25. args = parser.parse_args()
  26. print_yellow_exo()
  27. print(f"Starting exo {platform.system()=} {psutil.virtual_memory()=}")
  28. if args.inference_engine is None:
  29. if psutil.MACOS:
  30. from exo.inference.mlx.sharded_inference_engine import MLXDynamicShardInferenceEngine
  31. inference_engine = MLXDynamicShardInferenceEngine()
  32. else:
  33. from exo.inference.tinygrad.inference import TinygradDynamicShardInferenceEngine
  34. import tinygrad.helpers
  35. tinygrad.helpers.DEBUG.value = int(os.getenv("TINYGRAD_DEBUG", default="0"))
  36. inference_engine = TinygradDynamicShardInferenceEngine()
  37. else:
  38. if args.inference_engine == "mlx":
  39. from exo.inference.mlx.sharded_inference_engine import MLXDynamicShardInferenceEngine
  40. inference_engine = MLXDynamicShardInferenceEngine()
  41. elif args.inference_engine == "tinygrad":
  42. from exo.inference.tinygrad.inference import TinygradDynamicShardInferenceEngine
  43. import tinygrad.helpers
  44. tinygrad.helpers.DEBUG.value = int(os.getenv("TINYGRAD_DEBUG", default="0"))
  45. inference_engine = TinygradDynamicShardInferenceEngine()
  46. else:
  47. raise ValueError(f"Inference engine {args.inference_engine} not supported")
  48. print(f"Using inference engine {inference_engine.__class__.__name__}")
  49. if args.node_port is None:
  50. args.node_port = find_available_port(args.node_host)
  51. if DEBUG >= 1: print(f"Using available port: {args.node_port}")
  52. discovery = GRPCDiscovery(args.node_id, args.node_port, args.listen_port, args.broadcast_port)
  53. node = StandardNode(args.node_id, None, inference_engine, discovery, partitioning_strategy=RingMemoryWeightedPartitioningStrategy(), chatgpt_api_endpoint=f"http://localhost:{args.chatgpt_api_port}/v1/chat/completions", web_chat_url=f"http://localhost:{args.chatgpt_api_port}")
  54. server = GRPCServer(node, args.node_host, args.node_port)
  55. node.server = server
  56. api = ChatGPTAPI(node, inference_engine.__class__.__name__)
  57. node.on_token.register("main_log").on_next(lambda _, tokens , __: print(inference_engine.tokenizer.decode(tokens) if hasattr(inference_engine, "tokenizer") else tokens))
  58. async def shutdown(signal, loop):
  59. """Gracefully shutdown the server and close the asyncio loop."""
  60. print(f"Received exit signal {signal.name}...")
  61. print("Thank you for using exo.")
  62. print_yellow_exo()
  63. server_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
  64. [task.cancel() for task in server_tasks]
  65. print(f"Cancelling {len(server_tasks)} outstanding tasks")
  66. await asyncio.gather(*server_tasks, return_exceptions=True)
  67. await server.stop()
  68. loop.stop()
  69. async def main():
  70. loop = asyncio.get_running_loop()
  71. # Use a more direct approach to handle signals
  72. def handle_exit():
  73. asyncio.ensure_future(shutdown(signal.SIGTERM, loop))
  74. for s in [signal.SIGINT, signal.SIGTERM]:
  75. loop.add_signal_handler(s, handle_exit)
  76. await node.start(wait_for_peers=args.wait_for_peers)
  77. asyncio.create_task(api.run(port=args.chatgpt_api_port)) # Start the API server as a non-blocking task
  78. await asyncio.Event().wait()
if __name__ == "__main__":
    # Own the loop explicitly (instead of asyncio.run) so the same loop can
    # still run the final shutdown() pass in the finally block below.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(main())
    except KeyboardInterrupt:
        # Ctrl-C may surface as KeyboardInterrupt (e.g. before the loop's own
        # signal handlers are installed); fall through to the cleanup path.
        print("Received keyboard interrupt. Shutting down...")
    finally:
        # Always run graceful shutdown, then release the loop's resources.
        loop.run_until_complete(shutdown(signal.SIGTERM, loop))
        loop.close()