main.py

import argparse
import asyncio
import signal
import mlx.core as mx
import mlx.nn as nn
from typing import List
from exo.orchestration.standard_node import StandardNode
from exo.networking.grpc.grpc_server import GRPCServer
from exo.inference.mlx.sharded_inference_engine import MLXDynamicShardInferenceEngine
from exo.inference.shard import Shard
from exo.networking.grpc.grpc_discovery import GRPCDiscovery
from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy

# Parse command-line arguments.
parser = argparse.ArgumentParser(description="Initialize GRPC Discovery")
parser.add_argument("--node-id", type=str, default="node1", help="Node ID")
parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
parser.add_argument("--node-port", type=int, default=8080, help="Node port")
parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
parser.add_argument("--broadcast-port", type=int, default=5678, help="Broadcast port for discovery")
parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait to connect to before starting")
args = parser.parse_args()

inference_engine = MLXDynamicShardInferenceEngine()


def on_token(tokens: List[int]):
    # Print the decoded output as tokens stream in, once the tokenizer is loaded.
    if inference_engine.tokenizer:
        print(inference_engine.tokenizer.decode(tokens))


# Wire up peer discovery, the node itself, and the gRPC server it listens on.
discovery = GRPCDiscovery(args.node_id, args.node_port, args.listen_port, args.broadcast_port)
node = StandardNode(args.node_id, None, inference_engine, discovery, partitioning_strategy=RingMemoryWeightedPartitioningStrategy(), on_token=on_token)
server = GRPCServer(node, args.node_host, args.node_port)
node.server = server


async def shutdown(signal, loop):
    """Gracefully shut down the server and close the asyncio loop."""
    print(f"Received exit signal {signal.name}...")
    server_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for task in server_tasks:
        task.cancel()
    print(f"Cancelling {len(server_tasks)} outstanding tasks")
    await asyncio.gather(*server_tasks, return_exceptions=True)
    await server.stop()
    loop.stop()


async def main():
    loop = asyncio.get_running_loop()

    # Use a more direct approach to handle signals: both SIGINT and SIGTERM
    # schedule the same graceful shutdown coroutine.
    def handle_exit():
        asyncio.ensure_future(shutdown(signal.SIGTERM, loop))

    for s in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(s, handle_exit)

    await node.start(wait_for_peers=args.wait_for_peers)
    # Block forever; shutdown is driven by the signal handlers above.
    await asyncio.Event().wait()


if __name__ == "__main__":
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(main())
    except KeyboardInterrupt:
        print("Received keyboard interrupt. Shutting down...")
    finally:
        loop.run_until_complete(shutdown(signal.SIGTERM, loop))
        loop.close()
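
# Example invocation (a sketch based only on the flags defined above, not on exo's
# documentation): start two nodes that wait for one peer each. The node IDs and
# gRPC ports below are illustrative placeholders.
#
#   python main.py --node-id node1 --node-port 8080 --wait-for-peers 1
#   python main.py --node-id node2 --node-port 8081 --wait-for-peers 1
#
# Both commands leave --listen-port and --broadcast-port at their 5678 defaults, so
# the two GRPCDiscovery instances announce themselves on the same discovery ports.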