main_static.py 3.0 KB

import argparse
import asyncio
import signal
import mlx.core as mx
import mlx.nn as nn
from exo.orchestration.standard_node import StandardNode
from exo.networking.grpc.grpc_server import GRPCServer
from exo.inference.mlx.sharded_inference_engine import MLXFixedShardInferenceEngine
from exo.inference.shard import Shard
from exo.networking.grpc.grpc_discovery import GRPCDiscovery
from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy
# parse args
parser = argparse.ArgumentParser(description="Initialize GRPC Discovery")
parser.add_argument("--node-id", type=str, default="node1", help="Node ID")
parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
parser.add_argument("--node-port", type=int, default=8080, help="Node port")
parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
parser.add_argument("--broadcast-port", type=int, default=5678, help="Broadcast port for discovery")
parser.add_argument("--model-id", type=str, default="mlx-community/Meta-Llama-3-8B-Instruct-4bit", help="Model ID or path to the model")
parser.add_argument("--n-layers", type=int, default=32, help="Number of layers in the model")
parser.add_argument("--start-layer", type=int, default=0, help="Start layer index")
parser.add_argument("--end-layer", type=int, default=31, help="End layer index")
parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait for before starting")
args = parser.parse_args()
# Inference engine pinned to a fixed shard (a contiguous slice of the model's layers) on this node.
inference_engine = MLXFixedShardInferenceEngine(args.model_id, shard=Shard(model_id=args.model_id, n_layers=args.n_layers, start_layer=args.start_layer, end_layer=args.end_layer))
# Discovery announces this node and finds peers on the configured ports.
discovery = GRPCDiscovery(args.node_id, args.node_port, args.listen_port, args.broadcast_port)
# The node orchestrates inference across peers; its gRPC server is attached after construction.
node = StandardNode(args.node_id, None, inference_engine, discovery, partitioning_strategy=RingMemoryWeightedPartitioningStrategy())
server = GRPCServer(node, args.node_host, args.node_port)
node.server = server
async def shutdown(signal, loop):
    """Gracefully shutdown the server and close the asyncio loop."""
    print(f"Received exit signal {signal.name}...")
    server_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for task in server_tasks:
        task.cancel()
    print(f"Cancelling {len(server_tasks)} outstanding tasks")
    await asyncio.gather(*server_tasks, return_exceptions=True)
    await server.shutdown()
    loop.stop()
async def main():
    loop = asyncio.get_running_loop()

    # Handle signals directly: SIGINT and SIGTERM both trigger a graceful shutdown.
    def handle_exit():
        asyncio.ensure_future(shutdown(signal.SIGTERM, loop))

    for s in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(s, handle_exit)

    await node.start(wait_for_peers=args.wait_for_peers)
    # Block until shutdown() stops the loop; this event is never set.
    await asyncio.Event().wait()
if __name__ == "__main__":
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(main())
    finally:
        loop.close()
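
A minimal sketch of how this script might be launched, assuming the exo package and its MLX dependencies are installed: start two processes (for example, one per host) that each serve half of the default 32 layers and wait for one peer before starting. The flag names come from the argparse definitions above; the specific port and layer values are only illustrative.

    python main_static.py --node-id node1 --node-port 8080 --start-layer 0 --end-layer 15 --wait-for-peers 1
    python main_static.py --node-id node2 --node-port 8081 --start-layer 16 --end-layer 31 --wait-for-peers 1

If both processes run on the same machine, the shared discovery listen port (5678 by default) may also need to differ per node; that detail is not confirmed by this file.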