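# main.py: exo entry point. Parses the CLI arguments, selects an inference
# engine and shard downloader, discovers peers, and runs a node with a gRPC
# server and a ChatGPT-compatible HTTP API.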
import argparse
import asyncio
import signal
import json
import logging
import time
import traceback
import uuid
from exo.networking.manual.manual_discovery import ManualDiscovery
from exo.networking.manual.network_topology_config import NetworkTopology
from exo.orchestration.node import Node
from exo.networking.grpc.grpc_server import GRPCServer
from exo.networking.udp.udp_discovery import UDPDiscovery
from exo.networking.tailscale.tailscale_discovery import TailscaleDiscovery
from exo.networking.grpc.grpc_peer_handle import GRPCPeerHandle
from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy
from exo.api import ChatGPTAPI
from exo.download.shard_download import ShardDownloader, RepoProgressEvent, NoopShardDownloader
from exo.download.hf.hf_shard_download import HFShardDownloader
from exo.helpers import print_yellow_exo, find_available_port, DEBUG, get_system_info, get_or_create_node_id, get_all_ip_addresses, terminal_link
from exo.inference.shard import Shard
from exo.inference.inference_engine import get_inference_engine, InferenceEngine
from exo.inference.dummy_inference_engine import DummyInferenceEngine
from exo.inference.tokenizers import resolve_tokenizer
from exo.models import model_base_shards
from exo.viz.topology_viz import TopologyViz

# parse args
parser = argparse.ArgumentParser(description="Initialize GRPC Discovery")
parser.add_argument("command", nargs="?", choices=["run"], help="Command to run")
parser.add_argument("model_name", nargs="?", help="Model name to run")
parser.add_argument("--node-id", type=str, default=None, help="Node ID")
parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
parser.add_argument("--node-port", type=int, default=None, help="Node port")
parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
parser.add_argument("--download-quick-check", action="store_true", help="Quick check local path for model shards download")
parser.add_argument("--max-parallel-downloads", type=int, default=4, help="Max parallel downloads for model shards download")
parser.add_argument("--prometheus-client-port", type=int, default=None, help="Prometheus client port")
parser.add_argument("--broadcast-port", type=int, default=5678, help="Broadcast port for discovery")
parser.add_argument("--discovery-module", type=str, choices=["udp", "tailscale", "manual"], default="udp", help="Discovery module to use")
parser.add_argument("--discovery-timeout", type=int, default=30, help="Discovery timeout in seconds")
parser.add_argument("--discovery-config-path", type=str, default=None, help="Path to discovery config json file")
parser.add_argument("--wait-for-peers", type=int, default=0, help="Number of peers to wait to connect to before starting")
parser.add_argument("--chatgpt-api-port", type=int, default=8000, help="ChatGPT API port")
parser.add_argument("--chatgpt-api-response-timeout", type=int, default=90, help="ChatGPT API response timeout in seconds")
parser.add_argument("--max-generate-tokens", type=int, default=10000, help="Max tokens to generate in each request")
parser.add_argument("--inference-engine", type=str, default=None, help="Inference engine to use (mlx, tinygrad, or dummy)")
parser.add_argument("--disable-tui", action=argparse.BooleanOptionalAction, help="Disable TUI")
parser.add_argument("--run-model", type=str, help="Specify a model to run directly")
parser.add_argument("--prompt", type=str, help="Prompt for the model when using --run-model", default="Who are you?")
parser.add_argument("--tailscale-api-key", type=str, default=None, help="Tailscale API key")
parser.add_argument("--tailnet-name", type=str, default=None, help="Tailnet name")
args = parser.parse_args()

print(f"Selected inference engine: {args.inference_engine}")
print_yellow_exo()

system_info = get_system_info()
print(f"Detected system: {system_info}")
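
# Shards are downloaded from Hugging Face unless the dummy engine is selected;
# the engine itself defaults to MLX on Apple Silicon Macs and tinygrad elsewhere.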
shard_downloader: ShardDownloader = HFShardDownloader(quick_check=args.download_quick_check,
                                                       max_parallel_downloads=args.max_parallel_downloads) if args.inference_engine != "dummy" else NoopShardDownloader()
inference_engine_name = args.inference_engine or ("mlx" if system_info == "Apple Silicon Mac" else "tinygrad")
print(f"Inference engine name after selection: {inference_engine_name}")
inference_engine = get_inference_engine(inference_engine_name, shard_downloader)
print(f"Using inference engine: {inference_engine.__class__.__name__} with shard downloader: {shard_downloader.__class__.__name__}")
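
# Pick a free node port if none was given, reuse or create a node ID, and list
# the chat UI and ChatGPT-compatible API endpoints for each local IP address.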
if args.node_port is None:
  args.node_port = find_available_port(args.node_host)
  if DEBUG >= 1: print(f"Using available port: {args.node_port}")

args.node_id = args.node_id or get_or_create_node_id()
chatgpt_api_endpoints = [f"http://{ip}:{args.chatgpt_api_port}/v1/chat/completions" for ip in get_all_ip_addresses()]
web_chat_urls = [f"http://{ip}:{args.chatgpt_api_port}" for ip in get_all_ip_addresses()]
if DEBUG >= 0:
  print("Chat interface started:")
  for web_chat_url in web_chat_urls:
    print(f" - {terminal_link(web_chat_url)}")
  print("ChatGPT API endpoint served at:")
  for chatgpt_api_endpoint in chatgpt_api_endpoints:
    print(f" - {terminal_link(chatgpt_api_endpoint)}")
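
# Peer discovery: UDP broadcast by default, Tailscale, or a manual topology
# described by a JSON config file.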
if args.discovery_module == "udp":
  discovery = UDPDiscovery(
    args.node_id,
    args.node_port,
    args.listen_port,
    args.broadcast_port,
    lambda peer_id, address, device_capabilities: GRPCPeerHandle(peer_id, address, device_capabilities),
    discovery_timeout=args.discovery_timeout
  )
elif args.discovery_module == "tailscale":
  discovery = TailscaleDiscovery(
    args.node_id,
    args.node_port,
    lambda peer_id, address, device_capabilities: GRPCPeerHandle(peer_id, address, device_capabilities),
    discovery_timeout=args.discovery_timeout,
    tailscale_api_key=args.tailscale_api_key,
    tailnet=args.tailnet_name
  )
elif args.discovery_module == "manual":
  if not args.discovery_config_path:
    raise ValueError("--discovery-config-path is required when using manual discovery. Please provide a path to a config json file.")
  discovery = ManualDiscovery(args.discovery_config_path, args.node_id, create_peer_handle=lambda peer_id, address, device_capabilities: GRPCPeerHandle(peer_id, address, device_capabilities))
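
# Build the node, its gRPC server, and the ChatGPT-compatible API, and wire
# prompt/token events into the terminal topology visualization (unless --disable-tui).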
topology_viz = TopologyViz(chatgpt_api_endpoints=chatgpt_api_endpoints, web_chat_urls=web_chat_urls) if not args.disable_tui else None
node = Node(
  args.node_id,
  None,
  inference_engine,
  discovery,
  partitioning_strategy=RingMemoryWeightedPartitioningStrategy(),
  max_generate_tokens=args.max_generate_tokens,
  topology_viz=topology_viz,
  shard_downloader=shard_downloader
)
server = GRPCServer(node, args.node_host, args.node_port)
node.server = server
api = ChatGPTAPI(
  node,
  inference_engine.__class__.__name__,
  response_timeout=args.chatgpt_api_response_timeout,
  on_chat_completion_request=lambda req_id, __, prompt: topology_viz.update_prompt(req_id, prompt) if topology_viz else None
)
node.on_token.register("update_topology_viz").on_next(
  lambda req_id, tokens, __: topology_viz.update_prompt_output(req_id, inference_engine.tokenizer.decode(tokens)) if topology_viz and hasattr(inference_engine, "tokenizer") else None
)
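

# When a peer reports that it is starting to process a prompt, begin downloading
# this node's shard of that model ahead of time.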
def preemptively_start_download(request_id: str, opaque_status: str):
  try:
    status = json.loads(opaque_status)
    if status.get("type") == "node_status" and status.get("status") == "start_process_prompt":
      current_shard = node.get_current_shard(Shard.from_dict(status.get("shard")))
      if DEBUG >= 2: print(f"Preemptively starting download for {current_shard}")
      asyncio.create_task(shard_downloader.ensure_shard(current_shard))
  except Exception as e:
    if DEBUG >= 2:
      print(f"Failed to preemptively start download: {e}")
      traceback.print_exc()


node.on_opaque_status.register("start_download").on_next(preemptively_start_download)

if args.prometheus_client_port:
  from exo.stats.metrics import start_metrics_server
  start_metrics_server(node, args.prometheus_client_port)
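
# Broadcast shard download progress to peers, throttled to roughly one update
# every 100 ms; the final "complete" event is always sent.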
last_broadcast_time = 0


def throttled_broadcast(shard: Shard, event: RepoProgressEvent):
  global last_broadcast_time
  current_time = time.time()
  if event.status == "complete" or current_time - last_broadcast_time >= 0.1:
    last_broadcast_time = current_time
    asyncio.create_task(node.broadcast_opaque_status("", json.dumps({"type": "download_progress", "node_id": node.id, "progress": event.to_dict()})))


shard_downloader.on_progress.register("broadcast").on_next(throttled_broadcast)


async def shutdown(signal, loop):
  """Gracefully shut down the server and close the asyncio loop."""
  print(f"Received exit signal {signal.name}...")
  print("Thank you for using exo.")
  print_yellow_exo()
  server_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
  for task in server_tasks:
    task.cancel()
  print(f"Cancelling {len(server_tasks)} outstanding tasks")
  await asyncio.gather(*server_tasks, return_exceptions=True)
  await server.stop()
  loop.stop()
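

# One-shot CLI mode: look up the model's shard for the active engine, apply the
# chat template, send the prompt, and wait for the callback that marks the
# generation as finished.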
async def run_model_cli(node: Node, inference_engine: InferenceEngine, model_name: str, prompt: str):
  shard = model_base_shards.get(model_name, {}).get(inference_engine.__class__.__name__)
  if not shard:
    print(f"Error: Unsupported model '{model_name}' for inference engine {inference_engine.__class__.__name__}")
    return
  tokenizer = await resolve_tokenizer(shard.model_id)
  request_id = str(uuid.uuid4())
  callback_id = f"cli-wait-response-{request_id}"
  callback = node.on_token.register(callback_id)
  if topology_viz:
    topology_viz.update_prompt(request_id, prompt)
  prompt = tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=False, add_generation_prompt=True)
  try:
    print(f"Processing prompt: {prompt}")
    await node.process_prompt(shard, prompt, None, request_id=request_id)
    _, tokens, _ = await callback.wait(lambda _request_id, tokens, is_finished: _request_id == request_id and is_finished, timeout=300)
    print("\nGenerated response:")
    print(tokenizer.decode(tokens))
  except Exception as e:
    print(f"Error processing prompt: {str(e)}")
    traceback.print_exc()
  finally:
    node.on_token.deregister(callback_id)
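

# Install SIGINT/SIGTERM handlers, start the node, then either run a single
# prompt ("run" command / --run-model) or serve the ChatGPT API until stopped.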
async def main():
  loop = asyncio.get_running_loop()

  # Use a more direct approach to handle signals
  def handle_exit():
    asyncio.ensure_future(shutdown(signal.SIGTERM, loop))

  for s in [signal.SIGINT, signal.SIGTERM]:
    loop.add_signal_handler(s, handle_exit)

  await node.start(wait_for_peers=args.wait_for_peers)

  if args.command == "run" or args.run_model:
    model_name = args.model_name or args.run_model
    if not model_name:
      print("Error: Model name is required when using 'run' command or --run-model")
      return
    await run_model_cli(node, inference_engine, model_name, args.prompt)
  else:
    asyncio.create_task(api.run(port=args.chatgpt_api_port))  # Start the API server as a non-blocking task
    await asyncio.Event().wait()


def run():
  loop = asyncio.new_event_loop()
  asyncio.set_event_loop(loop)
  try:
    loop.run_until_complete(main())
  except KeyboardInterrupt:
    print("Received keyboard interrupt. Shutting down...")
  finally:
    loop.run_until_complete(shutdown(signal.SIGTERM, loop))
    loop.close()


if __name__ == "__main__":
  run()