standard_node.py

import numpy as np
import json
import asyncio
import uuid
import time
import traceback
import logging
from typing import List, Dict, Optional, Tuple, Union

from exo.networking import Discovery, PeerHandle, Server
from exo.inference.inference_engine import InferenceEngine, Shard
from .node import Node
from exo.topology.topology import Topology
from exo.topology.device_capabilities import device_capabilities
from exo.topology.partitioning_strategy import Partition, PartitioningStrategy, map_partitions_to_shards
from exo import DEBUG
from exo.helpers import AsyncCallbackSystem
from exo.viz.topology_viz import TopologyViz
from exo.download.hf.hf_helpers import RepoProgressEvent

logger = logging.getLogger(__name__)


class StandardNode(Node):
  def __init__(
    self,
    _id: str,
    server: Server,
    inference_engine: InferenceEngine,
    discovery: Discovery,
    partitioning_strategy: Optional[PartitioningStrategy] = None,
    max_generate_tokens: int = 1024,
    topology_viz: Optional[TopologyViz] = None,
  ):
    self.id = _id
    self.inference_engine = inference_engine
    self.server = server
    self.discovery = discovery
    self.partitioning_strategy = partitioning_strategy
    self.peers: List[PeerHandle] = []
    self.topology: Topology = Topology()
    self.device_capabilities = device_capabilities()
    self.buffered_token_output: Dict[str, Tuple[List[int], bool]] = {}
    self.max_generate_tokens = max_generate_tokens
    self.topology_viz = topology_viz
    self._on_token = AsyncCallbackSystem[str, Tuple[str, List[int], bool]]()
    self._on_opaque_status = AsyncCallbackSystem[str, Tuple[str, str]]()
    self._on_opaque_status.register("node_status").on_next(self.on_node_status)
    self.node_download_progress: Dict[str, RepoProgressEvent] = {}
    self.topology_inference_engines_pool: List[str] = []
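
  # The two callback channels created above drive the node's eventing: _on_token fires
  # with (request_id, tokens, is_finished) as tokens are buffered, and _on_opaque_status
  # carries raw JSON status strings between nodes; on_node_status is registered as a
  # listener on the "node_status" channel.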

  async def start(self, wait_for_peers: int = 0) -> None:
    await self.server.start()
    await self.discovery.start()
    await self.update_peers(wait_for_peers)
    await self.collect_topology()
    if DEBUG >= 2: print(f"Collected topology: {self.topology}")
    asyncio.create_task(self.periodic_topology_collection(1.0))

  async def stop(self) -> None:
    await self.discovery.stop()
    await self.server.stop()

  def on_node_status(self, request_id, opaque_status):
    try:
      status_data = json.loads(opaque_status)
      if status_data.get("type", "") == "node_status":
        if status_data.get("status", "").startswith("start_"):
          self.current_topology.active_node_id = status_data.get("node_id")
        elif status_data.get("status", "").startswith("end_"):
          if status_data.get("node_id") == self.current_topology.active_node_id:
            self.current_topology.active_node_id = None
      download_progress = None
      if status_data.get("type", "") == "download_progress":
        if DEBUG >= 8: print(f"Download progress from {status_data.get('node_id')}: {status_data.get('progress')}")
        download_progress = RepoProgressEvent.from_dict(status_data.get('progress'))
        self.node_download_progress[status_data.get('node_id')] = download_progress
      if self.topology_viz:
        self.topology_viz.update_visualization(self.current_topology, self.partitioning_strategy.partition(self.current_topology), self.id, self.node_download_progress)
    except Exception as e:
      if DEBUG >= 1: print(f"Error updating visualization: {e}")
      if DEBUG >= 1: traceback.print_exc()
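
  # The opaque statuses handled above are JSON strings. Illustrative shapes of the two
  # message types this handler reacts to (field values are hypothetical):
  #   {"type": "node_status", "node_id": "node-a", "status": "start_process_prompt", ...}
  #   {"type": "download_progress", "node_id": "node-a", "progress": {...}}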

  def get_supported_inference_engines(self):
    supported_engine_names = []
    if self.inference_engine.__class__.__name__ == 'MLXDynamicShardInferenceEngine':
      supported_engine_names.extend(['mlx', 'tinygrad'])
    else:
      supported_engine_names.append('tinygrad')
    return supported_engine_names

  async def broadcast_supported_engines(self, supported_engines: List[str]):
    status_message = json.dumps({
      "type": "supported_inference_engines",
      "node_id": self.id,
      "engines": supported_engines
    })
    logger.debug(f"broadcast_supported_engines: {status_message}")
    await self.broadcast_opaque_status("", status_message)
    logger.debug("broadcast_supported_engines: done")

  def get_topology_inference_engines(self) -> List[str]:
    return self.topology_inference_engines_pool
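
  # For example, a node running the MLX engine would broadcast a status like the
  # following (the node_id value is hypothetical):
  #   {"type": "supported_inference_engines", "node_id": "node-a", "engines": ["mlx", "tinygrad"]}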

  async def process_prompt(self, base_shard: Shard, prompt: str, image_str: Optional[str] = None, request_id: Optional[str] = None, inference_state: Optional[str] = None) -> Optional[np.ndarray]:
    shard = self.get_current_shard(base_shard)
    asyncio.create_task(
      self.broadcast_opaque_status(
        request_id,
        json.dumps({
          "type": "node_status",
          "node_id": self.id,
          "status": "start_process_prompt",
          "base_shard": base_shard.to_dict(),
          "shard": shard.to_dict(),
          "prompt": prompt,
          "image_str": image_str,
          "inference_state": inference_state,
          "request_id": request_id,
        }),
      )
    )
    start_time = time.perf_counter_ns()
    resp = await self._process_prompt(base_shard, prompt, image_str, request_id, inference_state)
    end_time = time.perf_counter_ns()
    elapsed_time_ns = end_time - start_time
    asyncio.create_task(
      self.broadcast_opaque_status(
        request_id,
        json.dumps({
          "type": "node_status",
          "node_id": self.id,
          "status": "end_process_prompt",
          "base_shard": base_shard.to_dict(),
          "shard": shard.to_dict(),
          "prompt": prompt,
          "image_str": image_str,
          "inference_state": inference_state,
          "request_id": request_id,
          "elapsed_time_ns": elapsed_time_ns,
          "result_size": resp.size if resp is not None else 0,
        }),
      )
    )
    return resp

  async def _process_prompt(self, base_shard: Shard, prompt: str, image_str: Optional[str] = None, request_id: Optional[str] = None, inference_state: Optional[str] = None) -> Optional[np.ndarray]:
    if request_id is None:
      request_id = str(uuid.uuid4())
    if request_id not in self.buffered_token_output:
      self.buffered_token_output[request_id] = ([], False)
    shard = self.get_current_shard(base_shard)

    if DEBUG >= 2: print(f"[{request_id}] process prompt: {base_shard=} {shard=} {prompt=} {image_str=}")
    if shard.start_layer != 0:
      if DEBUG >= 2: print(f"[{request_id}] forwarding to next shard: {base_shard=} {shard=} {prompt=} {image_str=}")
      await self.forward_to_next_shard(shard, prompt, request_id, image_str=image_str, inference_state=inference_state)
      return

    result, inference_state, is_finished = await self.inference_engine.infer_prompt(request_id, shard, prompt, image_str, inference_state=inference_state)
    is_finished = is_finished or len(self.buffered_token_output[request_id][0]) >= self.max_generate_tokens
    if is_finished:
      self.buffered_token_output[request_id] = (self.buffered_token_output[request_id][0], True)
    asyncio.create_task(self.broadcast_result(request_id, self.buffered_token_output[request_id][0], is_finished))  # TODO: this is n^2 communication complexity

    if result.size == 1:
      self.buffered_token_output[request_id][0].append(result.item())
      self.trigger_on_token_callbacks(request_id, self.buffered_token_output[request_id][0], is_finished)
    if DEBUG >= 2: print(f"[{request_id}] result size: {result.size}, is finished: {is_finished}, buffered tokens: {len(self.buffered_token_output[request_id][0])}")

    if not is_finished:
      asyncio.create_task(self.forward_to_next_shard(shard, result, request_id, image_str=image_str, inference_state=inference_state))

    return np.array(self.buffered_token_output[request_id][0]) if len(self.buffered_token_output[request_id][0]) > 0 else None
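
  # buffered_token_output keeps per-request generation state as a (tokens, is_finished)
  # tuple; mid-generation an entry might look like (token ids are hypothetical):
  #   self.buffered_token_output["<request-id>"] == ([1547, 2953, 318], False)
  # and the flag flips to True once the engine reports completion or max_generate_tokens
  # is reached.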

  async def process_tensor(
    self,
    base_shard: Shard,
    tensor: np.ndarray,
    request_id: Optional[str] = None,
    inference_state: Optional[str] = None,
  ) -> Optional[np.ndarray]:
    shard = self.get_current_shard(base_shard)
    asyncio.create_task(
      self.broadcast_opaque_status(
        request_id,
        json.dumps({
          "type": "node_status",
          "node_id": self.id,
          "status": "start_process_tensor",
          "base_shard": base_shard.to_dict(),
          "shard": shard.to_dict(),
          "tensor_size": tensor.size,
          "tensor_shape": tensor.shape,
          "request_id": request_id,
          "inference_state": inference_state,
        }),
      )
    )
    start_time = time.perf_counter_ns()
    resp = await self._process_tensor(shard, tensor, request_id, inference_state)
    end_time = time.perf_counter_ns()
    elapsed_time_ns = end_time - start_time
    asyncio.create_task(
      self.broadcast_opaque_status(
        request_id,
        json.dumps({
          "type": "node_status",
          "node_id": self.id,
          "status": "end_process_tensor",
          "base_shard": base_shard.to_dict(),
          "shard": shard.to_dict(),
          "request_id": request_id,
          "elapsed_time_ns": elapsed_time_ns,
          "result_size": resp.size if resp is not None else 0,
        }),
      )
    )
    return resp

  async def _process_tensor(
    self,
    base_shard: Shard,
    tensor: np.ndarray,
    request_id: Optional[str] = None,
    inference_state: Optional[str] = None,
  ) -> Optional[np.ndarray]:
    if request_id is None:
      request_id = str(uuid.uuid4())
    if request_id not in self.buffered_token_output:
      self.buffered_token_output[request_id] = ([], False)
    shard = self.get_current_shard(base_shard)

    try:
      if DEBUG >= 1: print(f"[{request_id}] process_tensor: {tensor.size=} {tensor.shape=}")
      result, inference_state, is_finished = await self.inference_engine.infer_tensor(request_id, shard, tensor, inference_state=inference_state)
      is_finished = is_finished or len(self.buffered_token_output[request_id][0]) >= self.max_generate_tokens
      if is_finished:
        self.buffered_token_output[request_id] = (self.buffered_token_output[request_id][0], True)
      asyncio.create_task(self.broadcast_result(request_id, self.buffered_token_output[request_id][0], is_finished))  # TODO: this is n^2 communication complexity

      if result.size == 1:  # we got a new token out
        self.buffered_token_output[request_id][0].append(result.item())
        self.trigger_on_token_callbacks(request_id, self.buffered_token_output[request_id][0], is_finished)
      if DEBUG >= 2: print(f"[{request_id}] result size: {result.size}, is finished: {is_finished}, buffered tokens: {len(self.buffered_token_output[request_id][0])}")

      if not is_finished:
        asyncio.create_task(self.forward_to_next_shard(shard, result, request_id, inference_state=inference_state))

      return np.array(self.buffered_token_output[request_id][0]) if len(self.buffered_token_output[request_id][0]) > 0 else None
    except Exception as e:
      print(f"Error processing tensor for shard {shard}: {e}")
      traceback.print_exc()
      return None

  async def forward_to_next_shard(
    self,
    base_shard: Shard,
    tensor_or_prompt: Union[np.ndarray, str],
    request_id: str,
    image_str: Optional[str] = None,
    inference_state: Optional[str] = None,
  ) -> None:
    if not self.partitioning_strategy:
      if DEBUG >= 1: print("No partitioning strategy found. Skipping forward.")
      return
    shard = self.get_current_shard(base_shard)

    partitions = self.partitioning_strategy.partition(self.topology)
    shards = map_partitions_to_shards(partitions, base_shard.n_layers, base_shard.model_id)
    current_partition_index = next((i for i, p in enumerate(partitions) if p.node_id == self.id), None)
    if DEBUG >= 1: print(f"Current partition index: {current_partition_index}")
    if current_partition_index is not None:
      next_partition_index = (current_partition_index + 1) % len(partitions)
      next_partition: Partition = partitions[next_partition_index]
      next_shard = shards[next_partition_index]
      if DEBUG >= 2: print(f"Computed next from: {shard}, {self.topology}. Next partition: {next_partition}")

      if next_partition.node_id == self.id:
        if isinstance(tensor_or_prompt, np.ndarray):
          await self.process_tensor(shard, tensor_or_prompt, request_id, inference_state=inference_state)
        else:
          await self.process_prompt(shard, tensor_or_prompt, image_str, request_id, inference_state=inference_state)
        return

      target_peer = next((p for p in self.peers if p.id() == next_partition.node_id), None)
      if not target_peer:
        raise ValueError(f"Peer for {next_partition} not found")
      if DEBUG >= 1: print(f"Sending tensor_or_prompt to {target_peer.id()}: {tensor_or_prompt}")
      if isinstance(tensor_or_prompt, np.ndarray):
        await target_peer.send_tensor(next_shard, tensor_or_prompt, request_id=request_id, inference_state=inference_state)
      else:
        await target_peer.send_prompt(next_shard, tensor_or_prompt, image_str=image_str, request_id=request_id, inference_state=inference_state)
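
  # Forwarding is ring-shaped: with partitions [p0, p1, p2] and this node owning p1, the
  # next hop is the node owning p2, and the owner of the last partition wraps back to p0
  # via (current_partition_index + 1) % len(partitions), so output from the final shard
  # is routed back to the node holding the first layers.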

  def get_current_shard(self, base_shard: Shard) -> Shard:
    partitions = self.partitioning_strategy.partition(self.topology)
    shards = map_partitions_to_shards(partitions, base_shard.n_layers, base_shard.model_id)
    current_partition_index = next((i for i, p in enumerate(partitions) if p.node_id == self.id), None)
    if current_partition_index is None:
      raise ValueError(f"No current partition found for node: {self.id}")
    return shards[current_partition_index]
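
  # Illustrative mapping (values are hypothetical): if the strategy splits a 32-layer
  # model across two equal partitions, map_partitions_to_shards gives the first node a
  # shard covering roughly layers 0-15 and the second roughly layers 16-31 of the same
  # model_id.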

  async def update_peers(self, wait_for_peers: int = 0) -> bool:
    next_peers = await self.discovery.discover_peers(wait_for_peers)
    current_peer_ids = {peer.id() for peer in self.peers}
    next_peer_ids = {peer.id() for peer in next_peers}
    peers_added = [peer for peer in next_peers if peer.id() not in current_peer_ids]
    peers_removed = [peer for peer in self.peers if peer.id() not in next_peer_ids]
    peers_updated = [
      peer for peer in next_peers
      if peer.id() in current_peer_ids and any(p.addr() != peer.addr() for p in self.peers if p.id() == peer.id())
    ]
    peers_unchanged = [
      peer for peer in next_peers
      if peer.id() in current_peer_ids and all(p.addr() == peer.addr() for p in self.peers if p.id() == peer.id())
    ]
    peers_to_disconnect = [peer for peer in peers_removed if await peer.is_connected()]
    peers_to_connect = [peer for peer in peers_added + peers_updated + peers_unchanged if not await peer.is_connected()]

    def _pretty(peers: List[PeerHandle]) -> List[str]:
      return [f"{peer.id()}@{peer.addr()}" for peer in peers]

    if DEBUG >= 2: print(f"update_peers: added={peers_added} removed={peers_removed} updated={peers_updated} unchanged={peers_unchanged} to_disconnect={peers_to_disconnect} to_connect={peers_to_connect}")

    async def disconnect_with_timeout(peer, timeout=5):
      try:
        await asyncio.wait_for(peer.disconnect(), timeout)
        return True
      except Exception as e:
        print(f"Error disconnecting peer {peer.id()}@{peer.addr()}: {e}")
        traceback.print_exc()
        return False

    async def connect_with_timeout(peer, timeout=5):
      try:
        await asyncio.wait_for(peer.connect(), timeout)
        return True
      except Exception as e:
        print(f"Error connecting peer {peer.id()}@{peer.addr()}: {e}")
        traceback.print_exc()
        return False

    disconnect_results = await asyncio.gather(
      *(disconnect_with_timeout(peer) for peer in peers_to_disconnect),
      return_exceptions=True,
    )
    connect_results = await asyncio.gather(
      *(connect_with_timeout(peer) for peer in peers_to_connect),
      return_exceptions=True,
    )

    successful_disconnects = [peer for peer, result in zip(peers_to_disconnect, disconnect_results) if result is True]
    failed_disconnects = [peer for peer, result in zip(peers_to_disconnect, disconnect_results) if result is False]
    successful_connects = [peer for peer, result in zip(peers_to_connect, connect_results) if result is True]
    failed_connects = [peer for peer, result in zip(peers_to_connect, connect_results) if result is False]
    if DEBUG >= 1:
      if successful_disconnects: print(f"Successfully disconnected peers: {_pretty(successful_disconnects)}")
      if failed_disconnects: print(f"Failed to disconnect peers: {_pretty(failed_disconnects)}")
      if successful_connects: print(f"Successfully connected peers: {_pretty(successful_connects)}")
      if failed_connects: print(f"Failed to connect peers: {_pretty(failed_connects)}")

    self.peers = next_peers
    return len(peers_added) > 0 or len(peers_removed) > 0 or len(peers_updated) > 0

  async def periodic_topology_collection(self, interval: float):
    while True:
      await asyncio.sleep(interval)
      try:
        did_peers_change = await self.update_peers()
        if DEBUG >= 2: print(f"{did_peers_change=}")
        if did_peers_change:
          await self.collect_topology()
      except Exception as e:
        print(f"Error collecting topology: {e}")
        traceback.print_exc()

  async def get_inference_result(self, request_id: str) -> Tuple[Optional[np.ndarray], bool]:
    if request_id not in self.buffered_token_output:
      return None, False
    return np.array(self.buffered_token_output[request_id][0]), self.buffered_token_output[request_id][1]

  async def collect_topology(self, visited: Optional[set[str]] = None, max_depth: int = 4) -> Topology:
    if visited is None:
      visited = set()  # avoid sharing a mutable default set across calls
    next_topology = Topology()
    next_topology.update_node(self.id, self.device_capabilities)
    if DEBUG >= 2: print(f"Collecting topology {max_depth=} {visited=}")

    prev_visited = visited.copy()
    visited.add(self.id)
    visited.update(p.id() for p in self.peers)

    for peer in self.peers:
      next_topology.update_node(peer.id(), peer.device_capabilities())
      next_topology.add_edge(self.id, peer.id())

      if peer.id() in prev_visited:
        continue
      if max_depth <= 0:
        if DEBUG >= 2: print("Max depth reached. Skipping...")
        continue

      try:
        other_topology = await asyncio.wait_for(peer.collect_topology(visited, max_depth=max_depth - 1), timeout=5.0)
        if DEBUG >= 2: print(f"Collected topology from: {peer.id()}: {other_topology}")
        self.topology.merge(other_topology)
      except Exception as e:
        print(f"Error collecting topology from {peer.id()}: {e}")

    next_topology.active_node_id = self.topology.active_node_id  # this is not so clean.
    self.topology = next_topology
    if self.topology_viz:
      self.topology_viz.update_visualization(self.current_topology, self.partitioning_strategy.partition(self.current_topology), self.id)
    return next_topology
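
  # Topology collection is a bounded recursive walk: each node records itself and its
  # direct peers, then asks peers it has not yet visited to do the same with max_depth
  # reduced by one, merging whatever they return; a chain of four nodes is therefore
  # fully discovered from either end with the default max_depth=4.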

  @property
  def on_token(self) -> AsyncCallbackSystem[str, Tuple[str, List[int], bool]]:
    return self._on_token

  @property
  def on_opaque_status(self) -> AsyncCallbackSystem[str, Tuple[str, str]]:
    return self._on_opaque_status

  def trigger_on_token_callbacks(self, request_id: str, tokens: List[int], is_finished: bool) -> None:
    if DEBUG >= 2: print(f"Triggering all on_token callbacks with {request_id=} num_tokens={len(tokens)} {is_finished=}")
    self.on_token.trigger_all(request_id, tokens, is_finished)

  async def broadcast_result(self, request_id: str, result: List[int], is_finished: bool) -> None:
    async def send_result_to_peer(peer):
      try:
        await asyncio.wait_for(peer.send_result(request_id, result, is_finished), timeout=15.0)
      except asyncio.TimeoutError:
        print(f"Timeout broadcasting result to {peer.id()}")
      except Exception as e:
        print(f"Error broadcasting result to {peer.id()}: {e}")
        traceback.print_exc()

    await asyncio.gather(*[send_result_to_peer(peer) for peer in self.peers], return_exceptions=True)

  async def broadcast_opaque_status(self, request_id: str, status: str) -> None:
    if DEBUG >= 8: print(f"Broadcasting opaque status: {request_id=} {status=}")

    async def send_status_to_peer(peer):
      try:
        status_dict = json.loads(status)
        if status_dict.get("type") == "supported_inference_engines":
          logger.debug(f"broadcasting_inference_engines: {status_dict}")
        await asyncio.wait_for(peer.send_opaque_status(request_id, status), timeout=15.0)
      except asyncio.TimeoutError:
        print(f"Timeout sending opaque status to {peer.id()}")
      except Exception as e:
        print(f"Error sending opaque status to {peer.id()}: {e}")
        traceback.print_exc()

    await asyncio.gather(*[send_status_to_peer(peer) for peer in self.peers], return_exceptions=True)
    # in the case of opaque status, we also want to receive our own opaque statuses
    self.on_opaque_status.trigger_all(request_id, status)

  @property
  def current_topology(self) -> Topology:
    return self.topology
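

# Minimal usage sketch (illustrative only; the concrete server, discovery, inference
# engine, partitioning strategy, and base_shard objects are assumptions and are not
# defined in this module):
#
#   async def main():
#     node = StandardNode(
#       _id=str(uuid.uuid4()),
#       server=server,                      # e.g. a gRPC server bound to this node
#       inference_engine=inference_engine,  # e.g. an MLX or tinygrad shard engine
#       discovery=discovery,                # e.g. UDP/broadcast peer discovery
#       partitioning_strategy=partitioning_strategy,
#     )
#     await node.start(wait_for_peers=1)
#     tokens = await node.process_prompt(base_shard, "Hello, world!", request_id=str(uuid.uuid4()))
#
#   asyncio.run(main())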