@@ -20,7 +20,7 @@ from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWe
from exo.api import ChatGPTAPI
from exo.download.shard_download import ShardDownloader, RepoProgressEvent, NoopShardDownloader
from exo.download.hf.hf_shard_download import HFShardDownloader
-from exo.helpers import print_yellow_exo, find_available_port, DEBUG, get_system_info, get_or_create_node_id, get_all_ip_addresses, terminal_link, shutdown
+from exo.helpers import print_yellow_exo, find_available_port, DEBUG, get_system_info, get_or_create_node_id, get_all_ip_addresses, terminal_link, shutdown, move_models_to_hf
from exo.inference.shard import Shard
from exo.inference.inference_engine import get_inference_engine, InferenceEngine
from exo.inference.dummy_inference_engine import DummyInferenceEngine
@@ -36,6 +36,7 @@ parser.add_argument("model_name", nargs="?", help="Model name to run")
parser.add_argument("--node-id", type=str, default=None, help="Node ID")
parser.add_argument("--node-host", type=str, default="0.0.0.0", help="Node host")
parser.add_argument("--node-port", type=int, default=None, help="Node port")
+parser.add_argument("--model-seed-dir", type=str, default=None, help="Model seed directory")
parser.add_argument("--listen-port", type=int, default=5678, help="Listening port for discovery")
parser.add_argument("--download-quick-check", action="store_true", help="Quick check local path for model shards download")
parser.add_argument("--max-parallel-downloads", type=int, default=4, help="Max parallel downloads for model shards download")
@@ -130,6 +131,11 @@ node.on_token.register("update_topology_viz").on_next(
lambda req_id, tokens, __: topology_viz.update_prompt_output(req_id, inference_engine.tokenizer.decode(tokens)) if topology_viz and hasattr(inference_engine, "tokenizer") else None
)

+if args.model_seed_dir is not None:
+  try:
+    await move_models_to_hf(args.model_seed_dir)
+  except Exception as e:
+    print(f"Error moving models to .cache/huggingface: {e}")

def preemptively_start_download(request_id: str, opaque_status: str):
try:
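For context, move_models_to_hf is only imported here; its body is not part of this diff. Below is a minimal sketch of what such a helper could look like, assuming it accepts the seed directory path (which is why the call site above passes args.model_seed_dir) and copies each model folder into the default Hugging Face cache under ~/.cache/huggingface/hub. The real helper in exo.helpers may differ in both signature and behavior.

```python
# Hypothetical sketch only -- the real move_models_to_hf lives in exo.helpers
# and is not shown in this diff. Assumes: it takes the seed directory path and
# copies each model folder into the Hugging Face hub cache, skipping anything
# already cached.
import asyncio
import shutil
from pathlib import Path


async def move_models_to_hf(seed_dir: str) -> None:
  hf_hub = Path.home() / ".cache" / "huggingface" / "hub"
  hf_hub.mkdir(parents=True, exist_ok=True)
  for src in Path(seed_dir).iterdir():
    if not src.is_dir():
      continue  # only model snapshot folders are seeded
    dest = hf_hub / src.name
    if dest.exists():
      continue  # never clobber an already-cached model
    # copytree is blocking I/O, so run it on a worker thread
    await asyncio.to_thread(shutil.copytree, src, dest)
```

Copying rather than moving keeps the seed directory intact so the same directory can seed other nodes; a fuller implementation might also respect the HF_HOME environment variable and huggingface_hub's models--org--name cache layout.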