
get rid of some more hf bloat

Alex Cheema 3 months ago
parent
commit
3c7bd48aa3

+ 1 - 1
exo/download/test_new_shard_download.py

@@ -1,4 +1,4 @@
-from exo.download.hf.new_shard_download import download_shard, NewShardDownloader
+from exo.download.new_shard_download import download_shard, NewShardDownloader
 from exo.inference.shard import Shard
 from exo.models import get_model_id
 from pathlib import Path

+ 2 - 2
exo/inference/mlx/test_non_blocking.py

@@ -2,7 +2,7 @@ import asyncio
 import time
 import numpy as np
 from exo.inference.mlx.sharded_inference_engine import MLXDynamicShardInferenceEngine
-from exo.download.hf.hf_shard_download import HFShardDownloader
+from exo.download.new_shard_download import NewShardDownloader
 from exo.inference.shard import Shard
 from exo.models import build_base_shard
 from collections import deque
@@ -10,7 +10,7 @@ from statistics import mean, median

 async def test_non_blocking():
     # Setup
-    shard_downloader = HFShardDownloader()
+    shard_downloader = NewShardDownloader()
     engine = MLXDynamicShardInferenceEngine(shard_downloader)
     _shard = build_base_shard("llama-3.1-8b", "MLXDynamicShardInferenceEngine")
     shard = Shard(_shard.model_id, _shard.start_layer, _shard.n_layers - 1, _shard.n_layers)
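
Read together, the hunk above reduces the test setup to the following; this is only a sketch assembled from the lines shown in this diff, using no API beyond what the test itself exercises:

from exo.download.new_shard_download import NewShardDownloader
from exo.inference.mlx.sharded_inference_engine import MLXDynamicShardInferenceEngine
from exo.inference.shard import Shard
from exo.models import build_base_shard

# NewShardDownloader now stands in where HFShardDownloader used to be injected.
shard_downloader = NewShardDownloader()
engine = MLXDynamicShardInferenceEngine(shard_downloader)

# Build a shard spanning every layer of llama-3.1-8b, mirroring the test.
_shard = build_base_shard("llama-3.1-8b", "MLXDynamicShardInferenceEngine")
shard = Shard(_shard.model_id, _shard.start_layer, _shard.n_layers - 1, _shard.n_layers)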

+ 3 - 3
exo/inference/test_inference_engine.py

@@ -1,6 +1,6 @@
 from exo.inference.mlx.sharded_inference_engine import MLXDynamicShardInferenceEngine
-from exo.download.hf.hf_shard_download import HFShardDownloader
 from exo.inference.inference_engine import InferenceEngine
+from exo.download.new_shard_download import NewShardDownloader
 from exo.inference.shard import Shard
 from exo.helpers import DEBUG
 import os
@@ -44,7 +44,7 @@ async def test_inference_engine(inference_engine_1: InferenceEngine, inference_e
  assert np.array_equal(next_resp_full, resp4)


-asyncio.run(test_inference_engine(MLXDynamicShardInferenceEngine(HFShardDownloader()), MLXDynamicShardInferenceEngine(HFShardDownloader()), "llama-3.2-1b", 16))
+asyncio.run(test_inference_engine(MLXDynamicShardInferenceEngine(NewShardDownloader()), MLXDynamicShardInferenceEngine(NewShardDownloader()), "llama-3.2-1b", 16))

 if os.getenv("RUN_TINYGRAD", default="0") == "1":
   import tinygrad
@@ -52,5 +52,5 @@ if os.getenv("RUN_TINYGRAD", default="0") == "1":
   from exo.inference.tinygrad.inference import TinygradDynamicShardInferenceEngine
   tinygrad.helpers.DEBUG.value = int(os.getenv("TINYGRAD_DEBUG", default="0"))
   asyncio.run(
-    test_inference_engine(TinygradDynamicShardInferenceEngine(HFShardDownloader()), TinygradDynamicShardInferenceEngine(HFShardDownloader()), "llama-3-8b", 32)
+    test_inference_engine(TinygradDynamicShardInferenceEngine(NewShardDownloader()), TinygradDynamicShardInferenceEngine(NewShardDownloader()), "llama-3-8b", 32)
   )

+ 0 - 50
extra/download_hf.py

@@ -1,50 +0,0 @@
-import argparse
-import asyncio
-from exo.download.hf.hf_helpers import download_all_files, RepoProgressEvent
-
-DEFAULT_ALLOW_PATTERNS = [
-  "*.json",
-  "*.py",
-  "tokenizer.model",
-  "*.tiktoken",
-  "*.txt",
-  "*.safetensors",
-]
-# Always ignore `.git` and `.cache/huggingface` folders in commits
-DEFAULT_IGNORE_PATTERNS = [
-  ".git",
-  ".git/*",
-  "*/.git",
-  "**/.git/**",
-  ".cache/huggingface",
-  ".cache/huggingface/*",
-  "*/.cache/huggingface",
-  "**/.cache/huggingface/**",
-]
-
-
-async def main(repo_id, revision="main", allow_patterns=None, ignore_patterns=None):
-  async def progress_callback(event: RepoProgressEvent):
-    print(f"Overall Progress: {event.completed_files}/{event.total_files} files, {event.downloaded_bytes}/{event.total_bytes} bytes")
-    print(f"Estimated time remaining: {event.overall_eta}")
-    print("File Progress:")
-    for file_path, progress in event.file_progress.items():
-      status_icon = {'not_started': '⚪', 'in_progress': '🔵', 'complete': '✅'}[progress.status]
-      eta_str = str(progress.eta)
-      print(f"{status_icon} {file_path}: {progress.downloaded}/{progress.total} bytes, "
-            f"Speed: {progress.speed:.2f} B/s, ETA: {eta_str}")
-    print("\n")
-
-  await download_all_files(repo_id, revision, progress_callback, allow_patterns, ignore_patterns)
-
-
-if __name__ == "__main__":
-  parser = argparse.ArgumentParser(description="Download files from a Hugging Face model repository.")
-  parser.add_argument("--repo-id", required=True, help="The repository ID (e.g., 'meta-llama/Meta-Llama-3.1-8B-Instruct')")
-  parser.add_argument("--revision", default="main", help="The revision to download (branch, tag, or commit hash)")
-  parser.add_argument("--allow-patterns", nargs="*", default=None, help="Patterns of files to allow (e.g., '*.json' '*.safetensors')")
-  parser.add_argument("--ignore-patterns", nargs="*", default=None, help="Patterns of files to ignore (e.g., '.*')")
-
-  args = parser.parse_args()
-
-  asyncio.run(main(args.repo_id, args.revision, args.allow_patterns, args.ignore_patterns))
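
With extra/download_hf.py deleted along with the exo.download.hf package, a one-off download would have to go through the new module instead. Below is a minimal sketch under stated assumptions: only the import of download_shard and NewShardDownloader is confirmed by this commit, so the download_shard call signature shown here is a guess for illustration, not the project's documented API.

import asyncio
from exo.download.new_shard_download import download_shard, NewShardDownloader
from exo.inference.shard import Shard
from exo.models import build_base_shard

async def main():
  downloader = NewShardDownloader()
  # Build a full-model shard the same way the tests in this commit do.
  _shard = build_base_shard("llama-3.1-8b", "MLXDynamicShardInferenceEngine")
  shard = Shard(_shard.model_id, _shard.start_layer, _shard.n_layers - 1, _shard.n_layers)
  # Assumed signature -- download_shard's real parameters are not shown in this diff.
  await download_shard(downloader, "MLXDynamicShardInferenceEngine", shard)

if __name__ == "__main__":
  asyncio.run(main())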

+ 0 - 26
test/test_hf.py

@@ -1,26 +0,0 @@
-import os
-import sys
-
-# Add the project root to the Python path
-project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0, project_root)
-
-import asyncio
-from exo.download.hf.hf_helpers import get_weight_map
-
-async def test_get_weight_map():
-  repo_ids = [
-    "mlx-community/quantized-gemma-2b",
-    "mlx-community/Meta-Llama-3.1-8B-4bit",
-    "mlx-community/Meta-Llama-3.1-70B-4bit",
-    "mlx-community/Meta-Llama-3.1-405B-4bit",
-  ]
-  for repo_id in repo_ids:
-    weight_map = await get_weight_map(repo_id)
-    assert weight_map is not None, "Weight map should not be None"
-    assert isinstance(weight_map, dict), "Weight map should be a dictionary"
-    assert len(weight_map) > 0, "Weight map should not be empty"
-    print(f"OK: {repo_id}")
-
-if __name__ == "__main__":
-  asyncio.run(test_get_weight_map())