@@ -1,7 +1,7 @@
import os
import re
from transformers import AutoTokenizer, AutoProcessor
-from exo.models import model_base_shards
+from exo.models import model_cards


def test_tokenizer(name, tokenizer, verbose=False):
@@ -26,7 +26,12 @@ def test_tokenizer(name, tokenizer, verbose=False):

ignore = ["TriAiExperiments/SFR-Iterative-DPO-LLaMA-3-70B-R", "mlx-community/DeepSeek-Coder-V2-Lite-Instruct-4bit-mlx", "mlx-community/DeepSeek-V2.5-MLX-AQ4_1_64", "llava-hf/llava-1.5-7b-hf", "mlx-community/Qwen*", "dummy", "mlx-community/Meta-Llama-3.1-405B-Instruct-8bit"]
ignore_pattern = re.compile(r"^(" + "|".join(model.replace("*", ".*") for model in ignore) + r")")
-models = [shard.model_id for shards in model_base_shards.values() for shard in shards.values() if not ignore_pattern.match(shard.model_id)]
+models = []
+for model_id in model_cards:
+  for engine_type, repo_id in model_cards[model_id].get("repo", {}).items():
+    if not ignore_pattern.match(repo_id):
+      models.append(repo_id)
+models = list(set(models))

verbose = os.environ.get("VERBOSE", "0").lower() == "1"
for m in models: