Parcourir la source

change mlx-community/Meta-Llama-3.1-70B-Instruct-bf16-CORRECTED to mlx-community/Meta-Llama-3.1-70B-Instruct-bf16 since it works now

Alex Cheema il y a 6 mois
Parent
commit
8d524bfe71
1 fichier modifié avec 1 ajout et 1 suppression
  1. 1 1
      exo/models.py

+ 1 - 1
exo/models.py

@@ -13,7 +13,7 @@ model_base_shards = {
     "TinygradDynamicShardInferenceEngine": Shard(model_id="NousResearch/Meta-Llama-3.1-70B-Instruct", start_layer=0, end_layer=0, n_layers=80),
   },
   "llama-3.1-70b-bf16": {
-    "MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Meta-Llama-3.1-70B-Instruct-bf16-CORRECTED", start_layer=0, end_layer=0, n_layers=80),
+    "MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Meta-Llama-3.1-70B-Instruct-bf16", start_layer=0, end_layer=0, n_layers=80),
     "TinygradDynamicShardInferenceEngine": Shard(model_id="NousResearch/Meta-Llama-3.1-70B-Instruct", start_layer=0, end_layer=0, n_layers=80),
   },
   "llama-3.1-405b": {"MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Meta-Llama-3.1-405B-4bit", start_layer=0, end_layer=0, n_layers=126),},