
add support for qwen2.5 coder 1.5b and 7b

Alex Cheema, 7 months ago
parent commit abca3bfa37
2 changed files with 8 additions and 0 deletions
  1. exo/models.py (+6 -0)
  2. tinychat/examples/tinychat/index.html (+2 -0)

exo/models.py (+6 -0)

@@ -38,6 +38,12 @@ model_base_shards = {
   ### llava
   "llava-1.5-7b-hf": {"MLXDynamicShardInferenceEngine": Shard(model_id="llava-hf/llava-1.5-7b-hf", start_layer=0, end_layer=0, n_layers=32),},
   ### qwen
+  "qwen-2.5-coder-1.5b": {
+    "MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Qwen2.5-Coder-1.5B-Instruct-4bit", start_layer=0, end_layer=0, n_layers=28),
+  },
+  "qwen-2.5-coder-7b": {
+    "MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Qwen2.5-Coder-7B-Instruct-4bit", start_layer=0, end_layer=0, n_layers=28),
+  },
   "qwen-2.5-7b": {
     "MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Qwen2.5-7B-Instruct-4bit", start_layer=0, end_layer=0, n_layers=28),
   },
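
For context, a minimal sketch of how entries in model_base_shards are consumed: the outer key is the model name exposed to the UI, the inner key names the inference engine class, and the value is the Shard describing the weights repo and layer count. The helper get_shard_for_model below is hypothetical, shown only to illustrate the lookup.

from exo.models import model_base_shards

def get_shard_for_model(model_name: str, engine_class: str):
    # Hypothetical helper: resolve the Shard for a model / inference-engine pair.
    shards = model_base_shards.get(model_name)
    if shards is None:
        raise ValueError(f"unknown model: {model_name}")
    shard = shards.get(engine_class)
    if shard is None:
        raise ValueError(f"{model_name} has no shard for engine {engine_class}")
    return shard

# The new 1.5B coder entry resolves to the 28-layer MLX shard added above.
shard = get_shard_for_model("qwen-2.5-coder-1.5b", "MLXDynamicShardInferenceEngine")
print(shard.model_id)  # mlx-community/Qwen2.5-Coder-1.5B-Instruct-4bit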

tinychat/examples/tinychat/index.html (+2 -0)

@@ -43,6 +43,8 @@
 <option value="deepseek-coder-v2-lite">Deepseek Coder V2 Lite</option>
 <option value="deepseek-coder-v2.5">Deepseek Coder V2.5</option>
 <option value="llava-1.5-7b-hf">LLaVa 1.5 7B (Vision Model)</option>
+<option value="qwen-2.5-coder-1.5b">Qwen 2.5 Coder 1.5B</option>
+<option value="qwen-2.5-coder-7b">Qwen 2.5 Coder 7B</option>
 <option value="qwen-2.5-7b">Qwen 2.5 7B</option>
 <option value="qwen-2.5-math-7b">Qwen 2.5 7B (Math)</option>
 <option value="qwen-2.5-14b">Qwen 2.5 14B</option>