inference.py

from pathlib import Path
import json
import os
from exo.inference.tinygrad.models.llama import Transformer, convert_from_huggingface, fix_bf16
from exo.inference.shard import Shard
from exo.inference.tokenizers import resolve_tokenizer
from tinygrad.nn.state import safe_load, torch_load, load_state_dict
from tinygrad import Tensor, dtypes, nn, Context
from transformers import AutoTokenizer
from exo.inference.inference_engine import InferenceEngine
from typing import Optional, Tuple
import numpy as np
from exo.inference.tinygrad.tinygrad_helpers import concat_weights, load
from exo.download.shard_download import ShardDownloader

Tensor.no_grad = True

# default settings
TEMPERATURE = float(os.getenv("TEMPERATURE", 0.85))
TOP_K = 25
TOP_P = 0.9
ALPHA_F = 0.1
ALPHA_P = 0.0

MODEL_PARAMS = {
  "8B": {"args": {"dim": 4096, "n_heads": 32, "n_kv_heads": 8, "n_layers": 32, "norm_eps": 1e-5, "rope_theta": 500000, "vocab_size": 128256, "hidden_dim": 14336}, "files": 1},
  "70B": {"args": {"dim": 8192, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-5, "rope_theta": 500000, "vocab_size": 128256, "hidden_dim": 28672}, "files": 8}
}
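
# Illustrative sketch only: TOP_K/TOP_P (and the alpha penalties) are consumed by the
# sampling code inside the tinygrad llama model, not in this file. The helper below
# shows, in plain numpy, roughly what top-k + top-p (nucleus) filtering means for a
# 1-D logits vector; it is not the sampler the model actually uses.
def _sample_topk_topp_sketch(logits: np.ndarray, k: int = TOP_K, p: float = TOP_P, temperature: float = TEMPERATURE) -> int:
  probs = np.exp((logits - logits.max()) / max(temperature, 1e-6))
  probs = probs / probs.sum()
  order = np.argsort(-probs)[:k]         # indices of the k most likely tokens
  keep = np.cumsum(probs[order]) <= p    # nucleus: smallest prefix with mass <= p
  keep[0] = True                         # never drop the most likely token
  kept = order[keep]
  return int(np.random.choice(kept, p=probs[kept] / probs[kept].sum()))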

def build_transformer(model_path: Path, shard: Shard, model_size="8B", device=None):
  # build model
  linear = nn.Linear
  with Context(THREEFRY=0):
    model = Transformer(**MODEL_PARAMS[model_size]["args"], linear=linear, max_context=8192, jit=True, shard=shard)

  # load weights
  if model_path.is_dir():
    if (model_path/"model.safetensors.index.json").exists(): weights = load(str(model_path/"model.safetensors.index.json"), shard)
    elif (model_path/"model.safetensors").exists(): weights = load(str(model_path/"model.safetensors"), shard)
    else: weights = concat_weights([load(str(model_path/f"consolidated.{i:02d}.pth"), shard) for i in range(MODEL_PARAMS[model_size]["files"])], device[0] if isinstance(device, tuple) else device)
  else:
    weights = load(str(model_path), shard)
  weights = convert_from_huggingface(weights, model, MODEL_PARAMS[model_size]["args"]["n_heads"], MODEL_PARAMS[model_size]["args"]["n_kv_heads"])
  weights = fix_bf16(weights)

  with Context(BEAM=0):
    # replace weights in model
    load_state_dict(model, weights, strict=False, consume=False)  # consume=True
  return model
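
# Usage sketch (hypothetical path, model_id, and shard layout, for illustration only):
#   shard = Shard(model_id="llama-3-8b", start_layer=0, end_layer=31, n_layers=32)
#   model = build_transformer(Path.home()/"models"/"llama-3-8b", shard, model_size="8B")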

class TinygradDynamicShardInferenceEngine(InferenceEngine):
  def __init__(self, shard_downloader: ShardDownloader):
    self.shard = None
    self.shard_downloader = shard_downloader

  async def infer_prompt(self, request_id: str, shard: Shard, prompt: str, image_str: Optional[str] = None, inference_state: Optional[str] = None) -> Tuple[np.ndarray, str, bool]:
    await self.ensure_shard(shard)
    start_pos = json.loads(inference_state or "{}").get("start_pos", 0)
    n_captured_toks = json.loads(inference_state or "{}").get("n_captured_toks", 0)

    toks = self.tokenizer.encode(prompt)
    h = self.model(Tensor([toks]), start_pos, TEMPERATURE).realize()

    if h.shape == (1,):
      # this shard holds the final layer, so a token was sampled
      start_pos += len(toks)
      start_pos += 1
      n_captured_toks = 0
      return np.array([[h.item()]]), json.dumps({"start_pos": start_pos, "n_captured_toks": n_captured_toks}), h.item() == self.tokenizer.eos_token_id
    else:
      # intermediate shard: pass the hidden state on to the next node
      n_captured_toks = len(toks)
      return h.numpy(), json.dumps({"start_pos": start_pos, "n_captured_toks": n_captured_toks}), False

  async def infer_tensor(self, request_id: str, shard: Shard, input_data: np.ndarray, inference_state: Optional[str] = None) -> Tuple[np.ndarray, str, bool]:
    await self.ensure_shard(shard)
    start_pos = json.loads(inference_state or "{}").get("start_pos", 0)
    n_captured_toks = json.loads(inference_state or "{}").get("n_captured_toks", 0)

    h = self.model(Tensor(input_data), start_pos, TEMPERATURE).realize()

    if h.shape == (1,):
      # final shard: advance past the tokens captured by earlier calls plus the new token
      start_pos += n_captured_toks
      start_pos += 1
      n_captured_toks = 0
      return np.array([[h.item()]]), json.dumps({"start_pos": start_pos, "n_captured_toks": n_captured_toks}), h.item() == self.tokenizer.eos_token_id
    else:
      return h.numpy(), json.dumps({"start_pos": start_pos, "n_captured_toks": n_captured_toks}), False

  async def ensure_shard(self, shard: Shard):
    # (re)build the model and tokenizer only when the shard actually changes
    if self.shard == shard:
      return

    model_path = await self.shard_downloader.ensure_shard(shard)
    self.model = build_transformer(model_path, shard, model_size="8B" if "8b" in shard.model_id.lower() else "70B")
    self.tokenizer = await resolve_tokenizer(str((model_path if model_path.is_dir() else model_path.parent)))
    self.shard = shard
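
# Minimal driver sketch, not part of the original module. It assumes a concrete
# ShardDownloader implementation is passed in and that the shard spans the whole
# model (so infer_prompt returns sampled tokens rather than hidden states); the
# model_id and layer counts are illustrative assumptions.
async def _demo(downloader: ShardDownloader, prompt: str = "Hello") -> str:
  engine = TinygradDynamicShardInferenceEngine(downloader)
  shard = Shard(model_id="llama-3-8b", start_layer=0, end_layer=31, n_layers=32)
  out, state, done = await engine.infer_prompt("demo-request", shard, prompt)
  toks = [int(out[0][0])]
  while not done:
    # feed each sampled token back in until the model emits eos
    out, state, done = await engine.infer_tensor("demo-request", shard, out, state)
    toks.append(int(out[0][0]))
  return engine.tokenizer.decode(toks)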