mixtral.py

import functools, argparse, pathlib
from tinygrad import Tensor, nn, Device, GlobalCounters, Variable
from tinygrad.helpers import Timing, Profiling, CI, tqdm
from tinygrad.nn.state import torch_load, get_state_dict
from extra.models.llama import FeedForward, Transformer
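
# Mixtral's sparse mixture-of-experts FFN: a linear gate scores num_experts
# FeedForward blocks per token, only the two best-scoring experts actually run,
# and their outputs are blended by the normalized gate weights.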
class MixtureFeedForward:
  def __init__(self, num_experts:int, dim:int, hidden_dim:int, linear=nn.Linear):
    self.gate = nn.Linear(dim, num_experts, bias=False)
    self.experts = [FeedForward(dim, hidden_dim, linear) for _ in range(num_experts)]
  def __call__(self, x:Tensor) -> Tensor:
    assert x.shape[0] == 1, "only BS=1"
    # exp of the gate logits; normalizing the top-2 below is equivalent to a
    # softmax over just those two experts
    g = self.gate(x).float().exp()
    # pull the scores to the host and pick the two highest-scoring experts
    choice = g.data().tolist()[0][0]
    top = sorted(enumerate(choice), key=lambda x: -x[1])
    norm = top[0][1] + top[1][1]
    e1, e2 = self.experts[top[0][0]], self.experts[top[1][0]]
    scale = Tensor([top[0][1]/norm, top[1][1]/norm])
    # experts may live on different devices, so move x to each expert's device
    # and bring the result back before blending
    ret = e1(x.to(e1.w1.weight.device)).to(x.device) * scale[0] + \
          e2(x.to(e2.w1.weight.device)).to(x.device) * scale[1]
    return ret
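
# A typical invocation (assuming a tinygrad checkout on PYTHONPATH and the
# weights already downloaded into the default --weights directory):
#   python mixtral.py --count 30 --temperature 0.7 --timing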
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description="Run Mixtral in tinygrad", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("--count", type=int, default=30, help="Max number of tokens to generate")
  parser.add_argument("--temperature", type=float, default=0.7, help="Temperature in the softmax")
  parser.add_argument("--timing", action="store_true", help="Print timing per token")
  parser.add_argument("--profile", action="store_true", help="Profile generation")
  parser.add_argument("--weights", type=str, default=(pathlib.Path(__file__).parent.parent / "weights/mixtral-8x7b-32kseqlen").as_posix(),
                      help="Path to the downloaded weights")
  args = parser.parse_args()
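
  # the script expects consolidated.00.pth.b and tokenizer.model inside --weights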
  state = torch_load(args.weights + "/consolidated.00.pth.b")
  model = Transformer(n_layers=32, dim=4096, hidden_dim=14336, n_heads=32, n_kv_heads=8, norm_eps=1e-5, vocab_size=32000, feed_forward=functools.partial(MixtureFeedForward, 8), jit=False)
  model_state_dict = get_state_dict(model)
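
  # shard the eight experts two-per-device across Device.DEFAULT:1..:4;
  # everything else stays on Device.DEFAULT, and all weights are cast to fp16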
  for k in (t := tqdm(state, disable=CI)):
    if 'feed_forward.experts.' in k:
      expert_no = int(k.split('feed_forward.experts.')[1].split('.')[0])
      device = Device.DEFAULT + ":" + str((expert_no//2)+1)
    else:
      device = Device.DEFAULT
    t.set_description(f"ram used: {GlobalCounters.mem_used/1e9:5.2f} GB, loading {k} to {device}")
    model_state_dict[k].replace(state[k].to(device).half()).realize()
  if CI: print(f"ram used: {GlobalCounters.mem_used/1e9:5.2f} GB")
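
  # the SentencePiece tokenizer ships alongside the weights; generation starts
  # from a single BOS token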
  from sentencepiece import SentencePieceProcessor
  spp = SentencePieceProcessor(model_file=args.weights + "/tokenizer.model")
  toks = [spp.bos_id()]
  start_pos = 0
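
  # autoregressive sampling, one token per step: the first call uses start_pos=0
  # (prefill); later calls bind start_pos to a symbolic Variable so tinygrad can
  # reuse the same kernels at every position instead of recompiling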
  for i in range(args.count):
    GlobalCounters.reset()
    with Profiling(sort="time", frac=0.1, enabled=args.profile):
      with Timing("total ", enabled=args.timing, on_exit=lambda x: f", {1e9/x:.2f} tok/sec"):
        tok = model(Tensor([toks[start_pos:]]), 0 if start_pos == 0 else Variable("start_pos", 1, 1024).bind(start_pos), args.temperature).item()
    toks.append(tok)
    start_pos += 1
    print(spp.decode(toks))