external_benchmark_bert.py

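# Benchmarks the training step of each distinct BERT sublayer in tinygrad.
# Tunables (read via getenv below): BS, SEQ_LEN, DISABLE_DROPOUT, JITCNT, ASSIGN, CNT.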
import unittest, time
from tinygrad import Tensor, TinyJit, GlobalCounters, Device
from tinygrad.helpers import getenv, Context
from tinygrad.nn.optim import LAMB
from tinygrad.nn.state import get_parameters
from tinygrad.engine.realize import run_schedule

from extra.models import bert

bs = getenv("BS", 16)
seq_len = getenv("SEQ_LEN", 512)

class BenchmarkBertTrain(unittest.TestCase):
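  # Builds the model once, then returns (name, module, input_shapes) for the
  # requested sublayer. Input shapes follow the BS/SEQ_LEN settings above.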
  def _get_layer(self, layer_id):
    if not hasattr(self, "model"):
      dropout_prob = 0.0 if getenv("DISABLE_DROPOUT") else 0.1
      self.model = bert.BertForPretraining(attention_probs_dropout_prob=dropout_prob, hidden_dropout_prob=dropout_prob)
    hidden_size = self.model.bert.embeddings.word_embeddings.embed_sz
    intermediate_size = self.model.bert.encoder.layer[0].intermediate.dense.weight.shape[0]
    layer_map = {
      "embedding": self.model.bert.embeddings,
      "attention_self": self.model.bert.encoder.layer[0].attention.self,
      "attention_output": self.model.bert.encoder.layer[0].attention.output,
      "intermediate": self.model.bert.encoder.layer[0].intermediate,
      "output": self.model.bert.encoder.layer[0].output
    }
    input_shapes = {
      "embedding": [(bs, seq_len), (bs, seq_len)],
      "attention_self": [(bs, seq_len, hidden_size), (bs, 1, 1, seq_len)],
      "attention_output": [(bs, seq_len, hidden_size), (bs, seq_len, 1)],
      "intermediate": [(bs, seq_len, hidden_size)],
      "output": [(bs, seq_len, intermediate_size), (bs, seq_len, 1)]
    }.get(layer_id)
    return f"{layer_id}-layer, Input: {input_shapes}", layer_map.get(layer_id), input_shapes
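  # Times one optimizer step (forward + backward + LAMB update) over the given
  # sublayer, running the schedule JITCNT times per measurement, and reports
  # the best of CNT timed runs.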
  def _test_layer(self, name, layer, input_shapes):
    optim = LAMB(get_parameters(layer))
    with Context(SAVE_SCHEDULE=0): Tensor.realize(*[t.assign(t.detach().contiguous()) for t in get_parameters(optim)])

    JITCNT = getenv("JITCNT", 1)
    Tensor.training = True
    @TinyJit
    def step(inputs):
      optim.zero_grad()
      for i in inputs: i.grad = None
      y = layer(*inputs).contiguous().contiguous_backward()
      y.sum().backward()
      if getenv("ASSIGN", 1): sched, _ = Tensor.schedule_with_vars(y, *list(inputs), *optim.schedule_step())
      else: sched, _ = Tensor.schedule_with_vars(y, *list(inputs), *[t.grad for t in optim.params])
      for _ in range(JITCNT):
        run_schedule(list(sched))  # copy so each iteration sees the full schedule: run_schedule pops items as it runs
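    # Measurement loop: fresh random inputs per run, best wall-clock time kept.
    # GlobalCounters totals are divided by JITCNT to get per-step figures.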
    CNT = getenv("CNT", 5)
    best_tm = None
    flops, mem_used, mem, kernels = None, None, None, None
    for _ in range(CNT):
      with Context(SAVE_SCHEDULE=0): inputs = [Tensor.randn(*shape, requires_grad=False).realize() for shape in input_shapes]
      GlobalCounters.reset()

      st = time.perf_counter()
      step(inputs)
      Device[Device.DEFAULT].synchronize()
      et = time.perf_counter()

      flops = GlobalCounters.global_ops / JITCNT
      mem_used = GlobalCounters.mem_used
      mem = GlobalCounters.global_mem / JITCNT
      if kernels is None: kernels = GlobalCounters.kernel_count // JITCNT
      tm = (et-st) / JITCNT
      if best_tm is None or tm < best_tm: best_tm = tm
    print(f"\r{name:70s}: {best_tm * 1000:>9.2f} ms, {flops / 10**12 / best_tm:>6.2f} TFLOPS, {mem / 10**9 / best_tm:>5.0f} GB/s, "
          f"{mem_used / 10**9: 6.2f} GB used, {kernels:>5d} kernels")
    return best_tm, flops, mem, kernels
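  # One test per distinct sublayer. The final argument to _est is how many times
  # the sublayer runs in a full step: the embedding runs once, each encoder
  # sublayer 24 times (BERT-large has 24 encoder layers).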
  def test_embedding_layer(self): self._est(*self._test_layer(*self._get_layer("embedding")), 1)
  def test_attention_self_layer(self): self._est(*self._test_layer(*self._get_layer("attention_self")), 24)  # Assumes BERT-large
  def test_attention_output_layer(self): self._est(*self._test_layer(*self._get_layer("attention_output")), 24)
  def test_intermediate_layer(self): self._est(*self._test_layer(*self._get_layer("intermediate")), 24)
  def test_output_layer(self): self._est(*self._test_layer(*self._get_layer("output")), 24)
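  # Running totals across tests, combined into a whole-step estimate at teardown.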
  est_tm, est_flops, est_mem, est_kernels = 0, 0, 0, 0

  @classmethod
  def _est(cls, tm, flops, mem, kernels, mult):
    cls.est_tm += tm * mult
    cls.est_flops += flops * mult
    cls.est_mem += mem * mult
    cls.est_kernels += kernels * mult

  @classmethod
  def tearDownClass(cls):
    print(f"\restimated step tm: {cls.est_tm * 1000.0:.2f} ms, {cls.est_flops / 10 ** 12 / cls.est_tm:.3f} tflops, "
          f"{cls.est_mem / 10 ** 9 / cls.est_tm:.2f} GB/s, {cls.est_kernels} kernels")
if __name__ == "__main__":
  unittest.main()