#!/usr/bin/env python
import unittest

import numpy as np

from tinygrad import Tensor, Device, dtypes
from tinygrad.lazy import LazyBuffer, ReduceOps, MetaOps
from tinygrad.engine.schedule import create_schedule
  7. class TestLazyBuffer(unittest.TestCase):
  8. def test_fromcpu_shape_tracker(self):
  9. def helper(a: np.ndarray):
  10. print(a.shape, a.strides, a.flags.c_contiguous)
  11. b = Tensor(a).lazydata
  12. #assert b.st.contiguous == a.flags.c_contiguous
  13. assert b.st.shape == a.shape
  14. np.testing.assert_equal(a, Tensor(b).numpy())
  15. for ndims in range(1, 4):
  16. a = np.random.randn(*(4,)*ndims).astype(np.float32)
  17. for stride in [-2, 1, 2]:
  18. for start in [0, 1]:
  19. helper(a[(slice(start, None, stride),)*ndims])
  20. def test_shuffle_pad_ops_cmpeq(self):
  21. y = Tensor([1]).cat(Tensor([1]) == 0).numpy()
  22. z = Tensor([1, 0]).numpy()
  23. np.testing.assert_allclose(y, z)
  24. def test_shuffle_pad_ops_div(self):
  25. y = Tensor([1]).cat(Tensor([1]).div(Tensor([2.0]))).numpy()
  26. z = Tensor([1, 0.5]).numpy()
  27. np.testing.assert_allclose(y, z)
  28. def test_shuffle_pad_ops_log(self):
  29. y = Tensor([1]).cat(Tensor([1]).log()).numpy()
  30. z = Tensor([1, 0]).numpy()
  31. np.testing.assert_allclose(y, z)
  32. def test_shuffle_pad_ops_exp(self):
  33. y = Tensor([1]).cat(Tensor([1]).exp()).numpy()
  34. z = Tensor([1, np.e]).numpy()
  35. np.testing.assert_allclose(y, z)
  36. def test_device_0_is_the_same_device(self):
  37. a = Tensor([1, 2, 3], f"{Device.DEFAULT}")
  38. b = Tensor([1, 2, 3], f"{Device.DEFAULT}:0")
  39. assert a.device == b.device
  40. def test_shrink_const_into_zero(self):
  41. # regression test to make sure the shapetracker is preserved
  42. a = Tensor.zeros(4,4,4).shrink((None, (0,0), None))
  43. b = Tensor.zeros(4,1,4)
  44. c = a.cat(b, dim=1)
  45. np.testing.assert_allclose(c.numpy(), np.concatenate((a.numpy(), b.numpy()), axis=1))
  46. def test_shrink_const_then_cast(self):
  47. # regression test to make sure the shapetracker is preserved
  48. a = Tensor.zeros(4,4,4).shrink((None, (0,0), None)).cast(dtypes.int32)
  49. b = Tensor.zeros(4,1,4)
  50. c = a.cat(b, dim=1)
  51. np.testing.assert_allclose(c.numpy(), np.concatenate((a.numpy(), b.numpy()), axis=1))
  52. def test_const_dtype(self):
  53. lb: LazyBuffer = Tensor([1], dtype=dtypes.int).lazydata
  54. assert lb.const(1).base.arg == 1
  55. assert type(lb.const(1).base.arg) is int
  56. lb: LazyBuffer = Tensor([1], dtype=dtypes.float).lazydata
  57. assert lb.const(1).base.arg == 1.0
  58. assert type(lb.const(1).base.arg) is float
  59. class TestReduceOp(unittest.TestCase):
  60. def test_no_split_reduce_kernel(self):
  61. a = Tensor.rand(4, 4).realize()
  62. a = a.sum()
  63. sched = create_schedule([a.lazydata])
  64. assert len(sched) == 1
  65. assert sched[0].ast.src[0].src[0].op is ReduceOps.SUM
  66. def test_split_reduce_kernel_dim0(self):
  67. a = Tensor.rand(256, 255).realize()
  68. a = a.sum()
  69. sched = create_schedule([a.lazydata])
  70. assert len(sched) == 2
  71. for s in sched:
  72. assert s.ast.src[0].src[0].op is ReduceOps.SUM
  73. def test_split_reduce_kernel_dim1(self):
  74. a = Tensor.rand(255, 256).realize()
  75. a = a.sum()
  76. sched = create_schedule([a.lazydata])
  77. assert len(sched) == 2
  78. for s in sched:
  79. assert s.ast.src[0].src[0].op is ReduceOps.SUM
  80. class TestView(unittest.TestCase):
  81. def test_all_masked_out(self):
  82. # start with non CONST MetaOps
  83. a = Tensor.rand(10, 10)
  84. assert a.lazydata.base.op is not MetaOps.CONST
  85. # all masked out, degrades to const 0
  86. b = a.pad(((0, 10), None))[10:]
  87. assert b.shape == (10, 10)
  88. assert b.lazydata.base.op is MetaOps.CONST and b.lazydata.base.arg == 0
  89. # mask out dim = 1 works too
  90. b = a.pad((None, (0, 10)))[:, 10:]
  91. assert b.shape == (10, 10)
  92. assert b.lazydata.base.op is MetaOps.CONST and b.lazydata.base.arg == 0
  93. # partial masked out does not degrade into CONST
  94. b = a.pad(((0, 5), None))[5:]
  95. assert b.shape == (10, 10)
  96. assert b.lazydata.base.op is not MetaOps.CONST
  97. if __name__ == "__main__":
  98. unittest.main()