# test_conv.py
  1. import unittest
  2. import numpy as np
  3. from tinygrad.tensor import Tensor
  4. from tinygrad.helpers import Context
  5. class TestConv(unittest.TestCase):
  6. def test_simple(self):
  7. x = Tensor.ones(1,12,128,256).contiguous().realize()
  8. w = Tensor.ones(32,12,3,3).contiguous().realize()
  9. ret = x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
  10. # it's not 108 around the padding
  11. assert (ret[:, :, 1:-1, 1:-1] == 108).all()
  12. assert ret[0,0,0,0] == 48
  13. assert ret[0,0,0,1] == 72
  14. def test_simple_rand(self):
  15. x = Tensor.rand(1,12,128,256)
  16. w = Tensor.rand(32,12,3,3)
  17. x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
  18. def test_many_simple(self):
  19. x = Tensor(np.arange(8*2*8).reshape(1,8,2,8).astype(np.float32))
  20. #w = Tensor(np.arange(8*8*1*1).reshape(8,8,1,1).astype(np.float32))
  21. w = Tensor.eye(8).reshape((8,8,1,1))
  22. ret = x.conv2d(w, stride=(1,2), padding=(0,0)).numpy()
  23. print(ret)
  24. def test_lazycache(self):
  25. Tensor.no_grad = True
  26. x = Tensor.rand(1, 32)
  27. y = Tensor.rand(32)
  28. out = x + y.reshape((1,32,1)).reshape((1,32)) + y.reshape((1,32,1)).reshape((1,32))
  29. out.numpy()
  30. Tensor.no_grad = False
  31. def test_simple_biased(self):
  32. C = 8
  33. x = Tensor.rand(1,C,5,5)
  34. w = Tensor.eye(C).reshape((C,C,1,1))
  35. b = Tensor(np.arange(C).astype(np.float32))
  36. ret = Tensor.conv2d(x,w,b).relu().conv2d(w,b)
  37. print(ret.numpy())
  38. def test_two_binops_no_rerun_small(self):
  39. Tensor.no_grad = True
  40. x = Tensor.rand(1,1,32,32)
  41. w = Tensor.rand(1,1,3,3)
  42. out = x.conv2d(w, padding=(1,1))
  43. np.testing.assert_allclose(out.relu().numpy(), np.maximum(out.numpy(), 0))
  44. Tensor.no_grad = False
  45. def test_two_binops_no_rerun(self):
  46. Tensor.no_grad = True
  47. x = Tensor.randn(1,12,128,256)
  48. w = Tensor.randn(32,12,3,3)
  49. out = x.conv2d(w, stride=(2,2), padding=(1,1))
  50. r1, r2 = out.relu(), (out-1)
  51. np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
  52. np.testing.assert_allclose(r2.numpy(), out.numpy() - 1)
  53. Tensor.no_grad = False
  54. def test_two_overlapping_binops_no_rerun(self):
  55. Tensor.no_grad = True
  56. x = Tensor.randn(1,12,128,256)
  57. w = Tensor.randn(32,12,3,3)
  58. out = x.conv2d(w, stride=(2,2), padding=(1,1))
  59. r1, r2 = out.relu(), out.elu()
  60. np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
  61. np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5)
  62. Tensor.no_grad = False
  63. def test_two_overlapping_binops_no_rerun_wino(self):
  64. Tensor.no_grad = True
  65. with Context(WINO=1):
  66. x = Tensor.randn(1,4,16,16)
  67. w = Tensor.randn(6,4,3,3)
  68. out = x.conv2d(w, padding=(1,1))
  69. r1, r2 = out.relu(), out.elu()
  70. np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
  71. np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5)
  72. Tensor.no_grad = False
  73. def test_first_three(self):
  74. Tensor.no_grad = True
  75. x = Tensor.rand(1,12,128,256)
  76. w = Tensor.rand(32,12,3,3)
  77. x = x.conv2d(w, stride=(2,2), padding=(1,1)).elu()
  78. w = Tensor.rand(32,1,3,3)
  79. x = x.conv2d(w, padding=(1,1), groups=32).elu()
  80. w = Tensor.rand(16,32,1,1)
  81. x = x.conv2d(w).elu()
  82. x = x.numpy()
  83. print(x.shape)
  84. Tensor.no_grad = False
  85. def test_elu(self):
  86. Tensor.no_grad = True
  87. x = Tensor.rand(1,12,128,256)
  88. w = Tensor.rand(32,12,3,3)
  89. x = x.conv2d(w, stride=(2,2), padding=(1,1))
  90. x = x.elu()
  91. w = Tensor.rand(32,1,3,3)
  92. x = x.conv2d(w, padding=(1,1), groups=32)
  93. x.numpy()
  94. Tensor.no_grad = False
  95. def test_reduce_relu(self):
  96. Tensor.no_grad = True
  97. x = Tensor.rand(1,12,128,256)
  98. x = x.sum(keepdim=True).relu()
  99. x.numpy()
  100. Tensor.no_grad = False
  101. def test_bias(self):
  102. Tensor.no_grad = True
  103. from tinygrad.nn import Conv2d
  104. x = Tensor.rand(1,12,128,256)
  105. c = Conv2d(12, 32, 3)
  106. x = c(x).relu()
  107. w = Tensor.uniform(32, 1, 3, 3)
  108. x = x.conv2d(w, groups=32)
  109. x.numpy()
  110. Tensor.no_grad = False
  111. def test_multiadd(self):
  112. w = Tensor.rand(32)
  113. x = Tensor.rand(32).relu()
  114. (w+x).numpy()
  115. def test_reorder(self):
  116. x = Tensor.rand(1,12,128,256)
  117. w = Tensor.rand(12,12,3,3)
  118. x = x.conv2d(w, padding=(1,1))
  119. print(x.shape)
  120. x = x.reshape((1, 12, 256, 128))
  121. x += 1
  122. x += 1
  123. x = x.reshape((1, 12, 128, 256))
  124. x.numpy()
  125. if __name__ == '__main__':
  126. unittest.main()