# mnist_gan.py

from pathlib import Path
import numpy as np
import torch
from torchvision.utils import make_grid, save_image
from tinygrad.nn.state import get_parameters
from tinygrad.tensor import Tensor
from tinygrad.helpers import trange
from tinygrad.nn import optim
from extra.datasets import fetch_mnist
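# NOTE: extra.datasets ships with the tinygrad repository itself, so this script
# assumes it is run from a tinygrad checkout (e.g. with the repo root on
# PYTHONPATH); the exact invocation depends on your setup.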
class LinearGen:
  def __init__(self):
    self.l1 = Tensor.scaled_uniform(128, 256)
    self.l2 = Tensor.scaled_uniform(256, 512)
    self.l3 = Tensor.scaled_uniform(512, 1024)
    self.l4 = Tensor.scaled_uniform(1024, 784)

  def forward(self, x):
    x = x.dot(self.l1).leakyrelu(0.2)
    x = x.dot(self.l2).leakyrelu(0.2)
    x = x.dot(self.l3).leakyrelu(0.2)
    x = x.dot(self.l4).tanh()  # tanh keeps outputs in (-1, 1), matching the real-image scaling
    return x

class LinearDisc:
  def __init__(self):
    self.l1 = Tensor.scaled_uniform(784, 1024)
    self.l2 = Tensor.scaled_uniform(1024, 512)
    self.l3 = Tensor.scaled_uniform(512, 256)
    self.l4 = Tensor.scaled_uniform(256, 2)

  def forward(self, x):
    # balance the discriminator inputs with a constant bias (.add(1))
    x = x.dot(self.l1).add(1).leakyrelu(0.2).dropout(0.3)
    x = x.dot(self.l2).leakyrelu(0.2).dropout(0.3)
    x = x.dot(self.l3).leakyrelu(0.2).dropout(0.3)
    x = x.dot(self.l4).log_softmax()  # log-probabilities over the two classes {fake, real}
    return x

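# Illustrative shape check for one pass through both networks (not executed here):
#   z = Tensor.randn(4, 128)           # a batch of 4 noise vectors
#   fake = LinearGen().forward(z)      # -> (4, 784) flattened 28x28 images in (-1, 1)
#   logp = LinearDisc().forward(fake)  # -> (4, 2) log-probabilities over {fake, real}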
def make_batch(images):
  sample = np.random.randint(0, len(images), size=batch_size)
  # scale pixels from [0, 255] to [-1, 1] to match the generator's tanh output
  image_b = images[sample].reshape(-1, 28*28).astype(np.float32) / 127.5 - 1.0
  return Tensor(image_b)

def make_labels(bs, col, val=-2.0):
  y = np.zeros((bs, 2), np.float32)
  y[range(bs), [col] * bs] = val  # Can we do label smoothing, e.g. -2.0 changed to -1.98789? See the sketch below.
  return Tensor(y)

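# One answer to the label-smoothing question above, as a minimal sketch: keep the
# target column slightly short of its extreme value. `make_labels_smoothed` and
# the 0.99 factor are illustrative choices, not part of the original script.
def make_labels_smoothed(bs, col, val=-2.0, smooth=0.99):
  y = np.zeros((bs, 2), np.float32)
  y[range(bs), [col] * bs] = val * smooth  # e.g. -2.0 becomes -1.98
  return Tensor(y)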
def train_discriminator(optimizer, data_real, data_fake):
  # real images are class 1, generated images are class 0
  real_labels = make_labels(batch_size, 1)
  fake_labels = make_labels(batch_size, 0)
  optimizer.zero_grad()
  output_real = discriminator.forward(data_real)
  output_fake = discriminator.forward(data_fake)
  loss_real = (output_real * real_labels).mean()
  loss_fake = (output_fake * fake_labels).mean()
  loss_real.backward()
  loss_fake.backward()
  optimizer.step()
  return (loss_real + loss_fake).numpy()

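# Why val=-2.0 works: the discriminator outputs log-probabilities, and the label
# matrix is zero except for -2.0 in the target column. Averaging over all
# batch_size * 2 entries halves the sum, so the -2.0 cancels and the loss is the
# plain negative log-likelihood of the target class:
#   loss = mean(-2 * log p_target) / 2 = -mean(log p_target)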
def train_generator(optimizer, data_fake):
  # label the fakes as real (class 1): the generator is rewarded for fooling the discriminator
  real_labels = make_labels(batch_size, 1)
  optimizer.zero_grad()
  output = discriminator.forward(data_fake)
  loss = (output * real_labels).mean()
  loss.backward()
  optimizer.step()
  return loss.numpy()

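# Labeling fakes as "real" in train_generator is the standard non-saturating
# generator loss: instead of minimizing log(1 - D(G(z))), the generator maximizes
# log D(G(z)), which gives stronger gradients early in training when the
# discriminator easily rejects the fakes (Goodfellow et al., 2014).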
if __name__ == "__main__":
  # data: stack the train and test images ([::2] skips the label arrays)
  images_real = np.vstack(fetch_mnist()[::2])
  # fixed noise so the sample grids are comparable across epochs
  ds_noise = Tensor.randn(64, 128, requires_grad=False)
  # hyperparameters
  epochs, batch_size, k = 300, 512, 1
  sample_interval = epochs // 10
  n_steps = len(images_real) // batch_size
  # models
  generator = LinearGen()
  discriminator = LinearDisc()
  # path to store results
  output_dir = Path(".").resolve() / "outputs"
  output_dir.mkdir(exist_ok=True)
  # optimizers
  optim_g = optim.Adam(get_parameters(generator), lr=0.0002, b1=0.5)  # lr=0.0002 for equilibrium between the two players
  optim_d = optim.Adam(get_parameters(discriminator), lr=0.0002, b1=0.5)
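  # b1=0.5 (rather than Adam's default 0.9) follows common GAN practice,
  # popularized by the DCGAN paper (Radford et al., 2016).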
  # training loop
  Tensor.training = True  # enable dropout in the discriminator
  for epoch in (t := trange(epochs)):
    loss_g, loss_d = 0.0, 0.0
    for _ in range(n_steps):
      data_real = make_batch(images_real)
      for step in range(k):  # k discriminator updates per generator update; try k = 5 or 7
        noise = Tensor.randn(batch_size, 128)
        # detach so the discriminator update does not backprop into the generator
        data_fake = generator.forward(noise).detach()
        loss_d += train_discriminator(optim_d, data_real, data_fake)
      noise = Tensor.randn(batch_size, 128)
      data_fake = generator.forward(noise)
      loss_g += train_generator(optim_g, data_fake)
    if (epoch + 1) % sample_interval == 0:
      fake_images = generator.forward(ds_noise).detach().numpy()
      fake_images = (fake_images.reshape(-1, 1, 28, 28) + 1) / 2  # rescale from (-1, 1) to (0, 1)
      save_image(make_grid(torch.tensor(fake_images)), output_dir / f"image_{epoch+1}.jpg")
    t.set_description(f"Generator loss: {loss_g/n_steps}, Discriminator loss: {loss_d/n_steps}")
  print("Training Completed!")