# ganSynthesisImage.py — DCGAN trained on pre-generated synthesis images.
#!/usr/bin/env python
import random
import argparse
import cv2
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from PIL import Image
import torchvision.utils as vutils
import gym
import gym.spaces
import numpy as np

# Reuse gym's logger for training progress messages
log = gym.logger
log.set_level(gym.logger.INFO)

# Size of the generator's latent input vector (fed as a (N, 100, 1, 1) tensor)
LATENT_VECTOR_SIZE = 100
# Base channel counts for the discriminator / generator conv stacks
DISCR_FILTERS = 64
GENER_FILTERS = 64
BATCH_SIZE = 16
# dimension input image will be rescaled
IMAGE_SIZE = 64
# (channels, height, width) of images fed to both networks
input_shape = (3, IMAGE_SIZE, IMAGE_SIZE)
LEARNING_RATE = 0.0001
# Cadence (in iterations) for loss logging and TensorBoard image dumps
REPORT_EVERY_ITER = 50
SAVE_IMAGE_EVERY_ITER = 200
# Upper bound on training iterations
MAX_ITERATION = 100000
# Folder holding the pre-generated training images
data_folder = 'synthesis_images/generated_blocks'
  29. class Discriminator(nn.Module):
  30. def __init__(self, input_shape):
  31. super(Discriminator, self).__init__()
  32. # this pipe converges image into the single number
  33. self.conv_pipe = nn.Sequential(
  34. nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
  35. kernel_size=4, stride=2, padding=1),
  36. nn.ReLU(),
  37. nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
  38. kernel_size=4, stride=2, padding=1),
  39. nn.BatchNorm2d(DISCR_FILTERS*2),
  40. nn.ReLU(),
  41. nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
  42. kernel_size=4, stride=2, padding=1),
  43. nn.BatchNorm2d(DISCR_FILTERS * 4),
  44. nn.ReLU(),
  45. nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
  46. kernel_size=4, stride=2, padding=1),
  47. nn.BatchNorm2d(DISCR_FILTERS * 8),
  48. nn.ReLU(),
  49. nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
  50. kernel_size=4, stride=1, padding=0),
  51. nn.Sigmoid()
  52. )
  53. def forward(self, x):
  54. conv_out = self.conv_pipe(x)
  55. return conv_out.view(-1, 1).squeeze(dim=1)
  56. class Generator(nn.Module):
  57. def __init__(self, output_shape):
  58. super(Generator, self).__init__()
  59. # pipe deconvolves input vector into (3, 64, 64) image
  60. self.pipe = nn.Sequential(
  61. nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
  62. kernel_size=4, stride=1, padding=0),
  63. nn.BatchNorm2d(GENER_FILTERS * 8),
  64. nn.ReLU(),
  65. nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
  66. kernel_size=4, stride=2, padding=1),
  67. nn.BatchNorm2d(GENER_FILTERS * 4),
  68. nn.ReLU(),
  69. nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
  70. kernel_size=4, stride=2, padding=1),
  71. nn.BatchNorm2d(GENER_FILTERS * 2),
  72. nn.ReLU(),
  73. nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
  74. kernel_size=4, stride=2, padding=1),
  75. nn.BatchNorm2d(GENER_FILTERS),
  76. nn.ReLU(),
  77. nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
  78. kernel_size=4, stride=2, padding=1),
  79. nn.Tanh()
  80. )
  81. def forward(self, x):
  82. return self.pipe(x)
  83. # here we have to generate our batches from final or noisy synthesis images
  84. def iterate_batches(batch_size=BATCH_SIZE):
  85. batch = []
  86. images = os.listdir(data_folder)
  87. nb_images = len(images)
  88. while True:
  89. i = random.randint(0, nb_images - 1)
  90. img = Image.open(os.path.join(data_folder, images[i]))
  91. img_arr = np.asarray(img)
  92. new_obs = cv2.resize(img_arr, (IMAGE_SIZE, IMAGE_SIZE))
  93. # transform (210, 160, 3) -> (3, 210, 160)
  94. new_obs = np.moveaxis(new_obs, 2, 0)
  95. batch.append(new_obs)
  96. if len(batch) == batch_size:
  97. # Normalising input between -1 to 1
  98. batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0
  99. yield torch.tensor(batch_np)
  100. batch.clear()
  101. if __name__ == "__main__":
  102. parser = argparse.ArgumentParser()
  103. parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation")
  104. args = parser.parse_args()
  105. device = torch.device("cuda" if args.cuda else "cpu")
  106. #envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
  107. print(input_shape)
  108. net_discr = Discriminator(input_shape=input_shape).to(device)
  109. net_gener = Generator(output_shape=input_shape).to(device)
  110. print(net_gener)
  111. objective = nn.BCELoss()
  112. gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
  113. dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
  114. writer = SummaryWriter()
  115. gen_losses = []
  116. dis_losses = []
  117. iter_no = 0
  118. true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
  119. fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
  120. for batch_v in iterate_batches():
  121. # generate extra fake samples, input is 4D: batch, filters, x, y
  122. gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
  123. # There we get data
  124. batch_v = batch_v.to(device)
  125. gen_output_v = net_gener(gen_input_v)
  126. # train discriminator
  127. dis_optimizer.zero_grad()
  128. dis_output_true_v = net_discr(batch_v)
  129. dis_output_fake_v = net_discr(gen_output_v.detach())
  130. dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
  131. dis_loss.backward()
  132. dis_optimizer.step()
  133. dis_losses.append(dis_loss.item())
  134. # train generator
  135. gen_optimizer.zero_grad()
  136. dis_output_v = net_discr(gen_output_v)
  137. gen_loss_v = objective(dis_output_v, true_labels_v)
  138. gen_loss_v.backward()
  139. gen_optimizer.step()
  140. gen_losses.append(gen_loss_v.item())
  141. iter_no += 1
  142. if iter_no % REPORT_EVERY_ITER == 0:
  143. log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
  144. writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
  145. writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
  146. gen_losses = []
  147. dis_losses = []
  148. if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
  149. writer.add_image("fake", vutils.make_grid(gen_output_v.data[:IMAGE_SIZE], normalize=True), iter_no)
  150. writer.add_image("real", vutils.make_grid(batch_v.data[:IMAGE_SIZE], normalize=True), iter_no)