# ganSynthesisImage_100.py (10 KB)
  1. #!/usr/bin/env python
  2. import random
  3. import argparse
  4. import cv2
  5. import os, sys, getopt
  6. import torch
  7. import torch.nn as nn
  8. import torch.optim as optim
  9. from tensorboardX import SummaryWriter
  10. from PIL import Image
  11. import torchvision.utils as vutils
  12. import gym
  13. import gym.spaces
  14. import numpy as np
  15. log = gym.logger
  16. log.set_level(gym.logger.INFO)
  17. LATENT_VECTOR_SIZE = 200
  18. DISCR_FILTERS = 100
  19. GENER_FILTERS = 100
  20. BATCH_SIZE = 16
  21. # dimension input image will be rescaled
  22. IMAGE_SIZE = 100
  23. input_shape = (3, IMAGE_SIZE, IMAGE_SIZE)
  24. BACKUP_MODEL_NAME = "synthesis_{}_model.pt"
  25. BACKUP_FOLDER = "saved_models"
  26. BACKUP_EVERY_ITER = 1
  27. LEARNING_RATE = 0.0001
  28. REPORT_EVERY_ITER = 10
  29. SAVE_IMAGE_EVERY_ITER = 20
  30. MAX_ITERATION = 100000
  31. data_folder = 'synthesis_images/generated_blocks'
  32. class Discriminator(nn.Module):
  33. def __init__(self, input_shape):
  34. super(Discriminator, self).__init__()
  35. # this pipe converges image into the single number
  36. self.conv_pipe = nn.Sequential(
  37. nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
  38. kernel_size=4, stride=2, padding=1),
  39. nn.ReLU(),
  40. nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
  41. kernel_size=4, stride=2, padding=1),
  42. nn.BatchNorm2d(DISCR_FILTERS*2),
  43. nn.ReLU(),
  44. nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
  45. kernel_size=4, stride=2, padding=1),
  46. nn.BatchNorm2d(DISCR_FILTERS * 4),
  47. nn.ReLU(),
  48. nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
  49. kernel_size=5, stride=2, padding=1),
  50. nn.BatchNorm2d(DISCR_FILTERS * 8),
  51. nn.ReLU(),
  52. nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
  53. kernel_size=5, stride=1, padding=0),
  54. nn.Sigmoid()
  55. )
  56. def forward(self, x):
  57. print("Discriminator output")
  58. print("Shape : ", x.shape)
  59. conv_out = self.conv_pipe(x)
  60. print(conv_out.shape)
  61. print(conv_out.view(-1, 1).squeeze(dim=1).shape)
  62. return conv_out.view(-1, 1).squeeze(dim=1)
  63. class Generator(nn.Module):
  64. def __init__(self, output_shape):
  65. super(Generator, self).__init__()
  66. # pipe deconvolves input vector into (3, 100, 100) image
  67. self.pipe = nn.Sequential(
  68. nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
  69. kernel_size=6, stride=1, padding=0),
  70. nn.BatchNorm2d(GENER_FILTERS * 8),
  71. nn.ReLU(),
  72. nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
  73. kernel_size=4, stride=2, padding=1),
  74. nn.BatchNorm2d(GENER_FILTERS * 4),
  75. nn.ReLU(),
  76. nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
  77. kernel_size=4, stride=2, padding=1),
  78. nn.BatchNorm2d(GENER_FILTERS * 2),
  79. nn.ReLU(),
  80. nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
  81. kernel_size=4, stride=2, padding=0),
  82. nn.BatchNorm2d(GENER_FILTERS),
  83. nn.ReLU(),
  84. nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
  85. kernel_size=4, stride=2, padding=1),
  86. nn.Tanh()
  87. )
  88. def forward(self, x):
  89. print("Generator output")
  90. print(self.pipe(x).shape)
  91. return self.pipe(x)
  92. # here we have to generate our batches from final or noisy synthesis images
  93. def iterate_batches(batch_size=BATCH_SIZE):
  94. batch = []
  95. images = os.listdir(data_folder)
  96. nb_images = len(images)
  97. while True:
  98. i = random.randint(0, nb_images - 1)
  99. img = Image.open(os.path.join(data_folder, images[i]))
  100. img_arr = np.asarray(img)
  101. new_obs = cv2.resize(img_arr, (IMAGE_SIZE, IMAGE_SIZE))
  102. # transform (210, 160, 3) -> (3, 210, 160)
  103. new_obs = np.moveaxis(new_obs, 2, 0)
  104. batch.append(new_obs)
  105. if len(batch) == batch_size:
  106. # Normalising input between -1 to 1
  107. batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0
  108. yield torch.tensor(batch_np)
  109. batch.clear()
  110. if __name__ == "__main__":
  111. save_model = False
  112. load_model = False
  113. p_cuda = False
  114. #parser = argparse.ArgumentParser()
  115. #parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation")
  116. #args = parser.parse_args()
  117. try:
  118. opts, args = getopt.getopt(sys.argv[1:], "hflc", ["help=", "folder=", "load=", "cuda="])
  119. except getopt.GetoptError:
  120. # print help information and exit:
  121. print('python ganSynthesisImage_100.py --folder folder_name_to_save --cuda 1')
  122. print('python ganSynthesisImage_100.py --load model_name_to_load ')
  123. sys.exit(2)
  124. for o, a in opts:
  125. if o in ("-h", "--help"):
  126. print('python ganSynthesisImage_100.py --folder folder_name_to_save --cuda 1')
  127. print('python ganSynthesisImage_100.py --load folder_name_to_load ')
  128. sys.exit()
  129. elif o in ("-f", "--folder"):
  130. p_model_folder = a
  131. save_model = True
  132. elif o in ("-l", "--load"):
  133. p_load = a
  134. load_model = True
  135. elif o in ("-c", "--cuda"):
  136. p_cuda = int(a)
  137. else:
  138. assert False, "unhandled option"
  139. if save_model and load_model:
  140. raise Exception("Cannot save and load model. One argurment in only required.")
  141. if not save_model and not load_model:
  142. print('python ganSynthesisImage_100.py --folder folder_name_to_save --cuda 1')
  143. print('python ganSynthesisImage_100.py --load folder_name_to_load ')
  144. print("Need at least one argurment.")
  145. sys.exit(2)
  146. device = torch.device("cuda" if p_cuda else "cpu")
  147. #envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
  148. # prepare folder names to save models
  149. if save_model:
  150. models_folder_path = os.path.join(BACKUP_FOLDER, p_model_folder)
  151. dis_model_path = os.path.join(models_folder_path, BACKUP_MODEL_NAME.format('disc'))
  152. gen_model_path = os.path.join(models_folder_path, BACKUP_MODEL_NAME.format('gen'))
  153. if load_model:
  154. models_folder_path = os.path.join(BACKUP_FOLDER, p_load)
  155. dis_model_path = os.path.join(models_folder_path, BACKUP_MODEL_NAME.format('disc'))
  156. gen_model_path = os.path.join(models_folder_path, BACKUP_MODEL_NAME.format('gen'))
  157. # Construct model
  158. net_discr = Discriminator(input_shape=input_shape).to(device)
  159. net_gener = Generator(output_shape=input_shape).to(device)
  160. print(net_discr)
  161. print(net_gener)
  162. objective = nn.BCELoss()
  163. gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
  164. dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
  165. writer = SummaryWriter()
  166. gen_losses = []
  167. dis_losses = []
  168. iter_no = 0
  169. true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
  170. fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
  171. # load models checkpoint if exists
  172. if load_model:
  173. gen_checkpoint = torch.load(gen_model_path)
  174. net_gener.load_state_dict(gen_checkpoint['model_state_dict'])
  175. gen_optimizer.load_state_dict(gen_checkpoint['optimizer_state_dict'])
  176. gen_losses = gen_checkpoint['gen_losses']
  177. iteration = gen_checkpoint['iteration'] # retrieve only from the gen net the iteration number
  178. dis_checkpoint = torch.load(dis_model_path)
  179. net_discr.load_state_dict(dis_checkpoint['model_state_dict'])
  180. dis_optimizer.load_state_dict(dis_checkpoint['optimizer_state_dict'])
  181. dis_losses = dis_checkpoint['dis_losses']
  182. iter_no = iteration
  183. for batch_v in iterate_batches():
  184. # generate extra fake samples, input is 4D: batch, filters, x, y
  185. gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
  186. # There we get data
  187. batch_v = batch_v.to(device)
  188. gen_output_v = net_gener(gen_input_v)
  189. # train discriminator
  190. dis_optimizer.zero_grad()
  191. print("True disc")
  192. dis_output_true_v = net_discr(batch_v)
  193. print("Fake disc")
  194. dis_output_fake_v = net_discr(gen_output_v.detach())
  195. dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
  196. dis_loss.backward()
  197. dis_optimizer.step()
  198. dis_losses.append(dis_loss.item())
  199. # train generator
  200. gen_optimizer.zero_grad()
  201. dis_output_v = net_discr(gen_output_v)
  202. gen_loss_v = objective(dis_output_v, true_labels_v)
  203. gen_loss_v.backward()
  204. gen_optimizer.step()
  205. gen_losses.append(gen_loss_v.item())
  206. iter_no += 1
  207. print("Iteration : ", iter_no)
  208. if iter_no % REPORT_EVERY_ITER == 0:
  209. log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
  210. writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
  211. writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
  212. gen_losses = []
  213. dis_losses = []
  214. if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
  215. writer.add_image("fake", vutils.make_grid(gen_output_v.data[:IMAGE_SIZE], normalize=True), iter_no)
  216. writer.add_image("real", vutils.make_grid(batch_v.data[:IMAGE_SIZE], normalize=True), iter_no)
  217. if iter_no % BACKUP_EVERY_ITER == 0:
  218. if not os.path.exists(models_folder_path):
  219. os.makedirs(models_folder_path)
  220. torch.save({
  221. 'iteration': iter_no,
  222. 'model_state_dict': net_gener.state_dict(),
  223. 'optimizer_state_dict': gen_optimizer.state_dict(),
  224. 'gen_losses': gen_losses
  225. }, gen_model_path)
  226. torch.save({
  227. 'iteration': iter_no,
  228. 'model_state_dict': net_discr.state_dict(),
  229. 'optimizer_state_dict': dis_optimizer.state_dict(),
  230. 'dis_losses': dis_losses
  231. }, dis_model_path)