# predict_noisy_image.py
  1. # main imports
  2. import sys, os, argparse, json
  3. import numpy as np
  4. # image processing imports
  5. from ipfml import processing, utils
  6. from PIL import Image
  7. # model imports
  8. from sklearn.externals import joblib
  9. from keras.models import model_from_json
  10. from keras import backend as K
  11. # modules imports
  12. sys.path.insert(0, '') # trick to enable import of main folder module
  13. import custom_config as cfg
  14. from modules.utils import data as dt
  15. from modules.classes.Transformation import Transformation
# parameters from config (all values come from the project-local custom_config
# module; meanings below are inferred from the names — TODO confirm against cfg)
path = cfg.dataset_path  # presumably the root folder of the dataset
min_max_ext = cfg.min_max_filename_extension  # extension of min/max normalization files — verify in cfg
features_choices = cfg.features_choices_labels  # valid values accepted by the --features CLI option
custom_min_max_folder = cfg.min_max_custom_folder  # folder holding custom min/max files — verify in cfg
  21. def main():
  22. # getting all params
  23. parser = argparse.ArgumentParser(description="Script which detects if an image is noisy or not using specific model")
  24. parser.add_argument('--image', type=str, help='Image path')
  25. parser.add_argument('--features', type=str,
  26. help="list of features choice in order to compute data",
  27. default='svd_reconstruction, ipca_reconstruction',
  28. required=True)
  29. parser.add_argument('--params', type=str,
  30. help="list of specific param for each feature choice (See README.md for further information in 3D mode)",
  31. default='100, 200 :: 50, 25',
  32. required=True)
  33. parser.add_argument('--size', type=str, help="Expected output size before processing transformation", default="100,100")
  34. parser.add_argument('--model', type=str, help='.json file of keras model')
  35. args = parser.parse_args()
  36. p_img_file = args.image
  37. p_features = list(map(str.strip, args.features.split(',')))
  38. p_params = list(map(str.strip, args.params.split('::')))
  39. p_size = args.size
  40. p_model_file = args.model
  41. with open(p_model_file, 'r') as f:
  42. json_model = json.load(f)
  43. model = model_from_json(json_model)
  44. model.load_weights(p_model_file.replace('.json', '.h5'))
  45. model.compile(loss='binary_crossentropy',
  46. optimizer='rmsprop',
  47. features=['accuracy'])
  48. # load image
  49. img = Image.open(p_img_file)
  50. transformations = []
  51. for id, feature in enumerate(p_features):
  52. if feature not in features_choices:
  53. raise ValueError("Unknown feature, please select a correct feature : ", features_choices)
  54. transformations.append(Transformation(feature, p_params[id], p_size))
  55. # getting transformed image
  56. transformed_images = []
  57. for transformation in transformations:
  58. transformed_images.append(transformation.getTransformedImage(img))
  59. data = np.array(transformed_images)
  60. # specify the number of dimensions
  61. img_width, img_height = cfg.sub_image_size
  62. n_channels = len(transformations)
  63. if K.image_data_format() == 'channels_first':
  64. input_shape = (n_channels, img_width, img_height)
  65. else:
  66. input_shape = (img_width, img_height, n_channels)
  67. prediction = model.predict_classes([data])[0][0]
  68. # output expected from others scripts
  69. print(prediction)
  70. if __name__== "__main__":
  71. main()