# loader.py
# -*- coding: utf-8 -*-
import cv2
import h5py
import random
import numpy as np
class loader(object):
    """Loads HDF5 image stacks (expected size 584x565) and cuts them into training/testing patches."""

    @classmethod
    def get_data_training(cls, original_image_path, ground_truth_path, border_mask_path,
                          patch_height, patch_width, num_patch, inside_mask=False):
        original_images = cls.load_hdf5(original_image_path)  # shape = (-1, 584, 565, 3)
        ground_truths = cls.load_hdf5(ground_truth_path)       # shape = (-1, 584, 565, 1)
        masks = cls.load_hdf5(border_mask_path)                 # shape = (-1, 584, 565, 1)
        # 1\ Preprocessing.
        processed_images = cls.preprocess(original_images)      # shape = (-1, 584, 565, 1)
        # 2\ Randomly generate patches.
        num_patch_per_img = int(num_patch / processed_images.shape[0])
        processed_image_patches, ground_truth_patches = [], []
        for i in range(processed_images.shape[0]):
            for k in range(num_patch_per_img):
                y_ = random.randint(0, 584 - patch_height - 1)
                x_ = random.randint(0, 565 - patch_width - 1)
                if inside_mask:
                    # Keep only patches whose corner points fall inside the border (field-of-view) mask.
                    if masks[i, y_, x_, 0] == 0 or masks[i, y_ + patch_height, x_ + patch_width, 0] == 0 or \
                            masks[i, y_ + patch_height, x_, 0] == 0 or masks[i, y_, x_ + patch_width, 0] == 0:
                        continue
                processed_image_patch = processed_images[i, y_:y_ + patch_height, x_:x_ + patch_width, :]
                ground_truth_patch = ground_truths[i, y_:y_ + patch_height, x_:x_ + patch_width, :]
                processed_image_patches.append(processed_image_patch)
                ground_truth_patches.append(ground_truth_patch)
        return np.array(processed_image_patches), np.array(ground_truth_patches)

    @classmethod
    def get_data_testing(cls, original_image_path, patch_height, patch_width):
        original_images = cls.load_hdf5(original_image_path)  # shape = (-1, 584, 565, 3)
        # 1\ Preprocessing.
        processed_images = cls.preprocess(original_images)     # shape = (-1, 584, 565, 1)
        # 2\ Pad the border so each dimension is a multiple of the patch size.
        new_height = int(np.ceil(584 / patch_height) * patch_height)
        new_width = int(np.ceil(565 / patch_width) * patch_width)
        new_images = np.zeros((processed_images.shape[0], new_height, new_width, 1))
        new_images[:, 0:584, 0:565, :] = processed_images
        # 3\ Divide into non-overlapping patches.
        processed_image_patches = []
        num_sample = new_images.shape[0]
        num_patch_height, num_patch_width = int(new_height / patch_height), int(new_width / patch_width)
        for i in range(num_sample):
            for h in range(num_patch_height):
                for w in range(num_patch_width):
                    processed_image_patch = \
                        new_images[i, h * patch_height:(h + 1) * patch_height,
                                   w * patch_width:(w + 1) * patch_width, :]
                    processed_image_patches.append(processed_image_patch)
        return np.array(processed_image_patches), num_patch_height, num_patch_width, original_images.shape[0]

    @classmethod
    def get_data_testing_overlap(cls, original_image_path, patch_height, patch_width, stride_height, stride_width):
        original_images = cls.load_hdf5(original_image_path)  # shape = (-1, 584, 565, 3)
        # 1\ Preprocessing.
        processed_images = cls.preprocess(original_images)     # shape = (-1, 584, 565, 1)
        # 2\ Pad the border so the strided patch grid covers the whole image.
        new_height = int(np.ceil((584 - patch_height) / stride_height) * stride_height + patch_height)
        new_width = int(np.ceil((565 - patch_width) / stride_width) * stride_width + patch_width)
        new_images = np.zeros((processed_images.shape[0], new_height, new_width, 1))
        new_images[:, 0:584, 0:565, :] = processed_images
        # 3\ Divide into overlapping patches.
        processed_image_patches = []
        num_sample = new_images.shape[0]
        num_patch_height = int((new_height - patch_height) / stride_height + 1)
        num_patch_width = int((new_width - patch_width) / stride_width + 1)
        for i in range(num_sample):
            for h in range(num_patch_height):
                for w in range(num_patch_width):
                    processed_image_patch = new_images[i, h * stride_height:h * stride_height + patch_height,
                                                       w * stride_width:w * stride_width + patch_width, :]
                    processed_image_patches.append(processed_image_patch)
        return np.array(processed_image_patches), num_patch_height, num_patch_width, original_images.shape[0]

    @staticmethod
    def load_hdf5(in_file):
        # Each HDF5 file is expected to hold a single dataset named 'data'.
        with h5py.File(in_file, 'r') as f:
            return f['data'][()]

    @staticmethod
    def preprocess(data, gamma=1.2):
        # 1\ Convert RGB to grayscale (ITU-R BT.601 luma weights).
        train_images = data[:, :, :, 0:1] * 0.299 + data[:, :, :, 1:2] * 0.587 + data[:, :, :, 2:3] * 0.114
        # 2\ Standardize over the whole dataset, then rescale each image to [0, 255].
        images_std = np.std(train_images)
        images_mean = np.mean(train_images)
        images_normalized = (train_images - images_mean) / images_std
        for i in range(train_images.shape[0]):
            images_normalized[i] = ((images_normalized[i] - np.min(images_normalized[i])) /
                                    (np.max(images_normalized[i]) - np.min(images_normalized[i]))) * 255
        # 3\ CLAHE (Contrast Limited Adaptive Histogram Equalization).
        images_equalized = np.empty(images_normalized.shape)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        for i in range(images_normalized.shape[0]):
            images_equalized[i, :, :, 0] = clahe.apply(np.array(images_normalized[i, :, :, 0], dtype=np.uint8))
        # 4\ Gamma adjustment via a lookup table.
        images_lut = np.empty(images_equalized.shape)
        inverse_gamma = 1.0 / gamma
        table = np.array([((i / 255.0) ** inverse_gamma) * 255 for i in np.arange(256)]).astype('uint8')
        for i in range(images_equalized.shape[0]):
            images_lut[i, :, :, 0] = cv2.LUT(np.array(images_equalized[i, :, :, 0], dtype=np.uint8), table)
        # 5\ Rescale from [0, 255] to [0, 1].
        train_images = images_lut / 255
        return train_images
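

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original loader). The HDF5 paths below
# are hypothetical placeholders; each file is assumed to hold a single dataset
# named 'data' with the shapes noted above. Patch size, stride, and patch
# count are arbitrary example values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Random 64x64 training patches, restricted to the field-of-view mask.
    train_patches, train_labels = loader.get_data_training(
        original_image_path='train_images.hdf5',
        ground_truth_path='train_ground_truths.hdf5',
        border_mask_path='train_border_masks.hdf5',
        patch_height=64, patch_width=64, num_patch=19000, inside_mask=True)
    print(train_patches.shape, train_labels.shape)

    # Overlapping test patches (stride 16), plus the per-image patch counts
    # needed later to stitch patch predictions back into full-size images.
    test_patches, n_h, n_w, n_images = loader.get_data_testing_overlap(
        original_image_path='test_images.hdf5',
        patch_height=64, patch_width=64, stride_height=16, stride_width=16)
    print(test_patches.shape, n_h, n_w, n_images)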