# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 16:07:54 2018

@author: Neoooli
"""

from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from tensorflow.keras.layers import *
from tensorflow.keras.models import Sequential
from gdaldiy import *

def conv2d(input_, output_dim, kernel_size=3, stride=2, padding="SAME", biased=True):
    return Conv2D(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, use_bias=biased)(input_)

def deconv2d(input_, output_dim, kernel_size=4, stride=2, padding="SAME", biased=True):
    return Conv2DTranspose(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, use_bias=biased)(input_)

def DSC(input_, output_dim, kernel_size=3, stride=1, padding="SAME", scale=1, biased=True):
    # Depthwise separable convolution. The pointwise step projects to output_dim
    # channels; the original passed input_.shape[-1] here, leaving output_dim unused.
    return SeparableConv2D(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, depth_multiplier=scale, use_bias=biased)(input_)
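
# A minimal shape sketch (assumed NHWC inputs; the sizes are illustrative, not from the source):
#   x = tf.random.normal([1, 64, 64, 3])
#   conv2d(x, 32)    # -> (1, 32, 32, 32): default stride 2 halves H and W
#   deconv2d(x, 32)  # -> (1, 128, 128, 32): default stride 2 doubles H and W
#   DSC(x, 32)       # -> (1, 64, 64, 32): default stride 1 keeps H and W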

def MDSC(input_, output_dim, kernel_list=[3, 5], stride=1, padding="SAME", scale=1, biased=True):
    # Mixed depthwise separable convolution: one depthwise pass per kernel size,
    # concatenated along channels, then fused by a 1x1 pointwise convolution.
    depthoutput_list = []
    for i in range(len(kernel_list)):
        depth_output = DepthwiseConv2D(kernel_size=kernel_list[i], strides=stride, padding=padding, depth_multiplier=scale)
        depthoutput_list.append(depth_output(input_))
    output = concatenate(depthoutput_list, axis=-1)
    output = conv2d(output, output_dim, kernel_size=1, stride=1, padding=padding, biased=biased)
    return output
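
# Sketch of MDSC on an assumed (1, 64, 64, 3) input with the default kernel_list=[3, 5]:
# each DepthwiseConv2D keeps 3 channels, concatenation gives 6, and the final 1x1
# conv2d projects to output_dim:
#   MDSC(tf.random.normal([1, 64, 64, 3]), 32)  # -> (1, 64, 64, 32)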

def SDC(input_, output_dim, kernel_size=3, stride=1, dilation=2, padding="SAME", biased=True):
    """
    Smoothed dilated conv2d via Separable and Shared Convolution (SSC), without BN or ReLU.
    """
    fix_w_size = dilation * 2 - 1
    eo = tf.expand_dims(input_, -1)
    # One (fix_w_size x fix_w_size) smoothing filter shared across all channels via Conv3D.
    o = Conv3D(1, kernel_size=[fix_w_size, fix_w_size, 1], strides=[stride, stride, stride], padding=padding, use_bias=biased)(eo)
    o = eo + o  # residual connection around the smoothing filter
    o = tf.squeeze(o, -1)
    o = Conv2D(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, dilation_rate=(dilation, dilation), use_bias=biased)(o)
    return o
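
# Sketch (assumed input, not from the source): with dilation=2 the shared smoothing
# kernel is 3x3 (fix_w_size = 2 * 2 - 1), and "SAME" padding keeps H and W:
#   SDC(tf.random.normal([1, 64, 64, 3]), 16)  # -> (1, 64, 64, 16)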

def SDRB(input_, kernel_size=3, stride=1, dilation=2, training=False, biased=True):
    # Smoothed dilated residual block: two SDC layers with BN + ReLU and a skip connection.
    output_dim = input_.shape[-1]
    sconv1 = SDC(input_, output_dim, kernel_size, stride, dilation, biased=biased)
    sconv1 = batch_norm(sconv1, training)
    sconv1 = relu(sconv1)
    sconv2 = SDC(sconv1, output_dim, kernel_size, stride, dilation, biased=biased)
    sconv2 = batch_norm(sconv2, training)
    return relu(sconv2 + input_)
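
# Sketch: with the default stride of 1 the block preserves shape (the skip
# connection requires it), so SDRB layers can be stacked. Assumed input:
#   SDRB(tf.random.normal([1, 32, 32, 64]), dilation=2, training=True)  # -> (1, 32, 32, 64)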

def relu(input_):
    return ReLU()(input_)

def lrelu(input_):
    return LeakyReLU()(input_)

def avg_pooling(input_, kernel_size=2, stride=2, padding="same"):
    return tf.keras.layers.AveragePooling2D((kernel_size, kernel_size), stride, padding)(input_)

def max_pooling(input_, kernel_size=2, stride=2, padding="same"):
    return tf.keras.layers.MaxPool2D((kernel_size, kernel_size), stride, padding)(input_)

def dropout(input_, rate=0.2, training=True):
    """
    rate is the fraction of units to drop.
    """
    return tf.keras.layers.Dropout(rate)(input_, training=training)

def GAP(input_):
    return GlobalAveragePooling2D()(input_)

def batch_norm(input_, training=True):
    return BatchNormalization()(input_, training=training)

class InstanceNormalization(tf.keras.layers.Layer):
    def __init__(self, epsilon=1e-5):
        super(InstanceNormalization, self).__init__()
        self.epsilon = epsilon

    def build(self, input_shape):
        self.scale = self.add_weight(
            name='scale',
            shape=input_shape[-1:],
            initializer=tf.random_normal_initializer(1., 0.02),
            trainable=True)

        self.offset = self.add_weight(
            name='offset',
            shape=input_shape[-1:],
            initializer='zeros',
            trainable=True)

    def call(self, x):
        # Normalize each sample over its spatial dimensions, per channel.
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        normalized = (x - mean) * inv
        return self.scale * normalized + self.offset

def instance_norm(input_):
    return InstanceNormalization()(input_)

def norm(input_, norm='batch_norm', training=True):
    if norm is None:
        return input_
    elif norm == 'batch_norm':
        return BatchNormalization()(input_, training=training)
    elif norm == 'instance_norm':
        return InstanceNormalization()(input_)

def act(input_, activation='relu'):
    if activation is None:
        return input_
    elif activation == 'relu':
        return ReLU()(input_)
    elif activation == 'lrelu':
        return LeakyReLU(alpha=0.2)(input_)
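
# Sketch of the norm/act dispatchers (a string selects the layer; None passes through):
#   h = conv2d(x, 64)              # x as in the sketches above
#   h = norm(h, 'instance_norm')
#   h = act(h, 'lrelu')            # LeakyReLU with alpha=0.2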

def diydecay(steps, baselr, cycle_step=100000, decay_steps=100, decay_rate=0.96):
    # Cyclic decay: every cycle_step steps the base rate is scaled by 0.8 and the
    # pattern restarts. Within a cycle the decay exponent ramps up for decay_steps
    # steps, holds for the next decay_steps steps, and repeats.
    n = steps // cycle_step
    clr = baselr * (0.8 ** n)
    steps = steps - n * cycle_step
    k = steps // decay_steps
    i = (-1) ** k
    step = ((i + 1) / 2) * steps - i * ((k + 1) // 2) * decay_steps
    dlr = clr * decay_rate ** (int(step))
    return dlr

def decay(global_steps, baselr, start_decay_step=100000, cycle_step=100000, decay_steps=100, decay_rate=0.96):
    # Hold baselr until start_decay_step, then apply diydecay to the steps since then.
    lr = np.where(np.greater_equal(global_steps, start_decay_step),
                  diydecay(global_steps - start_decay_step, baselr, cycle_step, decay_steps, decay_rate),
                  baselr)
    return lr
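
# Worked example with the defaults (baselr=1e-3, decay_steps=100, decay_rate=0.96),
# values rounded:
#   diydecay(0, 1e-3)    # -> 1.0e-3 (exponent 0)
#   diydecay(50, 1e-3)   # -> ~1.3e-4 (ramp: 0.96 ** 50)
#   diydecay(100, 1e-3)  # -> ~1.7e-5 (0.96 ** 100)
#   diydecay(150, 1e-3)  # -> ~1.7e-5 (hold: exponent stays at 100)
# decay() simply returns baselr until global_steps reaches start_decay_step.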

def l2_loss(x):
    # Euclidean (L2) norm; unlike tf.nn.l2_loss this takes the square root.
    return tf.sqrt(tf.reduce_sum(x ** 2))

def randomflip(input_, n):
    # n is a random integer in [-3, 2]: -1 rotates 90 degrees clockwise, -2 rotates
    # 180, -3 rotates 270, 0 flips vertically, 1 flips horizontally, 2 is a no-op.
    if n < 0:
        return np.rot90(input_, n)
    elif -1 < n < 2:
        return np.flip(input_, n)
    else:
        return input_
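
# Example (assumed 2-D array): randomflip(img, -1) == np.rot90(img, -1), a 90-degree
# clockwise rotation; randomflip(img, 2) returns img untouched.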

def read_img(datapath, scale=255):
    # imgread comes from gdaldiy; divide by scale to normalize pixel values.
    img = imgread(datapath) / scale
    return img

def read_imgs(datapath, scale=255, k=2):
    # Read a list of image paths into one batch, applying the same flip k to each.
    img_list = []
    l = len(datapath)
    for i in range(l):
        img = read_img(datapath[i], scale)
        img = randomflip(img, k)
        img = img[np.newaxis, :]
        img_list.append(img)
    imgs = np.concatenate(img_list, axis=0)
    return tf.convert_to_tensor(imgs, tf.float32)
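
# Usage sketch (hypothetical paths; imgread is assumed to come from gdaldiy):
#   paths = ["a.tif", "b.tif"]
#   k = np.random.randint(-3, 3)      # one flip/rotation code shared by the batch
#   batch = read_imgs(paths, 255, k)  # -> float32 tensor of shape (2, H, W, C)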

def read_labels(datapath, k=2):
    # Read a list of label-mask paths into one batch of class-index maps.
    img_list = []
    l = len(datapath)
    for i in range(l):
        img = imgread(datapath[i])
        img = randomflip(img, k)
        img = img[np.newaxis, :]
        img_list.append(img)
    imgs = np.concatenate(img_list, axis=0)
    imgs = rgb_to_gray(imgs)
    return tf.convert_to_tensor(imgs, tf.float32)

# Mask value for each class: cloud-free pixels are 0, cloud pixels are 255.
rgb_colors = OrderedDict([
    ("cloud-free", np.array([0], dtype=np.uint8)),
    ("cloud", np.array([255], dtype=np.uint8))])

def rgb_to_gray(rgb_mask):
    # Map mask pixel values to consecutive class indices (0, 1, ...) in rgb_colors order.
    label = (np.zeros(rgb_mask.shape[:3] + tuple([1]))).astype(np.uint8)
    if len(rgb_mask.shape) == 4:
        for gray, (class_name, rgb_values) in enumerate(rgb_colors.items()):
            match_pixs = np.where((rgb_mask == np.asarray(rgb_values)).astype(np.uint8).sum(-1) == 3)
            label[match_pixs] = gray
    else:
        for gray, (class_name, rgb_values) in enumerate(rgb_colors.items()):
            match_pixs = np.where((rgb_mask == np.asarray(rgb_values)).astype(np.uint8) == 1)
            label[match_pixs] = gray
    return label.astype(np.uint8)
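
# Sketch: with the rgb_colors table above, a (batch, h, w) mask of {0, 255} values
# maps to class indices {0, 1} with a trailing channel axis:
#   rgb_to_gray(np.array([[[0, 255]]], dtype=np.uint8))  # -> [[[[0], [1]]]], shape (1, 1, 2, 1)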

# Input shape: (w, h, c) or (batch_size, w, h, c).
def label_to_rgb(labels):
    # argmax over the last axis drops that axis and leaves the class index at each position.
    max_index = np.argmax(labels, axis=-1)
    n = len(labels.shape) - 1
    if labels.shape[-1] < 3:
        rgb = (np.zeros(labels.shape[:n])).astype(np.uint8)
    else:
        rgb = (np.zeros(labels.shape[:n] + tuple([3]))).astype(np.uint8)
    for gray, (class_name, rgb_values) in enumerate(rgb_colors.items()):
        match_pixs = np.where(max_index == gray)
        rgb[match_pixs] = rgb_values
    return rgb.astype(np.uint8)
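
# Sketch: a (1, 1, 2, 2) score map becomes the mask values from rgb_colors
# (class 0 -> 0, class 1 -> 255):
#   probs = np.array([[[[0.9, 0.1], [0.2, 0.8]]]])
#   label_to_rgb(probs)  # -> [[[0, 255]]]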

def down_sample(input_, kernel_size, classes):
    # Downsample an integer label map by majority vote: one-hot encode,
    # average-pool, then take the argmax class per pooled window.
    onehot = tf.one_hot(tf.cast(input_, dtype=tf.int32), classes)
    onehot = avg_pooling(onehot, kernel_size, kernel_size)
    onehot = tf.argmax(onehot, axis=-1)
    return onehot
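
# Worked example: majority vote over 2x2 windows of an assumed (1, 4, 4) label map
# with classes=2. The all-ones window pools to 1; the mostly-zero windows pool to 0:
#   labels = tf.constant([[[0, 0, 1, 1],
#                          [0, 1, 1, 1],
#                          [0, 0, 1, 0],
#                          [0, 0, 0, 0]]])
#   down_sample(labels, 2, 2)  # -> [[[0, 1], [0, 0]]]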