
Commit b8e04d9

Add files via upload
1 parent 74bc99b commit b8e04d9

File tree

4 files changed, +500 -0 lines changed


CDFM3SF.py

+68
@@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 16:56:44 2018

@author: lijun
"""
import tensorflow as tf
from ops import *

def CDFM3SF(input_dim, gf_dim=64, reuse=False, training=False, name="CD-FM3SF"):
    # reuse is kept for signature compatibility; it is not used by the Keras layers below.
    # One input per band group, from finest to coarsest resolution.
    input_ = tf.keras.layers.Input(shape=[None, None, input_dim[0]])
    input_1 = tf.keras.layers.Input(shape=[None, None, input_dim[1]])
    input_2 = tf.keras.layers.Input(shape=[None, None, input_dim[2]])

    # Encoder branch 1: full-resolution input.
    e10 = relu(conv2d(input_, gf_dim, kernel_size=3, stride=1))
    e1 = relu(batch_norm(MDSC(e10, gf_dim, stride=1), training))
    e1 = e10 + e1
    p1 = max_pooling(e1, 2, 2)

    # Encoder branch 2: half-resolution input, fused with the pooled branch-1 features.
    e20 = relu(conv2d(input_1, gf_dim, kernel_size=3, stride=1))
    c120 = tf.concat([p1, e20], axis=-1)
    e2 = relu(batch_norm(MDSC(c120, gf_dim, stride=1), training))
    e2 = p1 + e20 + e2
    p2 = max_pooling(e2, 3, 3)

    # Encoder branch 3: one-sixth-resolution input, fused with the pooled branch-2 features.
    e30 = relu(conv2d(input_2, gf_dim, kernel_size=3, stride=1))
    c230 = tf.concat([p2, e30], axis=-1)
    e3 = relu(batch_norm(MDSC(c230, gf_dim, stride=1), training))
    e3 = p2 + e30 + e3
    p3 = max_pooling(e3, 2, 2)

    # Bottleneck.
    e4 = relu(batch_norm(MDSC(p3, gf_dim, stride=1), training))
    e4 = p3 + e4

    # Smoothed dilated residual blocks with dilation rates 2, 2, 3, 3, 4, 4.
    r1 = SDRB(e4, 3, 1, 2, training)
    r2 = SDRB(r1, 3, 1, 2, training)
    r3 = SDRB(r2, 3, 1, 3, training)
    r4 = SDRB(r3, 3, 1, 3, training)
    r5 = SDRB(r4, 3, 1, 4, training)
    r6 = SDRB(r5, 3, 1, 4, training)

    # Decoder: fuse the residual-block outputs, then upsample back through each encoder branch,
    # emitting one single-channel output per input resolution.
    d1 = tf.concat([e4, r2, r4, r6], axis=-1)
    d1 = relu(batch_norm(DSC(d1, gf_dim * 2, stride=1), training))
    d1 = deconv2d(d1, gf_dim, stride=2)
    d1 = tf.concat([d1, e3], 3)
    d1 = relu(batch_norm(DSC(d1, gf_dim, stride=1), training))
    output3 = conv2d(d1, 1, stride=1)

    d2 = deconv2d(d1, gf_dim, stride=3)
    d2 = tf.concat([d2, e2], 3)
    d2 = relu(batch_norm(DSC(d2, gf_dim, stride=1), training))
    output2 = conv2d(d2, 1, stride=1)

    d3 = deconv2d(d2, gf_dim)
    d3 = tf.concat([d3, e1], 3)
    d3 = relu(batch_norm(DSC(d3, gf_dim, stride=1), training))
    output1 = conv2d(d3, 1, stride=1)

    return tf.keras.Model([input_, input_1, input_2], [output1, output2, output3], name=name)

# model = CDFM3SF([4, 6, 3])
# print(model.summary())
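For reference, a minimal smoke test of the model (not part of the commit): it assumes the three band groups hinted at by the commented example above, with 4, 6 and 3 channels, and patch sizes of 192, 96 and 32 pixels chosen only so the 2x and 3x poolings line up with the concatenated encoder branches; it also assumes ops.py (and the GDAL dependency it pulls in through gdaldiy) is importable.

import tensorflow as tf
from CDFM3SF import CDFM3SF

model = CDFM3SF([4, 6, 3], gf_dim=64)
x0 = tf.zeros([1, 192, 192, 4])  # full-resolution band group (hypothetical patch size)
x1 = tf.zeros([1, 96, 96, 6])    # half-resolution band group
x2 = tf.zeros([1, 32, 32, 3])    # one-sixth-resolution band group
out1, out2, out3 = model([x0, x1, x2])
print(out1.shape, out2.shape, out3.shape)  # expected: (1, 192, 192, 1) (1, 96, 96, 1) (1, 32, 32, 1)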

gdaldiy.py

+47
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 20:57:37 2019

@author: Neoooli
"""

try:
    from osgeo import gdal  # preferred import path for recent GDAL builds
except ImportError:
    import gdal
import numpy as np

# numpy dtype names and the corresponding GDAL data types (matched by position).
list1 = ["byte", "uint8", "uint16", "int16", "uint32", "int32", "float32", "float64", "cint16", "cint32", "cfloat32", "cfloat64"]
list2 = [gdal.GDT_Byte, gdal.GDT_Byte, gdal.GDT_UInt16, gdal.GDT_Int16, gdal.GDT_UInt32, gdal.GDT_Int32, gdal.GDT_Float32, gdal.GDT_Float64, gdal.GDT_CInt16, gdal.GDT_CInt32, gdal.GDT_CFloat32, gdal.GDT_CFloat64]

def imgread(path):
    img = gdal.Open(path)
    # col = img.RasterXSize  # columns
    # row = img.RasterYSize  # rows
    # img_arr = img.ReadAsArray(0, 0, col, row)  # read only a window of the given width/height
    c = img.RasterCount
    img_arr = img.ReadAsArray()  # read the whole image
    if c > 1:
        # GDAL returns (bands, rows, cols); reorder to (rows, cols, bands).
        img_arr = img_arr.swapaxes(1, 0)
        img_arr = img_arr.swapaxes(2, 1)
    del img
    return img_arr

def imgwrite(path, narray, compress="None"):
    s = narray.shape
    dt_name = narray.dtype.name
    # Pick the GDAL data type that matches the array dtype, defaulting to Byte.
    for i in range(len(list1)):
        if list1[i] in dt_name.lower():
            datatype = list2[i]
            break
    else:
        datatype = list2[0]
    driver = gdal.GetDriverByName('GTiff')
    # GDAL creates datasets as width/height/bands, i.e. cols/rows/channels.
    if len(s) == 2:
        row, col, c = s[0], s[1], 1
        dataset = driver.Create(path, col, row, c, datatype, options=["COMPRESS=" + compress])
        dataset.GetRasterBand(1).WriteArray(narray)
        del dataset
    elif len(s) == 3:
        row, col, c = s[0], s[1], s[2]
        dataset = driver.Create(path, col, row, c, datatype, options=["COMPRESS=" + compress])
        for i in range(c):
            dataset.GetRasterBand(i + 1).WriteArray(narray[:, :, i])
        del dataset
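As a quick illustration, a round-trip sketch for the two helpers (assumptions: GDAL is installed and "demo.tif" is a writable path; neither is part of the commit):

import numpy as np
from gdaldiy import imgread, imgwrite

arr = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # rows x cols x bands
imgwrite("demo.tif", arr, compress="LZW")  # written band by band as a GeoTIFF
back = imgread("demo.tif")                 # comes back as rows x cols x bands
print(back.shape, back.dtype)              # expected: (64, 64, 3) uint8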

ops.py

+196
@@ -0,0 +1,196 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 16:07:54 2018

@author: Neoooli
"""

from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from tensorflow.keras.layers import *
from tensorflow.keras.models import Sequential
from gdaldiy import *

def conv2d(input_, output_dim, kernel_size=3, stride=2, padding="SAME", biased=True):
    return Conv2D(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, use_bias=biased)(input_)

def deconv2d(input_, output_dim, kernel_size=4, stride=2, padding="SAME", biased=True):
    return Conv2DTranspose(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, use_bias=biased)(input_)

def DSC(input_, output_dim, kernel_size=3, stride=1, padding="SAME", scale=1, biased=True):
    # Depthwise separable convolution; note that output_dim is not used here,
    # the layer keeps the input channel count.
    return SeparableConv2D(input_.shape[-1], kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, depth_multiplier=scale, use_bias=biased)(input_)

def MDSC(input_, output_dim, kernel_list=[3, 5], stride=1, padding="SAME", scale=1, biased=True):
    # Multi-scale depthwise separable convolution: depthwise convolutions with several
    # kernel sizes, concatenated and fused by a 1x1 convolution.
    depthoutput_list = []
    for i in range(len(kernel_list)):
        depth_output = DepthwiseConv2D(kernel_size=kernel_list[i], strides=stride, padding=padding, depth_multiplier=scale)
        depthoutput_list.append(depth_output(input_))
    output = concatenate(depthoutput_list, axis=-1)
    output = conv2d(output, output_dim, kernel_size=1, stride=1, padding=padding, biased=biased)
    return output

def SDC(input_, output_dim, kernel_size=3, stride=1, dilation=2, padding='SAME', biased=True):
    """
    Smoothed dilated conv2d via the Separable and Shared Convolution (SSC), without BN or ReLU.
    """
    input_dim = input_.shape[-1]
    fix_w_size = dilation * 2 - 1
    eo = tf.expand_dims(input_, -1)
    o = Conv3D(1, kernel_size=[fix_w_size, fix_w_size, 1], strides=[stride, stride, stride], padding=padding, use_bias=biased)(eo)
    o = eo + o
    o = tf.squeeze(o, -1)
    o = Conv2D(output_dim, kernel_size=[kernel_size, kernel_size], strides=[stride, stride], padding=padding, dilation_rate=(dilation, dilation), use_bias=biased)(o)
    return o

def SDRB(input_, kernel_size=3, stride=1, dilation=2, training=False, biased=True):
    # Smoothed dilated residual block: two SDC layers with batch norm and a skip connection.
    output_dim = input_.get_shape()[-1]
    sconv1 = SDC(input_, output_dim, kernel_size, stride, dilation, biased=biased)
    sconv1 = batch_norm(sconv1, training)
    sconv1 = relu(sconv1)
    sconv2 = SDC(sconv1, output_dim, kernel_size, stride, dilation, biased=biased)
    sconv2 = batch_norm(sconv2, training)
    return relu(sconv2 + input_)

def relu(input_):
    return ReLU()(input_)

def lrelu(input_):
    return LeakyReLU()(input_)

def avg_pooling(input_, kernel_size=2, stride=2, padding="same"):
    return tf.keras.layers.AveragePooling2D((kernel_size, kernel_size), stride, padding)(input_)

def max_pooling(input_, kernel_size=2, stride=2, padding="same"):
    return tf.keras.layers.MaxPool2D((kernel_size, kernel_size), stride, padding)(input_)

def dropout(input_, rate=0.2, training=True):
    """
    rate is the fraction of units to drop.
    """
    return tf.keras.layers.Dropout(rate)(input_, training=training)

def GAP(input_):
    return GlobalAveragePooling2D()(input_)

def batch_norm(input_, training=True):
    return BatchNormalization()(input_, training=training)

class InstanceNormalization(tf.keras.layers.Layer):
    def __init__(self, epsilon=1e-5):
        super(InstanceNormalization, self).__init__()
        self.epsilon = epsilon

    def build(self, input_shape):
        self.scale = self.add_weight(
            name='scale',
            shape=input_shape[-1:],
            initializer=tf.random_normal_initializer(1., 0.02),
            trainable=True)

        self.offset = self.add_weight(
            name='offset',
            shape=input_shape[-1:],
            initializer='zeros',
            trainable=True)

    def call(self, x):
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        normalized = (x - mean) * inv
        return self.scale * normalized + self.offset

def instance_norm(input_):
    return InstanceNormalization()(input_)

def norm(input_, norm='batch_norm', training=True):
    if norm is None:
        return input_
    elif norm == 'batch_norm':
        return BatchNormalization()(input_, training=training)
    elif norm == 'instance_norm':
        return InstanceNormalization()(input_)

def act(input_, activation='relu'):
    if activation is None:
        return input_
    elif activation == 'relu':
        return ReLU()(input_)
    elif activation == 'lrelu':
        return LeakyReLU(alpha=0.2)(input_)

def diydecay(steps, baselr, cycle_step=100000, decay_steps=100, decay_rate=0.96):
    # Custom staircase schedule: the base rate is multiplied by 0.8 once per cycle_step steps,
    # and within a cycle it is further scaled by decay_rate raised to a piecewise exponent.
    n = steps // cycle_step
    clr = baselr * (0.8 ** n)
    steps = steps - n * cycle_step
    k = steps // decay_steps
    i = (-1) ** k
    step = ((i + 1) / 2) * steps - i * ((k + 1) // 2) * decay_steps
    dlr = clr * decay_rate ** (int(step))
    return dlr

def decay(global_steps, baselr, start_decay_step=100000, cycle_step=100000, decay_steps=100, decay_rate=0.96):
    # Keep the base learning rate until start_decay_step, then apply diydecay.
    lr = np.where(np.greater_equal(global_steps, start_decay_step),
                  diydecay(global_steps - start_decay_step, baselr, cycle_step, decay_steps, decay_rate),
                  baselr)
    return lr

def l2_loss(x):
    return tf.sqrt(tf.reduce_sum(x ** 2))

def randomflip(input_, n):
    # n is a random integer in [-3, 2]: -1/-2/-3 rotate by 90/180/270 degrees clockwise,
    # 0 flips vertically, 1 flips horizontally, 2 leaves the image unchanged.
    if n < 0:
        return np.rot90(input_, n)
    elif -1 < n < 2:
        return np.flip(input_, n)
    else:
        return input_

def read_img(datapath, scale=255):
    img = imgread(datapath) / scale
    return img

def read_imgs(datapath, scale=255, k=2):
    img_list = []
    l = len(datapath)
    for i in range(l):
        img = read_img(datapath[i], scale)
        img = randomflip(img, k)
        img = img[np.newaxis, :]
        img_list.append(img)
    imgs = np.concatenate(img_list, axis=0)
    return tf.convert_to_tensor(imgs, tf.float32)

def read_labels(datapath, k=2):
    img_list = []
    l = len(datapath)
    for i in range(l):
        img = imgread(datapath[i])
        img = randomflip(img, k)
        img = img[np.newaxis, :]
        img_list.append(img)
    imgs = np.concatenate(img_list, axis=0)
    imgs = rgb_to_gray(imgs)
    return tf.convert_to_tensor(imgs, tf.float32)

rgb_colors = OrderedDict([
    ("cloud-free", np.array([0], dtype=np.uint8)),
    ("cloud", np.array([255], dtype=np.uint8))])

def rgb_to_gray(rgb_mask):
    # Map mask values to class indices (0 = cloud-free, 1 = cloud).
    label = (np.zeros(rgb_mask.shape[:3] + tuple([1]))).astype(np.uint8)
    if len(rgb_mask.shape) == 4:
        for gray, (class_name, rgb_values) in enumerate(rgb_colors.items()):
            match_pixs = np.where((rgb_mask == np.asarray(rgb_values)).astype(np.uint8).sum(-1) == 3)
            label[match_pixs] = gray
    else:
        for gray, (class_name, rgb_values) in enumerate(rgb_colors.items()):
            match_pixs = np.where((rgb_mask == np.asarray(rgb_values)).astype(np.uint8) == 1)
            label[match_pixs] = gray
    return label.astype(np.uint8)

# Input shape = (w, h, c) or (batch_size, w, h, c).
def label_to_rgb(labels):
    max_index = np.argmax(labels, axis=-1)  # index of the maximum along the last (class) axis
    n = len(labels.shape) - 1
    if labels.shape[-1] < 3:
        rgb = (np.zeros(labels.shape[:n])).astype(np.uint8)
    else:
        rgb = (np.zeros(labels.shape[:n] + tuple([3]))).astype(np.uint8)
    for gray, (class_name, rgb_values) in enumerate(rgb_colors.items()):
        match_pixs = np.where(max_index == gray)
        rgb[match_pixs] = rgb_values
    return rgb.astype(np.uint8)

def down_sample(input_, kernel_size, classes):
    # Downsample an integer label map by one-hot encoding, average pooling, and argmax.
    onehot = tf.one_hot(tf.cast(input_, dtype=tf.int32), classes)
    onehot = avg_pooling(onehot, kernel_size, kernel_size)
    onehot = tf.argmax(onehot, axis=-1)
    return onehot
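Two of the less obvious utilities, decay and down_sample, can be sanity-checked with a short sketch like the one below; the step values and the 4x4 label map are illustrative only, and importing ops pulls in gdaldiy, so GDAL must be installed.

import tensorflow as tf
from ops import decay, down_sample

# The learning rate stays at the base value until start_decay_step, then follows diydecay.
for step in [0, 100000, 100050, 100100]:
    print(step, decay(step, 2e-4, start_decay_step=100000))

# Downsample a (batch, h, w) integer label map by 2: one-hot -> average pooling -> argmax.
labels = tf.constant([[[0, 0, 1, 1],
                       [0, 0, 1, 1],
                       [1, 1, 0, 0],
                       [1, 1, 0, 0]]])
print(down_sample(labels, 2, 2).numpy())  # expected: [[[0 1] [1 0]]]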

0 commit comments