char_cnn_word_blstm.py
import sys
import logging
from pathlib import Path
import tensorflow as tf
from tf_metrics import precision, recall, f1
from data_loader import DataLoader
from common import config as cfg
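
# NOTE: this script targets the TF 1.x API (tf.estimator, tf.layers,
# tf.contrib.rnn) and the tf_metrics package
# (https://github.com/guillaumegenthial/tf_metrics).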

# DataLoader handles preprocessing and feeding the data
data_loader = DataLoader()

def setup_logging():
    # Mirror TensorFlow logging to ./results/main.log and to stdout
    Path('results').mkdir(exist_ok=True)
    tf.logging.set_verbosity(logging.INFO)
    handlers = [logging.FileHandler('./results/main.log'),
                logging.StreamHandler(sys.stdout)]
    logging.getLogger('tensorflow').handlers = handlers


def model_fn(features, labels, mode):
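    # features arrives from input_fn as a (word_inputs, char_inputs) pair:
    #   word_inputs: [batch, sent_len, word_embed_dim] pretrained word vectors
    #   char_inputs: [batch, sent_len, word_len]       integer character ids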
    word_inputs, char_inputs = features
    training = (mode == tf.estimator.ModeKeys.TRAIN)

    # Character embeddings (num_chars + 2 presumably leaves ids for padding/OOV)
    embeddings = tf.get_variable('embeddings', [cfg.num_chars + 2, cfg.char_embed_dim])
    char_input_emb = tf.nn.embedding_lookup(embeddings, char_inputs)

    # Reshape for the CNN: merge batch and sentence dims so the convolution runs
    # over each word's character sequence independently
    output = tf.reshape(char_input_emb, [-1, tf.shape(char_inputs)[2], cfg.char_embed_dim])
    # CNN over characters: two conv/pool blocks; each stride-2 pooling halves the
    # character axis, so word_max_len positions shrink to word_max_len / 4
    output = tf.layers.conv1d(output, filters=64, kernel_size=2, strides=1, padding="same", activation=tf.nn.relu)
    output = tf.layers.max_pooling1d(output, pool_size=2, strides=2)
    output = tf.layers.conv1d(output, filters=128, kernel_size=2, strides=1, padding="same", activation=tf.nn.relu)
    output = tf.layers.max_pooling1d(output, pool_size=2, strides=2)
    cnn_output = tf.layers.dropout(output, rate=.5, training=training)
    cnn_output = tf.layers.flatten(cnn_output)

    # Restore the [batch, sent_len, features] layout: each word contributes a
    # 128 * (word_max_len / 4) character-level feature vector
    cnn_output = tf.reshape(cnn_output, [-1, tf.shape(char_inputs)[1], 128 * int(cfg.word_max_len / 4)])
    # Concatenate word vectors with the character-level CNN features for the LSTM
    word_inputs = tf.layers.dropout(word_inputs, rate=.5, training=training)
    lstm_inputs = tf.concat([word_inputs, cnn_output], axis=-1)
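    # lstm_inputs: [batch, sent_len, word_embed_dim + 128 * (word_max_len / 4)]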
    # Bidirectional LSTM; note no sequence_length is passed, so padded timesteps
    # are also fed through the recurrence
    fw_cell = tf.contrib.rnn.LSTMCell(num_units=cfg.lstm_units)
    bw_cell = tf.contrib.rnn.LSTMCell(num_units=cfg.lstm_units)
    _, (fw_state, bw_state) = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, lstm_inputs, dtype=tf.float32)
    # Sentence representation: final cell and hidden states of both directions,
    # i.e. [batch, 4 * lstm_units]
    output = tf.concat([bw_state.c, bw_state.h, fw_state.c, fw_state.h], axis=-1)
    lstm_output = tf.layers.dropout(output, rate=.5, training=training)
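    # If DataLoader also yielded true sentence lengths, padding could be masked
    # out of the recurrence (sketch only; 'seq_lens' is a hypothetical feature
    # the current input_fn does not produce):
    # _, (fw_state, bw_state) = tf.nn.bidirectional_dynamic_rnn(
    #     fw_cell, bw_cell, lstm_inputs, sequence_length=seq_lens, dtype=tf.float32)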
    # Dense classification head over 2 classes
    output = tf.layers.dense(lstm_output, 128)
    output = tf.layers.dropout(output, rate=.5, training=training)
    logits = tf.layers.dense(output, 2)
    # Loss: labels arrive as int one-hot vectors from the dataset, so cast them
    # to float for the softmax cross-entropy
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=tf.cast(labels, tf.float32), logits=logits))
    # Optional gradient clipping: an alternative to the plain Adam step in the
    # TRAIN branch below
    # optimizer = tf.train.AdamOptimizer(1e-4)
    # gradients, variables = zip(*optimizer.compute_gradients(loss))
    # gradients, _ = tf.clip_by_global_norm(gradients, .1)
    # train_op = optimizer.apply_gradients(zip(gradients, variables), tf.train.get_global_step())
    # Metrics: tf_metrics returns (value, update_op) pairs like tf.metrics;
    # 'macro' averages the per-class scores over both classes
    indices = [0, 1]
    labels = tf.argmax(labels, 1)
    pred_ids = tf.argmax(logits, 1)
    metrics = {
        'acc': tf.metrics.accuracy(labels, pred_ids),
        'precision': precision(labels, pred_ids, 2, indices, None, average='macro'),
        'recall': recall(labels, pred_ids, 2, indices, None, average='macro'),
        'f1': f1(labels, pred_ids, 2, indices, None, average='macro')
    }
    for metric_name, op in metrics.items():
        tf.summary.scalar(metric_name, op[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    elif mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer(cfg.learning_rate).minimize(
            loss, global_step=tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
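    # A PREDICT branch is not part of the original script; a minimal sketch (the
    # 'label'/'probs' prediction keys are assumptions) would be:
    # elif mode == tf.estimator.ModeKeys.PREDICT:
    #     return tf.estimator.EstimatorSpec(
    #         mode, predictions={'label': pred_ids, 'probs': tf.nn.softmax(logits)})
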
def input_fn(mode=None):
    data_generator = lambda: data_loader.data_generator(mode=mode)
    dataset = tf.data.Dataset.from_generator(data_generator,
                                             output_types=((tf.float32, tf.int32), tf.int32),
                                             output_shapes=(([None, cfg.word_embed_dim], [None, None]), [None]))
    if mode == 'train':
        dataset = dataset.shuffle(cfg.shuffle_buffer).repeat(cfg.num_epochs)
    # Note: padded_batch only pads to the longest element in the batch; the CNN
    # reshape in model_fn assumes DataLoader already pads each word's character
    # ids to cfg.word_max_len
    dataset = dataset.padded_batch(cfg.batch_size, padded_shapes=(([None, cfg.word_embed_dim], [None, None]), [None]))
    return dataset
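
# For reference, each element yielded by DataLoader.data_generator is assumed
# to look like ((word_vectors, char_ids), one_hot_label), e.g.:
#   word_vectors:  float array of shape [sent_len, cfg.word_embed_dim]
#   char_ids:      int array of shape [sent_len, cfg.word_max_len]
#   one_hot_label: int array of shape [2]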

def train():
    setup_logging()
    train_input_func = lambda: input_fn(mode='train')
    eval_input_func = lambda: input_fn(mode='valid')
    est_conf = tf.estimator.RunConfig(cfg.model_dir, save_checkpoints_secs=30, keep_checkpoint_max=500)
    est = tf.estimator.Estimator(model_fn, cfg.model_dir, est_conf)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_func)
    # Evaluate on the validation set at most once every 30 seconds
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_func, throttle_secs=30)
    tf.estimator.train_and_evaluate(est, train_spec, eval_spec)

if __name__ == '__main__':
    train()
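
# To score the validation set from a saved checkpoint afterwards (sketch,
# reusing the same model_fn and input_fn):
# est = tf.estimator.Estimator(model_fn, cfg.model_dir)
# print(est.evaluate(lambda: input_fn(mode='valid')))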