text_fast.py
import tensorflow as tf
import numpy as np


class TextFast(object):
    """
    A FastText model for text classification/regression.
    Uses an embedding layer, followed by an average-pooling layer and a
    fully-connected (plus softmax) output layer.
    """
    def __init__(
            self, model_type, sequence_length, num_classes, vocab_size,
            embedding_size, l2_reg_lambda=0.0):
        # Placeholders for input, output, dropout and learning rate
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")

        # Keep track of the L2 regularization loss (optional)
        l2_loss = tf.constant(0.0)
        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            # With trainable=True the embeddings are non-static (updated during
            # training); with trainable=False they would stay static.
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W", trainable=True)
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)  # [None, sequence_length, embedding_size]
        # Average-pooling layer: mean of the token embeddings over the sequence
        with tf.name_scope("avg-pool"):
            self.output = tf.reduce_mean(self.embedded_chars, axis=1)
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.output, self.dropout_keep_prob)
        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[embedding_size, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            if model_type == 'clf':
                self.predictions = tf.argmax(self.scores, 1, name="predictions")
            elif model_type == 'reg':
                # Keep the regression output as a [None, 1] column vector
                self.predictions = tf.reduce_max(self.scores, 1, name="predictions")
                self.predictions = tf.expand_dims(self.predictions, -1)
        # Calculate the mean cross-entropy loss (clf) or the root-mean-square error loss (reg)
        with tf.name_scope("loss"):
            if model_type == 'clf':
                losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
                self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
            elif model_type == 'reg':
                losses = tf.sqrt(tf.losses.mean_squared_error(predictions=self.predictions, labels=self.input_y))
                self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        # Accuracy (meaningful for classification only; a constant 0.0 for regression)
        with tf.name_scope("accuracy"):
            if model_type == 'clf':
                correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
                self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
            elif model_type == 'reg':
                self.accuracy = tf.constant(0.0, name="accuracy")
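

# Minimal usage sketch (illustrative, not part of the original file). It
# assumes TensorFlow 1.x, since the graph relies on tf.placeholder and
# tf.contrib; every hyperparameter value below is hypothetical.
if __name__ == "__main__":
    model = TextFast(
        model_type='clf', sequence_length=56, num_classes=2,
        vocab_size=10000, embedding_size=128, l2_reg_lambda=0.1)
    train_op = tf.train.AdamOptimizer(model.learning_rate).minimize(model.loss)

    # Feed one random mini-batch just to exercise the graph end to end.
    x_batch = np.random.randint(0, 10000, size=(8, 56))
    y_batch = np.eye(2)[np.random.randint(0, 2, size=8)]  # one-hot labels
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        _, loss, acc = sess.run(
            [train_op, model.loss, model.accuracy],
            feed_dict={model.input_x: x_batch,
                       model.input_y: y_batch,
                       model.dropout_keep_prob: 0.5,
                       model.learning_rate: 1e-3})
        print("loss: {:.4f}, accuracy: {:.4f}".format(loss, acc))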