
Commit: Code update
nreimers committed Nov 23, 2016
1 parent da96c2a, commit 489596b
Showing 7 changed files with 16 additions and 515 deletions.
2016-11_Seminar/Session 1 - SENNA/code for NER/NER.py (9 changes: 5 additions & 4 deletions)

@@ -70,28 +70,29 @@

# Create the train and predict_labels function
n_in = train_tokens.shape[1]
-n_hidden = numHiddenUnits
n_out = len(label2Idx)


words = Sequential()
-words.add(Embedding(output_dim=wordEmbeddings.shape[1], input_dim=wordEmbeddings.shape[0], input_length=n_in, weights=[wordEmbeddings], trainable=False))
+words.add(Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], input_length=n_in, weights=[wordEmbeddings], trainable=False))
words.add(Flatten())

casing = Sequential()
-casing.add(Embedding(output_dim=caseEmbeddings.shape[1], input_dim=caseEmbeddings.shape[0], input_length=n_in, weights=[caseEmbeddings], trainable=False))
+casing.add(Embedding(input_dim=caseEmbeddings.shape[0], output_dim=caseEmbeddings.shape[1], input_length=n_in, weights=[caseEmbeddings], trainable=False))
casing.add(Flatten())

model = Sequential()
model.add(Merge([words, casing], mode='concat'))

-model.add(Dense(output_dim=n_hidden, activation='tanh'))
+model.add(Dense(output_dim=numHiddenUnits, activation='tanh'))
model.add(Dense(output_dim=n_out, activation='softmax'))


# Use Adam optimizer
model.compile(loss='categorical_crossentropy', optimizer='adam')

+model.summary()

# Train_y is a 1-dimensional vector containing the index of the label
# With np_utils.to_categorical we map it to a 1 hot matrix
train_y_cat = np_utils.to_categorical(train_y, n_out)
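
Note on the Embedding change above: it only reorders the keyword arguments into their natural order, since in Keras 1.x the signature starts Embedding(input_dim, output_dim, ...), i.e. vocabulary size first, vector size second. A minimal sketch of the frozen word-embedding lookup; the 10,000 x 50 matrix and the window size below are hypothetical, not values from the repository:

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, Flatten

wordEmbeddings = np.random.rand(10000, 50).astype('float32')  # hypothetical: 10,000 words, 50 dims
n_in = 5  # tokens per context window

words = Sequential()
words.add(Embedding(input_dim=wordEmbeddings.shape[0],    # vocabulary size = number of rows
                    output_dim=wordEmbeddings.shape[1],   # embedding size = number of columns
                    input_length=n_in,
                    weights=[wordEmbeddings],
                    trainable=False))                     # keep the pre-trained vectors fixed
words.add(Flatten())  # the (n_in, 50) lookup result becomes one flat 250-dim feature vector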
2016-11_Seminar/Session 1 - SENNA/code for POS/POS.py (11 changes: 6 additions & 5 deletions)

@@ -38,7 +38,7 @@



-numHiddenUnits = 20
+numHiddenUnits = 100


f = gzip.open('pkl/embeddings.pkl.gz', 'rb')
@@ -69,28 +69,29 @@

# Create the train and predict_labels function
n_in = train_tokens.shape[1]
-n_hidden = numHiddenUnits
n_out = len(label2Idx)


words = Sequential()
-words.add(Embedding(output_dim=wordEmbeddings.shape[1], input_dim=wordEmbeddings.shape[0], input_length=n_in, weights=[wordEmbeddings], trainable=False))
+words.add(Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], input_length=n_in, weights=[wordEmbeddings], trainable=False))
words.add(Flatten())

casing = Sequential()
-casing.add(Embedding(output_dim=caseEmbeddings.shape[1], input_dim=caseEmbeddings.shape[0], input_length=n_in, weights=[caseEmbeddings], trainable=False))
+casing.add(Embedding(input_dim=caseEmbeddings.shape[0], output_dim=caseEmbeddings.shape[1], input_length=n_in, weights=[caseEmbeddings], trainable=False))
casing.add(Flatten())

model = Sequential()
model.add(Merge([words, casing], mode='concat'))

-model.add(Dense(output_dim=n_hidden, activation='tanh'))
+model.add(Dense(output_dim=numHiddenUnits, activation='tanh'))
model.add(Dense(output_dim=n_out, activation='softmax'))


# Use Adam optimizer
model.compile(loss='categorical_crossentropy', optimizer='adam')

+model.summary()

# Train_y is a 1-dimensional vector containing the index of the label
# With np_utils.to_categorical we map it to a 1 hot matrix
train_y_cat = np_utils.to_categorical(train_y, n_out)
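
As the trailing comment says, np_utils.to_categorical expands the vector of label indices into a one-hot matrix: one row per training example, one column per tag. A quick illustration with made-up labels (three tags, four examples; not data from the repository):

import numpy as np
from keras.utils import np_utils

train_y = np.array([0, 2, 1, 2])  # hypothetical label indices
n_out = 3                         # number of distinct tags
train_y_cat = np_utils.to_categorical(train_y, n_out)
# train_y_cat is now:
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]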
2016-11_Seminar/Session 1 - SENNA/code for POS/POS_Skeleton.py (85 changes: 0 additions & 85 deletions)

This file was deleted.

2016-11_Seminar/Session 2 - Sentence CNN/code/cnn_Skeleton.py (137 changes: 0 additions & 137 deletions)

This file was deleted.

2016-11_Seminar/Session 3 - Relation CNN/code/CNN.py (19 changes: 5 additions & 14 deletions)

@@ -16,7 +16,6 @@
- Keras 1.1.1
- Python 2.7
"""
-import theano
import numpy as np
np.random.seed(1337) # for reproducibility

@@ -80,31 +79,23 @@
wordModel.add(Embedding(embeddings.shape[0], embeddings.shape[1], input_length=sentenceTrain.shape[1], weights=[embeddings], trainable=False))


-convModel = Sequential()
-convModel.add(Merge([wordModel, distanceModel1, distanceModel2], mode='concat'))
+model = Sequential()
+model.add(Merge([wordModel, distanceModel1, distanceModel2], mode='concat'))


-convModel.add(Convolution1D(nb_filter=nb_filter,
+model.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='same',
activation='tanh',
subsample_length=1))
-# we use standard max over time pooling
-convModel.add(GlobalMaxPooling1D())


-model = convModel

-#model.add(Dropout(0.25))
-#model.add(Dense(hidden_dims, activation='tanh', W_regularizer=keras.regularizers.l2(0.01)))
+model.add(Dropout(0.5))

+model.add(GlobalMaxPooling1D())

model.add(Dropout(0.25))
model.add(Dense(n_out, activation='softmax'))


model.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['accuracy'])

model.summary()
print "Start training"
