commit 0021c3fb99d1cd3f8792a8cf5c35548815536428
parent 2a20bc827a8c1c9b6e74ef4e1234788207be45b8
Author: Alex Auvolat <alex.auvolat@ens.fr>
Date: Mon, 27 Jul 2015 12:59:39 -0400
Config files
Diffstat:
5 files changed, 54 insertions(+), 7 deletions(-)
diff --git a/config/bidirectional_tgtcls_1_momentum.py b/config/bidirectional_tgtcls_1_momentum.py
@@ -27,12 +27,12 @@ embed_weights_init = IsotropicGaussian(0.01)
weights_init = IsotropicGaussian(0.1)
biases_init = Constant(0.01)
-batch_size = 300
+batch_size = 200
batch_sort_size = 20
max_splits = 100
# monitor_freq = 10000 # temporary, for finding good learning rate
-step_rule= Momentum(learning_rate=0.01, momentum=0.9)
+step_rule= Momentum(learning_rate=0.001, momentum=0.9)
diff --git a/config/bidirectional_tgtcls_1_momentum_maxlen.py b/config/bidirectional_tgtcls_1_momentum_maxlen.py
@@ -0,0 +1,39 @@
+import os
+import cPickle
+
+from blocks.algorithms import Momentum
+from blocks.initialization import IsotropicGaussian, Constant
+
+import data
+from model.bidirectional_tgtcls import Model, Stream
+
+
+with open(os.path.join(data.path, 'arrival-clusters.pkl')) as f: tgtcls = cPickle.load(f)
+
+dim_embeddings = [
+ ('origin_call', data.origin_call_train_size, 10),
+ ('origin_stand', data.stands_size, 10),
+ ('week_of_year', 52, 10),
+ ('day_of_week', 7, 10),
+ ('qhour_of_day', 24 * 4, 10),
+ ('taxi_id', data.taxi_id_size, 10),
+]
+
+hidden_state_dim = 100
+
+dim_hidden = [500, 500]
+
+embed_weights_init = IsotropicGaussian(0.01)
+weights_init = IsotropicGaussian(0.1)
+biases_init = Constant(0.01)
+
+batch_size = 400
+batch_sort_size = 20
+
+max_splits = 100
+train_max_len = 500
+
+# monitor_freq = 10000 # temporary, for finding good learning rate
+
+step_rule= Momentum(learning_rate=0.001, momentum=0.9)
+
diff --git a/config/bidirectional_tgtcls_1_notvt.py b/config/bidirectional_tgtcls_1_notvt.py
@@ -0,0 +1 @@
+bidirectional_tgtcls_1.py
\ No newline at end of file
diff --git a/config/memory_network_bidir_momentum.py b/config/memory_network_bidir_momentum.py
@@ -45,14 +45,14 @@ representation_activation = Tanh
normalize_representation = True
-batch_size = 32
+batch_size = 64
batch_sort_size = 20
max_splits = 100
num_cuts = 1000
-train_candidate_size = 300
-valid_candidate_size = 300
-test_candidate_size = 300
+train_candidate_size = 100
+valid_candidate_size = 100
+test_candidate_size = 100
step_rule = Momentum(learning_rate=0.01, momentum=0.9)
diff --git a/model/stream.py b/model/stream.py
@@ -1,4 +1,4 @@
-from fuel.transformers import Batch, Padding, Mapping, SortMapping, Unpack, MultiProcessing
+from fuel.transformers import Batch, Padding, Mapping, SortMapping, Unpack, MultiProcessing, Filter
from fuel.streams import DataStream
from fuel.schemes import ConstantScheme, ShuffledExampleScheme
@@ -31,6 +31,12 @@ class StreamRec(object):
        elif not data.tvt:
            stream = transformers.add_destination(stream)
+        if hasattr(self.config, 'train_max_len'):
+            idx = stream.sources.index('latitude')
+            def max_len_filter(x):
+                return len(x[idx]) <= self.config.train_max_len
+            stream = Filter(stream, max_len_filter)
+
        stream = transformers.TaxiExcludeEmptyTrips(stream)
        stream = transformers.taxi_add_datetime(stream)
        stream = transformers.Select(stream, tuple(v for v in req_vars if not v.endswith('_mask')))
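A minimal standalone sketch (not part of the commit above) of the length-cap pattern that the model/stream.py hunk introduces: fuel's Filter transformer drops every example whose 'latitude' sequence is longer than train_max_len. The two-trip IterableDataset below is made up for illustration; only the Filter(stream, predicate) usage mirrors the commit.

# Illustrative only: toy data, same Filter-based length cap as in StreamRec.
from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers import Filter

train_max_len = 500  # same meaning as the new config value

dataset = IterableDataset({
    'latitude':  [[41.15] * 10, [41.16] * 700],   # a 10-point and a 700-point trip
    'longitude': [[-8.61] * 10, [-8.62] * 700],
})
stream = DataStream(dataset)

# Filter on the length of the 'latitude' source, as the new hunk does
idx = stream.sources.index('latitude')
stream = Filter(stream, lambda example: len(example[idx]) <= train_max_len)

for example in stream.get_epoch_iterator():
    print(len(example[idx]))  # only the 10-point trip survives the filter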