commit 35b4503ddd148b0c937468891dd0a7e9ff1c79f4
parent 60aa0d3fdd42d7489cc69acbb54c59d7c249ea34
Author: Alex Auvolat <alex.auvolat@ens.fr>
Date: Wed, 6 May 2015 10:12:17 -0400
Move weights init to config files; fix s/time/travel_time
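
Weight initialization is no longer hard-coded in the model classes: each config
module now exports embed_weights_init, mlp_weights_init and mlp_biases_init
(blocks.initialization objects) that the models read at build time. The 'time'
source is renamed to 'travel_time' where it is used, the travel-time MLP output
becomes config.exp_base ** mlp.apply(inputs) instead of
tensor.exp(mlp.apply(inputs) + 2), and model/dest_simple_mlp_tgtcls_alexandre.py
is dropped in favour of the generic dest_simple_mlp_tgtcls with per-config init
values. The time configs also gain exp_base and a valid_set pointing at
cuts/test_times_0.

A minimal sketch of the resulting convention (the config filename below is
hypothetical; the values are the ones used by the dest_* configs):

    # config/my_experiment.py -- hypothetical config following the new layout
    from blocks.initialization import IsotropicGaussian, Constant

    embed_weights_init = IsotropicGaussian(0.001)  # std of the embedding lookup tables
    mlp_weights_init = IsotropicGaussian(0.01)     # std of the MLP weight matrices
    mlp_biases_init = Constant(0.001)              # constant value for the MLP biases

    # model/*.py -- the Model constructor now picks these up from the config module:
    #   for tbl in embed_tables:
    #       tbl.weights_init = config.embed_weights_init
    #   mlp.weights_init = config.mlp_weights_init
    #   mlp.biases_init = config.mlp_biases_init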
Diffstat:
15 files changed, 86 insertions(+), 99 deletions(-)
diff --git a/config/dest_simple_mlp_2_cs.py b/config/dest_simple_mlp_2_cs.py
@@ -1,3 +1,5 @@
+from blocks.initialization import IsotropicGaussian, Constant
+
import model.dest_simple_mlp as model
import data
@@ -16,6 +18,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [200, 100]
dim_output = 2
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_2_cswdt.py b/config/dest_simple_mlp_2_cswdt.py
@@ -1,5 +1,7 @@
import model.dest_simple_mlp as model
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
n_begin_end_pts = 5 # how many points we consider at the beginning and end of the known trajectory
@@ -20,6 +22,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [200, 100]
dim_output = 2
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_2_noembed.py b/config/dest_simple_mlp_2_noembed.py
@@ -1,5 +1,7 @@
import model.dest_simple_mlp as model
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
n_begin_end_pts = 5 # how many points we consider at the beginning and end of the known trajectory
@@ -13,6 +15,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [200, 100]
dim_output = 2
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_tgtcls_0_cs.py b/config/dest_simple_mlp_tgtcls_0_cs.py
@@ -1,5 +1,7 @@
import cPickle
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
import model.dest_simple_mlp_tgtcls as model
@@ -20,6 +22,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = []
dim_output = tgtcls.shape[0]
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_tgtcls_1_cs.py b/config/dest_simple_mlp_tgtcls_1_cs.py
@@ -1,5 +1,7 @@
import cPickle
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
import model.dest_simple_mlp_tgtcls as model
@@ -20,6 +22,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [500]
dim_output = tgtcls.shape[0]
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_tgtcls_1_cswdt.py b/config/dest_simple_mlp_tgtcls_1_cswdt.py
@@ -1,5 +1,7 @@
import cPickle
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
import model.dest_simple_mlp_tgtcls as model
@@ -24,6 +26,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [500]
dim_output = tgtcls.shape[0]
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_tgtcls_1_cswdtx.py b/config/dest_simple_mlp_tgtcls_1_cswdtx.py
@@ -1,5 +1,7 @@
import cPickle
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
import model.dest_simple_mlp_tgtcls as model
@@ -25,6 +27,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [500]
dim_output = tgtcls.shape[0]
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
learning_rate = 0.0001
momentum = 0.99
batch_size = 32
diff --git a/config/dest_simple_mlp_tgtcls_1_cswdtx_alexandre.py b/config/dest_simple_mlp_tgtcls_1_cswdtx_alexandre.py
@@ -1,8 +1,10 @@
import cPickle
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
-import model.dest_simple_mlp_tgtcls_alexandre as model
+import model.dest_simple_mlp_tgtcls as model
n_begin_end_pts = 5 # how many points we consider at the beginning and end of the known trajectory
n_end_pts = 5
@@ -25,6 +27,10 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [500]
dim_output = tgtcls.shape[0]
+embed_weights_init = IsotropicGaussian(0.01)
+mlp_weights_init = IsotropicGaussian(0.1)
+mlp_biases_init = Constant(0.01)
+
learning_rate = 0.01
momentum = 0.9
batch_size = 200
diff --git a/config/time_simple_mlp_1.py b/config/time_simple_mlp_1.py
@@ -1,5 +1,7 @@
import model.time_simple_mlp as model
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
n_begin_end_pts = 5 # how many points we consider at the beginning and end of the known trajectory
@@ -14,6 +16,14 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [200]
dim_output = 1
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
+exp_base = 1.5
+
learning_rate = 0.00001
momentum = 0.99
batch_size = 32
+
+valid_set = 'cuts/test_times_0'
diff --git a/config/time_simple_mlp_2_cswdtx.py b/config/time_simple_mlp_2_cswdtx.py
@@ -1,5 +1,7 @@
import model.time_simple_mlp as model
+from blocks.initialization import IsotropicGaussian, Constant
+
import data
n_begin_end_pts = 5 # how many points we consider at the beginning and end of the known trajectory
@@ -21,6 +23,14 @@ dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [500, 100]
dim_output = 1
+embed_weights_init = IsotropicGaussian(0.001)
+mlp_weights_init = IsotropicGaussian(0.01)
+mlp_biases_init = Constant(0.001)
+
+exp_base = 1.5
+
learning_rate = 0.00001
momentum = 0.99
batch_size = 32
+
+valid_set = 'cuts/test_times_0'
diff --git a/data/transformers.py b/data/transformers.py
@@ -34,7 +34,7 @@ class Select(Transformer):
class TaxiGenerateSplits(Transformer):
def __init__(self, data_stream, max_splits=-1):
super(TaxiGenerateSplits, self).__init__(data_stream)
- self.sources = data_stream.sources + ('destination_latitude', 'destination_longitude', 'time')
+ self.sources = data_stream.sources + ('destination_latitude', 'destination_longitude', 'travel_time')
self.max_splits = max_splits
self.data = None
self.splits = []
diff --git a/model/dest_simple_mlp.py b/model/dest_simple_mlp.py
@@ -1,8 +1,6 @@
from blocks.bricks import MLP, Rectifier, Linear, Sigmoid, Identity
from blocks.bricks.lookup import LookupTable
-from blocks.initialization import IsotropicGaussian, Constant
-
from theano import tensor
import data
@@ -58,9 +56,9 @@ class Model(object):
# Initialization
for tbl in embed_tables:
- tbl.weights_init = IsotropicGaussian(0.001)
- mlp.weights_init = IsotropicGaussian(0.01)
- mlp.biases_init = Constant(0.001)
+ tbl.weights_init = config.embed_weights_init
+ mlp.weights_init = config.mlp_weights_init
+ mlp.biases_init = config.mlp_biases_init
for tbl in embed_tables:
tbl.initialize()
diff --git a/model/dest_simple_mlp_tgtcls.py b/model/dest_simple_mlp_tgtcls.py
@@ -6,8 +6,6 @@ from theano import tensor
from blocks.bricks import MLP, Rectifier, Linear, Sigmoid, Identity, Softmax
from blocks.bricks.lookup import LookupTable
-from blocks.initialization import IsotropicGaussian, Constant
-
import data
import error
@@ -60,9 +58,9 @@ class Model(object):
# Initialization
for tbl in embed_tables:
- tbl.weights_init = IsotropicGaussian(0.001)
- mlp.weights_init = IsotropicGaussian(0.01)
- mlp.biases_init = Constant(0.001)
+ tbl.weights_init = config.embed_weights_init
+ mlp.weights_init = config.mlp_weights_init
+ mlp.biases_init = config.mlp_biases_init
for tbl in embed_tables:
tbl.initialize()
diff --git a/model/dest_simple_mlp_tgtcls_alexandre.py b/model/dest_simple_mlp_tgtcls_alexandre.py
@@ -1,75 +0,0 @@
-import numpy
-
-import theano
-from theano import tensor
-
-from blocks.bricks import MLP, Rectifier, Linear, Sigmoid, Identity, Softmax
-from blocks.bricks.lookup import LookupTable
-
-from blocks.initialization import IsotropicGaussian, Constant
-
-import data
-import error
-
-class Model(object):
- def __init__(self, config):
- # The input and the targets
- x_firstk_latitude = (tensor.matrix('first_k_latitude') - data.train_gps_mean[0]) / data.train_gps_std[0]
- x_firstk_longitude = (tensor.matrix('first_k_longitude') - data.train_gps_mean[1]) / data.train_gps_std[1]
-
- x_lastk_latitude = (tensor.matrix('last_k_latitude') - data.train_gps_mean[0]) / data.train_gps_std[0]
- x_lastk_longitude = (tensor.matrix('last_k_longitude') - data.train_gps_mean[1]) / data.train_gps_std[1]
-
- input_list = [x_firstk_latitude, x_firstk_longitude, x_lastk_latitude, x_lastk_longitude]
- embed_tables = []
-
- self.require_inputs = ['first_k_latitude', 'first_k_longitude', 'last_k_latitude', 'last_k_longitude']
-
- for (varname, num, dim) in config.dim_embeddings:
- self.require_inputs.append(varname)
- vardata = tensor.lvector(varname)
- tbl = LookupTable(length=num, dim=dim, name='%s_lookup'%varname)
- embed_tables.append(tbl)
- input_list.append(tbl.apply(vardata))
-
- y = tensor.concatenate((tensor.vector('destination_latitude')[:, None],
- tensor.vector('destination_longitude')[:, None]), axis=1)
-
- # Define the model
- mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden] + [Softmax()],
- dims=[config.dim_input] + config.dim_hidden + [config.dim_output])
- classes = theano.shared(numpy.array(config.tgtcls, dtype=theano.config.floatX), name='classes')
-
- # Create the Theano variables
- inputs = tensor.concatenate(input_list, axis=1)
-
- # inputs = theano.printing.Print("inputs")(inputs)
- cls_probas = mlp.apply(inputs)
- outputs = tensor.dot(cls_probas, classes)
-
- # outputs = theano.printing.Print("outputs")(outputs)
- # y = theano.printing.Print("y")(y)
-
- outputs.name = 'outputs'
-
- # Calculate the cost
- cost = error.erdist(outputs, y).mean()
- cost.name = 'cost'
- hcost = error.hdist(outputs, y).mean()
- hcost.name = 'hcost'
-
- # Initialization
- for tbl in embed_tables:
- tbl.weights_init = IsotropicGaussian(0.01)
- mlp.weights_init = IsotropicGaussian(0.1)
- mlp.biases_init = Constant(0.01)
-
- for tbl in embed_tables:
- tbl.initialize()
- mlp.initialize()
-
- self.cost = cost
- self.monitor = [cost, hcost]
- self.outputs = outputs
- self.pred_vars = ['destination_latitude', 'destination_longitude']
-
diff --git a/model/time_simple_mlp.py b/model/time_simple_mlp.py
@@ -1,8 +1,6 @@
from blocks.bricks import MLP, Rectifier, Linear, Sigmoid, Identity
from blocks.bricks.lookup import LookupTable
-from blocks.initialization import IsotropicGaussian, Constant
-
from theano import tensor
import data
@@ -11,11 +9,11 @@ import error
class Model(object):
def __init__(self, config):
# The input and the targets
- x_firstk_latitude = (tensor.matrix('first_k_latitude') - data.porto_center[0]) / data.data_std[0]
- x_firstk_longitude = (tensor.matrix('first_k_longitude') - data.porto_center[1]) / data.data_std[1]
+ x_firstk_latitude = (tensor.matrix('first_k_latitude') - data.train_gps_mean[0]) / data.train_gps_std[0]
+ x_firstk_longitude = (tensor.matrix('first_k_longitude') - data.train_gps_mean[1]) / data.train_gps_std[1]
- x_lastk_latitude = (tensor.matrix('last_k_latitude') - data.porto_center[0]) / data.data_std[0]
- x_lastk_longitude = (tensor.matrix('last_k_longitude') - data.porto_center[1]) / data.data_std[1]
+ x_lastk_latitude = (tensor.matrix('last_k_latitude') - data.train_gps_mean[0]) / data.train_gps_std[0]
+ x_lastk_longitude = (tensor.matrix('last_k_longitude') - data.train_gps_mean[1]) / data.train_gps_std[1]
input_list = [x_firstk_latitude, x_firstk_longitude, x_lastk_latitude, x_lastk_longitude]
embed_tables = []
@@ -29,7 +27,7 @@ class Model(object):
embed_tables.append(tbl)
input_list.append(tbl.apply(vardata))
- y = tensor.lvector('time')
+ y = tensor.lvector('travel_time')
# Define the model
mlp = MLP(activations=[Rectifier() for _ in config.dim_hidden] + [Identity()],
@@ -38,7 +36,7 @@ class Model(object):
# Create the Theano variables
inputs = tensor.concatenate(input_list, axis=1)
# inputs = theano.printing.Print("inputs")(inputs)
- outputs = tensor.exp(mlp.apply(inputs) + 2)
+ outputs = config.exp_base ** mlp.apply(inputs)
# outputs = theano.printing.Print("outputs")(outputs)
# y = theano.printing.Print("y")(y)
@@ -51,9 +49,9 @@ class Model(object):
# Initialization
for tbl in embed_tables:
- tbl.weights_init = IsotropicGaussian(0.001)
- mlp.weights_init = IsotropicGaussian(0.01)
- mlp.biases_init = Constant(0.001)
+ tbl.weights_init = config.embed_weights_init
+ mlp.weights_init = config.mlp_weights_init
+ mlp.biases_init = config.mlp_biases_init
for tbl in embed_tables:
tbl.initialize()
@@ -62,4 +60,4 @@ class Model(object):
self.cost = cost
self.monitor = [cost]
self.outputs = outputs
- self.pred_vars = ['time']
+ self.pred_vars = ['travel_time']