memory_network_mlp_2.py (1601B)
"""Hyperparameter configuration for the memory-network MLP model.

Defines the embedding table, the two encoder MLP configurations
(prefix and candidate), and training/evaluation sizes.  This module is
pure configuration: importing it has no side effects beyond building
the two ``MLPConfig`` instances.
"""

from blocks.initialization import IsotropicGaussian, Constant

from blocks.bricks import Tanh

import data
from model.memory_network_mlp import Model, Stream

# Number of GPS points kept from each end of the trip prefix.
n_begin_end_pts = 5

# (feature name, vocabulary size, embedding dimension) for each
# categorical input.  Vocabulary sizes for origin_call / origin_stand
# come from the dataset module.
dim_embeddings = [
    ('origin_call', data.origin_call_train_size, 10),
    ('origin_stand', data.stands_size, 10),
    ('week_of_year', 52, 10),
    ('day_of_week', 7, 10),
    ('qhour_of_day', 24 * 4, 10),
    ('day_type', 3, 10),
]

# Shared initializer for all embedding matrices.
embed_weights_init = IsotropicGaussian(0.001)


class MLPConfig(object):
    """Plain attribute container for one encoder MLP's configuration."""
    __slots__ = ('dim_input', 'dim_hidden', 'dim_output', 'weights_init', 'biases_init', 'embed_weights_init', 'dim_embeddings')


def _build_encoder_config():
    """Return a fresh MLPConfig; prefix and candidate encoders use identical settings."""
    cfg = MLPConfig()
    # Input: n points for each trip end, 2 coordinates each, plus all embeddings.
    cfg.dim_input = n_begin_end_pts * 2 * 2 + sum(emb_dim for (_, _, emb_dim) in dim_embeddings)
    cfg.dim_hidden = [100, 100]
    cfg.weights_init = IsotropicGaussian(0.01)
    cfg.biases_init = Constant(0.001)
    cfg.embed_weights_init = embed_weights_init
    cfg.dim_embeddings = dim_embeddings
    return cfg


prefix_encoder = _build_encoder_config()
candidate_encoder = _build_encoder_config()

# Size and activation of the final shared representation.
representation_size = 100
representation_activation = Tanh

normalize_representation = True


batch_size = 1000
# batch_sort_size = 20

max_splits = 100

# Number of candidate trips scored against each prefix, per split.
train_candidate_size = 5000
valid_candidate_size = 5000
test_candidate_size = 5000