Python keras.models() Examples

The following are 30 code examples showing how to use keras.models(). They are extracted from open-source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module keras, or try the search function.

Example 1
Project: models   Author: kipoi   File: prepare_model_yaml.py    License: MIT License 6 votes vote down vote up
def make_model_yaml(template_yaml, model_json, output_yaml_path):
    """Build a model.yaml from a template and a serialized Keras architecture.

    Loads the YAML template, reconstructs the Keras model from
    ``model_json`` to discover its output names/shapes, fills in the
    ``schema.targets`` section, and writes the result to
    ``output_yaml_path``.

    :param template_yaml: path to the YAML template to fill in
    :param model_json: path to a Keras model architecture JSON file
    :param output_yaml_path: destination path for the completed YAML
    """
    with open(template_yaml, 'r') as f:
        # safe_load: the plain yaml.load is deprecated without an explicit
        # Loader and can construct arbitrary Python objects.
        model_yaml = yaml.safe_load(f)

    # Get the model config; context manager closes the file even on error.
    with open(model_json, 'r') as json_file:
        loaded_model = keras.models.model_from_json(json_file.read())

    model_yaml["schema"]["targets"] = []
    for oname, oshape in zip(loaded_model.output_names, loaded_model.output_shape):
        model_yaml["schema"]["targets"].append({
            "name": oname,
            "shape": str(oshape),
            "doc": "Methylation probability for %s" % oname,
        })

    with open(output_yaml_path, 'w') as f:
        yaml.dump(model_yaml, f, default_flow_style=False)
Example 2
Project: models   Author: kipoi   File: prepare_model_yaml.py    License: MIT License 6 votes vote down vote up
def make_secondary_dl_yaml(template_yaml, model_json, output_yaml_path):
    """Build a secondary dataloader YAML from a template and a Keras model JSON.

    Same flow as make_model_yaml, but fills the ``output_schema.targets``
    section instead of ``schema.targets``.

    :param template_yaml: path to the YAML template to fill in
    :param model_json: path to a Keras model architecture JSON file
    :param output_yaml_path: destination path for the completed YAML
    """
    with open(template_yaml, 'r') as f:
        # safe_load: the plain yaml.load is deprecated without an explicit
        # Loader and can construct arbitrary Python objects.
        model_yaml = yaml.safe_load(f)

    # Get the model config; context manager closes the file even on error.
    with open(model_json, 'r') as json_file:
        loaded_model = keras.models.model_from_json(json_file.read())

    model_yaml["output_schema"]["targets"] = []
    for oname, oshape in zip(loaded_model.output_names, loaded_model.output_shape):
        model_yaml["output_schema"]["targets"].append({
            "name": oname,
            "shape": str(oshape),
            "doc": "Methylation probability for %s" % oname,
        })

    with open(output_yaml_path, 'w') as f:
        yaml.dump(model_yaml, f, default_flow_style=False)
Example 3
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: seq2seq_class.py    License: Apache License 2.0 6 votes vote down vote up
def __init__(self, model_save_path='../models',
             model_structure_name='seq2seq_model_demo',
             model_weights_name='seq2seq_model_demo',
             model_name=None):
    """Set up file names and training/prediction bookkeeping.

    NOTE(review): relies on ``self.model_name_format_str`` being provided
    by the base-class __init__ — confirm against the parent class.
    ``model_name`` is accepted but unused here.
    """
    super().__init__()

    suffix = self.model_name_format_str
    self.model_save_path = model_save_path
    self.model_structure_name = model_structure_name + suffix + '.json'
    self.model_weights_name = model_weights_name + suffix + '.h5'
    print('model_structure_name:', self.model_structure_name)
    print('model_weights_name:', self.model_weights_name)

    # Prediction outputs.
    self.pred_result = None        # predicted mean value
    self.pred_var_result = []      # predicted variance values
    # Training bookkeeping.
    self.current_mean_val_loss = None
    self.EARLY_STOP = False
    self.val_loss_list = []
    self.train_loss_list = []
Example 4
Project: residual_block_keras   Author: keunwoochoi   File: example.py    License: GNU General Public License v3.0 6 votes vote down vote up
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    """Assemble a residual-block image classifier as a Keras Sequential model.

    NOTE(review): depends on module-level names (design_for_residual_blocks,
    nb_classes, and the Keras layer classes) — confirm they are imported.
    """
    n_first = 128
    input_shape = (img_channels, img_rows, img_cols)

    model = keras.models.Sequential()
    if is_mnist:
        # Zero-pad (28, 28) -> (32, 32) before the first convolution.
        model.add(ZeroPadding2D((2, 2), input_shape=input_shape))
        model.add(Convolution2D(n_first, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(n_first, 3, 3, border_mode='same',
                                input_shape=input_shape))
    model.add(Activation('relu'))

    # Residual-based conv stack.
    model.add(design_for_residual_blocks(num_channel_input=n_first))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    return model
Example 5
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0 6 votes vote down vote up
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.
    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    if callable(config.BACKBONE):
        # Custom backbone: delegate shape computation to the config hook.
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        stage_shapes.append([int(math.ceil(height / stride)),
                             int(math.ceil(width / stride))])
    return np.array(stage_shapes)


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 6
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License 6 votes vote down vote up
def predict(self, x):
    r"""
    Predict quantiles of the conditional distribution P(y|x).

    Normalizes the inputs with the stored mean/sigma, forward-propagates
    them through every ensemble member, and returns the ensemble mean.

    Arguments:

        x(np.array): Array of shape `(n, m)` containing `n` m-dimensional
                     inputs for which to predict the conditional quantiles.

    Returns:

        Array of shape `(n, k)` with the columns corresponding to the k
        quantiles of the network.
    """
    normalized = (x - self.x_mean) / self.x_sigma
    member_preds = [member.predict(normalized) for member in self.models]
    # Average the stacked per-member predictions across the ensemble axis.
    return np.mean(np.stack(member_preds), axis=0)
Example 7
Project: AIX360   Author: IBM   File: test_shap.py    License: Apache License 2.0 6 votes vote down vote up
def test_ShapGradientExplainer(self):
    """Placeholder test for SHAP's GradientExplainer.

    The full example (VGG16 on imagenet50, explaining two images via
    GradientExplainer on an intermediate layer) is intentionally disabled:
    it requires downloading pretrained weights and datasets. Only the
    skip notice is emitted.
    """
    print("Skipped Shap GradientExplainer")
Example 8
Project: PanopticSegmentation   Author: dmechea   File: model.py    License: MIT License 6 votes vote down vote up
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    backbone = config.BACKBONE
    if callable(backbone):
        # A callable backbone supplies its own shape computation.
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert backbone in ["resnet50", "resnet101"]
    h, w = image_shape[0], image_shape[1]
    return np.array([
        [int(math.ceil(h / s)), int(math.ceil(w / s))]
        for s in config.BACKBONE_STRIDES
    ])


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 9
Project: rasa_wechat   Author: Rowl1ng   File: keras_policy.py    License: Apache License 2.0 6 votes vote down vote up
def _build_model(self, num_features, num_actions, max_history_len):
    """Build a keras model and return a compiled model.

    :param max_history_len: The maximum number of historical
                            turns used to decide on next action
    """
    from keras.layers import LSTM, Activation, Masking, Dense
    from keras.models import Sequential

    hidden_units = 32  # Neural net and training params
    batch_shape = (None, max_history_len, num_features)

    # Stack: mask padded (-1) timesteps -> LSTM -> dense softmax over actions.
    model = Sequential([
        Masking(-1, batch_input_shape=batch_shape),
        LSTM(hidden_units, batch_input_shape=batch_shape),
        Dense(input_dim=hidden_units, units=num_actions),
        Activation('softmax'),
    ])

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    logger.debug(model.summary())
    return model
Example 10
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ELMoBiLSTM.py    License: Apache License 2.0 6 votes vote down vote up
def tagSentences(self, sentences):
    """Tag every sentence with each trained model.

    Returns a dict mapping model name to a list (one entry per sentence)
    of predicted label strings, with padding positions removed.
    """
    # Pad characters if the character feature is in use.
    if 'characters' in self.params['featureNames']:
        self.padCharacters(sentences)

    labels = {}
    for modelName, model in self.models.items():
        padded = self.predictLabels(model, sentences)
        idx2Label = self.idx2Labels[modelName]

        unpadded = []
        for sentIdx, sentence in enumerate(sentences):
            tags = []
            for tokIdx, token in enumerate(sentence['tokens']):
                if token != 0:  # Skip padding tokens
                    tags.append(padded[sentIdx][tokIdx])
            unpadded.append(tags)

        # Map label indices back to label strings.
        labels[modelName] = [[idx2Label[tag] for tag in sent] for sent in unpadded]

    return labels
Example 11
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ELMoBiLSTM.py    License: Apache License 2.0 6 votes vote down vote up
def computeF1(self, modelName, sentences):
    """Compute precision/recall/F1 for one model on ``sentences``.

    Scores twice via BIOF1Validation — once treating ill-formed tags as 'O'
    and once rewriting them to 'B-' — and returns the better result.
    """
    model = self.models[modelName]
    idx2Label = self.idx2Labels[modelName]
    labelKey = self.labelKeys[modelName]
    # The encoding scheme (e.g. BIO) is the suffix of the label key.
    encodingScheme = labelKey[labelKey.index('_') + 1:]

    correctLabels = [sentence[labelKey] for sentence in sentences]
    predLabels = self.predictLabels(model, sentences)

    pre, rec, f1 = BIOF1Validation.compute_f1(
        predLabels, correctLabels, idx2Label, 'O', encodingScheme)
    pre_b, rec_b, f1_b = BIOF1Validation.compute_f1(
        predLabels, correctLabels, idx2Label, 'B', encodingScheme)

    if f1_b > f1:
        logging.debug("Setting wrong tags to B- improves from %.4f to %.4f" % (f1, f1_b))
        pre, rec, f1 = pre_b, rec_b, f1_b

    return pre, rec, f1
Example 12
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ELMoBiLSTM.py    License: Apache License 2.0 6 votes vote down vote up
def saveModel(self, modelName, epoch, dev_score, test_score):
    """Persist one trained model to HDF5 with metadata stored as file attrs.

    :param modelName: key into self.models / self.datasets
    :param epoch: 0-based epoch index (stored 1-based in the filename)
    :param dev_score: development-set score for the filename template
    :param test_score: test-set score for the filename template
    :raises ValueError: if no modelSavePath template was configured
    """
    import json
    import h5py

    if self.modelSavePath is None:  # fixed: identity check instead of '== None'
        raise ValueError('modelSavePath not specified.')

    # Fill the path template, e.g. ".../[ModelName]_[DevScore]_[Epoch].h5".
    savePath = (self.modelSavePath
                .replace("[DevScore]", "%.4f" % dev_score)
                .replace("[TestScore]", "%.4f" % test_score)
                .replace("[Epoch]", str(epoch + 1))
                .replace("[ModelName]", modelName))

    directory = os.path.dirname(savePath)
    # Guard against an empty dirname (plain filename -> current directory),
    # which would make os.makedirs('') raise.
    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    if os.path.isfile(savePath):
        logging.info("Model "+savePath+" already exists. Model will be overwritten")

    self.models[modelName].save(savePath, True)

    # Append mappings/params so a loader can fully restore this model.
    with h5py.File(savePath, 'a') as h5file:
        h5file.attrs['mappings'] = json.dumps(self.mappings)
        h5file.attrs['params'] = json.dumps(self.params)
        h5file.attrs['modelName'] = modelName
        h5file.attrs['labelKey'] = self.datasets[modelName]['label']
Example 13
Project: imgclsmob   Author: osmr   File: shufflenetv2b.py    License: MIT License 6 votes vote down vote up
def shufflenetv2b_wd2(**kwargs):
    """
    ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # 12/29 is the channel-width multiplier used for the "0.5x" variant.
    width_scale = 12.0 / 29.0
    return get_shufflenetv2b(width_scale=width_scale,
                             model_name="shufflenetv2b_wd2",
                             **kwargs)
Example 14
Project: imgclsmob   Author: osmr   File: shufflenetv2b.py    License: MIT License 6 votes vote down vote up
def shufflenetv2b_w1(**kwargs):
    """
    ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Base configuration: no channel-width scaling.
    return get_shufflenetv2b(width_scale=1.0,
                             model_name="shufflenetv2b_w1",
                             **kwargs)
Example 15
Project: imgclsmob   Author: osmr   File: shufflenetv2b.py    License: MIT License 6 votes vote down vote up
def shufflenetv2b_w3d2(**kwargs):
    """
    ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # 44/29 is the channel-width multiplier used for the "1.5x" variant.
    width_scale = 44.0 / 29.0
    return get_shufflenetv2b(width_scale=width_scale,
                             model_name="shufflenetv2b_w3d2",
                             **kwargs)
Example 16
Project: imgclsmob   Author: osmr   File: shufflenetv2b.py    License: MIT License 6 votes vote down vote up
def shufflenetv2b_w2(**kwargs):
    """
    ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # 61/29 is the channel-width multiplier used for the "2x" variant.
    width_scale = 61.0 / 29.0
    return get_shufflenetv2b(width_scale=width_scale,
                             model_name="shufflenetv2b_w2",
                             **kwargs)
Example 17
Project: keras_mixnets   Author: titu1994   File: custom_objects.py    License: MIT License 5 votes vote down vote up
def __call__(self, shape, dtype=None):
    """Sample a conv-kernel tensor from N(0, sqrt(2 / fan_out)).

    ``shape`` is (kernel_h, kernel_w, in_filters, out_filters); fan_out is
    computed from the spatial size and output filters.
    """
    if not dtype:
        dtype = K.floatx()

    kernel_h, kernel_w, _, out_filters = shape
    fan_out = int(kernel_h * kernel_w * out_filters)
    stddev = np.sqrt(2.0 / fan_out)
    return tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example 18
Project: keras_mixnets   Author: titu1994   File: custom_objects.py    License: MIT License 5 votes vote down vote up
def __call__(self, shape, dtype=None):
    """Sample a dense-kernel tensor uniformly in [-1/sqrt(fan_in), 1/sqrt(fan_in)].

    NOTE(review): uses shape[1] as the fan-in term — confirm the intended
    axis against the dense layer's kernel layout.
    """
    if not dtype:
        dtype = K.floatx()

    limit = 1.0 / np.sqrt(shape[1])
    return tf.random_uniform(shape, -limit, limit, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example 19
Project: keras_mixnets   Author: titu1994   File: custom_objects.py    License: MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
    """Apply the swish activation to ``inputs``; ``training`` is unused."""
    activated = tf.nn.swish(inputs)
    return activated


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example 20
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def fprop(self, x):
    """
    Exposes all the layers of the model returned by get_layer_names.
    :param x: A symbolic representation of the network input
    :return: A dictionary mapping layer names to the symbolic
             representation of their output.
    """
    from keras.models import Model as KerasModel

    # Lazily build (once) a multi-output model exposing every layer.
    if self.keras_model is None:
        new_input = self.model.get_input_at(0)
        per_layer_outputs = [layer.output for layer in self.model.layers]
        self.keras_model = KerasModel(new_input, per_layer_outputs)

    outputs = self.keras_model(x)

    # Keras returns a bare tensor (not a list) when there is one layer.
    if len(self.model.layers) == 1:
        outputs = [outputs]

    return dict(zip(self.get_layer_names(), outputs))
Example 21
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    License: Apache License 2.0 5 votes vote down vote up
def __init__(self, id_embd, time_embd,
             num_input_features, num_output_features, num_decoder_features,
             input_sequence_length, target_sequence_length,
             num_steps_to_predict, regulariser=None,
             lr=0.001, decay=0, loss="mse",
             layers=[35, 35], model_save_path='../models',
             model_structure_name='seq2seq_model.json',
             model_weights_name='seq2seq_model_weights.h5'):
    """Configure the competition seq2seq model wrapper.

    Fix: ``regulariser`` is now forwarded to the base class — the original
    hard-coded ``regulariser=None`` in the super() call, silently dropping
    a caller-supplied regulariser (lr/decay/loss were forwarded correctly,
    so this looked like a copy-paste slip).

    NOTE(review): ``layers=[35, 35]`` is a mutable default; it is only
    passed through here, but confirm the base class does not mutate it.
    """
    super().__init__(num_input_features, num_output_features, num_decoder_features,
                     input_sequence_length, target_sequence_length,
                     num_steps_to_predict, regulariser=regulariser,
                     lr=lr, decay=decay, loss=loss,
                     layers=layers)

    self.id_embd = id_embd
    self.time_embd = time_embd
    # Training bookkeeping.
    self.val_loss_list = []
    self.train_loss_list = []
    self.current_mean_val_loss = None
    self.early_stop_limit = 10  # with the unit of Iteration Display
    self.EARLY_STOP = False
    self.pred_var_result = []

    # z-scores for prediction-interval coverage levels.
    self.pi_dic = {0.95: 1.96, 0.9: 1.645, 0.8: 1.28, 0.68: 1.}
    self.target_list = ['t2m', 'rh2m', 'w10m']
    self.obs_range_dic = {'t2m': [-30, 42],  # Official value: [-20,42]
                          'rh2m': [0.0, 100.0],
                          'w10m': [0.0, 30.0]}
    self.obs_and_output_feature_index_map = {'t2m': 0, 'rh2m': 1, 'w10m': 2}
    self.ruitu_feature_index_map = {'t2m': 1, 'rh2m': 3, 'w10m': 4}
    # Paths for persisting the trained model.
    self.model_save_path = model_save_path
    self.model_structure_name = model_structure_name
    self.model_weights_name = model_weights_name
Example 22
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0 5 votes vote down vote up
def get_imagenet_weights(self):
    """Downloads ImageNet trained weights from Keras.
    Returns path to weights file.
    """
    from keras.utils.data_utils import get_file

    base = 'https://github.com/fchollet/deep-learning-models/'
    weights_url = base + 'releases/download/v0.2/' \
                  'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
    # get_file caches the download under ~/.keras/models and verifies the hash.
    return get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    weights_url,
                    cache_subdir='models',
                    md5_hash='a268eb855778b3df3c7506639542a6af')
Example 23
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License 5 votes vote down vote up
def save(self, path):
    r"""
    Store the QRNN model in a file.

    This stores the model to a file using pickle for all
    attributes that support pickling. The Keras model
    is handled separately, since it can not be pickled.

    .. note:: In addition to the model file with the given filename,
              additional files suffixed with :code:`_model_i` will be
              created for each neural network this model consists of.

    Arguments:

        path(str): The path including filename indicating where to
                   store the model.

    """
    filename = os.path.basename(path)
    name = os.path.splitext(filename)[0]
    dirname = os.path.dirname(path)

    # Save each Keras member network to its own file; record the names
    # first so they are included in the pickled state below.
    self.model_files = []
    for i, m in enumerate(self.models):
        self.model_files += [name + "_model_" + str(i)]
        m.save(os.path.join(dirname, self.model_files[i]))

    # Fix: open the pickle file only now and via a context manager — the
    # original opened it up front and leaked the handle if m.save() raised.
    with open(path, "wb") as f:
        pickle.dump(self, f)
Example 24
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License 5 votes vote down vote up
def load(path):
    r"""
    Load a model from a file.

    This loads a model that has been stored using the `save` method.

    Arguments:

        path(str): The path from which to read the model.

    Return:

        The loaded QRNN object.
    """
    dirname = os.path.dirname(path)

    # Context manager closes the file even if unpickling fails.
    with open(path, "rb") as f:
        qrnn = pickle.load(f)

    qrnn.models = []
    for mf in qrnn.model_files:
        mp = os.path.join(dirname, os.path.basename(mf))
        try:
            qrnn.models += [keras.models.load_model(mp, qrnn.custom_objects)]
        except Exception as e:
            # Fix: was a bare 'except:' that also caught KeyboardInterrupt
            # and discarded the original cause; chain it instead.
            raise Exception("Error loading the neural network models. " \
                            "Please make sure all files created during the"\
                            " saving are in this folder.") from e
    return qrnn
Example 25
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License 5 votes vote down vote up
def __getstate__(self):
    """Pickle support: drop the un-picklable Keras models from the state."""
    state = dict(self.__dict__)
    del state["models"]
    return state
Example 26
Project: ai-platform   Author: produvia   File: main.py    License: MIT License 5 votes vote down vote up
def load_pretrained_model():
    """Load the trained weather network from disk and return it.

    NOTE(review): relies on a module-level ``model_filepath`` — confirm it
    is defined before this is called.
    """
    return keras.models.load_model(model_filepath)
# Predict 
Example 27
Project: PanopticSegmentation   Author: dmechea   File: model.py    License: MIT License 5 votes vote down vote up
def get_imagenet_weights(self):
    """Downloads ImageNet trained weights from Keras.
    Returns path to weights file.
    """
    from keras.utils.data_utils import get_file

    weights_file = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
    weights_url = ('https://github.com/fchollet/deep-learning-models/'
                   'releases/download/v0.2/' + weights_file)
    # Cached under ~/.keras/models; the md5 hash validates the download.
    weights_path = get_file(weights_file,
                            weights_url,
                            cache_subdir='models',
                            md5_hash='a268eb855778b3df3c7506639542a6af')
    return weights_path
Example 28
Project: rasa_wechat   Author: Rowl1ng   File: keras_policy.py    License: Apache License 2.0 5 votes vote down vote up
def _load_model_arch(cls, path, meta):
    """Rebuild the Keras architecture from the stored JSON file.

    Returns the uncompiled model, or None when the architecture file
    listed in ``meta["arch"]`` does not exist under ``path``.
    """
    from keras.models import model_from_json

    arch_file = os.path.join(path, meta["arch"])
    # Guard clause: nothing stored -> nothing to load.
    if not os.path.isfile(arch_file):
        return None
    with io.open(arch_file) as f:
        return model_from_json(f.read())
Example 29
Project: MovieTaster-Open   Author: lujiaying   File: keras_item2vec.py    License: MIT License 5 votes vote down vote up
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
    """CBOW model: mean-pooled context embeddings -> softmax over the vocabulary."""
    model = keras.models.Sequential()
    model.add(Embedding(
        dict_size, emb_size,
        input_length=context_window_size,
        embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
    ))
    # Average the context-word embeddings into a single vector.
    model.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(emb_size,)))
    model.add(Dense(dict_size))
    model.add(Activation('softmax'))  # TODO: use nce

    optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy')
    return model
Example 30
Project: MovieTaster-Open   Author: lujiaying   File: keras_item2vec.py    License: MIT License 5 votes vote down vote up
def train_cbow_base_model():
    """Train the CBOW model over shuffled n-gram batches and dump embeddings.

    Fix: ``xrange`` is Python-2-only; use ``range`` (works on both).

    Streams (context, target) n-grams from the movie-name reader, trains
    one batch every ``batch_size`` examples, logs an evaluation score every
    50 batches, and finally writes the learned embedding matrix to disk.
    """
    min_word_freq = 5
    word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
    dict_size = len(word_dict)
    emb_size = 100
    context_window_size = 4
    epochs = 20
    batch_size = 128

    model = cbow_base_model(dict_size, emb_size, context_window_size)
    for epoch_id in range(epochs):  # was xrange (NameError on Python 3)
        # train by batch
        batch_id = 0
        x_batch = []
        y_batch = []
        for movie_ids in process.shuffle(process.reader_creator(word_dict, ngram=context_window_size+1), 10000)():
            batch_id += 1
            if batch_id % (batch_size*50) == 0:
                # Print evaluate log
                score = model.evaluate(np.array(x_batch),
                    keras.utils.to_categorical(y_batch, num_classes=dict_size))
                logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, score))
            if batch_id % batch_size == 0:
                # Convert labels to categorical one-hot encoding
                model.train_on_batch(np.array(x_batch),
                        keras.utils.to_categorical(y_batch, num_classes=dict_size))
                x_batch = []
                y_batch = []
            # First context_window_size ids are the context, the last is the target.
            x = np.array(movie_ids[:context_window_size])
            y = movie_ids[-1]
            x_batch.append(x)
            y_batch.append(y)
    logger.info('model train done')
    # store word embedding
    with open('./models/keras_0804_09_cbow', 'w') as fwrite:
        for idx, vec in enumerate(model.layers[0].get_weights()[0].tolist()):
            fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))