Python keras.backend.cast_to_floatx() Examples

The following are code examples showing how to use keras.backend.cast_to_floatx(). They are drawn from open source Python projects.
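
In short, cast_to_floatx() converts a NumPy array (or a Python scalar or list) to the backend's default float type, K.floatx(), which is 'float32' unless configured otherwise in keras.json. A minimal sketch of its behaviour, assuming the default configuration:

import numpy as np
from keras import backend as K

x = K.cast_to_floatx(np.array([1, 2, 3]))  # returns a new array of dtype K.floatx()
print(x.dtype)                             # float32 under the default configuration
one = K.cast_to_floatx(1.)                 # scalars work too (handy as a no-op mask)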

Example 1
Project: deep-models   Author: LaurentMazare   File: rhn.py    Apache License 2.0
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, self.output_dim))
      B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
      constants.append(B_U)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
      input_shape = self.input_spec[0].shape
      input_dim = input_shape[-1]
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, input_dim))
      B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
      constants.append(B_W)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants 
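
A pattern worth noting, since it recurs in most of the recurrent-layer examples on this page: when dropout is disabled, K.cast_to_floatx(1.) supplies a constant all-ones "mask" in the backend dtype, so the step function can multiply by its dropout mask unconditionally. A reduced sketch of the idea, with NumPy arrays standing in for the tensors:

import numpy as np
from keras import backend as K

mask = K.cast_to_floatx(1.)            # no-op mask used when dropout is off
h = np.ones((2, 4), dtype=K.floatx())  # stand-in for a hidden state
assert (h * mask == h).all()           # multiplying by the mask changes nothing
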
Example 2
Project: musaic   Author: al165   File: MetaEmbedding.py    GNU General Public License v3.0
def __init__(self, meta_len, embed_size, compile_now=False):
        embed_V = K.cast_to_floatx(embed_size)
        
        preprocess = Dense(embed_size, activation="relu")
        categorise = Dense(embed_size, activation="softmax", name="vales_embedding",
                           activity_regularizer=EntropyRegulariser(factor=0.01, 
                                                                   V=embed_V))

        meta = Input(shape=(meta_len, ))        
        preproced = preprocess(meta)
        embedded = categorise(preproced)    
                
        super().__init__(inputs=meta, outputs=embedded, name="MetaEmbedder")
        
        self.meta_len = meta_len
        self.embed_size = embed_size
        
        self.params = {"meta_len": meta_len,
                           "embed_size": embed_size}
        
        if compile_now:
            self.compile_default() 
Example 3
Project: keras_bn_library   Author: bnsnapper   File: rnnrbm.py    MIT License
def get_constants(self, x):
		constants = []
		if 0 < self.dropout_U < 1:
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, self.hidden_recurrent_dim))
			B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
			constants.append(B_U)
		else:
			constants.append(K.cast_to_floatx(1.))
        
		if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
			input_shape = self.input_spec[0].shape
			input_dim = input_shape[-1]
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, input_dim))
			B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
			constants.append(B_W)
		else:
			constants.append(K.cast_to_floatx(1.))

		return constants 
Example 4
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License
def get_constants(self, x):
		constants = []
		if 0 < self.dropout_U < 1:
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, self.input_dim))
			B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
			constants.append(B_U)
		else:
			constants.append([K.cast_to_floatx(1.) for _ in range(4)])

		if 0 < self.dropout_W < 1:
			input_shape = K.int_shape(x)
			input_dim = input_shape[-1]
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, int(input_dim)))
			B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
			constants.append(B_W)
		else:
			constants.append([K.cast_to_floatx(1.) for _ in range(4)])
		return constants 
Example 5
Project: yoctol-keras-layer-zoo   Author: Yoctol   File: rnn_cell.py    GNU General Public License v3.0
def get_constants(self, inputs, training=None):
        constants = self.recurrent_layer.get_constants(
            inputs=inputs,
            training=training
        )

        if 0 < self.dense_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.recurrent_layer.units))

            def dropped_inputs():
                return K.dropout(ones, self.dense_dropout)
            out_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training)]
            constants.append(out_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.)])

        return constants 
Example 6
Project: NTM-Keras   Author: SigmaQuan   File: lstm2ntm.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants 
Example 7
Project: EUNN-theano   Author: iguanaus   File: custom_layers.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants 
Example 8
Project: research   Author: commaai   File: layers.py    BSD 3-Clause "New" or "Revised" License
def get_constants(self, x):
      constants = []
      if 0 < self.dropout_U < 1:
          ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
          ones = K.concatenate([ones] * self.output_dim, 1)
          B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
          constants.append(B_U)
      else:
          constants.append(K.cast_to_floatx(1.))
      if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
          input_shape = self.input_spec[0].shape
          input_dim = input_shape[-1]
          ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
          ones = K.concatenate([ones] * input_dim, 1)
          B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
          constants.append(B_W)
      else:
          constants.append(K.cast_to_floatx(1.))
      return constants 
Example 9
Project: DRNN-Keras   Author: jmsalash   File: AudioDataSet.py    MIT License
def _populate_validation_data_hps(self):
        # Initialise X to a matrix of dimensions I tracks x J STFT frames x K STFT samples per frame
        print('Validation songs: %s' %len(self.input_file_val_mag_hps))
        self.X_val=np.zeros(shape=(len(self.input_file_val_mag_hps),self.maxframesong_hps,self.input_file_val_mag_hps[0].shape[1]))
    
        # Build the final X_val matrix    
        for i in range(len(self.input_file_val_mag_hps)):
            # JM: Normalise input using scaler
            tempx=self.myscaler_hps.transform(self.input_file_val_mag_hps[i])      
            for j in range(self.input_file_val_mag_hps[i].shape[0]):
                for kx in range(self.input_file_val_mag_hps[i].shape[1]):
                    self.X_val[i,j,kx]=tempx[j,kx]     
        # cast_to_floatx returns a new array rather than casting in place
        self.X_val = K.cast_to_floatx(self.X_val)
        print ('X validation shape: ' + str(self.X_val.shape))


   
Example 10
Project: 2019-TIFS-AnomalyNet   Author: limit-scu   File: sparse_lstm.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants 
Example 11
Project: recurrent-attention-for-QA-SQUAD-based-on-keras   Author: wentaozhu   File: QnA.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Example 12
Project: recurrent-attention-for-QA-SQUAD-based-on-keras   Author: wentaozhu   File: rnnlayer.py    MIT License
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Example 13
Project: ikelos   Author: braingineer   File: rtn.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants 
Example 14
Project: ikelos   Author: braingineer   File: rtn.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Example 15
Project: KerasCog   Author: ABAtanasov   File: Networks.py    MIT License
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.0))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.0))
        return constants 
Example 16
Project: smach_based_introspection_framework   Author: birlrobotics   File: layer_utils.py    BSD 3-Clause "New" or "Revised" License
def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # append the input as well for use later
        constants.append(inputs)
        return constants 
Example 17
Project: Keras_MedicalImgAI   Author: taoyilee   File: dataset_utility.py    MIT License
def targets(self, index=None, steps=None):
        if steps is None or steps == "auto":
            if index is None:
                return K.cast_to_floatx(self.batch["One_Hot_Labels"].tolist())
            else:
                return K.cast_to_floatx(self.batch["One_Hot_Labels"].iloc[[index]].tolist())
        else:
            return K.cast_to_floatx(self.batch["One_Hot_Labels"].iloc[:self.batch_size * steps].tolist()) 
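
Because cast_to_floatx() is essentially np.asarray(x, dtype=K.floatx()), it also converts the plain Python lists produced by .tolist() above into NumPy arrays of the backend dtype. A small illustration:

from keras import backend as K

labels = [[0., 1., 0.], [1., 0., 0.]]  # e.g. the output of .tolist()
arr = K.cast_to_floatx(labels)         # list -> ndarray of dtype K.floatx()
print(arr.dtype, arr.shape)            # float32 (2, 3)
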
Example 18
Project: lmtc-eurlex57k   Author: iliaschalkidis   File: label_driven_classification.py    Apache License 2.0
def PretrainedEmbedding(self):

        inputs = Input(shape=(None,), dtype='int32')
        embeddings = KeyedVectors.load_word2vec_format(self.word_embedding_path, binary=False)
        word_embeddings_weights = K.cast_to_floatx(np.concatenate((np.zeros((1, embeddings.syn0.shape[-1]), dtype=np.float32), embeddings.syn0), axis=0))
        embeds = Embedding(len(word_embeddings_weights), word_embeddings_weights.shape[-1],
                           weights=[word_embeddings_weights], trainable=False)(inputs)

        return Model(inputs=inputs, outputs=embeds, name='embedding') 
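
Here the cast guarantees that the pretrained weight matrix matches the backend float type before it is handed to Embedding(weights=...); gensim may deliver float32 or float64 depending on the source file. A reduced sketch with a random matrix standing in for the loaded vectors (the KeyedVectors call above is assumed):

import numpy as np
from keras import backend as K

vectors = np.random.randn(1000, 300)  # float64 stand-in for loaded word vectors
weights = K.cast_to_floatx(
    np.concatenate((np.zeros((1, vectors.shape[-1])), vectors), axis=0))  # row 0 reserved for padding
print(weights.dtype)                  # K.floatx(), typically float32
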
Example 19
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License
def __init__(self, min, max):
        self.min = K.cast_to_floatx(min)
        self.max = K.cast_to_floatx(max) 
Example 20
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    MIT License
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        #assert mc_logits.ndim == 3
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]	# only for tensorflow
        return - 1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss


Example 21
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License
def __init__(self, l1=0., l2=0.,**kwargs):
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        self.uses_learning_phase = True
        super(ActivityRegularizerOneDim, self).__init__(**kwargs)
        #self.layer = None 
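
Casting the l1/l2 factors up front is a common idiom in custom regularizers (it appears again in several examples below): the factors are stored in the backend float type so they combine cleanly with float32 weight tensors. A minimal sketch of such a penalty, with NumPy standing in for backend tensors:

import numpy as np
from keras import backend as K

l1 = K.cast_to_floatx(0.01)             # factor stored in the floatx dtype
weights = np.ones((3, 3), dtype=K.floatx())
penalty = l1 * np.sum(np.abs(weights))  # arithmetic stays in floatx
print(np.asarray(penalty).dtype)        # float32 by default
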
Example 22
Project: diktya   Author: BioroboticsLab   File: regularizers.py    Apache License 2.0
def __init__(self, weight=0.):
        self.weight = K.cast_to_floatx(weight)
        self.uses_learning_phase = True 
Example 23
Project: diktya   Author: BioroboticsLab   File: core.py    Apache License 2.0
def __init__(self, axis=1, normalize=True,
                 l1=0., l2=0., **kwargs):
        self.axis = axis
        self.normalize = normalize
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        self.uses_learning_phase = normalize
        super(BatchLoss, self).__init__(**kwargs) 
Example 24
Project: NTM-Keras   Author: SigmaQuan   File: ntm.py    MIT License
def get_constants(self, x):
        print("begin get_constants(self, x)")
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.controller_output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # if 0 < self.dropout_R < 1:
        #     input_shape = self.input_spec[0].shape
        #     input_dim = input_shape[-1]
        #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        #     ones = K.tile(ones, (1, int(input_dim)))
        #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
        #     constants.append(B_R)
        # else:
        #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        print("end get_constants(self, x)")
        return constants 
Example 25
Project: E3Outlier   Author: demonzyj56   File: utils.py    MIT License
def load_mnist():
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = normalize_minus1_1(cast_to_floatx(np.pad(X_train, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_train = np.expand_dims(X_train, axis=get_channels_axis())
    X_test = normalize_minus1_1(cast_to_floatx(np.pad(X_test, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_test = np.expand_dims(X_test, axis=get_channels_axis())
    return (X_train, y_train), (X_test, y_test) 
Example 26
Project: E3Outlier   Author: demonzyj56   File: utils.py    MIT License
def load_cifar10():
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = normalize_minus1_1(cast_to_floatx(X_train))
    X_test = normalize_minus1_1(cast_to_floatx(X_test))
    return (X_train, y_train), (X_test, y_test) 
Example 27
Project: E3Outlier   Author: demonzyj56   File: utils.py    MIT License
def load_cifar100(label_mode='coarse'):
    (X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode=label_mode)
    X_train = normalize_minus1_1(cast_to_floatx(X_train))
    X_test = normalize_minus1_1(cast_to_floatx(X_test))
    return (X_train, y_train), (X_test, y_test) 
Example 28
Project: E3Outlier   Author: demonzyj56   File: utils.py    MIT License
def load_svhn(data_dir='.SVHN_data/'):
    img_train_data = SVHN(root=data_dir, split='train', download=True)
    img_test_data = SVHN(root=data_dir, split='test', download=True)
    X_train = img_train_data.data.transpose((0, 2, 3, 1))
    y_train = img_train_data.labels
    X_test = img_test_data.data.transpose((0, 2, 3, 1))
    y_test = img_test_data.labels
    X_train = normalize_minus1_1(cast_to_floatx(X_train))
    X_test = normalize_minus1_1(cast_to_floatx(X_test))
    return (X_train, y_train), (X_test, y_test) 
Example 29
Project: Beta-Mish   Author: digantamisra98   File: beta_mish.py    MIT License
def __init__(self, beta=1.5, **kwargs):
        super(beta_mish, self).__init__(**kwargs)
        self.supports_masking = True
        self.beta = K.cast_to_floatx(beta) 
Example 30
Project: keras-training   Author: hls-fpga-machine-learning   File: quantized_layers.py    GNU General Public License v3.0
def get_constants(self, inputs, training=None):
        constants = []
        if 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants 
Example 31
Project: LSTM-FCN   Author: ShobhitLamba   File: layer_utils.py    MIT License
def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # append the input as well for use later
        constants.append(inputs)
        return constants 
Example 32
Project: keras-contrib   Author: keras-team   File: sinerelu.py    MIT License
def __init__(self, epsilon=0.0025, **kwargs):
        super(SineReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.epsilon = K.cast_to_floatx(epsilon) 
Example 33
Project: keras-contrib   Author: keras-team   File: constraints_test.py    MIT License
def test_clip():
    clip_instance = constraints.clip()
    clipped = clip_instance(K.variable(example_array))
    assert(np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.01))
    clip_instance = constraints.clip(0.1)
    clipped = clip_instance(K.variable(example_array))
    assert(np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.1)) 
Example 34
Project: experiments   Author: Octavian-ai   File: adjacency_layer.py    MIT License
def __init__(self, a=0.0001, b=0.0, axis=-1):
		self.a = K.cast_to_floatx(a)
		self.b = K.cast_to_floatx(b)

		self.axis = axis 
Example 35
Project: KernelizedManifoldMapping   Author: asgsaeid   File: data.py    MIT License
def preprocess_data(x_train, y_train, x_test, y_test, num_rows, num_cols, nb_classes, channel_order, print_shape=False):

    # Set channel order
    if channel_order == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, num_rows, num_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, num_rows, num_cols)
        input_shape = (1, num_rows, num_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], num_rows, num_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], num_rows, num_cols, 1)
        input_shape = (num_rows, num_cols, 1)

    # Preprocess the data
    x_train = K.cast_to_floatx(x_train)
    x_test = K.cast_to_floatx(x_test)
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, nb_classes)
    y_test = keras.utils.to_categorical(y_test, nb_classes)

    if print_shape:
        print('x_train shape:', x_train.shape)
        print(x_train.shape[0], 'train samples.')
        print(x_test.shape[0], 'test samples.')

    return input_shape, x_train, y_train, x_test, y_test 
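
The cast before the division matters here: image data typically loads as uint8, and NumPy rejects in-place true division on integer arrays, so casting to floatx first both fixes the dtype and makes x_train /= 255 legal. A reduced sketch:

import numpy as np
from keras import backend as K

x = np.array([[0, 128, 255]], dtype=np.uint8)  # mock pixel data
x = K.cast_to_floatx(x)                        # uint8 -> float32
x /= 255                                       # in-place scaling now works
print(x)                                       # values scaled into [0, 1]
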
Example 36
Project: DRNN-Keras   Author: jmsalash   File: AudioDataSet.py    MIT License
def _populate_validation_data(self):
        # Initialise X to a matrix of dimensions I tracks x J STFT frames x K STFT samples per frame
        print('Validation songs: %s' %len(self.input_file_val_mag))
        self.X_val=np.zeros(shape=(len(self.input_file_val_mag),self.maxframesong,self.input_file_val_mag[0].shape[1]))
        
        # Build the final X_val matrix
        for i in range(len(self.input_file_val_mag)):
            # JM: Normalise input using scaler
            tempx=self.myscaler.transform(self.input_file_val_mag[i])    
    
            for j in range(self.input_file_val_mag[i].shape[0]):
                for kx in range(self.input_file_val_mag[i].shape[1]):
                    self.X_val[i,j,kx]=tempx[j,kx]
        # Force the dtype set in the main file. Either float32 or float64
        self.X_val = K.cast_to_floatx(self.X_val)
        # Sanity check of the generated data.
        print ('X validation shape: ' + str(self.X_val.shape))
        print ('X validation type : ', self.X_val.dtype)
    
Example 37
Project: Mila   Author: digantamisra98   File: mila.py    MIT License
def __init__(self, beta=-0.25, **kwargs):
        super(mila, self).__init__(**kwargs)
        self.supports_masking = True
        self.beta = K.cast_to_floatx(beta) 
Example 38
Project: DLWP   Author: jweyn   File: custom.py    MIT License
def __init__(self, loss_function, lats, data_format='channels_last', weighting='cosine'):
        """
        Initialize a weighted loss.

        :param loss_function: method: Keras loss function to apply after the weighting
        :param lats: ndarray: 1-dimensional array of latitude coordinates
        :param data_format: Keras data_format ('channels_first' or 'channels_last')
        :param weighting: str: type of weighting to apply. Options are:
            cosine: weight by the cosine of the latitude (default)
            midlatitude: weight by the cosine of the latitude but also apply a 25% reduction to the equator and boost
                to the mid-latitudes
        """
        self.loss_function = loss_function
        self.lats = lats
        self.data_format = K.normalize_data_format(data_format)
        if weighting not in ['cosine', 'midlatitude']:
            raise ValueError("'weighting' must be one of 'cosine' or 'midlatitude'")
        self.weighting = weighting
        lat_tensor = K.zeros(lats.shape)
        print(lats)
        lat_tensor.assign(K.cast_to_floatx(lats[:]))
        self.weights = K.cos(lat_tensor * np.pi / 180.)
        if self.weighting == 'midlatitude':
            self.weights = self.weights - 0.25 * K.sin(lat_tensor * 2 * np.pi / 180.)
        self.is_init = False

        self.__name__ = 'latitude_weighted_loss' 
Example 39
Project: DLWP   Author: jweyn   File: custom.py    MIT License
def latitude_weighted_loss(loss_function=mean_squared_error, lats=None, output_shape=(), axis=-2, weighting='cosine'):
    """
    Create a loss function that weights inputs by a function of latitude before calculating the loss.

    :param loss_function: method: Keras loss function to apply after the weighting
    :param lats: ndarray: 1-dimensional array of latitude coordinates
    :param output_shape: tuple: shape of expected model output
    :param axis: int: latitude axis in model output shape
    :param weighting: str: type of weighting to apply. Options are:
            cosine: weight by the cosine of the latitude (default)
            midlatitude: weight by the cosine of the latitude but also apply a 25% reduction to the equator and boost
                to the mid-latitudes
    :return: callable loss function
    """
    if weighting not in ['cosine', 'midlatitude']:
        raise ValueError("'weighting' must be one of 'cosine' or 'midlatitude'")
    if lats is not None:
        lat_tensor = K.zeros(lats.shape)
        lat_tensor.assign(K.cast_to_floatx(lats[:]))

        weights = K.cos(lat_tensor * np.pi / 180.)
        if weighting == 'midlatitude':
            weights = weights + 0.5 * K.pow(K.sin(lat_tensor * 2 * np.pi / 180.), 2.)

        weight_shape = output_shape[axis:]
        for d in weight_shape[1:]:
            weights = K.expand_dims(weights, axis=-1)
            weights = K.repeat_elements(weights, d, axis=-1)

    else:
        weights = K.ones(output_shape)

    def lat_loss(y_true, y_pred):
        return loss_function(y_true * weights, y_pred * weights)

    return lat_loss 
Example 40
Project: recurrent-attention-for-QA-SQUAD-based-on-keras   Author: wentaozhu   File: rnnlayer.py    MIT License
def get_constants(self, inputs, training=None):
        constants = []
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Example 41
Project: rna_protein_binding   Author: wentaozhu   File: customlayers.py    MIT License
def __init__(self, l1=0., l2=0.,**kwargs):
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        self.uses_learning_phase = True
        super(ActivityRegularizerOneDim, self).__init__(**kwargs)
        #self.layer = None 
Example 42
Project: nn_playground   Author: DingKe   File: ternary_layers.py    MIT License
def get_constants(self, inputs, training=None):
        constants = []
        if 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants

Example 43
Project: NNCF   Author: chentingpc   File: utilities.py    MIT License
def __init__(self, l1=0., l2=0.):
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2) 
Example 44
Project: smsop   Author: kcyu2014   File: assist_layer.py    MIT License
def __init__(self, l2=0.):
        self.l2 = K.cast_to_floatx(l2) 
Example 45
Project: CDANs   Author: a-gardner1   File: KerasSupplementary.py    MIT License
def __init__(self, l1=0., l2=0.):
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        self.uses_learning_phase = True 
Example 46
Project: CDANs   Author: a-gardner1   File: KerasSupplementary.py    MIT License
def call(self, x, mask = None):
        input_shape = self.input_spec[0].shape
        x = K.reshape(x, (x.shape[0], x.shape[1], -1))
        numPools = (input_shape[1] - 1) // self.stride + 1  # integer pool count
        poolStarts = K.expand_dims(K.expand_dims(T.arange(0, input_shape[1], self.stride), -1), 0)
        if mask is not None:
            _, pools, _ = K.rnn(lambda p, t: (K.expand_dims(TimeDistributedMerge(True, self.mode)(
                                                              t[-2][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1])), :],
                                                              t[-1][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1]))]
                                                              ), 
                                                           0), []),
                                poolStarts, [], False, None, [x, mask], unroll = True, input_length = numPools)
        else:
            _, pools, _ = K.rnn(lambda p, t: (K.expand_dims(TimeDistributedMerge(False, self.mode)(
                                                              t[-1][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1])), :]
                                                              ), 
                                                           0), []),
                                poolStarts, [], False, None, [x], unroll = True, input_length = numPools)
        if numPools == 1:
            pools = K.expand_dims(pools, -1)
        output = pools
        #if mask is not None:
        #    # If the pool contains at least one unmasked input (nonzero mask), 
        #    # then the output is unmasked (nonzero).
        #    masks = []
        #    for poolStart in poolStarts:
        #        masks += [mask[:,poolStart:(poolStart+self.pool_size)]]
        #else:
        #    masks = [K.cast_to_floatx(1) for poolStart in poolStarts]
        #pools = []
        #for poolStart, poolmask in zip(poolStarts, masks):
        #    pool = x[:, poolStart:(poolStart+self.pool_size), :]
        #    pools += [TimeDistributedMerge(mask is not None, self.mode)(pool, poolmask)]
        #output = K.concatenate(pools, axis = 1)
        return K.reshape(output, (x.shape[0], numPools)+input_shape[2:]) 
Example 47
Project: CDANs   Author: a-gardner1   File: KerasSupplementary.py    MIT License
def call(self, x, mask = None):
        if mask is not None and K.ndim(x) == 3:
            #replace masked values with large values
            modX = K.max(x)+K.cast_to_floatx(1)
            modX = K.switch(K.expand_dims(mask, -1), x, modX)
            indices = T.argsort(modX, axis = -2)
        else: #mask has no effect even if it exists
            indices = T.argsort(x, axis = -2)
        # don't know how to do this without reshaping
        input_shape = x.shape
        indices = K.reshape(take(indices, 0, -1), (-1, input_shape[-2]))
        x = K.reshape(x, (-1, input_shape[-2], input_shape[-1]))
        x = x[K.expand_dims(T.arange(input_shape[0]), -1), indices]
        return K.reshape(x, input_shape) 
Example 48
Project: AnomalyDetectionTransformations   Author: izikgo   File: multiclass_experiment.py    MIT License
def load_tinyimagenet(tinyimagenet_path='./'):
    images = [plt.imread(fp) for fp in glob(os.path.join(tinyimagenet_path, '*.jpg'))]

    for i in range(len(images)):
        if len(images[i].shape) != 3:
            images[i] = np.stack([images[i], images[i], images[i]], axis=-1)

    images = np.stack(images)
    images = normalize_minus1_1(K.cast_to_floatx(images))
    return images 
Example 49
Project: AnomalyDetectionTransformations   Author: izikgo   File: utils.py    MIT License
def load_fashion_mnist():
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
    X_train = normalize_minus1_1(cast_to_floatx(np.pad(X_train, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_train = np.expand_dims(X_train, axis=get_channels_axis())
    X_test = normalize_minus1_1(cast_to_floatx(np.pad(X_test, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_test = np.expand_dims(X_test, axis=get_channels_axis())
    return (X_train, y_train), (X_test, y_test) 
Example 50
Project: AnomalyDetectionTransformations   Author: izikgo   File: utils.py    MIT License
def load_mnist():
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = normalize_minus1_1(cast_to_floatx(np.pad(X_train, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_train = np.expand_dims(X_train, axis=get_channels_axis())
    X_test = normalize_minus1_1(cast_to_floatx(np.pad(X_test, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_test = np.expand_dims(X_test, axis=get_channels_axis())
    return (X_train, y_train), (X_test, y_test) 
Example 51
Project: AnomalyDetectionTransformations   Author: izikgo   File: utils.py    MIT License
def load_cifar10():
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = normalize_minus1_1(cast_to_floatx(X_train))
    X_test = normalize_minus1_1(cast_to_floatx(X_test))
    return (X_train, y_train), (X_test, y_test) 
Example 52
Project: AnomalyDetectionTransformations   Author: izikgo   File: utils.py    MIT License
def load_cifar100(label_mode='coarse'):
    (X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode=label_mode)
    X_train = normalize_minus1_1(cast_to_floatx(X_train))
    X_test = normalize_minus1_1(cast_to_floatx(X_test))
    return (X_train, y_train), (X_test, y_test) 
Example 53
Project: E3Outlier   Author: demonzyj56   File: utils.py    MIT License
def load_fashion_mnist():
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
    X_train = normalize_minus1_1(cast_to_floatx(np.pad(X_train, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_train = np.expand_dims(X_train, axis=get_channels_axis())
    X_test = normalize_minus1_1(cast_to_floatx(np.pad(X_test, ((0, 0), (2, 2), (2, 2)), 'constant')))
    X_test = np.expand_dims(X_test, axis=get_channels_axis())
    return (X_train, y_train), (X_test, y_test)