Python keras.backend.ones() Examples
The following are 11 code examples of keras.backend.ones(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
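As a quick orientation before the project examples, here is a minimal sketch of the function itself (the shape and dtype are illustrative): K.ones() builds a backend tensor filled with ones, the graph-level counterpart of np.ones.

import numpy as np
from keras import backend as K

# Create a 2x3 ones variable in the active backend and read it back.
x = K.ones((2, 3), dtype='float32')
assert np.allclose(K.eval(x), np.ones((2, 3)))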
Example #1
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_masking_correctness(layer_class):
    # Check masking: output with left padding and right padding
    # should be the same.
    model = Sequential()
    model.add(embeddings.Embedding(embedding_num, embedding_dim,
                                   mask_zero=True,
                                   input_length=timesteps,
                                   batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(units, return_sequences=False)
    model.add(layer)
    model.compile(optimizer='sgd', loss='mse')

    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)

    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)

    assert_allclose(out7, out6, atol=1e-5)
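For context on why the two paddings are equivalent: mask_zero=True makes token index 0 a padding marker, so the computed mask is False exactly where the input is 0, regardless of side. A minimal sketch (hypothetical vocabulary size and dimensions):

import numpy as np
from keras import backend as K
from keras.layers import Embedding

# The mask follows the zeros, so masked timesteps are skipped symmetrically.
emb = Embedding(10, 4, mask_zero=True)
x = K.variable(np.array([[0, 2, 3], [5, 0, 0]]))
print(K.eval(emb.compute_mask(x)))  # [[False True True], [True False False]]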
Example #2
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_reset_states_with_values(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1
    layer = layer_class(units, stateful=True)
    layer.build((num_samples, timesteps, embedding_dim))
    layer.reset_states()
    assert len(layer.states) == num_states
    assert layer.states[0] is not None
    np.testing.assert_allclose(K.eval(layer.states[0]),
                               np.zeros(K.int_shape(layer.states[0])),
                               atol=1e-4)
    state_shapes = [K.int_shape(state) for state in layer.states]
    values = [np.ones(shape) for shape in state_shapes]
    if len(values) == 1:
        values = values[0]
    layer.reset_states(values)
    np.testing.assert_allclose(K.eval(layer.states[0]),
                               np.ones(K.int_shape(layer.states[0])),
                               atol=1e-4)

    # Test fit with invalid data
    with pytest.raises(ValueError):
        layer.reset_states([1] * (len(layer.states) + 1))
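The same pattern with concrete numbers, as a sketch (units, batch size, and shapes are made up): an LSTM carries two states, h and c, each of shape (batch, units), and reset_states accepts a matching list of arrays.

import numpy as np
from keras import backend as K
from keras.layers import LSTM

# Stateful LSTM: batch 2, 5 timesteps, feature dim 4, 3 units.
layer = LSTM(3, stateful=True)
layer.build((2, 5, 4))
layer.reset_states([np.ones((2, 3)), np.zeros((2, 3))])  # [h, c]
print(K.eval(layer.states[0]))  # all ones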
Example #3
Source File: train.py From dfc2019 with MIT License
def tversky_loss(y_true, y_pred):
    # ignore the last category
    shp = K.shape(y_true)
    y_true = y_true[:, :, :, 0:shp[3] - 1]
    y_pred = y_pred[:, :, :, 0:shp[3] - 1]
    alpha = 1.0
    beta = 1.0
    ones = K.ones(K.shape(y_true))
    p0 = y_pred
    p1 = ones - y_pred
    g0 = y_true
    g1 = ones - y_true
    num = K.sum(p0 * g0, (0, 1, 2))
    den = num + alpha * K.sum(p0 * g1, (0, 1, 2)) + beta * K.sum(p1 * g0, (0, 1, 2))
    T = K.sum(num / den)
    Ncl = K.cast(K.shape(y_true)[-1], 'float32')
    return Ncl - T
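A quick sanity check of the loss, as a sketch with made-up shapes (2 images, 4x4 pixels, 3 real classes plus the ignored last category); assumes the TensorFlow backend, which allows slicing with symbolic shape values:

import numpy as np
from keras import backend as K

labels = np.random.randint(0, 4, size=(2, 4, 4))
y_true = K.variable(np.eye(4)[labels])                     # one-hot, shape (2, 4, 4, 4)
y_pred = K.variable(np.random.uniform(size=(2, 4, 4, 4)))  # fake softmax output
print(K.eval(tversky_loss(y_true, y_pred)))  # scalar, small when y_pred matches y_true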
Example #4
Source File: ntm.py From ntm_keras with BSD 3-Clause "New" or "Revised" License
def get_initial_state(self, X):
    # if not self.stateful:
    #     self.controller.reset_states()
    init_old_ntm_output = K.ones((self.batch_size, self.output_dim),
                                 name="init_old_ntm_output") * 0.42
    init_M = K.ones((self.batch_size, self.n_slots, self.m_depth),
                    name='main_memory') * 0.042
    init_wr = np.zeros((self.batch_size, self.read_heads, self.n_slots))
    init_wr[:, :, 0] = 1
    init_wr = K.variable(init_wr, name="init_weights_read")
    init_ww = np.zeros((self.batch_size, self.write_heads, self.n_slots))
    init_ww[:, :, 0] = 1
    init_ww = K.variable(init_ww, name="init_weights_write")
    return [init_old_ntm_output, init_M, init_wr, init_ww]
    # See chapter 3.1
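The K.ones(...) * scalar idiom here is just a compact way to build constant-initialized state tensors; a minimal sketch:

import numpy as np
from keras import backend as K

# A (2, 4) state tensor filled with 0.42.
state = K.ones((2, 4), name="init_state") * 0.42
assert np.allclose(K.eval(state), np.full((2, 4), 0.42))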
Example #5
Source File: fan.py From faceswap with GNU General Public License v3.0
def get_pts_from_predict(self, batch):
    """ Get points from predictor """
    logger.debug("Obtain points from prediction")
    num_images, num_landmarks, height, width = batch["prediction"].shape
    image_slice = np.repeat(np.arange(num_images)[:, None], num_landmarks, axis=1)
    landmark_slice = np.repeat(np.arange(num_landmarks)[None, :], num_images, axis=0)
    resolution = np.full((num_images, num_landmarks), 64, dtype='int32')
    subpixel_landmarks = np.ones((num_images, num_landmarks, 3), dtype='float32')

    flat_indices = batch["prediction"].reshape(num_images, num_landmarks, -1).argmax(-1)
    indices = np.array(np.unravel_index(flat_indices, (height, width)))
    min_clipped = np.minimum(indices + 1, height - 1)
    max_clipped = np.maximum(indices - 1, 0)
    offsets = [(image_slice, landmark_slice, indices[0], min_clipped[1]),
               (image_slice, landmark_slice, indices[0], max_clipped[1]),
               (image_slice, landmark_slice, min_clipped[0], indices[1]),
               (image_slice, landmark_slice, max_clipped[0], indices[1])]
    x_subpixel_shift = batch["prediction"][offsets[0]] - batch["prediction"][offsets[1]]
    y_subpixel_shift = batch["prediction"][offsets[2]] - batch["prediction"][offsets[3]]
    # TODO improve rudimentary sub-pixel logic to centroid of 3x3 window algorithm
    subpixel_landmarks[:, :, 0] = indices[1] + np.sign(x_subpixel_shift) * 0.25 + 0.5
    subpixel_landmarks[:, :, 1] = indices[0] + np.sign(y_subpixel_shift) * 0.25 + 0.5

    batch["landmarks"] = self.transform(subpixel_landmarks, batch["center_scale"], resolution)
    logger.trace("Obtained points from prediction: %s", batch["landmarks"])
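The core decode step is an argmax over each flattened heatmap followed by np.unravel_index back to (row, col); a standalone sketch with illustrative shapes:

import numpy as np

heatmaps = np.random.rand(2, 5, 64, 64).astype('float32')  # images, landmarks, H, W
flat_indices = heatmaps.reshape(2, 5, -1).argmax(-1)
rows, cols = np.unravel_index(flat_indices, (64, 64))
assert rows.shape == cols.shape == (2, 5)  # one (row, col) per landmark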
Example #6
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_regularizer(layer_class):
    layer = layer_class(units, return_sequences=False, weights=None,
                        input_shape=(timesteps, embedding_dim),
                        kernel_regularizer=regularizers.l1(0.01),
                        recurrent_regularizer=regularizers.l1(0.01),
                        bias_regularizer='l2')
    layer.build((None, None, embedding_dim))
    assert len(layer.losses) == 3
    assert len(layer.cell.losses) == 3

    layer = layer_class(units, return_sequences=False, weights=None,
                        input_shape=(timesteps, embedding_dim),
                        activity_regularizer='l2')
    assert layer.activity_regularizer
    x = K.variable(np.ones((num_samples, timesteps, embedding_dim)))
    layer(x)
    assert len(layer.cell.get_losses_for(x)) == 0
    assert len(layer.get_losses_for(x)) == 1
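The counting logic generalizes beyond RNNs: every regularizer attached to a built layer contributes one entry to layer.losses. A simpler sketch with a Dense layer:

from keras import regularizers
from keras.layers import Dense

dense = Dense(3, kernel_regularizer=regularizers.l1(0.01))
dense.build((None, 4))
assert len(dense.losses) == 1  # one regularizer, one loss term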
Example #7
Source File: fan.py From faceswap with GNU General Public License v3.0
def call(self, inputs, **kwargs):
    input_shape = K.int_shape(inputs)

    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis]

    broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
    broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape)
    broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
    broadcast_beta = K.reshape(self.beta, broadcast_shape)
    invstd = (K.ones(shape=broadcast_shape, dtype='float32')
              / K.sqrt(broadcast_moving_variance + self._epsilon_const))

    return ((inputs - broadcast_moving_mean) * invstd * broadcast_gamma
            + broadcast_beta)
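The K.ones(...) / K.sqrt(...) expression is a broadcastable reciprocal square root, 1 / sqrt(variance + epsilon); a small numeric check:

import numpy as np
from keras import backend as K

var = K.variable(np.array([0.25, 1.0, 4.0], dtype='float32'))
invstd = K.ones(shape=(3,), dtype='float32') / K.sqrt(var + 1e-5)
print(K.eval(invstd))  # approximately [2.0, 1.0, 0.5]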
Example #8
Source File: optimizer.py From Anime-Super-Resolution with MIT License
def get_weightnorm_params_and_grads(p, g):
    ps = K.get_variable_shape(p)

    # construct weight scaler: V_scaler = g/||V||
    V_scaler_shape = (ps[-1],)  # assumes we're using tensorflow!
    V_scaler = K.ones(V_scaler_shape)  # init to ones, so effective parameters don't change

    # get V parameters = ||V||/g * W
    norm_axes = [i for i in range(len(ps) - 1)]
    V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])

    # split V_scaler into ||V|| and g parameters
    V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
    g_param = V_scaler * V_norm

    # get grad in V,g parameters
    grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
    grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
        (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)

    return V, V_norm, V_scaler, g_param, grad_g, grad_V
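Because V_scaler starts as ones, V equals p and g_param equals the per-column norm ||p||, so the original weight is recoverable as V * g_param / ||V||; a sketch under those assumptions (the gradient argument is a dummy):

import numpy as np
import tensorflow as tf
from keras import backend as K

p = K.variable(np.random.randn(3, 4).astype('float32'))
g = K.variable(np.zeros((3, 4), dtype='float32'))  # dummy gradient
V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)
W = tf.reshape(g_param / V_norm, [1, -1]) * V  # reconstruct: V * g / ||V||
assert np.allclose(K.eval(W), K.eval(p), atol=1e-5)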
Example #9
Source File: bert.py From keras-bert-ner with MIT License
def compute_attention_mask(self, layer_id, segment_ids):
    """Apply the attention mask specific to seq2seq."""
    if self.attention_mask is None:

        def seq2seq_attention_mask(s, repeats=1):
            seq_len = K.shape(s)[1]
            ones = K.ones((1, repeats, seq_len, seq_len))
            a_mask = tf.linalg.band_part(ones, -1, 0)
            s_ex12 = K.expand_dims(K.expand_dims(s, 1), 2)
            s_ex13 = K.expand_dims(K.expand_dims(s, 1), 3)
            a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask
            a_mask = K.reshape(a_mask, (-1, seq_len, seq_len))
            return a_mask

        self.attention_mask = Lambda(
            seq2seq_attention_mask,
            arguments={"repeats": self.num_attention_heads},
            name="Attention-Mask")(segment_ids)

    return self.attention_mask
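The tf.linalg.band_part(ones, -1, 0) call keeps only the lower triangle, which is the causal half of the mask: position i may attend to positions at or before i. A standalone sketch:

import tensorflow as tf
from keras import backend as K

ones = K.ones((1, 1, 4, 4))
causal = tf.linalg.band_part(ones, -1, 0)  # lower-triangular ones
print(K.eval(causal)[0, 0])
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]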
Example #10
Source File: projLayer.py From GaneratedHandsForReal_TIME with MIT License
def __init__(self, output_shape, coeff, **kwargs):
    self.output_size = output_shape
    self.coeff = coeff
    self.base = k_b.ones((1, self.calc_cell_units()), dtype=np.float)
    self.ones = k_b.ones((21, 1, 2))
    self.board_ones = k_b.ones((21, self.calc_cell_units(), 2))

    pair = []
    for i in range(0, self.calc_cell_units()):
        pair.append((i % self.output_size[0], i // self.output_size[1]))
    pair = np.asarray(pair)

    self.back_board = k_b.ones((self.calc_cell_units(), 2))
    print(pair.shape)
    k_b.set_value(self.back_board, pair)
    super(RenderingLayer, self).__init__(**kwargs)
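The loop enumerates the (column, row) coordinates of a flattened H x W grid; for a square grid it matches this vectorized NumPy equivalent (the output_shape value is hypothetical):

import numpy as np

size = (4, 4)  # hypothetical output_shape
cells = size[0] * size[1]
pair = np.stack([np.arange(cells) % size[0], np.arange(cells) // size[1]], axis=1)
print(pair[:5])  # [[0 0] [1 0] [2 0] [3 0] [0 1]]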
Example #11
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_batch_dot_shape(self):
    x_batch = K.ones(shape=(32, 20))
    y_batch = K.ones(shape=(32, 20))
    xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=1)
    assert_allclose(K.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)
    xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=0)
    assert_allclose(K.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)
    # making sure swapping axes when ndim == 2 works
    x_batch = K.ones(shape=(32, 20))
    y_batch = K.ones(shape=(20, 32))
    xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(0, 1))
    assert_allclose(K.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)
    xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(1, 0))
    assert_allclose(K.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)
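For intuition, batch_dot with axes=1 on two (32, 20) tensors computes one dot product per row, matching an elementwise multiply-and-sum in NumPy:

import numpy as np
from keras import backend as K

x = np.random.randn(32, 20).astype('float32')
y = np.random.randn(32, 20).astype('float32')
out = K.eval(K.batch_dot(K.variable(x), K.variable(y), axes=1))
assert np.allclose(out, (x * y).sum(axis=1, keepdims=True), atol=1e-4)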