# Python `keras.backend.tile()` Examples

The following are 30 code examples of `keras.backend.tile()`.
You can vote up the examples you like or vote down the ones you don't,
and go to the original project or source file by following the link above
each example.
You may also want to check out all available functions and classes of the
module `keras.backend`, or try the search function.
Example #1
Source File: arch.py From world_models with MIT License | 7 votes |
def tf_normal(y_true, mu, sigma, pi):
    """Per-mixture Gaussian likelihood of `y_true`, summed over the mixtures.

    # Params
        y_true: targets, shape (batch, rollout, Z_DIM); tiled so each of the
            GAUSSIAN_MIXTURES components sees a copy.
        mu, sigma, pi: mixture means, std-devs and weights — presumably
            shaped (batch, rollout, GAUSSIAN_MIXTURES, Z_DIM) to broadcast
            against the reshaped y_true; TODO confirm against the caller.

    # Returns
        The weighted density summed over the mixture axis (axis=2).
    """
    rollout_length = K.shape(y_true)[1]
    # Repeat the targets once per mixture component, then split the tiled
    # channel axis back into (mixture, latent-dim).
    y_true = K.tile(y_true,(1,1,GAUSSIAN_MIXTURES))
    y_true = K.reshape(y_true, [-1, rollout_length, GAUSSIAN_MIXTURES,Z_DIM])
    # 1/sqrt(2*pi): normalisation constant of the univariate Gaussian pdf.
    oneDivSqrtTwoPI = 1 / math.sqrt(2*math.pi)
    result = y_true - mu
    # result = K.permute_dimensions(result, [2,1,0])
    # 1e-8 guards against division by a zero sigma.
    result = result * (1 / (sigma + 1e-8))
    result = -K.square(result)/2
    result = K.exp(result) * (1/(sigma + 1e-8))*oneDivSqrtTwoPI
    result = result * pi
    result = K.sum(result, axis=2) #### sum over gaussians
    #result = K.prod(result, axis=2) #### multiply over latent dims
    return result
Example #2
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License | 6 votes |
def get_constants(self, inputs, training=None):
    """Return the per-gate dropout constants for this recurrent layer.

    Dropout masking has been disabled upstream (the mask-building code was
    commented out), so a constant 1.0 scale factor is returned for each of
    the three gates and inputs pass through unscaled.
    """
    # Only the no-op (all-ones) constants remain after the dropout-mask
    # construction was disabled.
    return [[K.cast_to_floatx(1.) for _ in range(3)]]
Example #3
Source File: hypertree_model.py From costar_plan with Apache License 2.0 | 6 votes |
def concat_images_with_tiled_vector(images, vector):
    """Tile `vector` at every pixel of `images` and join on the channel axis.

    # Params
        images: one image tensor, or a list of image tensors sharing the
            same spatial dimensions.
        vector: the vector to tile onto each image; if you have several
            vectors, concatenate them before calling this function.

    # Returns
        A single tensor with the tiled vector appended as extra channels.
    """
    with K.name_scope('concat_images_with_tiled_vector'):
        if not isinstance(images, list):
            images = [images]
        first_image_shape = K.int_shape(images[0])
        tiled_vector = tile_vector_as_image_channels(vector, first_image_shape)
        # NOTE: appends to the caller-supplied list, matching the original
        # behavior.
        images.append(tiled_vector)
        return K.concatenate(images)
Example #4
Source File: capsulelayers.py From textcaps with MIT License | 6 votes |
def call(self, inputs, training=None):
    """Dynamic routing between capsules.

    Assumes inputs is (batch, input_num_capsule, input_dim_capsule) —
    TODO confirm against the layer's build().
    """
    # (batch, 1, input_num_capsule, input_dim_capsule)
    inputs_expand = K.expand_dims(inputs, 1)
    # Replicate once per output capsule so each can form its own predictions.
    inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
    if(self.channels!=0):
        # Share weights across channel groups by repeating W along axis 1.
        W2 = K.repeat_elements(self.W,int(self.input_num_capsule/self.channels),1)
    else:
        W2 = self.W
    # Prediction vectors u_hat via per-sample batch_dot over the last axes.
    inputs_hat = K.map_fn(lambda x: K.batch_dot(x, W2, [2, 3]) , elems=inputs_tiled)
    # Routing logits start at zero.
    b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])
    assert self.routings > 0, 'The routings should be > 0.'
    for i in range(self.routings):
        # NOTE(review): `dim=` is the deprecated pre-TF-1.5 spelling of
        # `axis=` for tf.nn.softmax — confirm the pinned TF version.
        c = tf.nn.softmax(b, dim=1)
        outputs = squash(K.batch_dot(c, inputs_hat, [2, 2])+ self.B)
        if i < self.routings - 1:
            # Agreement update; skipped on the final iteration.
            b += K.batch_dot(outputs, inputs_hat, [2, 3])
    return outputs
Example #5
Source File: hypertree_model.py From costar_plan with Apache License 2.0 | 6 votes |
def tile_vector_as_image_channels(vector_op, image_shape):
    """Take a vector of length n and an image shape BHWC, and repeat the
    vector as channels at each pixel.

    # Params
        vector_op: A tensor vector to tile, shape (batch, n).
        image_shape: A sequence of integers in BHWC order; indices 1 and 2
            (height and width) supply the tile dimensions.

    # Returns
        A (batch, height, width, n) tensor.
    """
    with K.name_scope('tile_vector_as_image_channels'):
        ivs = K.shape(vector_op)
        # reshape the vector into a single pixel
        vector_pixel_shape = [ivs[0], 1, 1, ivs[1]]
        vector_op = K.reshape(vector_op, vector_pixel_shape)
        # tile the pixel into a full image
        tile_dimensions = [1, image_shape[1], image_shape[2], 1]
        vector_op = K.tile(vector_op, tile_dimensions)
        # BUGFIX: the original used `K.backend() is 'tensorflow'`. Identity
        # comparison with a string literal relies on CPython interning and
        # can silently evaluate False, skipping the static-shape annotation.
        # Equality is the correct comparison.
        if K.backend() == 'tensorflow':
            output_shape = [ivs[0], image_shape[1], image_shape[2], ivs[1]]
            vector_op.set_shape(output_shape)
        return vector_op
Example #6
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #7
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #8
Source File: multi_dim_attention.py From nlp_toolkit with MIT License | 6 votes |
def call(self, x, mask=None):
    """Multi-dimensional self-attention (Lin et al. style).

    Returns the attended representation M, plus a redundancy penalty P
    when `self.punish` is set.
    """
    # Two-layer attention scoring: tanh projection then second projection.
    uit = K.tanh(K.dot(x, self.Ws1))
    ait = K.dot(uit, self.Ws2)
    ait = K.permute_dimensions(ait, (0, 2, 1))
    # Softmax over the timestep axis gives the attention matrix A.
    A = K.softmax(ait, axis=1)
    M = K.batch_dot(A, x)
    if self.punish:
        A_T = K.permute_dimensions(A, (0, 2, 1))
        # Batched identity: ||A A^T - I|| penalizes redundant attention rows.
        # NOTE(review): relies on self.batch_size being fixed — confirm.
        tile_eye = K.tile(K.eye(self.weight_ws2), [self.batch_size, 1])
        tile_eye = K.reshape(
            tile_eye, shape=[-1, self.weight_ws2, self.weight_ws2])
        AA_T = K.batch_dot(A, A_T) - tile_eye
        P = K.l2_normalize(AA_T, axis=(1, 2))
        return M, P
    else:
        return M
Example #9
Source File: lstm2ntm.py From NTM-Keras with MIT License | 6 votes |
def get_constants(self, x):
    """Dropout constants for the 4 LSTM gates: masks when enabled, else 1.0."""
    def _gate_masks(rate, width):
        # One (batch, width) ones tensor; each gate gets its own
        # train-phase dropout switch over the same base.
        base = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        return [K.in_train_phase(K.dropout(base, rate), base) for _ in range(4)]

    if 0 < self.dropout_U < 1:
        recurrent_part = _gate_masks(self.dropout_U, self.output_dim)
    else:
        recurrent_part = [K.cast_to_floatx(1.) for _ in range(4)]

    if 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        input_part = _gate_masks(self.dropout_W, int(input_dim))
    else:
        input_part = [K.cast_to_floatx(1.) for _ in range(4)]

    return [recurrent_part, input_part]
Example #10
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #11
Source File: scale_dot_product_attention.py From Keras-TextClassification with MIT License | 6 votes |
def call(self, inputs, mask=None, **kwargs):
    """Scaled dot-product attention with optional causal masking.

    `inputs` may be a single tensor (self-attention) or [query, key, value].
    """
    if isinstance(inputs, list):
        query, key, value = inputs
    else:
        query = key = value = inputs
    if isinstance(mask, list):
        # Only the key/value mask matters for weighting.
        mask = mask[1]
    feature_dim = K.shape(query)[-1]
    # Scaled scores, then a numerically-stable exp (max subtracted).
    e = K.batch_dot(query, key, axes=2) / K.sqrt(K.cast(feature_dim, dtype=K.floatx()))
    e = K.exp(e - K.max(e, axis=-1, keepdims=True))
    if self.history_only:
        # Zero out attention to future positions: keep key j for query i
        # only when j <= i.
        query_len, key_len = K.shape(query)[1], K.shape(key)[1]
        indices = K.tile(K.expand_dims(K.arange(key_len), axis=0), [query_len, 1])
        upper = K.expand_dims(K.arange(key_len), axis=-1)
        e *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0)
    if mask is not None:
        e *= K.cast(K.expand_dims(mask, axis=-2), K.floatx())
    # Normalize manually since masked entries were zeroed before the sum.
    a = e / (K.sum(e, axis=-1, keepdims=True) + K.epsilon())
    v = K.batch_dot(a, value)
    if self.return_attention:
        return [v, a]
    return v
Example #12
Source File: QnA.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License | 6 votes |
def get_constants(self, x):
    """Dropout constants for the 3 GRU gates: masks when enabled, else 1.0."""
    def _gate_masks(rate, width):
        # One (batch, width) ones tensor; each gate gets an independent
        # train-phase dropout switch over it.
        base = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        return [K.in_train_phase(K.dropout(base, rate), base) for _ in range(3)]

    if 0 < self.dropout_U < 1:
        recurrent_part = _gate_masks(self.dropout_U, self.output_dim)
    else:
        recurrent_part = [K.cast_to_floatx(1.) for _ in range(3)]

    if 0 < self.dropout_W < 1:
        input_dim = K.int_shape(x)[-1]
        input_part = _gate_masks(self.dropout_W, int(input_dim))
    else:
        input_part = [K.cast_to_floatx(1.) for _ in range(3)]

    return [recurrent_part, input_part]
Example #13
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License | 6 votes |
def get_constants(self, inputs, training=None):
    """Return the per-gate dropout constants for this recurrent layer.

    Dropout masking has been disabled upstream (the mask-building code was
    commented out), so a constant 1.0 scale factor is returned for each of
    the three gates and inputs pass through unscaled.
    """
    # Only the no-op (all-ones) constants remain after the dropout-mask
    # construction was disabled.
    return [[K.cast_to_floatx(1.) for _ in range(3)]]
Example #14
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License | 6 votes |
def get_constants(self, inputs, training=None):
    """Return the per-gate dropout constants for this recurrent layer.

    Dropout masking has been disabled upstream (the mask-building code was
    commented out), so a constant 1.0 scale factor is returned for each of
    the three gates and inputs pass through unscaled.
    """
    # Only the no-op (all-ones) constants remain after the dropout-mask
    # construction was disabled.
    return [[K.cast_to_floatx(1.) for _ in range(3)]]
Example #15
Source File: recurrent.py From keras_bn_library with MIT License | 6 votes |
def get_constants(self, x):
    """Dropout constants for the 4 gates: masks when enabled, 1.0 otherwise."""
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        # NOTE(review): tiled to self.input_dim here, whereas sibling
        # implementations tile the recurrent mask to output_dim/units —
        # confirm this is intentional for this layer's state width.
        ones = K.tile(ones, (1, self.input_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example #16
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #17
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #18
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #19
Source File: rnnrbm.py From keras_bn_library with MIT License | 6 votes |
def get_constants(self, x):
    """Recurrent/input dropout constants: a mask tensor when enabled, else 1.0.

    Unlike the gated variants, a single mask (not one per gate) is produced
    for each of the two dropout settings.
    """
    if 0 < self.dropout_U < 1:
        mask = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.tile(mask, (1, self.hidden_recurrent_dim))
        recurrent_constant = K.in_train_phase(K.dropout(mask, self.dropout_U), mask)
    else:
        recurrent_constant = K.cast_to_floatx(1.)

    # The input mask is only needed for the 'cpu' consume_less mode.
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        mask = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.tile(mask, (1, input_dim))
        input_constant = K.in_train_phase(K.dropout(mask, self.dropout_W), mask)
    else:
        input_constant = K.cast_to_floatx(1.)

    return [recurrent_constant, input_constant]
Example #20
Source File: backend_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_tile(self):
    """K.tile against all backends, plus Theano static-shape inference."""
    base_shape = (3, 4)
    data = np.arange(np.prod(base_shape)).reshape(base_shape)
    check_single_tensor_operation('tile', data, BACKENDS, n=[2, 1])
    check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])

    # Theano should still infer the static shape when entries are None.
    if K.backend() == 'theano':
        placeholder = K.placeholder(shape=(None, 4))
        tiled = K.tile(placeholder, 2)
        assert tiled._keras_shape == (None, 8)
        tiled = K.tile(placeholder, (4, 3))
        assert tiled._keras_shape == (None, 12)
Example #21
Source File: rhn.py From deep-models with Apache License 2.0 | 6 votes |
def get_constants(self, x):
    """Dropout constants for the 3 RHN gates: masks when enabled, else 1.0."""
    def _gate_masks(rate, width):
        # One (batch, width) ones tensor; each gate gets its own
        # train-phase dropout switch over the same base.
        base = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        return [K.in_train_phase(K.dropout(base, rate), base) for _ in range(3)]

    if 0 < self.dropout_U < 1:
        recurrent_part = _gate_masks(self.dropout_U, self.output_dim)
    else:
        recurrent_part = [K.cast_to_floatx(1.) for _ in range(3)]

    if 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        input_part = _gate_masks(self.dropout_W, input_dim)
    else:
        input_part = [K.cast_to_floatx(1.) for _ in range(3)]

    return [recurrent_part, input_part]
Example #22
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License | 6 votes |
def call(self,x,training=None):
    """Random small-rotation augmentation, applied only in the train phase.

    Theano-specific: uses RandomStreams and symbolic `.reshape`, so x is
    presumably a Theano tensor — confirm the configured backend.
    """
    # Max rotation magnitude (radians) per axis.
    deta1 = 0.3
    deta2 = 0.3
    deta3 = 0.3
    seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    # One random angle per sample, per axis, uniform in [-deta, deta].
    theta1 = rng.uniform(size=(x.shape[0],1),low=-deta1,high=deta1,dtype='float32')
    theta2 = rng.uniform(size=(x.shape[0],1),low=-deta2,high=deta2,dtype='float32')
    theta3 = rng.uniform(size=(x.shape[0],1),low=-deta3,high=deta3,dtype='float32')
    theta = K.concatenate([theta1,theta2,theta3],axis=-1)
    # Repeat the same angles across the time axis, then flatten
    # (batch, time) into one leading dimension for the rotation builder.
    theta = K.tile(theta,x.shape[1])
    theta = theta.reshape((x.shape[0], x.shape[1], 3))
    theta = theta.reshape((theta.shape[0]*theta.shape[1], theta.shape[2]))
    # Build rotation matrices and apply them; identity (x) at test time.
    M = _fusion(theta)
    output = _transform_rot(M, x)
    return K.in_train_phase(output,x,training = training)
Example #23
Source File: capsule.py From CapsNet with MIT License | 6 votes |
def call(self, inputs, **kwargs):
    """Dynamic routing between capsules (CapsNet).

    # Params
        inputs: (batch_size, input_num_capsule, input_dim_capsule)

    # Returns
        Squashed capsule outputs, (batch_size, num_capsule, dim_capsule).
    """
    # (batch_size, 1, input_num_capsule, input_dim_capsule)
    expand_inputs = K.expand_dims(inputs, axis=1)
    # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
    expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
    # (batch_size, num_capsule, input_num_capsule, dim_capsule)
    u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

    if self.num_routing <= 0:
        self.num_routing = 3
    # (batch_size, num_capsule, input_num_capsule)
    b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
    # BUGFIX: was `xrange`, which is Python 2 only and raises NameError on
    # Python 3; `range` is equivalent for loop iteration on both versions.
    for i in range(self.num_routing):
        # (batch_size, num_capsule, input_num_capsule)
        c = softmax(b, axis=1)
        # (batch_size, num_capsule, dim_capsule)
        s = K.batch_dot(c, u_hat, axes=[2, 2])
        squashed_s = squash(s)
        if i < self.num_routing - 1:
            # Agreement update; skipped on the final iteration.
            b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
    return squashed_s
Example #24
Source File: layers.py From asr-study with MIT License | 5 votes |
def get_constants(self, x):
    """Per-layer dropout constants for a stacked (depth-N) recurrent model.

    # Returns
        A list with one entry per layer; each entry holds the recurrent
        dropout constant and, for layer 0 only, the input dropout constant.
    """
    constants = []
    # BUGFIX: was `xrange`, which is Python 2 only and raises NameError on
    # Python 3; `range` is equivalent for loop iteration on both versions.
    for layer in range(self.depth):
        constant = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constant.append(B_U)
        else:
            constant.append(K.cast_to_floatx(1.))

        # Input dropout only applies to the first layer's external input.
        if layer == 0:
            if 0 < self.dropout_W < 1:
                input_shape = self.input_spec[0].shape
                input_dim = input_shape[-1]
                ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
                ones = K.tile(ones, (1, input_dim))
                B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
                constant.append(B_W)
            else:
                constant.append(K.cast_to_floatx(1.))
        constants.append(constant)
    return constants
Example #25
Source File: per_sample_dropout.py From perfect_match with MIT License | 5 votes |
def call(self, inputs, training=None):
    """Dropout with a per-sample rate, applied only in the train phase.

    Assumes self.rate is a tensor of per-sample rates (one per row of
    `inputs`) — TODO confirm against the layer's constructor.
    """
    def dropped_inputs():
        keep_prob = 1. - self.rate
        # Broadcast each sample's keep probability across the feature axis,
        # then transpose back to (batch, features).
        tile_shape = tf.expand_dims(tf.shape(inputs)[-1], axis=0)
        tiled_keep_prob = K.tile(keep_prob, tile_shape)
        keep_prob = tf.transpose(K.reshape(tiled_keep_prob, [tile_shape[0], tf.shape(keep_prob)[0]]))
        # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
        binary_tensor = tf.floor(keep_prob + K.random_uniform(shape=tf.shape(inputs)))
        return inputs * binary_tensor
    return K.in_train_phase(dropped_inputs, inputs, training=training)
Example #26
Source File: model.py From keras-yolo3 with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    # Params
        feats: raw detection features, (batch, grid_h, grid_w,
            num_anchors * (num_classes + 5)).
        anchors: list of (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: model input (height, width).
        calc_loss: when truthy, return intermediates needed by the loss.

    # Returns
        (box_xy, box_wh, box_confidence, box_class_probs), or
        (grid, feats, box_xy, box_wh) when calc_loss is truthy.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Per-cell (x, y) offsets so box centers can be expressed grid-relative.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    # grid_shape/input_shape are (h, w); [::-1] converts to (w, h) for xy.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # IDIOM: was `if calc_loss == True:`; plain truthiness is equivalent
    # for the boolean flag and is the conventional spelling.
    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example #27
Source File: grud_layers.py From GRU-D with MIT License | 5 votes |
def get_initial_state(self, inputs):
    """Initial states: zeros for all but the last state, which is seeded
    from the s (timestamp) input depending on direction."""
    # build an all-zero tensor of shape (samples, output_dim)
    initial_state = K.zeros_like(inputs[0])  # (samples, timesteps, input_dim)
    initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
    initial_state = K.expand_dims(initial_state)  # (samples, 1)
    ret = [K.tile(initial_state, [1, dim]) for dim in self.cell.state_size[:-1]]
    # initial_state for s_prev_tm1 should be the same as the first s
    # depending on the direction.
    if self.go_backwards:
        # if go_backwards, we take the last s
        # (we take the largest one in case the padded input can be invalid)
        return ret + [K.tile(K.max(inputs[2], axis=1), [1, self.cell.state_size[-1]])]
    # otherwise we take the first s.
    return ret + [K.tile(inputs[2][:, 0, :], [1, self.cell.state_size[-1]])]
Example #28
Source File: interpolation_layer.py From interp-net with MIT License | 5 votes |
def call(self, x, reconstruction=False):
    """Cross-channel interpolation over the smooth channel block.

    Assumes x stacks channel blocks of width self.d_dim along axis 1:
    values, log-intensities, and (when not reconstructing) a transient
    block — TODO confirm against the upstream interpolation layer.
    """
    self.reconstruction = reconstruction
    self.output_dim = K.int_shape(x)[-1]
    cross_channel_interp = self.cross_channel_interp
    y = x[:, :self.d_dim, :]
    w = x[:, self.d_dim:2*self.d_dim, :]
    intensity = K.exp(w)
    y = tf.transpose(y, perm=[0, 2, 1])
    w = tf.transpose(w, perm=[0, 2, 1])
    w2 = w
    # Softmax over the channel axis computed stably via logsumexp.
    w = K.tile(w[:, :, :, None], (1, 1, 1, self.d_dim))
    den = K.logsumexp(w, axis=2)
    w = K.exp(w2 - den)
    # Mean-centered weighted mix across channels, then mean added back.
    mean = K.mean(y, axis=1)
    mean = K.tile(mean[:, None, :], (1, self.output_dim, 1))
    w2 = K.dot(w*(y - mean), cross_channel_interp) + mean
    rep1 = tf.transpose(w2, perm=[0, 2, 1])
    if reconstruction is False:
        y_trans = x[:, 2*self.d_dim:3*self.d_dim, :]
        y_trans = y_trans - rep1  # subtracting smooth from transient part
        rep1 = tf.concat([rep1, intensity, y_trans], 1)
    return rep1
Example #29
Source File: interpolation_layer.py From interp-net with MIT License | 5 votes |
def call(self, x, reconstruction=False):
    """Kernel-weighted temporal interpolation onto reference time points.

    Assumes x stacks blocks of width self.d_dim along axis 1: values,
    masks, timestamps, and (in reconstruction mode) an extra mask block —
    TODO confirm against the calling model.
    """
    self.reconstruction = reconstruction
    x_t = x[:, :self.d_dim, :]
    d = x[:, 2*self.d_dim:3*self.d_dim, :]
    if reconstruction:
        # Reconstruct at the observed timestamps themselves.
        output_dim = self.time_stamp
        m = x[:, 3*self.d_dim:, :]
        ref_t = K.tile(d[:, :, None, :], (1, 1, output_dim, 1))
    else:
        # Interpolate onto a fixed grid of reference points.
        m = x[:, self.d_dim: 2*self.d_dim, :]
        ref_t = np.linspace(0, self.hours_look_ahead, self.ref_points)
        output_dim = self.ref_points
        ref_t.shape = (1, ref_t.shape[0])
    #x_t = x_t*m
    d = K.tile(d[:, :, :, None], (1, 1, 1, output_dim))
    mask = K.tile(m[:, :, :, None], (1, 1, 1, output_dim))
    x_t = K.tile(x_t[:, :, :, None], (1, 1, 1, output_dim))
    # Squared time distance between observations and reference points.
    norm = (d - ref_t)*(d - ref_t)
    a = K.ones((self.d_dim, self.time_stamp, output_dim))
    # Softplus keeps the learned kernel bandwidth positive.
    pos_kernel = K.log(1 + K.exp(self.kernel))
    alpha = a*pos_kernel[:, np.newaxis, np.newaxis]
    # Masked softmax over observations, computed stably in log space.
    w = K.logsumexp(-alpha*norm + K.log(mask), axis=2)
    w1 = K.tile(w[:, :, None, :], (1, 1, self.time_stamp, 1))
    w1 = K.exp(-alpha*norm + K.log(mask) - w1)
    y = K.sum(w1*x_t, axis=2)
    if reconstruction:
        rep1 = tf.concat([y, w], 1)
    else:
        # Sharper kernel (kappa = 10) for the transient component.
        w_t = K.logsumexp(-10.0*alpha*norm + K.log(mask), axis=2)  # kappa = 10
        w_t = K.tile(w_t[:, :, None, :], (1, 1, self.time_stamp, 1))
        w_t = K.exp(-10.0*alpha*norm + K.log(mask) - w_t)
        y_trans = K.sum(w_t*x_t, axis=2)
        rep1 = tf.concat([y, w, y_trans], 1)
    return rep1
Example #30
Source File: model.py From yolo3_keras_Flag_Detection with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    # Params
        feats: raw detection features, (batch, grid_h, grid_w,
            num_anchors * (num_classes + 5)).
        anchors: list of (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: model input (height, width).
        calc_loss: when truthy, return intermediates needed by the loss.

    # Returns
        (box_xy, box_wh, box_confidence, box_class_probs), or
        (grid, feats, box_xy, box_wh) when calc_loss is truthy.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Per-cell (x, y) offsets so box centers can be expressed grid-relative.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    # grid_shape/input_shape are (h, w); [::-1] converts to (w, h) for xy.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # IDIOM: was `if calc_loss == True:`; plain truthiness is equivalent
    # for the boolean flag and is the conventional spelling.
    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs