Python keras.backend.reshape() Examples

The following are 30 code examples showing how to use keras.backend.reshape(). They are extracted from open source projects; the project, author, file, and license are listed above each example where available.

You may also want to check out the other available functions and classes of the keras.backend module.
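
As a quick orientation before the project examples, here is a minimal, self-contained sketch of keras.backend.reshape() (the tensor values and shapes are illustrative only). A -1 entry lets the backend infer that dimension from the total number of elements:

import numpy as np
from keras import backend as K

x = K.constant(np.arange(24).reshape(4, 6))  # a batch of 4 flat vectors of length 6
y = K.reshape(x, (-1, 2, 3))                 # -1 infers the batch dimension
print(K.eval(y).shape)  # (4, 2, 3)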

Example 1
Project: keras-utility-layer-collection   Author: zimmerrol   File: layer_normalization.py    License: MIT License
def call(self, x):
        mean = K.mean(x, axis=-1)
        std = K.std(x, axis=-1)

        if len(x.shape) == 3:
            mean = K.permute_dimensions(
                K.repeat(mean, x.shape.as_list()[-1]),
                [0,2,1]
            )
            std = K.permute_dimensions(
                K.repeat(std, x.shape.as_list()[-1]),
                [0,2,1] 
            )
            
        elif len(x.shape) == 2:
            mean = K.reshape(
                K.repeat_elements(mean, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
            std = K.reshape(
                K.repeat_elements(std, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
        
        return self._g * (x - mean) / (std + self._epsilon) + self._b 
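
The repeat_elements/reshape pair in the 2-D branch is just a manual broadcast of the per-sample statistic back over the feature axis. A NumPy sketch of the same trick (shapes illustrative, not from the project):

import numpy as np

x = np.random.randn(4, 6)                        # (batch, features)
mean = x.mean(axis=-1)                           # (batch,)
mean_b = np.reshape(np.repeat(mean, 6, axis=0), (-1, 6))  # (batch, features)
print(np.allclose(mean_b, x.mean(axis=-1, keepdims=True)))  # True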
Example 2
Project: steppy-toolkit   Author: minerva-ml   File: contrib.py    License: MIT License
def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result 
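
The "max trick" mentioned in the comments is the usual log-sum-exp stabilisation: subtracting the row maximum before exponentiating leaves the softmax unchanged but avoids overflow. A NumPy illustration (not from the project):

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])
print(np.exp(logits))                       # [inf inf inf] -- naive exp overflows
stable = np.exp(logits - logits.max())      # shift by the max first
print(stable / stable.sum())                # [0.09003057 0.24472847 0.66524096]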
Example 3
def call(self,x,mask=None):
        conv_input,theta = x
        s = theta.shape
        theta = T.reshape(theta,[-1,s[2]])  # T is theano.tensor; this excerpt assumes the Theano backend
        m = K.not_equal(conv_input,0.)

        # For translation
        trans = _trans(theta)
        output = _transform_trans(trans, conv_input)
        output = output * K.cast(m,K.floatx())

        # For rotation
        M = _fusion(theta)
        output = _transform_rot(M,output)

        return output 
Example 4
def call(self,x,training=None):
        deta1 = 0.3
        deta2 = 0.3
        deta3 = 0.3
        seed = np.random.randint(1, 10**7)
        rng = RandomStreams(seed=seed)  # Theano's random stream; this excerpt assumes the Theano backend
        theta1 = rng.uniform(size=(x.shape[0],1),low=-deta1,high=deta1,dtype='float32')
        theta2 = rng.uniform(size=(x.shape[0],1),low=-deta2,high=deta2,dtype='float32')
        theta3 = rng.uniform(size=(x.shape[0],1),low=-deta3,high=deta3,dtype='float32')
        theta = K.concatenate([theta1,theta2,theta3],axis=-1)
        theta = K.tile(theta,x.shape[1])
        theta = theta.reshape((x.shape[0], x.shape[1], 3))

        theta = theta.reshape((theta.shape[0]*theta.shape[1], theta.shape[2]))
        M = _fusion(theta)
        output = _transform_rot(M, x)

        return K.in_train_phase(output, x, training=training)
Example 5
Project: fancy-cnn   Author: textclf   File: timedistributed.py    License: MIT License
def get_output(self, train=False):
        def format_shape(shape):
            if K._BACKEND == 'tensorflow':
                def trf(x):
                    try:
                        return int(x)
                    except TypeError:
                        return x

                return list(map(trf, shape))  # list() keeps the result sliceable under Python 3
            return shape

        X = self.get_input(train)

        in_shape = format_shape(K.shape(X))
        batch_flatten_len = K.prod(in_shape[:2])
        cast_in_shape = (batch_flatten_len, ) + tuple(in_shape[i] for i in range(2, K.ndim(X)))
        
        pre_outs = self.layer(K.reshape(X, cast_in_shape))
        
        out_shape = format_shape(K.shape(pre_outs))
        cast_out_shape = (in_shape[0], in_shape[1]) + tuple(out_shape[i] for i in range(1, K.ndim(pre_outs)))
        
        outputs = K.reshape(pre_outs, cast_out_shape)
        return outputs 
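
The pattern above -- merge the batch and time axes, apply the wrapped layer once, then split them back out -- is the core of any TimeDistributed wrapper. A NumPy sketch of the reshape round-trip (the dense projection stands in for an arbitrary per-timestep layer):

import numpy as np

x = np.random.randn(2, 5, 3)      # (batch, time, features)
w = np.random.randn(3, 4)         # stand-in for the wrapped layer
y = x.reshape(-1, 3) @ w          # merge batch and time: (10, 4)
y = y.reshape(2, 5, 4)            # split them back out
print(y.shape)  # (2, 5, 4)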
Example 6
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    License: MIT License
def call(self , x, mask=None):
        
        e1 = x[0].T
        e2 = x[1].T
        
        batch_size = K.shape(x[0])[0]
        sim = []
        V_out = K.dot(self.V, K.concatenate([e1,e2],axis=0))     

        for i in range(self.k): 
            temp = K.batch_dot(K.dot(e1.T,self.W[i,:,:]),e2.T,axes=1)
            sim.append(temp)
        sim = K.reshape(sim, (self.k, batch_size))

        tensor_bi_product = self.activation(V_out+sim)
        tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T

        return tensor_bi_product 
Example 7
Project: keras_bn_library   Author: bnsnapper   File: rnnrbm.py    License: MIT License
def get_constants(self, x):
		constants = []
		if 0 < self.dropout_U < 1:
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, self.hidden_recurrent_dim))
			B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
			constants.append(B_U)
		else:
			constants.append(K.cast_to_floatx(1.))
        
		if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
			input_shape = self.input_spec[0].shape
			input_dim = input_shape[-1]
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, input_dim))
			B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
			constants.append(B_W)
		else:
			constants.append(K.cast_to_floatx(1.))

		return constants 
Example 8
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    License: MIT License
def get_constants(self, x):
		constants = []
		if 0 < self.dropout_U < 1:
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, self.input_dim))
			B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
			constants.append(B_U)
		else:
			constants.append([K.cast_to_floatx(1.) for _ in range(4)])

		if 0 < self.dropout_W < 1:
			input_shape = K.int_shape(x)
			input_dim = input_shape[-1]
			ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
			ones = K.tile(ones, (1, int(input_dim)))
			B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
			constants.append(B_W)
		else:
			constants.append([K.cast_to_floatx(1.) for _ in range(4)])
		return constants 
Example 9
Project: keras_bn_library   Author: bnsnapper   File: rbm.py    License: MIT License
def contrastive_divergence_loss(self, y_true, y_pred):

		x = y_pred
		#x = K.reshape(x, (-1, self.input_dim))

		if(self.is_persistent):
			chain_start = self.persistent_chain
		else:
			chain_start = x

		def loss(chain_start, x):
			x_rec, _, _ = self.mcmc_chain(chain_start, self.nb_gibbs_steps)
			cd = K.mean(self.free_energy(x)) - K.mean(self.free_energy(x_rec))
			return cd, x_rec

		y, x_rec = loss(chain_start, x)

		if(self.is_persistent):
			self.updates = [(self.persistent_chain, x_rec)]

		return y 
Example 10
Project: EasyPR-python   Author: SunskyF   File: model.py    License: Apache License 2.0
def call(self, inputs):
        def wrapper(rois, mrcnn_class, mrcnn_bbox, image_meta):
            # currently supports one image per batch
            b = 0
            _, _, window, _ = parse_image_meta(image_meta)
            detections = refine_detections(
                rois[b], mrcnn_class[b], mrcnn_bbox[b], window[b], self.config)
            # Pad with zeros if detections < DETECTION_MAX_INSTANCES
            gap = self.config.DETECTION_MAX_INSTANCES - detections.shape[0]
            assert gap >= 0
            if gap > 0:
                detections = np.pad(detections, [(0, gap), (0, 0)],
                                    'constant', constant_values=0)

            # Cast to float32
            # TODO: track where float64 is introduced
            detections = detections.astype(np.float32)

            # Reshape output
            # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
            return np.reshape(detections,
                              [1, self.config.DETECTION_MAX_INSTANCES, 6])

        # Return wrapped function
        return tf.py_func(wrapper, inputs, tf.float32) 
Example 11
Project: deep_complex_networks   Author: ChihebTrabelsi   File: fft.py    License: MIT License
def ifft2(x):
	ff = x
	ff = KB.permute_dimensions(ff, (0, 2, 1))
	ff = KB.reshape(ff, (x.shape[0] *x.shape[2], x.shape[1]))
	tf = ifft(ff)
	tf = KB.reshape(tf, (x.shape[0], x.shape[2], x.shape[1]))
	tf = KB.permute_dimensions(tf, (0, 2, 1))
	tf = KB.reshape(tf, (x.shape[0] *x.shape[1], x.shape[2]))
	tt = ifft(tf)
	tt = KB.reshape(tt, (x.shape[0], x.shape[1], x.shape[2]))
	return tt

#
# FFT Layers:
#
#  FFT:   Batched 1-D FFT  (Input: (Batch, FeatureMaps, TimeSamples))
#  IFFT:  Batched 1-D IFFT (Input: (Batch, FeatureMaps, FreqSamples))
#  FFT2:  Batched 2-D FFT  (Input: (Batch, FeatureMaps, TimeSamplesH, TimeSamplesW))
#  IFFT2: Batched 2-D IFFT (Input: (Batch, FeatureMaps, FreqSamplesH, FreqSamplesW))
# 
Example 12
Project: deep_complex_networks   Author: ChihebTrabelsi   File: fft.py    License: MIT License
def call(self, x, mask=None):
		a = KB.permute_dimensions(x, (1,0,2,3))
		a = KB.reshape(a, (x.shape[1] *x.shape[0], x.shape[2], x.shape[3]))
		a = ifft2(a)
		a = KB.reshape(a, (x.shape[1], x.shape[0], x.shape[2], x.shape[3]))
		return KB.permute_dimensions(a, (1,0,2,3))



#
# Tests
#
# Note: The IFFT is the conjugate of the FFT of the conjugate (up to
# the 1/N factor in NumPy's default scaling convention):
#
#     np.fft.ifft(x) == np.conj(np.fft.fft(np.conj(x))) / len(x)
# 
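
A quick NumPy check of that identity; NumPy puts the 1/N factor on the inverse transform, so it must be added explicitly:

import numpy as np

x = np.random.randn(8) + 1j * np.random.randn(8)
lhs = np.fft.ifft(x)
rhs = np.conj(np.fft.fft(np.conj(x))) / len(x)   # 1/N for numpy's scaling
print(np.allclose(lhs, rhs))  # True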
Example 13
Project: FasterRCNN_KERAS   Author: akshaylamba   File: FixedBatchNormalization.py    License: Apache License 2.0
def call(self, x, mask=None):

        assert self.built, 'Layer must be built before being called'
        input_shape = K.int_shape(x)

        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
            x_normed = K.batch_normalization(
                x, self.running_mean, self.running_std,
                self.beta, self.gamma,
                epsilon=self.epsilon)
        else:
            # need broadcasting
            broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
            broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            x_normed = K.batch_normalization(
                x, broadcast_running_mean, broadcast_running_std,
                broadcast_beta, broadcast_gamma,
                epsilon=self.epsilon)

        return x_normed 
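
The broadcast_shape trick -- all ones except the normalized axis -- is a compact way to make a 1-D parameter vector broadcast against an input of any rank. A NumPy sketch (shapes illustrative):

import numpy as np

x = np.random.randn(2, 4, 4, 3)        # channels_last input, axis = -1
gamma = np.array([1.0, 2.0, 3.0])
broadcast_shape = [1] * x.ndim         # [1, 1, 1, 1]
broadcast_shape[-1] = 3                # [1, 1, 1, 3]
print((gamma.reshape(broadcast_shape) * x).shape)  # (2, 4, 4, 3)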
Example 14
def _transform_trans(theta,input):
    batch1, step1, dim1 = input.shape
    input = K.reshape(input,(batch1,step1,dim1//3,3))
    input = K.reshape(input,(batch1*step1,dim1//3,3))
    input = K.permute_dimensions(input,[0,2,1])
    add = T.ones((batch1*step1,1,dim1//3))   # homogeneous coordinate row
    input = K.concatenate([input,add],axis=1)

    output = K.batch_dot(theta,input)
    output = K.permute_dimensions(output,[0,2,1])
    output = K.reshape(output,(output.shape[0],dim1))
    output = K.reshape(output,(batch1,step1,output.shape[1]))

    return output 
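
The row of ones appended above puts the points into homogeneous coordinates, so a 3x4 affine matrix can apply rotation and translation in one batched matmul. A NumPy sketch of the translation-only case (names and shapes illustrative):

import numpy as np

points = np.random.randn(2, 3, 5)                 # (batch, xyz, num_points)
homog = np.concatenate([points, np.ones((2, 1, 5))], axis=1)   # (batch, 4, num_points)

t = np.array([0.5, -1.0, 2.0])
theta = np.tile(np.hstack([np.eye(3), t[:, None]]), (2, 1, 1))  # (batch, 3, 4) = [I | t]

out = theta @ homog                               # (batch, 3, num_points)
print(np.allclose(out, points + t[None, :, None]))  # True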
Example 15
def _trans(theta):
    tx = theta[:,3:4]
    ty = theta[:,4:5]
    tz = theta[:,5:6]
    zero = K.zeros_like(tx)
    one = K.ones_like(tx)
    first = K.reshape(K.concatenate([one,zero,zero,tx],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,one,zero,ty],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([zero,zero,one,tz],axis=1),(-1,1,4))
    trans = K.concatenate([first,second,third],axis=1)
    trans = trans.reshape((trans.shape[0],3,4))

    return trans 
Example 16
def _rotation_y(theta):
    r1 = K.cos(theta[:,0:1])
    r2 = K.sin(theta[:,0:1])
    zero = K.zeros_like(r1)
    one = K.ones_like(r1)
    first = K.reshape(K.concatenate([r1,zero,r2,zero],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,one,zero,zero],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([-r2,zero,r1,zero],axis=1),(-1,1,4))
    fourth = K.reshape(K.concatenate([zero,zero,zero,one],axis=1),(-1,1,4))
    rotation_y = K.concatenate([first,second,third,fourth],axis=1)
    rotation_y = T.reshape(rotation_y,[-1,4,4])
    return rotation_y 
Example 17
def _rotation_x(theta):
    r1 = K.cos(theta[:,1:2])
    r2 = K.sin(theta[:,1:2])
    zero = K.zeros_like(r1)
    one = K.ones_like(r1)
    first = K.reshape(K.concatenate([one,zero,zero,zero],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,r1,-r2,zero],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([zero,r2,r1,zero],axis=1),(-1,1,4))
    fourth = K.reshape(K.concatenate([zero,zero,zero,one],axis=1),(-1,1,4))
    rotation_x = K.concatenate([first,second,third,fourth],axis=1)
    rotation_x = T.reshape(rotation_x,[-1,4,4])
    return rotation_x 
Example 18
def _trans_rot_new(theta):
    tx = theta[:,3:4]
    ty = theta[:,4:5]
    tz = theta[:,5:6]
    zero = K.zeros_like(tx)
    one = K.ones_like(tx)
    first = K.reshape(K.concatenate([one,zero,zero,tx],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,one,zero,ty],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([zero,zero,one,tz],axis=1),(-1,1,4))
    fourth = K.reshape(K.concatenate([zero,zero,zero,one],axis=1),(-1,1,4))
    trans = K.concatenate([first,second,third,fourth],axis=1)

    trans = T.reshape(trans,[-1,4,4])
    return trans 
Example 19
def _transform_rot(theta,input):

    batch1, step1, dim1 = input.shape
    input = T.reshape(input,[-1,dim1//3,3])
    input = K.permute_dimensions(input,[0,2,1])
    add = T.ones((batch1*step1,1,dim1//3))   # homogeneous coordinate row
    input = K.concatenate([input,add],axis=1)

    output = K.batch_dot(theta,input)
    output = K.permute_dimensions(output,[0,2,1])
    output = output[:,:,0:3]
    output = T.reshape(output,[-1,dim1])
    output = T.reshape(output,[-1,step1,dim1])

    return output 
Example 20
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    """
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every boxes1 against every boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
    # using tf.tile() and tf.reshape.
    b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                            [1, 1, tf.shape(boxes2)[0]]), [-1, 4])
    b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
    # 2. Compute intersections
    b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
    b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
    y1 = tf.maximum(b1_y1, b2_y1)
    x1 = tf.maximum(b1_x1, b2_x1)
    y2 = tf.minimum(b1_y2, b2_y2)
    x2 = tf.minimum(b1_x2, b2_x2)
    intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
    # 3. Compute unions
    b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = b1_area + b2_area - intersection
    # 4. Compute IoU and reshape to [boxes1, boxes2]
    iou = intersection / union
    overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
    return overlaps 
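
The expand_dims/tile/reshape combination in step 1 reproduces np.repeat row by row, which TF 1.x lacked. A NumPy sketch of the equivalence (box counts illustrative):

import numpy as np

boxes1 = np.arange(8, dtype=float).reshape(2, 4)   # 2 boxes
n2 = 3                                             # pretend boxes2 holds 3 boxes
b1 = np.reshape(np.tile(np.expand_dims(boxes1, 1), (1, 1, n2)), (-1, 4))
print(np.array_equal(b1, np.repeat(boxes1, n2, axis=0)))  # True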
Example 21
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def call(self, inputs):
        rois = inputs[0]
        mrcnn_class = inputs[1]
        mrcnn_bbox = inputs[2]
        image_meta = inputs[3]

        # Get windows of images in normalized coordinates. Windows are the area
        # in the image that excludes the padding.
        # Use the shape of the first image in the batch to normalize the window
        # because we know that all images get resized to the same size.
        m = parse_image_meta_graph(image_meta)
        image_shape = m['image_shape'][0]
        window = norm_boxes_graph(m['window'], image_shape[:2])

        # Run detection refinement graph on each item in the batch
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
        # normalized coordinates
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6]) 
Example 22
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.
    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    Returns:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the feature map
    # is not even.
    # Shared convolutional base of the RPN
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Anchor Score. [batch, height, width, anchors per location * 2].
    x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                  activation='linear', name='rpn_class_raw')(shared)

    # Reshape to [batch, anchors, 2]
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)

    # Softmax on last dimension of BG/FG.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinement. [batch, H, W, anchors per location * depth]
    # where depth is [x, y, log(w), log(h)]
    x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                  activation='linear', name='rpn_bbox_pred')(shared)

    # Reshape to [batch, anchors, 4]
    rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)

    return [rpn_class_logits, rpn_probs, rpn_bbox] 
Example 23
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.
    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss 
Example 24
Project: keras-yolo3   Author: bing0037   File: model.py    License: MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
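
The grid built from the two tile/reshape calls above holds each cell's (x, y) offset, which is added to the sigmoid outputs to place boxes in grid coordinates. A NumPy sketch of the same construction (grid size illustrative):

import numpy as np

gh, gw = 2, 3
grid_y = np.tile(np.arange(gh).reshape(-1, 1, 1, 1), (1, gw, 1, 1))
grid_x = np.tile(np.arange(gw).reshape(1, -1, 1, 1), (gh, 1, 1, 1))
grid = np.concatenate([grid_x, grid_y], axis=-1)   # (gh, gw, 1, 2)
print(grid[1, 2, 0])  # [2 1] -- cell (row 1, col 2) holds offset (x=2, y=1)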
Example 25
Project: keras-yolo3   Author: bing0037   File: model.py    License: MIT License
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    '''Process Conv layer output'''
    box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
        anchors, num_classes, input_shape)
    boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    boxes = K.reshape(boxes, [-1, 4])
    box_scores = box_confidence * box_class_probs
    box_scores = K.reshape(box_scores, [-1, num_classes])
    return boxes, box_scores 
Example 26
Project: PiCamNN   Author: PiSimo   File: keras_yolo.py    License: MIT License
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Example 27
Project: df   Author: dfaker   File: pixel_shuffler.py    License: Mozilla Public License 2.0
def call(self, inputs):

        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, c, h, w = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, oc, oh, ow))
            return out

        elif self.data_format == 'channels_last':
            batch_size, h, w, c = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, oh, ow, oc))
            return out 
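
The reshape -> permute -> reshape sequence above is the standard depth-to-space (pixel shuffle) upscaling: each block of rh*rw channels is rearranged into an rh x rw spatial patch. A NumPy sketch of the channels_last path (sizes illustrative):

import numpy as np

b, h, w, c = 1, 2, 2, 8
rh, rw = 2, 2
oc = c // (rh * rw)                      # 2 output channels

x = np.arange(b * h * w * c).reshape(b, h, w, c)
out = x.reshape(b, h, w, rh, rw, oc)
out = out.transpose(0, 1, 3, 2, 4, 5)    # interleave block rows and columns
out = out.reshape(b, h * rh, w * rw, oc)
print(out.shape)  # (1, 4, 4, 2)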
Example 28
Project: df   Author: dfaker   File: exampleTrainer.py    License: Mozilla Public License 2.0
def call(self, inputs):

        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, c, h, w = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, oc, oh, ow))
            return out

        elif self.data_format == 'channels_last':
            batch_size, h, w, c = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, oh, ow, oc))
            return out 
Example 29
Project: deep-smoke-machine   Author: CMU-CREATE-Lab   File: resnet_152_keras.py    License: BSD 3-Clause "New" or "Revised" License
def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        out = K.reshape(self.gamma, broadcast_shape) * x + K.reshape(self.beta, broadcast_shape)
        return out 
Example 30
Project: timeception   Author: noureldien   File: resnet_152_keras.py    License: GNU General Public License v3.0
def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        out = K.reshape(self.gamma, broadcast_shape) * x + K.reshape(self.beta, broadcast_shape)
        return out