Python keras.backend.prod() Examples

The following code examples show how to use keras.backend.prod(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: fancy-cnn   Author: textclf   File: timedistributed.py    MIT License 6 votes vote down vote up
def get_output(self, train=False):
    """Apply the wrapped layer to every timestep of the input.

    Folds the batch and time axes together, runs the inner layer on the
    flattened tensor, then restores the (batch, time) leading axes on the
    output.

    BUG FIX: on Python 3 `map()` returns a lazy iterator, so the original
    `return map(trf, shape)` produced an object that does not support the
    slicing/indexing done below (`in_shape[:2]`, `in_shape[i]`); the result
    is now materialized with `list(...)`.
    """
    def format_shape(shape):
        # TensorFlow shape entries may be Dimension-like; coerce to int
        # where possible so they behave as plain Python ints.
        if K._BACKEND == 'tensorflow':
            def trf(x):
                try:
                    return int(x)
                except TypeError:
                    # Symbolic dimension (unknown at graph build time).
                    return x

            return list(map(trf, shape))
        return shape

    X = self.get_input(train)

    in_shape = format_shape(K.shape(X))
    # Collapse (batch, time) into one leading axis: (batch * time, ...).
    batch_flatten_len = K.prod(in_shape[:2])
    cast_in_shape = (batch_flatten_len, ) + tuple(in_shape[i] for i in range(2, K.ndim(X)))

    pre_outs = self.layer(K.reshape(X, cast_in_shape))

    # Restore the (batch, time) leading axes on the inner layer's output.
    out_shape = format_shape(K.shape(pre_outs))
    cast_out_shape = (in_shape[0], in_shape[1]) + tuple(out_shape[i] for i in range(1, K.ndim(pre_outs)))

    outputs = K.reshape(pre_outs, cast_out_shape)
    return outputs
Example 2
Project: group-ksparse-temporal-cnns   Author: srph25   File: ops.py    MIT License 6 votes vote down vote up
def ksparse(x, k, axis, alpha=1, absolute=False):
    """Keep only the k largest entries of ``x`` along ``axis``, zeroing the rest.

    Builds a binary mask via top-k selection and multiplies it into ``x``.

    Args:
        x: input tensor.
        k: number of entries to keep along the (flattened) sparse axes.
        alpha: outside the training phase ``k`` is scaled by ``alpha``
            (see ``K.in_train_phase`` below).
        absolute: if True, rank entries by absolute value.

    Returns:
        Tensor of the same shape as ``x`` with all but the top-k entries zeroed.
    """
    # Normalize `axis` to a tuple of ints.
    if isinstance(axis, int):
        axis = (axis,)
    elif isinstance(axis, list):
        axis = tuple(axis)
    axis_complement = tuple(set(range(K.ndim(x))) - set(axis))
    # Total size of the sparse axes once flattened together.
    shape_reduce = K.prod([K.shape(x)[j] for j in axis])
    # Effective k, clamped so it never exceeds the flattened axis size.
    _k = K.minimum(K.in_train_phase(k, alpha * k), shape_reduce)
    # Move the sparse axes last and flatten them into one dimension.
    inputs_permute_dimensions = K.permute_dimensions(x, axis_complement + axis)
    inputs_permute_dimensions_reshape = K.reshape(inputs_permute_dimensions, (-1, shape_reduce))
    if absolute is True:
        inputs_permute_dimensions_reshape = K.abs(inputs_permute_dimensions_reshape)
    _, indices = tf.nn.top_k(inputs_permute_dimensions_reshape, _k)
    # Pair each top-k column index with its row index for scatter_nd.
    scatter_indices = K.concatenate([(K.arange(K.shape(inputs_permute_dimensions_reshape)[0])[:, None] * K.ones((1, _k), dtype='int32'))[:, :, None], indices[:, :, None]])
    scatter_updates = K.ones((K.shape(inputs_permute_dimensions_reshape)[0], _k))
    # Scatter ones at the top-k positions to form the binary mask.
    mask_permute_dimensions_reshape = K.cast(tf.scatter_nd(scatter_indices, scatter_updates, K.shape(inputs_permute_dimensions_reshape)), K.floatx())
    mask_permute_dimensions = K.reshape(mask_permute_dimensions_reshape, K.shape(inputs_permute_dimensions))
    # Undo the permutation so the mask lines up with the original layout.
    mask = K.permute_dimensions(mask_permute_dimensions, tuple(np.argsort(axis_complement + axis)))
    return mask * x
Example 3
Project: world_models   Author: llSourcell   File: arch.py    MIT License 6 votes vote down vote up
def tf_normal(y_true, mu, sigma, pi):
    """Mixture-of-Gaussians likelihood of y_true under (mu, sigma, pi).

    Tiles the target across the GAUSSIAN_MIXTURES axis, evaluates each
    component's Gaussian density per latent dimension, weights by the
    mixture coefficients pi and sums over the mixture axis.
    """
    seq_len = K.shape(y_true)[1]
    tiled = K.tile(y_true, (1, 1, GAUSSIAN_MIXTURES))
    tiled = K.reshape(tiled, [-1, seq_len, GAUSSIAN_MIXTURES, Z_DIM])

    inv_sqrt_two_pi = 1 / math.sqrt(2 * math.pi)
    # Small epsilon keeps the division stable when sigma is near zero.
    inv_sigma = 1 / (sigma + 1e-8)

    z_score = (tiled - mu) * inv_sigma
    density = K.exp(-K.square(z_score) / 2) * inv_sigma * inv_sqrt_two_pi
    weighted = density * pi
    # Sum over the gaussian-mixture axis.
    return K.sum(weighted, axis=2)
Example 4
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 6 votes vote down vote up
def handle_gemm(cls, node, input_dict):
        """Translate an ONNX Gemm node (alpha * x @ y + beta * z) into a
        Keras Dense layer with the scaling baked into the weights.
        """
        x = input_dict[node.inputs[0]]

        y = input_dict[node.inputs[1]]

        z = input_dict[node.inputs[2]]
        shape = K.int_shape(x)
        # Flatten trailing dimensions so the Dense input is 2-D.
        if len(shape) > 2:
            x = keras.layers.Reshape([np.prod(shape[1:])])(x)
        if "transA" in node.attrs.keys() and node.attrs["transA"] == 1:
            x = K.transpose(x)
        if "transB" in node.attrs.keys() and node.attrs["transB"] == 1:
            y = np.transpose(y)
        # Scaling factors default to 1.0 when the attributes are absent.
        alpha = node.attrs["alpha"] if "alpha" in node.attrs.keys() else 1.0
        beta = node.attrs["beta"] if "beta" in node.attrs.keys() else 1.0

        # NOTE(review): assumes y and z are concrete numpy initializers (not
        # symbolic tensors) — Dense `weights=` requires arrays; and len(z)
        # assumes z is the 1-D bias whose length is the output dim. Verify.
        layer = keras.layers.Dense(len(z), weights=[alpha * y, beta * z])
        return [layer(x)]
Example 5
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 6 votes vote down vote up
def handle_softmax(cls, node, input_dict):
        """Translate an ONNX Softmax node into Keras layers.

        ONNX softmax flattens the input into 2-D around `axis` and applies
        softmax over the trailing block; emulate that with Reshape + Lambda
        and reshape back to the original per-sample shape afterwards.
        """
        x = input_dict[node.inputs[0]]
        shape = K.int_shape(x)
        # Fast path: softmax over the last axis needs no reshaping.
        if "axis" in node.attrs.keys() \
                and (node.attrs['axis'] == -1 or node.attrs["axis"] == len(shape) - 1):
            return [Lambda(lambda a: K.softmax(a))(x)]
        if "axis" in node.attrs.keys():
            axis = node.attrs["axis"]
            # Normalize negative axes to their positive equivalent.
            axis = (axis if axis >= 0 else
                    len(shape) + axis)
        else:
            axis = 1
        if axis == 1:
            cal_shape = [np.prod(shape[1:])]
        else:
            # BUG FIX: previously written as
            #   np.prod(shape[1:axis], np.prod(shape[axis:]))
            # which passed the second product as np.prod's *axis* argument
            # (and lacked the tuple comma), yielding a scalar instead of the
            # intended 2-element target shape.
            cal_shape = (np.prod(shape[1:axis]), np.prod(shape[axis:]))
        x = keras.layers.Reshape(cal_shape)(x)
        x = Lambda(lambda _x: K.softmax(_x))(x)
        x = keras.layers.Reshape(shape[1:])(x)
        return [x]
Example 6
Project: learning_to_adapt   Author: ondrejklejch   File: wrapper.py    Apache License 2.0 6 votes vote down vote up
def parameter_coordinates(shapes):
  """Return normalized (input, output) coordinates for every parameter.

  Args:
    shapes: list of weight shapes; the last entry of each shape is its
      output dimension, which must agree across all shapes.

  Returns:
    A (num_params, 2) float array where column 0 is the normalized input
    index and column 1 the normalized output index, or [] when `shapes`
    is empty.

  Raises:
    ValueError: if the shapes disagree on the output dimension.
  """
  num_params = np.sum([np.prod(shape) for shape in shapes])
  output_dims = [shape[-1] for shape in shapes]

  if not output_dims:
    return []

  output_dim = output_dims[0]
  if not all([dim == output_dim for dim in output_dims]):
    raise ValueError("Can't handle different output dimensions")

  # Integer division: on Python 3 the original `/` produced a float, which
  # propagated a float range through np.arange; `//` keeps indices exact.
  input_dim = num_params // output_dim

  return np.stack([
    np.stack([np.arange(input_dim)] * output_dim).flatten() / float(input_dim),
    np.stack([np.arange(output_dim)] * input_dim).T.flatten() / float(output_dim)
  ], axis=-1)
Example 7
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __call__(self, x):
        """Exponential penalty for entries of x outside [self.min, self.max],
        offset by the total element count so in-range tensors score zero."""
        below_range = K.relu(self.min - x)
        above_range = K.relu(x - self.max)
        penalty = K.sum(K.exp(1 * (below_range + above_range)))
        num_elements = K.prod(K.cast(K.shape(x), K.floatx()))
        return penalty - num_elements
Example 8
Project: group-ksparse-temporal-cnns   Author: srph25   File: ops.py    MIT License 5 votes vote down vote up
def group_ksparse(x, groups, k, axis_group, axis_sparse, norm=2, alpha=1, epsilon=None):
    """Group k-sparse masking: keep the k groups with the largest norms.

    Entries along ``axis_group`` are partitioned into ``groups`` groups,
    group norms are ranked along the flattened ``axis_sparse`` axes, and a
    binary mask keeps only the positions belonging to the top-k groups.

    Args:
        x: input tensor.
        groups: number of groups along the flattened group axes.
        k: number of positions kept per group row.
        axis_group: axis/axes that are grouped together.
        axis_sparse: axis/axes along which sparsity is enforced; must be
            disjoint from ``axis_group``.
        norm: order of the group norm (passed to ``group_norms``).
        alpha: outside the training phase ``k`` is scaled by ``alpha``.
        epsilon: numerical fuzz for the norm; defaults to ``K.epsilon()``.

    Returns:
        ``x`` with all but the selected group entries zeroed out.
    """
    # Normalize both axis arguments to tuples of ints.
    if isinstance(axis_group, int):
        axis_group = (axis_group,)
    elif isinstance(axis_group, list):
        axis_group = tuple(axis_group)
    if isinstance(axis_sparse, int):
        axis_sparse = (axis_sparse,)
    elif isinstance(axis_sparse, list):
        axis_sparse = tuple(axis_sparse)
    # Group axes and sparse axes must not overlap.
    assert(1 - bool(set(axis_group) & set(axis_sparse)))
    if epsilon is None:
        epsilon = K.epsilon()
    axis_complement = tuple(set(range(K.ndim(x))) - set(axis_group) - set(axis_sparse))
    # Flattened sizes of the group axes and of the sparse axes.
    shape_reduce_group = K.prod([K.shape(x)[j] for j in axis_group])
    shape_reduce_sparse = K.prod([K.shape(x)[j] for j in axis_sparse])
    # Effective k, clamped so it never exceeds the sparse-axis size.
    _k = K.minimum(K.in_train_phase(k, alpha * k), shape_reduce_sparse)
    # Layout: (complement..., sparse..., group...) then flatten to 3-D.
    inputs_permute_dimensions = K.permute_dimensions(x, axis_complement + axis_sparse + axis_group)
    inputs_permute_dimensions_reshape = K.reshape(inputs_permute_dimensions, (-1, shape_reduce_sparse, shape_reduce_group))
    # Rank positions by their group norm rather than raw value.
    norm_group_permute_dimensions_reshape = group_norms(inputs=inputs_permute_dimensions_reshape, groups=groups, axis=-1, norm=norm, epsilon=epsilon)
    norm_group_permute_dimensions_reshape = K.permute_dimensions(norm_group_permute_dimensions_reshape, (0, 2, 1))
    norm_group_permute_dimensions_reshape = K.reshape(norm_group_permute_dimensions_reshape, (-1, shape_reduce_sparse))
    _, indices = tf.nn.top_k(norm_group_permute_dimensions_reshape, _k)
    # Pair each top-k column index with its row index for scatter_nd.
    scatter_indices = K.concatenate([(K.arange(K.shape(norm_group_permute_dimensions_reshape)[0])[:, None] * K.ones((1, _k), dtype='int32'))[:, :, None], indices[:, :, None]])
    scatter_updates = K.ones((K.shape(norm_group_permute_dimensions_reshape)[0], _k))
    # Scatter ones at the selected positions to build the group mask.
    mask_group_permute_dimensions_reshape = K.cast(tf.scatter_nd(scatter_indices, scatter_updates, K.shape(norm_group_permute_dimensions_reshape)), K.floatx())
    mask_group_permute_dimensions_reshape = K.reshape(mask_group_permute_dimensions_reshape, (-1, groups, shape_reduce_sparse))
    mask_group_permute_dimensions_reshape = K.permute_dimensions(mask_group_permute_dimensions_reshape, (0, 2, 1))
    # Broadcast the per-group mask across the members of each group.
    mask_permute_dimensions_reshape = (mask_group_permute_dimensions_reshape[:, :, :, None] * K.ones((1, 1, 1, floor_div(shape_reduce_group, groups))))
    mask_permute_dimensions = K.reshape(mask_permute_dimensions_reshape, K.shape(inputs_permute_dimensions))
    # Undo the permutation so the mask lines up with the original layout.
    mask = K.permute_dimensions(mask_permute_dimensions, tuple(np.argsort(axis_complement + axis_sparse + axis_group)))
    return mask * x
Example 9
Project: RPGOne   Author: RTHMaK   File: noisy_or.py    Apache License 2.0 5 votes vote down vote up
def call(self, inputs, mask=None):
        """Noisy-OR combination: 1 - prod(1 - noise * p) over self.axis.

        Masked-out probabilities are zeroed so they contribute a neutral
        factor of 1 to the product.
        """
        probs = inputs
        if mask is not None:
            probs = probs * K.cast(mask, "float32")

        # Complement of each noise-scaled probability,
        # shape: (batch size, ..., num_probs, ...).
        complements = 1.0 - self.noise_parameter * probs

        # OR of independent events = 1 - product of complements,
        # shape: (batch size, ..., ...).
        return 1.0 - K.prod(complements, axis=self.axis)
Example 10
Project: deeplogic   Author: nuric   File: lstm.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def build_model(char_size=27, dim=64, training=True, **kwargs):
  """Build the model.

  Args:
    char_size: vocabulary size for the one-hot character embedding.
    dim: LSTM hidden-state size.
    training: if True, compile the model with loss/optimizer/metrics.
    **kwargs: unused; accepted for call-site compatibility.

  Returns:
    A Keras Model mapping (context, query) to a sigmoid score.
  """
  # Inputs
  # Context: (rules, preds, chars,)
  context = L.Input(shape=(None, None, None,), name='context', dtype='int32')
  query = L.Input(shape=(None,), name='query', dtype='int32')

  # Flatten the variable-size (rules, preds, chars) context into one axis.
  var_flat = L.Lambda(lambda x: K.reshape(x, K.stack([-1, K.prod(K.shape(x)[1:])])), name='var_flat')
  flat_ctx = var_flat(context)

  # Onehot embedding (identity matrix, frozen; index 0 is the mask token).
  onehot = L.Embedding(char_size, char_size,
                       embeddings_initializer='identity',
                       trainable=False,
                       mask_zero=True,
                       name='onehot')
  embedded_ctx = onehot(flat_ctx) # (?, rules*preds*chars, char_size)
  embedded_q = onehot(query) # (?, chars, char_size)

  # Read query; its final states seed the context reader below.
  _, *states = L.LSTM(dim, return_state=True, name='query_lstm')(embedded_q)
  # Read context
  out, *states = L.LSTM(dim, return_state=True, name='ctx_lstm')(embedded_ctx, initial_state=states)

  # Prediction
  out = L.concatenate([out]+states, name='final_states')
  out = L.Dense(1, activation='sigmoid', name='out')(out)

  model = Model([context, query], out)
  if training:
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
  return model
Example 11
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 5 votes vote down vote up
def handle_flatten(cls, node, input_dict):
        """Translate an ONNX Flatten node into a Keras Reshape layer.

        ONNX flattens to 2-D around `axis` (batch included); Keras shapes
        exclude the batch dimension, hence the axis shift below.
        """
        tensor = input_dict[node.inputs[0]]
        axis = node.attrs["axis"] if "axis" in node.attrs.keys() else 1
        # Per-sample shape: drop the batch dimension.
        shape = K.int_shape(tensor)[1:]

        # Shift from ONNX (batch included) to Keras (batch excluded) axes.
        axis -= 1
        split0, split1 = np.split(shape, [axis])
        if len(split0) == 0:
            # Everything after the batch axis collapses into one dimension.
            split1 = np.prod(split1)
            output_shape = np.array([split1])
        else:
            # Two-dimensional target: product before / product after `axis`.
            split0 = np.prod(split0)
            split1 = np.prod(split1)
            output_shape = np.stack([split0, split1])
        return [keras.layers.core.Reshape(output_shape)(tensor)]
Example 12
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 5 votes vote down vote up
def handle_reduce_prod(cls, node, input_dict):
        """Map ONNX ReduceProd onto the generic reduction handler using K.prod."""
        return cls._reduce_op(node, input_dict, K.prod)
Example 13
Project: maskrcnn   Author: shtamura   File: bbox.py    MIT License 5 votes vote down vote up
def get_iou(bbox_base, bbox_target):
    """Compute the IoU (Intersection over Union) between two sets of boxes.

    https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/

    Boxes are rows of (ymin, xmin, ymax, xmax) coordinates. Every box in
    bbox_base is paired with every box in bbox_target.

    Args:
        bbox_base (ndarray): reference boxes, shape (N, 4).
        bbox_target (ndarray): comparison boxes, shape (K, 4).

    Returns:
        ndarray: pairwise IoU values in [0, 1], shape (N, K).

    Raises:
        IndexError: if either argument does not have 4 columns.
    """
    if bbox_base.shape[1] != 4 or bbox_target.shape[1] != 4:
        raise IndexError

    # Broadcasting bbox_base over a new axis evaluates every (base, target)
    # pair at once, yielding (N, K, 2) coordinate arrays.
    top_left = np.maximum(bbox_base[:, None, :2], bbox_target[:, :2])
    bottom_right = np.minimum(bbox_base[:, None, 2:], bbox_target[:, 2:])

    # Intersection area is h * w of the overlap region; pairs with no
    # overlap (bottom-right not strictly beyond top-left) contribute 0.
    overlaps = np.all(bottom_right > top_left, axis=2).astype('float32')
    area_inter = np.prod(bottom_right - top_left, axis=2) * overlaps
    area_base = np.prod(bbox_base[:, 2:] - bbox_base[:, :2], axis=1)
    area_target = np.prod(bbox_target[:, 2:] - bbox_target[:, :2], axis=1)
    union = area_base[:, None] + area_target - area_inter
    return area_inter / union
Example 14
Project: maskrcnn   Author: shtamura   File: bbox.py    MIT License 5 votes vote down vote up
def get_iou_K(bbox_base, bbox_target):
    """Compute the IoU (Intersection over Union) of two sets of BoundingBoxes.

    Backend-tensor version of get_iou.
    https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/

    Args:
        bbox_base (tensor): reference BoundingBoxes.
            Its shape is :math:`(N, 4)`.
            The second axis holds the coordinates in the order
            :math:`p_{ymin}, p_{xmin}, p_{ymax}, p_{xmax}`.
        bbox_target (tensor): BoundingBoxes.
            Its shape is :math:`(K, 4)`.
            The second axis holds the coordinates in the order
            :math:`p_{ymin}, p_{xmin}, p_{ymax}, p_{xmax}`.

        IoU is computed for every bbox_base box against every bbox_target box.

    Returns:
        tensor:
        IoU values (0 <= IoU <= 1)
        with shape :math:`(N, K)`.

    """
    if bbox_base.shape[1] != 4 or bbox_target.shape[1] != 4:
        raise IndexError

    # Top-left corner of the intersection region.
    # Adding an axis via bbox_base[:, None, :] broadcasts bbox_base
    # against bbox_target so every pair is evaluated at once,
    # yielding (N, K, 2) coordinates.
    tl = K.maximum(bbox_base[:, None, :2], bbox_target[:, :2])
    # Bottom-right corner of the intersection region, also (N, K, 2).
    br = K.minimum(bbox_base[:, None, 2:], bbox_target[:, 2:])

    # br - tl gives the (h, w) of the intersection; h * w is its area.
    # Pairs with no overlap (br <= tl) are masked to 0.
    area_i = K.prod(br - tl, axis=2) * \
        K.cast(K.all(br > tl, axis=2), 'float32')
    area_base = K.prod(bbox_base[:, 2:] - bbox_base[:, :2], axis=1)
    area_target = K.prod(bbox_target[:, 2:] - bbox_target[:, :2], axis=1)
    return area_i / (area_base[:, None] + area_target - area_i)
Example 15
Project: autoencoder-registration   Author: jmswaney   File: objectives.py    MIT License 5 votes vote down vote up
def generalizedDiceLoss(y_true, y_pred):
    """Generalized Dice Loss (GDL) for multi-category segmentation.

    Reference: Sudre et al., 2017 — "Generalised Dice overlap as a deep
    learning loss function for highly unbalanced segmentations"
    (https://arxiv.org/pdf/1707.03237.pdf).

    Returns the negative generalized Dice coefficient, so minimizing the
    loss maximizes overlap.

    Cleanup: removed the unused local `ytrueshape` from the original.
    """
    _EPSILON = K.epsilon()
    # Clipping keeps every entry in (0, 1); this also guarantees the
    # per-class sums below are strictly positive, so the weight division
    # cannot hit zero.
    y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
    y_true = K.clip(y_true, _EPSILON, 1.0-_EPSILON)

    # Flatten everything but the channel (category) axis.
    ypredshape = y_pred.get_shape().as_list()
    dimp = K.prod(K.shape(y_pred)[:-1])
    dimt = K.prod(K.shape(y_true)[:-1])
    y_pred = K.reshape(y_pred, (dimp, ypredshape[-1]))
    y_true = K.reshape(y_true, (dimt, -1))

    # Elementwise intersection term.
    y_int = y_pred*y_true

    # Per-class weights: inverse squared class volume, which rebalances
    # rare classes (the GDL weighting from the paper).
    weights = 1/(K.square(K.sum(y_true, axis=0)))

    numerator = 2*K.sum(weights*K.sum(y_int, axis=0), axis=-1)
    denominator = K.sum(weights*(K.sum(K.square(y_true), axis=0) + K.sum(K.square(y_pred),axis=0)), axis=-1)
    return - numerator/denominator
Example 16
Project: learning_to_adapt   Author: ondrejklejch   File: wrapper.py    Apache License 2.0 5 votes vote down vote up
def reshape_params(shapes, params):
  """Slice the flat `params` vector into tensors matching `shapes`.

  Consumes np.prod(shape) entries per shape, in order, and reshapes each
  slice accordingly.
  """
  result = []
  start = 0

  for shape in shapes:
    count = np.prod(shape)
    chunk = params[start:start + count]
    result.append(K.reshape(chunk, tuple(shape)))
    start += count

  return result
Example 17
Project: learning_to_adapt   Author: ondrejklejch   File: wrapper.py    Apache License 2.0 5 votes vote down vote up
def set_model_weights(model, weights, wrapper=None):
  """Distribute a flat weight vector across all layers of `model`.

  Walks the layers in order, carving np.prod(w.shape) entries per weight
  out of `weights`. When a wrapper is supplied, batchnorm moving statistics
  are taken from the wrapper instead of the flat vector.
  """
  for layer in model.layers:
    new_weights = []
    for w in layer.weights:
      count = np.prod(w.shape)
      new_weights.append(weights[:count].reshape(w.shape))
      weights = weights[count:]

    # Moving mean/var (weight slots 2 and 3) are tracked by the wrapper.
    if isinstance(layer, BatchNormalization) and wrapper is not None:
      new_weights[2] = K.get_session().run(wrapper.moving_means[layer.name])
      new_weights[3] = K.get_session().run(wrapper.moving_vars[layer.name])

    layer.set_weights(new_weights)
Example 18
Project: learning_to_adapt   Author: ondrejklejch   File: wrapper.py    Apache License 2.0 5 votes vote down vote up
def call(self, inputs, training=None):
    """Evaluate the wrapped layers with externally supplied parameters.

    `inputs` is either (params, trainable_params, x) or (params, x);
    `params` holds a flat per-sample parameter vector that is sliced layer
    by layer. Batchnorm layers additionally compute batch statistics here
    and, during training, schedule moving-average updates.
    """
    if len(inputs) == 3:
        params, trainable_params, x = inputs
        params = self.merge_params(params, trainable_params)
    elif len(inputs) == 2:
        params, x = inputs
    else:
        raise ValueError("Wrong number of inputs")

    offset = 0
    for layer in self.layers:
      # Slice this layer's parameters out of the flat vector.
      layer_params = params[:, offset:offset + layer["num_params"]]
      offset += layer["num_params"]

      if layer["type"] in ["standard-batchnorm", "batch-renorm"]:
        x = K.stack(x, 0)
        # Batch statistics over the first three axes of the stacked tensor.
        self.mean, self.variance = tf.nn.moments(x, [0, 1, 2])

        if training:
          sample_size = K.prod([K.shape(x)[axis] for axis in [0, 1, 2]])
          sample_size = K.cast(sample_size, dtype='float32')
          # Bias correction of the variance estimate; note the correction
          # term folds in the layer's epsilon.
          unbiased_variance = self.variance * sample_size / (sample_size - (1.0 + layer["epsilon"]))

          self.add_update([
            K.moving_average_update(self.moving_means[layer["name"]], self.mean, layer["momentum"]),
            K.moving_average_update(self.moving_vars[layer["name"]], unbiased_variance, layer["momentum"]),
          ], inputs)

      # Apply the layer to each sample with that sample's own parameters.
      x = [self.evaluate_layer(layer, layer_params[i], x[i], training) for i in range(self.batch_size)]

    output = K.stack(x, 0)
    output._uses_learning_phase = True
    return output
Example 19
Project: deep_qa   Author: allenai   File: noisy_or.py    Apache License 2.0 5 votes vote down vote up
def call(self, inputs, mask=None):
        """Noisy-OR combination: 1 - prod(1 - noise_parameter * p).

        Probabilities along self.axis are treated as independent event
        probabilities; the layer returns the probability that at least one
        event fires, attenuated by the learned noise parameter.
        """
        # shape: (batch size, ..., num_probs, ...)
        probabilities = inputs
        if mask is not None:
            # Masked entries become 0, contributing a neutral factor of 1
            # to the product below.
            probabilities *= K.cast(mask, "float32")

        noisy_probs = self.noise_parameter * probabilities

        # shape: (batch size, ..., num_probs, ...)
        noisy_probs = 1.0 - noisy_probs

        # shape: (batch size, ..., ...)
        probability_product = K.prod(noisy_probs, axis=self.axis)

        return 1.0 - probability_product
Example 20
Project: keras_extension   Author: k1414st   File: merge.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        """Merge a list of input tensors via self._merge_function.

        When input ranks differ (self._reshape_required), tensors are made
        rank-compatible first: if all ranks are known they are expanded at
        axis 1; otherwise each tensor is transposed so the batch axis comes
        last, merged, and the result transposed back.
        """
        if not isinstance(inputs, list):
            raise ValueError('A merge layer should be called '
                             'on a list of inputs.')
        if self._reshape_required:
            reshaped_inputs = []
            input_ndims = list(map(K.ndim, inputs))
            if None not in input_ndims:
                # If ranks of all inputs are available,
                # we simply expand each of them at axis=1
                # until all of them have the same rank.
                max_ndim = max(input_ndims)
                for x in inputs:
                    x_ndim = K.ndim(x)
                    for _ in range(max_ndim - x_ndim):
                        x = K.expand_dims(x, 1)
                    reshaped_inputs.append(x)
                return self._merge_function(reshaped_inputs)
            else:
                # Transpose all inputs so that batch size is the last dimension.
                # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
                transposed = False
                for x in inputs:
                    x_ndim = K.ndim(x)
                    if x_ndim is None:
                        # Rank unknown: flatten to 2-D, swap axes, then
                        # restore the (now batch-last) symbolic shape.
                        x_shape = K.shape(x)
                        batch_size = x_shape[0]
                        new_shape = K.concatenate(
                            [x_shape[1:], K.expand_dims(batch_size)])
                        x_transposed = K.reshape(x, K.stack(
                            [batch_size, K.prod(x_shape[1:])]))
                        x_transposed = K.permute_dimensions(x_transposed, (1, 0))
                        x_transposed = K.reshape(x_transposed, new_shape)
                        reshaped_inputs.append(x_transposed)
                        transposed = True
                    elif x_ndim > 1:
                        # Rank known: rotate the batch axis to the end.
                        dims = list(range(1, x_ndim)) + [0]
                        reshaped_inputs.append(K.permute_dimensions(x, dims))
                        transposed = True
                    else:
                        # We don't transpose inputs if they are 1D vectors or scalars.
                        reshaped_inputs.append(x)
                y = self._merge_function(reshaped_inputs)
                y_ndim = K.ndim(y)
                if transposed:
                    # If inputs have been transposed, we have to transpose the output too.
                    if y_ndim is None:
                        # Rank unknown: mirror of the flatten/swap above.
                        y_shape = K.shape(y)
                        y_ndim = K.shape(y_shape)[0]
                        batch_size = y_shape[y_ndim - 1]
                        new_shape = K.concatenate(
                            [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
                        y = K.reshape(y, (-1, batch_size))
                        y = K.permute_dimensions(y, (1, 0))
                        y = K.reshape(y, new_shape)
                    elif y_ndim > 1:
                        # Rotate the batch axis back to the front.
                        dims = [y_ndim - 1] + list(range(y_ndim - 1))
                        y = K.permute_dimensions(y, dims)
                return y
        else:
            return self._merge_function(inputs)
Example 21
Project: keras-extras   Author: kuza55   File: DiffForest.py    Apache License 2.0 4 votes vote down vote up
def call(self, x, mask=None):
        """Forward pass of a differentiable decision forest.

        For each tree: compute sigmoid decision probabilities at the inner
        nodes, route them to leaves via a precomputed tiling pattern, take
        the product of decisions along each root-to-leaf path, and mix the
        leaf class distributions. Returns the average over all trees.

        NOTE(review): uses Python-2 `xrange` and the pre-1.0 TensorFlow
        `tf.pack` — this block only runs on that legacy stack.
        """
        N_DECISION = (2 ** (self.n_depth)) - 1  # Number of decision nodes
        N_LEAF  = 2 ** (self.n_depth + 1)  # Number of leaf nodes

        flat_decision_p_e = []
        leaf_p_e = []
        for w_d, w_l in zip(self.w_d_ensemble, self.w_l_ensemble):

            # Probability of routing right at each decision node.
            decision_p = K.sigmoid((K.dot(x, w_d)))
            # Class distribution at each leaf.
            leaf_p = K.softmax(w_l)

            decision_p_comp = 1 - decision_p

            # Pack decision and complement so indices >= N_DECISION select
            # the complement branch via the tiling pattern below.
            decision_p_pack = K.concatenate([decision_p, decision_p_comp])

            flat_decision_p_e.append(decision_p_pack)
            leaf_p_e.append(leaf_p)

        #Construct tiling pattern for decision probability matrix
        #Could be done in TF, but I think it's better statically
        tiling_pattern = np.zeros((N_LEAF, self.n_depth), dtype=np.int32)
        comp_offset = N_DECISION
        dec_idx = 0
        for n in xrange(self.n_depth):
            j = 0
            for depth_idx in xrange(2**n):
                repeat_times = 2 ** (self.n_depth - n)
                # Leaves in the left subtree use the decision probability...
                for _ in xrange(repeat_times):
                    tiling_pattern[j][n] = dec_idx 
                    j = j + 1

                # ...leaves in the right subtree use its complement.
                for _ in xrange(repeat_times):
                    tiling_pattern[j][n] = comp_offset + dec_idx 
                    j = j + 1

                dec_idx = dec_idx + 1

        flat_pattern = tiling_pattern.flatten()

        # iterate over each tree
        tree_ret = None
        for flat_decision_p, leaf_p in zip(flat_decision_p_e, leaf_p_e):
            # Gather each leaf's path decisions according to the pattern.
            flat_mu = tf.transpose(tf.gather(tf.transpose(flat_decision_p), flat_pattern))
            
            batch_size = tf.shape(flat_decision_p)[0]
            shape = tf.pack([batch_size, N_LEAF, self.n_depth])

            mu = K.reshape(flat_mu, shape)
            # Routing probability of each leaf = product along its path.
            leaf_prob = K.prod(mu, [2])
            prob_label = K.dot(leaf_prob, leaf_p)

            if tree_ret is None:
              tree_ret = prob_label
            else:
              tree_ret = tree_ret + prob_label

        # Average the per-tree predictions.
        return tree_ret/self.n_trees