Python tensorflow.matmul() Examples

The following are 30 code examples of tensorflow.matmul(), drawn from open-source projects. Each example lists its source project, author, file, and license, so you can follow up in the original source. You may also want to check out all available functions/classes of the module tensorflow.
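For orientation, tf.matmul() computes the matrix product of two tensors, multiplying the trailing two dimensions and treating any leading dimensions as a batch. A minimal TF1-style sketch (all names below are illustrative):

import numpy as np
import tensorflow as tf

a = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
b = tf.constant(np.arange(12, dtype=np.float32).reshape(3, 4))
c = tf.matmul(a, b)  # shape (2, 4)

# transpose_a / transpose_b multiply transposed views without
# materializing an explicit tf.transpose.
d = tf.matmul(b, a, transpose_a=True, transpose_b=True)  # shape (4, 2)

# Rank-3+ inputs are treated as batches of matrices.
batched = tf.matmul(tf.ones([8, 2, 3]), tf.ones([8, 3, 5]))  # (8, 2, 5)

with tf.Session() as sess:
    print(sess.run(c))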
Example #1
Source Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks_tf.py    License: BSD 3-Clause "New" or "Revised" License
def test_fgm_gradient_max(self):
        input_dim = 2
        num_classes = 3
        batch_size = 4
        rng = np.random.RandomState([2017, 8, 23])
        x = tf.placeholder(tf.float32, [batch_size, input_dim])
        weights = tf.placeholder(tf.float32, [input_dim, num_classes])
        logits = tf.matmul(x, weights)
        probs = tf.nn.softmax(logits)
        adv_x = fgm(x, probs)
        random_example = rng.randint(batch_size)
        random_feature = rng.randint(input_dim)
        output = tf.slice(adv_x, [random_example, random_feature], [1, 1])
        dx, = tf.gradients(output, x)
        # The following line catches GitHub issue #243
        self.assertIsNotNone(dx)
        dx = self.sess.run(dx, feed_dict=random_feed_dict(rng, [x, weights]))
        ground_truth = np.zeros((batch_size, input_dim))
        ground_truth[random_example, random_feature] = 1.
        self.assertClose(dx, ground_truth) 
Example #2
Source Project: neural-fingerprinting   Author: StephanZheng   File: madry_mnist_model.py    License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x):

        output = OrderedDict()
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output 
Example #3
Source Project: deep-learning-note   Author: wdxtub   File: mnist_histogram.py    License: MIT License
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    # Put all ops of the same layer under one name scope
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            # Weights and their monitoring summaries
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
            variable_summaries(weights, layer_name+'/weights')

        with tf.name_scope('biases'):
            # Biases and their monitoring summaries
            biases = tf.Variable(tf.constant(0.0, shape=[output_dim]))
            variable_summaries(biases, layer_name + '/biases')

        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            # Record the distribution of the layer's pre-activation outputs
            tf.summary.histogram(layer_name + '/pre_activations', preactivate)
        
        activations = act(preactivate, name='activation')
        # Record the distribution of the layer's post-activation outputs
        tf.summary.histogram(layer_name + '/activations', activations)
        return activations 
Example #4
Source Project: deep-learning-note   Author: wdxtub   File: 2_tf_linear.py    License: MIT License
def createLinearModel(dimension):
    np.random.seed(1024)
    # Define x and y
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    # Writing the model in matrix form speeds up computation considerably
    y = tf.placeholder(tf.float64, shape=[None, 1], name='y')
    # Define the parameter estimates and the predictions
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name='y_pred')
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
    model = {
        'loss_function': loss,
        'independent_variable': x,
        'dependent_variable': y,
        'prediction': yPred,
        'model_params': betaPred
    }
    return model 
Example #5
Source Project: DOTA_models   Author: ringringyi   File: gru_cell.py    License: Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """GRU cell with layer normalization."""
    input_dim = inputs.get_shape().as_list()[1]
    num_units = self._num_units

    with tf.variable_scope(scope or "gru_cell"):
      with tf.variable_scope("gates"):
        w_h = tf.get_variable(
            "w_h", [num_units, 2 * num_units],
            initializer=self._w_h_initializer())
        w_x = tf.get_variable(
            "w_x", [input_dim, 2 * num_units],
            initializer=self._w_x_initializer(input_dim))
        z_and_r = (_layer_norm(tf.matmul(state, w_h), scope="layer_norm/w_h") +
                   _layer_norm(tf.matmul(inputs, w_x), scope="layer_norm/w_x"))
        z, r = tf.split(tf.sigmoid(z_and_r), 2, 1)
      with tf.variable_scope("candidate"):
        w = tf.get_variable(
            "w", [input_dim, num_units], initializer=self._w_initializer)
        u = tf.get_variable(
            "u", [num_units, num_units], initializer=self._u_initializer)
        h_hat = (r * _layer_norm(tf.matmul(state, u), scope="layer_norm/u") +
                 _layer_norm(tf.matmul(inputs, w), scope="layer_norm/w"))
      new_h = (1 - z) * state + z * self._activation(h_hat)
    return new_h, new_h 
Example #6
Source Project: DOTA_models   Author: ringringyi   File: memory.py    License: Apache License 2.0
def get_hint_pool_idxs(self, normalized_query):
    """Get small set of idxs to compute nearest neighbor queries on.

    This is an expensive look-up on the whole memory that is used to
    avoid more expensive operations later on.

    Args:
      normalized_query: A Tensor of shape [None, key_dim].

    Returns:
      A Tensor of shape [None, choose_k] of indices in memory
      that are closest to the queries.

    """
    # look up in large memory, no gradients
    with tf.device(self.nn_device):
      similarities = tf.matmul(tf.stop_gradient(normalized_query),
                               self.mem_keys, transpose_b=True, name='nn_mmul')
    _, hint_pool_idxs = tf.nn.top_k(
        tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk')
    return hint_pool_idxs 
Example #7
Source Project: DOTA_models   Author: ringringyi   File: vision_baseline_lstm.py    License: Apache License 2.0
def combine_setup(name, combine_type, embed_img, embed_goal, num_img_neurons=None,
                  num_goal_neurons=None):
  with tf.name_scope(name + '_' + combine_type):
    if combine_type == 'add':
      # Simply add the goal and image features
      out = embed_img + embed_goal

    elif combine_type == 'multiply':
      # Multiply things together
      re_embed_img = tf.reshape(
          embed_img, shape=[-1, num_img_neurons // num_goal_neurons,
                            num_goal_neurons])
      re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1])
      x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False)
      out = slim.flatten(x)
    elif combine_type == 'none' or combine_type == 'imgonly':
      out = embed_img
    elif combine_type == 'goalonly':
      out = embed_goal
    else:
      logging.fatal('Undefined combine_type: %s', combine_type)
  return out 
Example #8
Source Project: DOTA_models   Author: ringringyi   File: network_units.py    License: Apache License 2.0
def pass_through_embedding_matrix(act_block, embedding_matrix, step_idx):
  """Passes the activations through the embedding_matrix.

  Takes care to handle out of bounds lookups.

  Args:
    act_block: matrix of activations.
    embedding_matrix: matrix of weights.
    step_idx: vector containing step indices, with -1 indicating out of bounds.

  Returns:
    the embedded activations.
  """
  # Indicator vector for out of bounds lookups.
  step_idx_mask = tf.expand_dims(tf.equal(step_idx, -1), -1)

  # Pad the last column of the activation vectors with the indicator.
  act_block = tf.concat([act_block, tf.to_float(step_idx_mask)], 1)
  return tf.matmul(act_block, embedding_matrix) 
Example #9
Source Project: DOTA_models   Author: ringringyi   File: policy.py    License: Apache License 2.0
def get_cell(self):
    self.cell_input_dim = self.internal_dim

    def mlp(cell_input, prev_internal_state):
      w1 = tf.get_variable('w1', [self.cell_input_dim, self.internal_dim])
      b1 = tf.get_variable('b1', [self.internal_dim])

      w2 = tf.get_variable('w2', [self.internal_dim, self.internal_dim])
      b2 = tf.get_variable('b2', [self.internal_dim])

      w3 = tf.get_variable('w3', [self.internal_dim, self.internal_dim])
      b3 = tf.get_variable('b3', [self.internal_dim])  # created but unused below

      proj = tf.get_variable(
          'proj', [self.internal_dim, self.output_dim])

      hidden = cell_input
      hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w1), b1))
      hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w2), b2))

      output = tf.matmul(hidden, proj)

      return output, hidden

    return mlp 
Example #10
Source Project: DOTA_models   Author: ringringyi   File: utils.py    License: Apache License 2.0
def gaussian_kernel_matrix(x, y, sigmas):
  r"""Computes a Guassian Radial Basis Kernel between the samples of x and y.

  We create a sum of multiple gaussian kernels each having a width sigma_i.

  Args:
    x: a tensor of shape [num_samples, num_features]
    y: a tensor of shape [num_samples, num_features]
    sigmas: a tensor of floats which denote the widths of each of the
      gaussians in the kernel.
  Returns:
    A tensor of shape [num_samples{x}, num_samples{y}] with the RBF kernel.
  """
  beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))

  dist = compute_pairwise_distances(x, y)

  s = tf.matmul(beta, tf.reshape(dist, (1, -1)))

  return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist)) 
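A hedged usage sketch for the kernel above, assuming compute_pairwise_distances from the same utils module is in scope and returns the [num_samples_x, num_samples_y] matrix of squared distances (the shapes and bandwidths below are illustrative):

x = tf.random_normal([32, 10])
y = tf.random_normal([48, 10])
sigmas = tf.constant([1.0, 5.0, 10.0])  # one bandwidth per kernel in the sum
kernel = gaussian_kernel_matrix(x, y, sigmas)  # shape (32, 48)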
Example #11
Source Project: tensorflow-alexnet   Author: jireh-father   File: ops.py    License: MIT License
def fc(inputs, output_size, init_bias=0.0, activation_func=tf.nn.relu, stddev=0.01):
    input_shape = inputs.get_shape().as_list()
    if len(input_shape) == 4:
        fc_weights = tf.Variable(
            tf.random_normal([input_shape[1] * input_shape[2] * input_shape[3], output_size], dtype=tf.float32,
                             stddev=stddev),
            name='weights')
        inputs = tf.reshape(inputs, [-1, fc_weights.get_shape().as_list()[0]])
    else:
        fc_weights = tf.Variable(tf.random_normal([input_shape[-1], output_size], dtype=tf.float32, stddev=stddev),
                                 name='weights')

    fc_biases = tf.Variable(tf.constant(init_bias, shape=[output_size], dtype=tf.float32), name='biases')
    fc_layer = tf.matmul(inputs, fc_weights)
    fc_layer = tf.nn.bias_add(fc_layer, fc_biases)
    if activation_func:
        fc_layer = activation_func(fc_layer)
    return fc_layer 
Example #12
Source Project: fine-lm   Author: akzaidi   File: transformer_nat.py    License: MIT License
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod  # ||x - m||^2
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Example #13
Source Project: fine-lm   Author: akzaidi   File: slicenet.py    License: MIT License
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
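Since rank_loss only depends on TensorFlow, a usage sketch is straightforward (the tensors below are illustrative stand-ins for real embeddings):

sentence_emb = tf.random_normal([16, 128])  # [batch, depth]
image_emb = tf.random_normal([16, 128])     # [batch, depth]
loss = rank_loss(sentence_emb, image_emb, margin=0.2)  # scalar tensor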
Example #14
Source Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    License: MIT License
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. Window into overlapping frames of 400 samples with a 160-sample hop
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT of the log filterbank energies to get cepstral coefficients
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Liftering: re-scale the higher-order cepstral coefficients
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
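A hedged usage sketch; it assumes filterbanks.npy (the DeepSpeech mel filterbank matrix) is present on disk, since the function loads it internally, and the input shape below is illustrative:

audio = tf.placeholder(tf.float32, [1, 16000])  # one second at 16 kHz
mfcc = compute_mfcc(audio)  # (1, num_frames, 26)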
Example #15
Source Project: icme2019   Author: ShenDezhou   File: sequence.py    License: MIT License
def call(self, inputs, mask=None, **kwargs):

        if self.supports_masking:
            if mask is None:
                raise ValueError(
                    "When supports_masking=True, input must support masking")
            queries, keys = inputs
            key_masks = tf.expand_dims(mask[-1], axis=1)

        else:

            queries, keys, keys_length = inputs
            hist_len = keys.get_shape()[1]
            key_masks = tf.sequence_mask(keys_length, hist_len)

        attention_score = LocalActivationUnit(
            self.hidden_size, self.activation, 0, 1, False, 1024,)([queries, keys])

        outputs = tf.transpose(attention_score, (0, 2, 1))

        if self.weight_normalization:
            paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
        else:
            paddings = tf.zeros_like(outputs)

        outputs = tf.where(key_masks, outputs, paddings)

        if self.weight_normalization:
            outputs = tf.nn.softmax(outputs)

        outputs = tf.matmul(outputs, keys)

        return outputs 
Example #16
Source Project: icme2019   Author: ShenDezhou   File: interaction.py    License: MIT License
def call(self, inputs, **kwargs):
        if K.ndim(inputs) != 2:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 2 dimensions" % (K.ndim(inputs)))

        x_0 = tf.expand_dims(inputs, axis=2)
        x_l = x_0
        for i in range(self.layer_num):
            xl_w = tf.tensordot(x_l, self.kernels[i], axes=(1, 0))
            dot_ = tf.matmul(x_0, xl_w)
            x_l = dot_ + self.bias[i] + x_l
        x_l = tf.squeeze(x_l, axis=2)
        return x_l 
Example #17
Source Project: icme2019   Author: ShenDezhou   File: interaction.py    License: MIT License
def call(self, inputs, **kwargs):
        if K.ndim(inputs) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs)))

        querys = tf.tensordot(inputs, self.W_Query,
                              axes=(-1, 0))  # None F D*head_num
        keys = tf.tensordot(inputs, self.W_key, axes=(-1, 0))
        values = tf.tensordot(inputs, self.W_Value, axes=(-1, 0))

        # head_num None F D
        querys = tf.stack(tf.split(querys, self.head_num, axis=2))
        keys = tf.stack(tf.split(keys, self.head_num, axis=2))
        values = tf.stack(tf.split(values, self.head_num, axis=2))

        inner_product = tf.matmul(
            querys, keys, transpose_b=True)  # head_num None F F
        self.normalized_att_scores = tf.nn.softmax(inner_product)

        result = tf.matmul(self.normalized_att_scores,
                           values)  # head_num None F D
        result = tf.concat(tf.split(result, self.head_num, axis=0), axis=-1)
        result = tf.squeeze(result, axis=0)  # None F D*head_num

        if self.use_res:
            result += tf.tensordot(inputs, self.W_Res, axes=(-1, 0))
        result = tf.nn.relu(result)

        return result 
Example #18
Source Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    License: MIT License
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
    if len(x.shape) > 2:
        x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
    w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.cast(w, x.dtype)
    return tf.matmul(x, w)

Example #19
Source Project: jiji-with-tensorflow-example   Author: unageanu   File: model.py    License: MIT License
def __setup_model(self):
        column_size = Model.COLUMN_SIZE
        w1 = tf.Variable(tf.truncated_normal([column_size, Estimator.HIDDEN_UNIT_SIZE], stddev=0.1))
        b1 = tf.Variable(tf.constant(0.1, shape=[Estimator.HIDDEN_UNIT_SIZE]))
        h1 = tf.nn.relu(tf.matmul(self.trade_data, w1) + b1)

        w2 = tf.Variable(tf.truncated_normal([Estimator.HIDDEN_UNIT_SIZE, Estimator.HIDDEN_UNIT_SIZE2], stddev=0.1))
        b2 = tf.Variable(tf.constant(0.1, shape=[Estimator.HIDDEN_UNIT_SIZE2]))
        h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)

        h2_drop = tf.nn.dropout(h2, self.keep_prob)
        w3 = tf.Variable(tf.truncated_normal([Estimator.HIDDEN_UNIT_SIZE2, 2], stddev=0.1))
        b3 = tf.Variable(tf.constant(0.1, shape=[2]))
        self.output = tf.nn.softmax(tf.matmul(h2_drop, w3) + b3)
Example #20
Source Project: dc_tts   Author: Kyubyong   File: networks.py    License: Apache License 2.0
def Attention(Q, K, V, monotonic_attention=False, prev_max_attentions=None):
    '''
    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      monotonic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(hp.d))
    if monotonic_attention:  # for inference
        key_masks = tf.sequence_mask(prev_max_attentions, hp.max_N)
        reverse_masks = tf.sequence_mask(hp.max_N - hp.attention_win_size - prev_max_attentions, hp.max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, hp.max_T, 1])
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A) # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    R = tf.concat((R, Q), -1)

    alignments = tf.transpose(A, [0, 2, 1]) # (B, N, T/r)

    return R, alignments, max_attentions 
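A hedged training-mode usage sketch; the shapes follow the docstring and assume the dc_tts hyperparameter module hp is configured with hp.d equal to the embedding size used here (256):

Q = tf.random_normal([8, 50, 256])   # (B, T/r, d)
K = tf.random_normal([8, 180, 256])  # (B, N, d)
V = tf.random_normal([8, 180, 256])  # (B, N, d)
R, alignments, max_attentions = Attention(Q, K, V)
# R: (8, 50, 512); alignments: (8, 180, 50); max_attentions: (8, 50)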
Example #21
Source Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def lid_term(logits, batch_size=100):
    """Calculate LID loss term for a minibatch of logits

    :param logits: 
    :return: 
    """
    # y_pred = tf.nn.softmax(logits)
    y_pred = logits

    # calculate pairwise distance
    r = tf.reduce_sum(y_pred * y_pred, 1)
    # turn r into column vector
    r1 = tf.reshape(r, [-1, 1])
    D = r1 - 2 * tf.matmul(y_pred, tf.transpose(y_pred)) + tf.transpose(r1) + \
        tf.ones([batch_size, batch_size])

    # find the k nearest neighbor
    D1 = -tf.sqrt(D)
    D2, _ = tf.nn.top_k(D1, k=21, sorted=True)
    D3 = -D2[:, 1:]

    m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
    v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1)  # to avoid nan
    # maximum-likelihood LID estimate: -k / sum_i log(d_i / d_k), with k = 20
    lids = -20 / v_log

    ## batch normalize lids
    # lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)

    return lids 
Example #22
Source Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def lid_adv_term(clean_logits, adv_logits, batch_size=100):
    """Calculate LID loss term for a minibatch of advs logits

    :param logits: clean logits
    :param A_logits: adversarial logits
    :return: 
    """
    # y_pred = tf.nn.softmax(logits)
    c_pred = tf.reshape(clean_logits, (batch_size, -1))
    a_pred = tf.reshape(adv_logits, (batch_size, -1))

    # calculate pairwise distance
    r = tf.reduce_sum(c_pred * a_pred, 1)
    # turn r into column vector
    r1 = tf.reshape(r, [-1, 1])
    D = r1 - 2 * tf.matmul(c_pred, tf.transpose(a_pred)) + tf.transpose(r1) + \
        tf.ones([batch_size, batch_size])

    # find the k nearest neighbor
    D1 = -tf.sqrt(D)
    D2, _ = tf.nn.top_k(D1, k=21, sorted=True)
    D3 = -D2[:, 1:]

    m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
    v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1)  # to avoid nan
    lids = -20 / v_log

    ## batch normalize lids
    lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)

    return lids 
Example #23
Source Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x, **kwargs):
        del kwargs
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            w1 = tf.constant([[1.5, .3], [-2, 0.3]],
                             dtype=tf.as_dtype(x.dtype))
            w2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                             dtype=tf.as_dtype(x.dtype))
        h1 = tf.nn.sigmoid(tf.matmul(x, w1))
        res = tf.matmul(h1, w2)
        return {self.O_FEATURES: [h1, res],
                self.O_LOGITS: res,
                self.O_PROBS: tf.nn.softmax(res)} 
Example #24
Source Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x, **kwargs):
        del kwargs
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            w1 = tf.constant([[1.5, .3], [-2, 0.3]],
                             dtype=tf.as_dtype(x.dtype))
            w2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                             dtype=tf.as_dtype(x.dtype))
        h1 = tf.nn.sigmoid(tf.matmul(x, w1))
        res = tf.matmul(h1, w2)
        return {self.O_LOGITS: res,
                self.O_PROBS: tf.nn.softmax(res)} 
Example #25
Source Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x, **kwargs):
        del kwargs
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            w1 = tf.constant([[1, -1]], dtype=tf.float32)
        res = tf.matmul(x, w1)
        return {self.O_LOGITS: res,
                self.O_PROBS: tf.nn.softmax(res)} 
Example #26
Source Project: neural-fingerprinting   Author: StephanZheng   File: test_attack_multigpu.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestMadryEtAlMultiGPU, self).setUp()

        class SimpleLayer(LayernGPU):
            def __init__(self):
                super(SimpleLayer, self).__init__()

            def set_input_shape(self, input_shape):
                self.input_shape = input_shape
                self.output_shape = input_shape
                self.W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
                self.W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                                      dtype=tf.float32)

            def fprop_noscope(self, x):
                h1 = tf.nn.sigmoid(tf.matmul(x, self.W1))
                res = tf.matmul(h1, self.W2)
                return res

        input_shape = (None, 2)
        self.model_ngpu = MLPnGPU([SimpleLayer()], input_shape)

        self.attack_single_gpu = self.attack
        self.attack_multi_gpu = MadryEtAlMultiGPU(self.model_ngpu,
                                                  sess=self.sess)
        self.attack = self.attack_multi_gpu 
Example #27
Source Project: neural-fingerprinting   Author: StephanZheng   File: model.py    License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x):
        return tf.matmul(x, self.W) + self.b 
Example #28
Source Project: neural-fingerprinting   Author: StephanZheng   File: model.py    License: BSD 3-Clause "New" or "Revised" License
def fprop_noscope(self, x):
        return tf.matmul(x, self.W) + self.b 
Example #29
Source Project: deep-learning-note   Author: wdxtub   File: basic_model.py    License: MIT License
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    # No moving-average class provided: use the variables' current values directly
    if avg_class is None:
        # Forward pass through the hidden layer
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        # Forward pass through the output layer
        return tf.matmul(layer1, weights2) + biases2
    else:
        layer1 = tf.nn.relu(
            tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)


Example #30
Source Project: deep-learning-note   Author: wdxtub   File: mnist_inference.py    License: MIT License
def inference(input_tensor, regularizer):
    # Declare the first layer
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Declare the second layer
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
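A hedged usage sketch; INPUT_NODE, LAYER1_NODE, OUTPUT_NODE and get_weight_variable come from the rest of mnist_inference.py (not shown), and the regularizer scale below is illustrative:

x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
logits = inference(x, regularizer)  # shape [None, OUTPUT_NODE]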