Python theano.tensor.argmax() Examples

The following are 28 code examples of theano.tensor.argmax(), collected from open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out all available functions/classes of the module theano.tensor.
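As a quick primer before the examples, here is a minimal sketch of what theano.tensor.argmax() computes (variable names are illustrative): it returns the indices of the maximum values along the given axis of a symbolic tensor.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                                  # symbolic (rows, cols) matrix
f = theano.function([x], T.argmax(x, axis=1))      # index of each row's maximum

data = np.array([[0.1, 0.7, 0.2],
                 [0.9, 0.05, 0.05]], dtype=theano.config.floatX)
print(f(data))                                     # -> [1 0]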
Example #1
Source File: rnn.py    From theano-recurrence with MIT License
def generative_sampling(self, seed, emb_data, sample_length):
        fruit = theano.shared(value=seed)

        def step(h_tm, y_tm):
            h_t = self.activation(T.dot(emb_data[y_tm], self.W) +
                                  T.dot(h_tm, self.U) + self.bh)
            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.by)
            y = T.argmax(y_t, axis=1)

            return h_t, y[0]

        [_, samples], _ = theano.scan(fn=step,
                                      outputs_info=[self.h0, fruit],
                                      n_steps=sample_length)

        get_samples = theano.function(inputs=[],
                                      outputs=samples)

        return get_samples() 
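Note that taking the argmax of the softmax at each step makes this "sampling" greedy and deterministic. A stochastic variant would draw from the predicted distribution instead; a sketch, assuming Theano's MRG random streams:

from theano.sandbox.rng_mrg import MRG_RandomStreams
srng = MRG_RandomStreams(seed=42)
# inside step(), replace the argmax over probabilities with a random draw:
# y = T.argmax(srng.multinomial(pvals=y_t), axis=1)   # one-hot sample -> index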
Example #2
Source File: nbow.py    From text_convnet with MIT License
def __init__(self, input, n_in, n_out, W=None):

        self.input = input

        if W is None:
            self.W = theano.shared(
                    value = numpy.zeros(
                        (n_in, n_out),
                        dtype = theano.config.floatX),
                    name = 'W',
                    borrow = True
                )
        else:
            self.W = W

        self.s_y_given_x = T.dot(input, self.W)
        self.p_y_given_x = T.nnet.softmax(self.s_y_given_x) #+ self.b)
        self.pred = T.argmax(self.s_y_given_x, axis=1)

        self.params = [ self.W ] 
Example #3
Source File: model.py    From text_convnet with MIT License
def __init__(self, input, n_in, n_out, W=None):

        self.input = input

        if W is None:
            self.W = theano.shared(
                    value = numpy.zeros(
                        (n_in, n_out),
                        dtype = theano.config.floatX),
                    name = 'W',
                    borrow = True
                )
        else:
            self.W = W

        self.s_y_given_x = T.dot(input, self.W)
        self.p_y_given_x = T.nnet.softmax(self.s_y_given_x) #+ self.b)
        self.pred = T.argmax(self.s_y_given_x, axis=1)

        self.params = [ self.W ] 
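Examples #2 and #3 are the same softmax-regression layer from two files of the text_convnet project; a minimal standalone sketch of the pattern (shapes and names are illustrative):

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
W = theano.shared(numpy.zeros((5, 3), dtype=theano.config.floatX), name='W')
p_y_given_x = T.nnet.softmax(T.dot(x, W))   # class probabilities
pred = T.argmax(p_y_given_x, axis=1)        # hard class predictions

predict = theano.function([x], pred)
predict(numpy.random.randn(4, 5).astype(theano.config.floatX))  # four class ids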
Example #4
Source File: roc_auc.py    From deep-mil-for-whole-mammogram-classification with MIT License
def on_epoch_end(self, epoch, logs={}):
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      #print(np.sum(y_pred[:,1]))
      #y_true = np.argmax(self.y_val, axis=1)
      #y_pred = np.argmax(y_pred, axis=1)
      #print(y_true.shape, y_pred.shape)
      if self.mymil:
        score = roc_auc_score(self.y_val.max(axis=1), y_pred.max(axis=1))  
      else: score = roc_auc_score(self.y_val[:,1], y_pred[:,1])
      print("interval evaluation - epoch: {:d} - auc: {:.2f}".format(epoch, score))
      if score > self.auc:
        self.auc = score
        for f in os.listdir('./'):
          if f.startswith(self.filepath+'auc'):
            os.remove(f)
        self.model.save(self.filepath+'auc'+str(score)+'ep'+str(epoch)+'.hdf5') 
Example #5
Source File: roc_auc.py    From deep-mil-for-whole-mammogram-classification with MIT License
def on_epoch_end(self, epoch, logs={}):
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1)>0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        y_score = np.argmax(y_pred, axis=1)
      #print(type(y_true), y_true.shape, type(y_score), y_score.shape)
      #print(y_score, y_true)
      TP = np.sum(y_true[y_score==1]==1)*1. #/ sum(y_true)
      FP = np.sum(y_true[y_score==1]==0)*1. #/ (y_true.shape[0]-sum(y_true))
      prec = TP / (TP+FP+1e-6)
      print("interval evaluation - epoch: {:d} - prec: {:.2f}".format(epoch, prec))
      if prec > self.prec:
        self.prec = prec
        for f in os.listdir('./'):
          if f.startswith(self.filepath+'prec'):
            os.remove(f)
        self.model.save(self.filepath+'prec'+str(prec)+'ep'+str(epoch)+'.hdf5') 
Example #6
Source File: roc_auc.py    From deep-mil-for-whole-mammogram-classification with MIT License
def on_epoch_end(self, epoch, logs={}):
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1)>0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        y_score = np.argmax(y_pred, axis=1)
      #print(type(y_true), y_true.shape, type(y_score), y_score.shape)
      TP = np.sum(y_true[y_score==1]==1)*1. #/ sum(y_true)
      FN = np.sum(y_true[y_score==0]==1)*1. #/ sum(y_true)
      reca = TP / (TP+FN+1e-6)
      print("interval evaluation - epoch: {:d} - reca: {:.2f}".format(epoch, reca))
      if reca > self.reca:
        self.reca = reca
        for f in os.listdir('./'):
          if f.startswith(self.filepath+'reca'):
            os.remove(f)
        self.model.save(self.filepath+'reca'+str(reca)+'ep'+str(epoch)+'.hdf5') 
Example #7
Source File: unet.py    From luna16 with BSD 2-Clause "Simplified" License
def score_metrics(out, target_var, weight_map, l2_loss=0):
    _EPSILON=1e-8

    out_flat = out.dimshuffle(1,0,2,3).flatten(ndim=2).dimshuffle(1,0)
    target_flat = target_var.dimshuffle(1,0,2,3).flatten(ndim=1)
    weight_flat = weight_map.dimshuffle(1,0,2,3).flatten(ndim=1)

    prediction = lasagne.nonlinearities.softmax(out_flat)
    prediction_binary = T.argmax(prediction, axis=1)

    dice_score = (T.sum(T.eq(2, prediction_binary+target_flat))*2.0 /
                    (T.sum(prediction_binary) + T.sum(target_flat)))

    loss = lasagne.objectives.categorical_crossentropy(T.clip(prediction,_EPSILON,1-_EPSILON), target_flat)
    loss = loss * weight_flat
    loss = loss.mean()
    loss += l2_loss

    accuracy = T.mean(T.eq(prediction_binary, target_flat),
                      dtype=theano.config.floatX)

    return loss, accuracy, dice_score, target_flat, prediction, prediction_binary 
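The dice score above relies on prediction_binary + target_flat equalling 2 exactly where both binary masks are 1; a numpy analogue with illustrative data:

import numpy as np

pred = np.array([1, 0, 1, 1])
target = np.array([1, 1, 0, 1])
intersection = np.sum((pred + target) == 2)              # both masks are 1
dice = 2.0 * intersection / (pred.sum() + target.sum())  # -> 0.666...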
Example #8
Source File: roc_auc.py    From deep-mil-for-whole-mammogram-classification with MIT License
def on_epoch_end(self, epoch, logs={}):
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      #print(y_pred.shape)
      if self.mymil:
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1)>0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        y_score = y_pred[np.arange(len(y_true)), y_true] #y_pred[:, y_true] #np.argmax(y_pred, axis=1)
      loss = -np.mean(np.log(y_score+1e-6)) #-np.mean(y_true*np.log(y_score+1e-6) + (1-y_true)*np.log(1-y_score+1e-6))
      print('')
      print("interval evaluation - epoch: {:d} - loss: {:.2f}".format(epoch, loss))
      if loss < self.loss:
        self.loss = loss
        for f in os.listdir('./'):
          if f.startswith(self.filepath+'loss'):
            os.remove(f)
        self.model.save(self.filepath+'loss'+str(loss)+'ep'+str(epoch)+'.hdf5') 
Example #9
Source File: kdl_template.py    From SciPy2015 with BSD 3-Clause "New" or "Revised" License
def categorical_crossentropy_nll(predicted_values, true_values):
    """ Returns likelihood compared to one hot category labels """
    indices = tensor.argmax(true_values, axis=-1)
    rows = tensor.arange(true_values.shape[0])
    if predicted_values.ndim < 3:
        return -tensor.log(predicted_values)[rows, indices]
    elif predicted_values.ndim == 3:
        d0 = true_values.shape[0]
        d1 = true_values.shape[1]
        pred = predicted_values.reshape((d0 * d1, -1))
        ind = indices.reshape((d0 * d1,))
        s = tensor.arange(pred.shape[0])
        correct = -tensor.log(pred)[s, ind]
        return correct.reshape((d0, d1,))
    else:
        raise AttributeError("Tensor dim not supported") 
Example #10
Source File: fat_classifier.py    From spinn with MIT License
def build_cost(logits, targets):
    """
    Build a classification cost function.
    """
    # Clip gradients coming from the cost function.
    logits = theano.gradient.grad_clip(
        logits, -1. * FLAGS.clipping_max_value, FLAGS.clipping_max_value)

    predicted_dist = T.nnet.softmax(logits)

    costs = T.nnet.categorical_crossentropy(predicted_dist, targets)
    cost = costs.mean()

    pred = T.argmax(logits, axis=1)
    acc = 1. - T.mean(T.cast(T.neq(pred, targets), theano.config.floatX))

    return cost, acc 
Example #11
Source File: classifier.py    From spinn with MIT License
def build_cost(logits, targets):
    """
    Build a classification cost function.
    """
    # Clip gradients coming from the cost function.
    logits = theano.gradient.grad_clip(
        logits, -1. * FLAGS.clipping_max_value, FLAGS.clipping_max_value)

    predicted_dist = T.nnet.softmax(logits)

    costs = T.nnet.categorical_crossentropy(predicted_dist, targets)
    cost = costs.mean()

    pred = T.argmax(logits, axis=1)
    acc = 1. - T.mean(T.cast(T.neq(pred, targets), theano.config.floatX))

    return cost, acc 
Example #12
Source File: theano_backend.py    From Att-ChemdNER with Apache License 2.0
def argmax(x, axis=-1):
    return T.argmax(x, axis=axis, keepdims=False) 
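A quick illustrative check of the wrapper above. Note that theano.tensor.argmax itself defaults to axis=None (argmax over the flattened tensor), whereas this backend wrapper defaults to the last axis:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')
f = theano.function([x], argmax(x))   # backend default: axis=-1
f(np.random.rand(2, 3, 4).astype(theano.config.floatX)).shape  # (2, 3)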
Example #13
Source File: learn.py    From luna16 with BSD 2-Clause "Simplified" License
def define_loss(network, targets):
    prediction = lasagne.layers.get_output(network)

    loss = lasagne.objectives.categorical_crossentropy(prediction, targets)
    loss = loss.mean()

    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, targets)
    test_loss = test_loss.mean()

    if params.REGULARIZATION:
        regularization_penalty = regularize_layer_params(network, l2) * params.REGULARIZATION_WEIGHT

        loss = loss + regularization_penalty
        test_loss = test_loss + regularization_penalty

    acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), targets),
                 dtype=theano.config.floatX)

    # Compile a function computing the validation loss and accuracy
    # (`inputs` is the network's input variable, defined elsewhere in the module):
    val_fn = theano.function([inputs, targets], [test_prediction, test_loss, acc])

    return loss, val_fn 
Example #14
Source File: test_rop.py    From attention-lvcsr with MIT License
def test_argmax(self):
        self.check_nondiff_rop(tensor.argmax(self.mx, axis=1)) 
Example #15
Source File: network.py    From dcase_task2 with MIT License
def predict(self, input, thresh=0.5):
        """
        Predict label map on test samples
        """
        P = self.predict_proba(input, squeeze=False)
        
        # binary segmentation
        if P.shape[1] == 1:
            return (P > thresh).squeeze()
        
        # categorical segmentation
        else:
            return np.argmax(P, axis=1).squeeze() 
Example #16
Source File: layers.py    From dcnn with MIT License
def get_output_for(self, inputs):
        A = inputs[0]
        X = inputs[1]

        max_degree_node = T.argmax(A.sum(0))
        min_degree_node = T.argmin(A.sum(0))

        return self.reduce(A, [max_degree_node, min_degree_node]) 
Example #17
Source File: layers.py    From dcnn with MIT License
def get_output_for(self, inputs):
        A = inputs[0]
        X = inputs[1]

        num_nodes = A.shape[0]
        structural_symbolic_loss = T.addbroadcast(
            T.reshape(
                1 + A + self._symbolic_triangles(A) + self._symbolic_arrows(A),
                [num_nodes, num_nodes, 1]
            ),
            2
        )

        feature_symbolic_loss = (
            (self._outer_substract(X, X) ** 2) *
            T.addbroadcast(self.W, 0, 1)
        )

        unnormalized_logprobs = T.sum(
            structural_symbolic_loss + feature_symbolic_loss,
            2
        )

        flat_reduction_index = T.argmax(unnormalized_logprobs)

        return self.reduce(A, [
            flat_reduction_index // num_nodes,
            flat_reduction_index % num_nodes
        ]) 
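The reduction above exploits the fact that T.argmax with no axis argument operates over the flattened tensor; integer division and modulo by the row length then recover the 2-D coordinates. A numpy analogue:

import numpy as np

logprobs = np.random.randn(4, 4)
flat = np.argmax(logprobs)            # index into the flattened array
row, col = flat // logprobs.shape[1], flat % logprobs.shape[1]
assert (row, col) == np.unravel_index(flat, logprobs.shape)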
Example #18
Source File: models.py    From dcnn with MIT License
def predict(self, X, prediction_indices):
        pred = lasagne.layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([self.var_K, self.var_X], T.argmax(pred, axis=1))

        # Return the predictions
        predictions = pred_fn(self.K[prediction_indices, :, :], X)

        return predictions 
Example #19
Source File: models.py    From dcnn with MIT License
def predict(self, X, prediction_indices):
        pred = lasagne.layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([self.var_K, self.var_X, self.var_I], T.argmax(pred, axis=1))

        # Return the predictions
        predictions = pred_fn(self.K, X, prediction_indices)

        return predictions 
Example #20
Source File: dnn.py    From DL4H with MIT License
def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        super(LogisticRegression, self).__init__(rng, input, n_in, n_out, W, b)
        self.p_y_given_x = T.nnet.softmax(T.dot(self.input, self.W) + self.b)
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.output = self.y_pred
        self.params = [self.W, self.b] 
Example #21
Source File: dnn.py    From DL4H with MIT License
def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        super(SVM, self).__init__(rng, input, n_in, n_out, W, b)
        self.y_given_x = T.dot(self.input, self.W) + self.b
        # Normalize scores row-wise; keepdims=True keeps the sum as (n, 1) so it
        # broadcasts against the (n, n_out) scores. Still badly calibrated probabilities.
        self.p_y_given_x = self.y_given_x / T.sum(self.y_given_x, axis=1, keepdims=True)
        self.y_pred = T.argmax(self.y_given_x, axis=1)
        self.output = self.y_pred
        self.params = [self.W, self.b] 
Example #22
Source File: layer.py    From deep_srl with Apache License 2.0
def connect(self, inputs):
    energy = tensor.dot(inputs, self.W) + self.b
    energy = energy.reshape([energy.shape[0] * energy.shape[1], energy.shape[2]])
    log_scores = tensor.log(tensor.nnet.softmax(energy))
    predictions = tensor.argmax(log_scores, axis=-1)
    return (log_scores, predictions) 
Example #23
Source File: test_type_other.py    From attention-lvcsr with MIT License
def test_none_Constant():
    """ Tests equals

    We had an error in the past with unpickling
    """
    o1 = Constant(NoneTypeT(), None, name='NoneConst')
    o2 = Constant(NoneTypeT(), None, name='NoneConst')
    assert o1.equals(o2)
    assert NoneConst.equals(o1)
    assert o1.equals(NoneConst)
    assert NoneConst.equals(o2)
    assert o2.equals(NoneConst)

    # This trigger equals that returned the wrong answer in the past.
    import six.moves.cPickle as pickle
    import theano
    from theano import tensor

    x = tensor.vector('x')
    y = tensor.argmax(x)
    kwargs = {}
    # We can't pickle DebugMode
    if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
        kwargs = {'mode': 'FAST_RUN'}
    f = theano.function([x], [y], **kwargs)
    pickle.loads(pickle.dumps(f)) 
Example #24
Source File: network.py    From dcase_task2 with MIT License
def predict(self, input):
        """
        Predict class labels on test samples
        """
        return np.argmax(self.predict_proba(input), axis=1) 
Example #25
Source File: gru.py    From theano-recurrence with MIT License
def generative_sampling(self, seed, emb_data, sample_length):
        fruit = theano.shared(value=seed)

        def step(h_tm, y_tm):

            x_z = T.dot(emb_data[y_tm], self.W_z) + self.b_z
            x_r = T.dot(emb_data[y_tm], self.W_r) + self.b_r
            x_h = T.dot(emb_data[y_tm], self.W) + self.b_h

            z_t = self.inner_activation(x_z + T.dot(h_tm, self.U_z))
            r_t = self.inner_activation(x_r + T.dot(h_tm, self.U_r))
            hh_t = self.activation(x_h + T.dot(r_t * h_tm, self.U))
            h_t = (T.ones_like(z_t) - z_t) * hh_t + z_t * h_tm

            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.b_y)
            y = T.argmax(y_t, axis=1)

            return h_t, y[0]

        [_, samples], _ = theano.scan(fn=step,
                                      outputs_info=[self.h0, fruit],
                                      n_steps=sample_length)

        get_samples = theano.function(inputs=[],
                                      outputs=samples)

        return get_samples() 
Example #26
Source File: theano_backend.py    From GraphicDesignPatternByPython with MIT License
def argmax(x, axis=-1):
    return T.argmax(x, axis=axis, keepdims=False) 
Example #27
Source File: train.py    From FRRN with MIT License
def compile_validation_function(network, batch_size):
    """Compiles the validation function.

    Args:
        network: The network instance.
        batch_size: The batch size.

    Returns:
    A function that takes in a batch of images and targets and returns the
    predicted segmentation mask and the loss.
    """
    input_var = network.input_layers[0].input_var
    target_var = T.ftensor4()

    predictions = lasagne.layers.get_output(
        network.output_layers, deterministic=True)[0]

    loss = losses.bootstrapped_xentropy(
        predictions=predictions,
        targets=target_var,
        batch_size=batch_size,
        multiplier=BOOTSTRAP_MULTIPLIER
    )

    pylogging.info("Compile validation function")
    return theano.function(
        inputs=[input_var, target_var],
        outputs=[T.argmax(predictions, axis=1), loss]
    ) 
Example #28
Source File: resnet.py    From luna16 with BSD 2-Clause "Simplified" License
def define_updates(output_layer, X, Y):
    output_train = lasagne.layers.get_output(output_layer)
    output_test = lasagne.layers.get_output(output_layer, deterministic=True)

    # Set up the loss we aim to minimize; with categorical cross-entropy, Y should be integer class ids, not one-hot
    loss = lasagne.objectives.categorical_crossentropy(T.clip(output_train,0.000001,0.999999), Y)
    loss = loss.mean()

    acc = T.mean(T.eq(T.argmax(output_train, axis=1), Y), dtype=theano.config.floatX)

    # if using ResNet use L2 regularization
    all_layers = lasagne.layers.get_all_layers(output_layer)
    l2_penalty = lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2) * P.L2_LAMBDA
    loss = loss + l2_penalty

    # set up loss functions for validation dataset
    test_loss = lasagne.objectives.categorical_crossentropy(T.clip(output_test,0.000001,0.999999), Y)
    test_loss = test_loss.mean()
    test_loss = test_loss + l2_penalty

    test_acc = T.mean(T.eq(T.argmax(output_test, axis=1), Y), dtype=theano.config.floatX)

    # Get the network parameters and set up SGD with Nesterov momentum; l_r is a shared variable so it can be changed during training
    l_r = theano.shared(np.array(LR_SCHEDULE[0], dtype=theano.config.floatX))
    params = lasagne.layers.get_all_params(output_layer, trainable=True)
    updates = nesterov_momentum(loss, params, learning_rate=l_r, momentum=P.MOMENTUM)
    #updates = adam(loss, params, learning_rate=l_r)

    prediction_binary = T.argmax(output_train, axis=1)
    test_prediction_binary = T.argmax(output_test, axis=1)

    # set up training and prediction functions
    train_fn = theano.function(inputs=[X,Y], outputs=[loss, l2_penalty, acc, prediction_binary, output_train[:,1]], updates=updates)
    valid_fn = theano.function(inputs=[X,Y], outputs=[test_loss, l2_penalty, test_acc, test_prediction_binary, output_test[:,1]])

    return train_fn, valid_fn, l_r