Python sklearn.metrics.accuracy_score() Examples

The following are 30 code examples of sklearn.metrics.accuracy_score(), drawn from open-source projects. You may also want to check out all available functions and classes of the module sklearn.metrics, or try the search function.
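As a quick orientation before the examples, here is a minimal, self-contained sketch of the function itself: accuracy_score(y_true, y_pred) returns the fraction of labels that match exactly, while normalize=False returns the raw count of correct predictions instead.

from sklearn.metrics import accuracy_score

y_true = [0, 1, 2, 2, 1]
y_pred = [0, 1, 1, 2, 1]

print(accuracy_score(y_true, y_pred))                   # 0.8 (fraction of matches)
print(accuracy_score(y_true, y_pred, normalize=False))  # 4 (count of matches)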
Example #1
Source File: main.py    From transferlearning with MIT License
def classify_1nn(data_train, data_test):
    '''
    Classification using 1NN
    Inputs: data_train, data_test: train and test csv file path
    Outputs: yprediction and accuracy
    '''
    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import accuracy_score
    from sklearn.preprocessing import StandardScaler
    data = {'src': np.loadtxt(data_train, delimiter=','),
            'tar': np.loadtxt(data_test, delimiter=',')}
    Xs, Ys = data['src'][:, :-1], data['src'][:, -1]
    Xt, Yt = data['tar'][:, :-1], data['tar'][:, -1]
    Xs = StandardScaler(with_mean=0, with_std=1).fit_transform(Xs)
    Xt = StandardScaler(with_mean=0, with_std=1).fit_transform(Xt)
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(Xs, Ys)
    ypred = clf.predict(Xt)
    acc = accuracy_score(y_true=Yt, y_pred=ypred)
    print('Acc: {:.4f}'.format(acc))
    return ypred, acc 
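A hypothetical invocation of the function above; the file names are placeholders for CSV files whose last column holds the class label.

# 'train.csv' and 'test.csv' are placeholder paths: features in every column
# but the last, class label in the last column.
ypred, acc = classify_1nn('train.csv', 'test.csv')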
Example #2
Source File: multi_class_classification.py    From edge2vec with BSD 3-Clause "New" or "Revised" License
def multi_class_classification(data_X,data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''

    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split( data_X, data_Y, test_size=0.4, random_state=0) 
    clf = svc.fit(data_X, data_Y) #svm
    # array = svc.coef_
    # print array
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print "accuracy",metrics.accuracy_score(data_Y, predicted)
    print "f1 score macro",metrics.f1_score(data_Y, predicted, average='macro') 
    print "f1 score micro",metrics.f1_score(data_Y, predicted, average='micro') 
    print "precision score",metrics.precision_score(data_Y, predicted, average='macro') 
    print "recall score",metrics.recall_score(data_Y, predicted, average='macro') 
    print "hamming_loss",metrics.hamming_loss(data_Y, predicted)
    print "classification_report", metrics.classification_report(data_Y, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted)
    # print "log_loss", metrics.log_loss(data_Y, predicted)
    print "zero_one_loss", metrics.zero_one_loss(data_Y, predicted)
    # print "AUC&ROC",metrics.roc_auc_score(data_Y, predicted)
    # print "matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted) 
Example #3
Source File: drop_connect_trainer.py    From MNIST-baselines with MIT License
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        inputs = Variable(torch.from_numpy(data.data_test[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1]), :]), requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs = model(inputs)
        prediction[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1])] = np.argmax(outputs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Example #4
Source File: link_prediction.py    From edge2vec with BSD 3-Clause "New" or "Revised" License
def evaluation_analysis(true_label,predicted): 
    '''
    return all metrics results
    '''
    print "accuracy",metrics.accuracy_score(true_label, predicted)
    print "f1 score macro",metrics.f1_score(true_label, predicted, average='macro')     
    print "f1 score micro",metrics.f1_score(true_label, predicted, average='micro') 
    print "precision score",metrics.precision_score(true_label, predicted, average='macro') 
    print "recall score",metrics.recall_score(true_label, predicted, average='macro') 
    print "hamming_loss",metrics.hamming_loss(true_label, predicted)
    print "classification_report", metrics.classification_report(true_label, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(true_label, predicted)
    print "log_loss", metrics.log_loss(true_label, predicted)
    print "zero_one_loss", metrics.zero_one_loss(true_label, predicted)
    print "AUC&ROC",metrics.roc_auc_score(true_label, predicted)
    print "matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted) 
Example #5
Source File: accuracy.py    From pipeline with MIT License
def calculate(self):
        if not self._predictions:
            raise PipelineError("You need to add predictions for calculating the accuracy first")

        y_pred = np.concatenate(self._predictions)
        y_true = np.concatenate(self._true_labels)

        if y_pred.shape[-1] == 1:
            # Binary classification
            y_pred = (y_pred >= self._border).astype("int")
        else:
            y_pred = np.argmax(y_pred, -1)

        if len(y_true.shape) != 1:
            y_true = np.argmax(y_true, -1)

        result = accuracy_score(y_true, y_pred)
        return {"accuracy": result} 
Example #6
Source File: utils.py    From Text-Classification-Models-Pytorch with MIT License
def evaluate_model(model, iterator):
    all_preds = []
    all_y = []
    for idx,batch in enumerate(iterator):
        if torch.cuda.is_available():
            batch = [Variable(record).cuda() for record in batch]
        else:
            batch = [Variable(record) for record in batch]  # keep tensors on CPU when CUDA is unavailable
        x, y = batch
        y_pred = model(x)
        predicted = torch.max(y_pred.cpu().data, 1)[1]
        all_preds.extend(predicted.numpy())
        all_y.extend(y.cpu().numpy())
        
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score 
Example #7
Source File: utils.py    From Attention-Gated-Networks with MIT License
def classification_scores(gts, preds, labels):
    accuracy        = metrics.accuracy_score(gts,  preds)
    class_accuracies = []
    for lab in labels: # TODO Fix
        class_accuracies.append(metrics.accuracy_score(gts[gts == lab], preds[gts == lab]))
    class_accuracies = np.array(class_accuracies)

    f1_micro        = metrics.f1_score(gts,        preds, average='micro')
    precision_micro = metrics.precision_score(gts, preds, average='micro')
    recall_micro    = metrics.recall_score(gts,    preds, average='micro')
    f1_macro        = metrics.f1_score(gts,        preds, average='macro')
    precision_macro = metrics.precision_score(gts, preds, average='macro')
    recall_macro    = metrics.recall_score(gts,    preds, average='macro')

    # class wise score
    f1s        = metrics.f1_score(gts,        preds, average=None)
    precisions = metrics.precision_score(gts, preds, average=None)
    recalls    = metrics.recall_score(gts,    preds, average=None)

    confusion = metrics.confusion_matrix(gts,preds, labels=labels)

    #TODO confusion matrix, recall, precision
    return accuracy, f1_micro, precision_micro, recall_micro, f1_macro, precision_macro, recall_macro, confusion, class_accuracies, f1s, precisions, recalls 
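For reference, the per-class accuracies computed by the masking loop above coincide with per-class recall, so they can equivalently be read off the row-normalised diagonal of the confusion matrix. A small sketch with made-up labels:

import numpy as np
from sklearn import metrics

gts   = np.array([0, 0, 1, 1, 1, 2])
preds = np.array([0, 1, 1, 1, 2, 2])

confusion = metrics.confusion_matrix(gts, preds, labels=[0, 1, 2])
# Diagonal over row sums == accuracy_score on each per-class mask above.
class_accuracies = confusion.diagonal() / confusion.sum(axis=1)
print(class_accuracies)  # [0.5  0.667  1.0] (approximately)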
Example #8
Source File: train_eval.py    From Bert-Chinese-Text-Classification-Pytorch with MIT License
def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)

    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter) 
Example #9
Source File: label_accuracy.py    From linguistic-style-transfer with Apache License 2.0
def get_label_accuracy(predictions_file_path, gold_labels_file_path, saved_model_path):
    with open(os.path.join(saved_model_path,
                           global_config.label_to_index_dict_file), 'r') as json_file:
        label_to_index_map = json.load(json_file)

    gold_labels = list()
    prediction_labels = list()

    with open(gold_labels_file_path) as gold_labels_file:
        for text_label in gold_labels_file:
            gold_labels.append(label_to_index_map[text_label.strip()])

    with open(predictions_file_path) as predictions_file:
        for label in predictions_file:
            prediction_labels.append(int(label.strip()))

    accuracy = metrics.accuracy_score(y_true=gold_labels, y_pred=prediction_labels)
    logger.info("Classification Accuracy: {}".format(accuracy)) 
Example #10
Source File: toxcast_maml.py    From deepchem with MIT License
def compute_scores(optimize):
  maml.restore()
  y_true = []
  y_pred = []
  losses = []
  for task in range(learner.n_training_tasks, n_tasks):
    learner.set_task_index(task)
    if optimize:
      maml.train_on_current_task(restore=True)
    inputs = learner.get_batch()
    loss, prediction = maml.predict_on_batch(inputs)
    y_true.append(inputs[1])
    y_pred.append(prediction[0][:, 0])
    losses.append(loss)
  y_true = np.concatenate(y_true)
  y_pred = np.concatenate(y_pred)
  print()
  print('Cross entropy loss:', np.mean(losses))
  print('Prediction accuracy:', accuracy_score(y_true, y_pred > 0.5))
  print('ROC AUC:', dc.metrics.roc_auc_score(y_true, y_pred))
  print() 
Example #11
Source File: textpro.py    From comparable-text-miner with Apache License 2.0
def evaluate(trueValues, predicted, decimals, note):
    print(note)
    label = 1
    avg = 'weighted'
    a = accuracy_score(trueValues, predicted)
    p = precision_score(trueValues, predicted, pos_label=label, average=avg)
    r = recall_score(trueValues, predicted, pos_label=label, average=avg)
    avg_f1 = f1_score(trueValues, predicted, pos_label=label, average=avg)
    fclasses = f1_score(trueValues, predicted, average=None)
    f1c1 = fclasses[0]; f1c2 = fclasses[1]
    fw = (f1c1 + f1c2) / 2.0

    print('accuracy:\t', str(round(a, decimals)))
    print('precision:\t', str(round(p, decimals)))
    print('recall:\t', str(round(r, decimals)))
    print('avg f1:\t', str(round(avg_f1, decimals)))
    print('c1 f1:\t', str(round(f1c1, decimals)))
    print('c2 f1:\t', str(round(f1c2, decimals)))
    print('avg(c1,c2):\t', str(round(fw, decimals)))
    print('------------')

###################################################################################


# split a parallel or comparable corpus into two parts 
Example #12
Source File: score_dataset.py    From snape with Apache License 2.0
def score_multiclass_classification(y, y_hat, report=True):
    """
    Create multiclass classification score
    :param y:
    :param y_hat:
    :return:
    """
    report_string = "---Multiclass Classification Score--- \n"
    report_string += classification_report(y, y_hat)
    score = accuracy_score(y, y_hat)
    report_string += "\nAccuracy = " + str(score)

    if report:
        print(report_string)

    return score, report_string 
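A quick usage sketch with toy labels (the values here are illustrative only):

y     = [0, 1, 2, 2, 1]
y_hat = [0, 2, 2, 2, 1]
score, report_string = score_multiclass_classification(y, y_hat, report=False)
print(score)  # 0.8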
Example #13
Source File: pcnn_model.py    From PCNN with Apache License 2.0
def run_evaluate(self, test):
        """Evaluates performance on test set

        Args:
            test: dataset that yields tuple of (sentences, relation tags)

        Returns:
            metrics: (dict) metrics["acc"] = 98.4, ...

        """
        y_true, y_pred = [], []
        for data in minibatches(test, self.config.batch_size):
            word_batch, pos1_batch, pos2_batch, pos_batch, y_batch = data
            relations_pred = self.predict_batch(word_batch, pos1_batch, pos2_batch, pos_batch)
            assert len(relations_pred) == len(y_batch)
            y_true += y_batch
            y_pred += relations_pred.tolist()

        acc = accuracy_score(y_true, y_pred)
        p   = precision_score(y_true, y_pred, average='macro')
        r   = recall_score(y_true, y_pred, average='macro')
        f1  = f1_score(y_true, y_pred, average='macro')

        return {"acc":acc, "p":p, "r":r, "f1":f1} 
Example #14
Source File: run.py    From fever-naacl-2018 with Apache License 2.0
def print_evaluation(model,data,ls,log=None):
    features,actual = data
    predictions = predict(model, features, 500).data.numpy().reshape(-1).tolist()

    labels = [ls.idx[i] for i, _ in enumerate(ls.idx)]

    actual = [labels[i] for i in actual]
    predictions = [labels[i] for i in predictions]

    print(accuracy_score(actual, predictions))
    print(classification_report(actual, predictions))
    print(confusion_matrix(actual, predictions))

    data = zip(actual,predictions)
    if log is not None:
        f = open(log, "w+")
        for a,p in data:
            f.write(json.dumps({"actual": a, "predicted": p}) + "\n")
        f.close() 
Example #15
Source File: dnn_trainer.py    From MNIST-baselines with MIT License
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        inputs = Variable(torch.from_numpy(data.data_test[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1]), :]), requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs = model(inputs)
        prediction[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1])] = np.argmax(outputs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Example #16
Source File: capsnet_trainer.py    From MNIST-baselines with MIT License
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        inputs = Variable(torch.from_numpy(data.data_test[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1]), :]), requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs, probs = model(inputs)
        prediction[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1])] = np.argmax(probs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Example #17
Source File: utils.py    From Text-Classification-Models-Pytorch with MIT License
def evaluate_model(model, iterator):
    all_preds = []
    all_y = []
    for idx,batch in enumerate(iterator):
        if torch.cuda.is_available():
            x = batch.text.cuda()
        else:
            x = batch.text
        y_pred = model(x)
        predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
        all_preds.extend(predicted.numpy())
        all_y.extend(batch.label.numpy())
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score 
Example #18
Source File: model.py    From Text-Classification-Models-Pytorch with MIT License
def run_epoch(self, train_data, val_data):
        train_x, train_y = train_data[0], train_data[1]
        val_x, val_y = val_data[0], val_data[1]
        iterator = data_iterator(train_x, train_y, self.config.batch_size)
        train_losses = []
        val_accuracies = []
        losses = []
    
        for i, (x,y) in enumerate(iterator):
            self.optimizer.zero_grad()
    
            x = Tensor(x).cuda()
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, torch.cuda.LongTensor(y-1))
            loss.backward()
    
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
    
            if (i + 1) % 50 == 0:
                print("Iter: {}".format(i+1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                
                # Evalute Accuracy on validation set
                self.eval()
                all_preds = []
                val_iterator = data_iterator(val_x, val_y, self.config.batch_size)
                for x, y in val_iterator:
                    x = Variable(Tensor(x))
                    y_pred = self.__call__(x.cuda())
                    predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
                    all_preds.extend(predicted.numpy())
                score = accuracy_score(val_y, np.array(all_preds).flatten())
                val_accuracies.append(score)
                print("\tVal Accuracy: {:.4f}".format(score))
                self.train()
                
        return train_losses, val_accuracies 
Example #19
Source File: train_predict_trees_batch3.py    From wsdm19cup with MIT License
def validate_predict(model, X, y, X_test, n_splits=10, seed=42, model_type='lgb', verbose=0, sample_weights=sample_weights):
    # NB: the default value of `sample_weights` refers to a module-level variable in the original script.
    preds = np.zeros((X.shape[0],3))
    preds_test = np.zeros((X_test.shape[0],3))
    cv_scores = []
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for idx_train, idx_val in skf.split(X, y):
        X_train, X_val = X[idx_train,:], X[idx_val,:]
        y_train, y_val = y[idx_train], y[idx_val]
        
        if model_type == 'lgb':
            model.fit(X_train, y_train,
                        eval_set=[(X_train, y_train), (X_val, y_val)],
                        verbose=verbose)
        else:
            model.fit(X_train, y_train)
            
        if hasattr(model, 'predict_proba'):
            yhat_val = model.predict_proba(X_val)
            preds_test = preds_test + model.predict_proba(X_test)
            preds[idx_val] = yhat_val
        else:
            yhat_val = model.predict(X_val)
            preds_test = preds_test + model.predict(X_test)
            preds[idx_val] = yhat_val
        cv_scores.append(accuracy_score(y_val, np.array(['agreed', 'disagreed', 'unrelated'])[np.argmax(yhat_val,axis=1)]))
    print("local cv", np.mean(cv_scores), np.std(cv_scores))
    print(f"Val accuracy: {accuracy_score(y, np.array(['agreed', 'disagreed', 'unrelated'])[np.argmax(preds,axis=1)], sample_weight=sample_weights):.5f}")
    preds_test /= n_splits       
    return preds, preds_test 
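A hypothetical call, assuming X, y, and X_test are already loaded as arrays with y drawn from the three classes hard-coded above; any model_type other than 'lgb' skips the LightGBM-specific eval_set arguments:

from sklearn.linear_model import LogisticRegression

# X, y, X_test are assumed to exist; y holds 'agreed'/'disagreed'/'unrelated'.
clf = LogisticRegression(max_iter=1000)
oof_preds, test_preds = validate_predict(clf, X, y, X_test, n_splits=5,
                                         model_type='sklearn', sample_weights=None)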
Example #20
Source File: train_predict_trees_batch2.py    From wsdm19cup with MIT License
def validate_predict(model, X, y, X_test, n_splits=10, seed=42, model_type='lgb', verbose=0, sample_weights=sample_weights):
    # NB: the default value of `sample_weights` refers to a module-level variable in the original script.
    preds = np.zeros((X.shape[0],3))
    preds_test = np.zeros((X_test.shape[0],3))
    cv_scores = []
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for idx_train, idx_val in skf.split(X, y):
        X_train, X_val = X[idx_train,:], X[idx_val,:]
        y_train, y_val = y[idx_train], y[idx_val]
        
        if model_type == 'lgb':
            model.fit(X_train, y_train,
                        eval_set=[(X_train, y_train), (X_val, y_val)],
                        verbose=verbose)
        else:
            model.fit(X_train, y_train)
            
        if hasattr(model, 'predict_proba'):
            yhat_val = model.predict_proba(X_val)
            preds_test = preds_test + model.predict_proba(X_test)
            preds[idx_val] = yhat_val
        else:
            yhat_val = model.predict(X_val)
            preds_test = preds_test + model.predict(X_test)
            preds[idx_val] = yhat_val
        cv_scores.append(accuracy_score(y_val, np.array(['agreed', 'disagreed', 'unrelated'])[np.argmax(yhat_val,axis=1)]))
    print("local cv", np.mean(cv_scores), np.std(cv_scores))
    print(f"Val accuracy: {accuracy_score(y, np.array(['agreed', 'disagreed', 'unrelated'])[np.argmax(preds,axis=1)], sample_weight=sample_weights):.5f}")
    preds_test /= n_splits       
    return preds, preds_test

### TRAIN - PREDICT ### 
Example #21
Source File: Deopen_classification.py    From Deopen with MIT License
def model_test(net, X_test, y_test, outputfile):
    #net.load_params_from('saved_weights_file')
    y_pred = net.predict(X_test)
    y_prob = net.predict_proba(X_test)
    print('Accuracy score is {}'.format(metrics.accuracy_score(y_test, y_pred)))
    print('ROC AUC score is {}'.format(metrics.roc_auc_score(y_test, y_prob[:, -1])))
    hkl.dump([y_prob[:, -1], y_test], outputfile)

#save model parameters 
Example #22
Source File: utils.py    From Text-Classification-Models-Pytorch with MIT License
def evaluate_model(model, iterator):
    all_preds = []
    all_y = []
    for idx,batch in enumerate(iterator):
        if torch.cuda.is_available():
            x = batch.text.cuda()
        else:
            x = batch.text
        y_pred = model(x)
        predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
        all_preds.extend(predicted.numpy())
        all_y.extend(batch.label.numpy())
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score 
Example #23
Source File: run.py    From fever-naacl-2018 with Apache License 2.0
def evaluate(model,data,labels,batch_size):
    predicted = predict(model,data,batch_size)
    return accuracy_score(labels,predicted.data.numpy().reshape(-1)) 
Example #24
Source File: CorrMCNN_Arch2.py    From DeepLearn with MIT License
def svm_classifier(train_x, train_y, valid_x, valid_y, test_x, test_y):
    
    clf = svm.LinearSVC()
    #print train_x.shape,train_y.shape
    clf.fit(train_x,train_y)
    pred = clf.predict(valid_x)
    va = accuracy_score(np.ravel(valid_y),np.ravel(pred))
    pred = clf.predict(test_x)
    ta = accuracy_score(np.ravel(test_y),np.ravel(pred))
    return va, ta 
Example #25
Source File: XRMB_CNN_17.06.v2.py    From DeepLearn with MIT License
def svm_classifier(train_x, train_y, valid_x, valid_y, test_x, test_y):
    
    clf = svm.LinearSVC()
    #print train_x.shape,train_y.shape
    clf.fit(train_x,train_y)
    pred = clf.predict(valid_x)
    va = accuracy_score(np.ravel(valid_y),np.ravel(pred))
    pred = clf.predict(test_x)
    ta = accuracy_score(np.ravel(test_y),np.ravel(pred))
    return va, ta 
Example #26
Source File: DeepLearn_cornet.py    From DeepLearn with MIT License
def svm_classifier(train_x, train_y, valid_x, valid_y, test_x, test_y):
    
    clf = svm.LinearSVC()
    #print train_x.shape,train_y.shape
    clf.fit(train_x,train_y)
    pred = clf.predict(valid_x)
    va = accuracy_score(np.ravel(valid_y),np.ravel(pred))
    pred = clf.predict(test_x)
    ta = accuracy_score(np.ravel(test_y),np.ravel(pred))
    return va, ta 
Example #27
Source File: attack.py    From robust_physical_perturbations with MIT License
def calculate_acc(self):
        assert FLAGS.validation_set is not None
        assert self.val_data is not None

        val_feed_dict = self.create_feed_dict(np.array(self.val_data), self.attack_graph)

        net_predictions = self.sess.run(tf.argmax(self.attack_graph.adv_pred, axis=1), \
                                        feed_dict=val_feed_dict)
        labels = [FLAGS.attack_target for _ in range(len(net_predictions))]
        
        val_feed_dict = None
        gc.collect()

        return accuracy_score(labels, net_predictions, normalize=True) 
Example #28
Source File: attack_util.py    From robust_physical_perturbations with MIT License
def model_eval(labels, net_predictions):
    return accuracy_score(labels, net_predictions, normalize=True) 
Example #29
Source File: eval_lcqmc_order.py    From BERT with Apache License 2.0
def eval_fn(result):
        i = 0
        total_accuracy = 0
        total_loss = 0.0
        pred_prob = []
        label, label_id = [], []
        label_weight = []
        while True:
            try:
                eval_result = sess.run(result)
                total_accuracy += eval_result["accuracy"]
                label_id.extend(eval_result["label_ids"])
                label.extend(eval_result["pred_label"])
                total_loss += eval_result["loss"]
                pred_prob.extend(eval_result["pred_prob"])
                # for item in eval_result["label_ids"]:
                #     label_weight.append(label_tensor[item])
                i += 1
            except tf.errors.OutOfRangeError:
                print("End of dataset")
                break
        # f1 = f1_score(label_id, label, average="macro", sample_weight=label_weight)
        # accuracy = accuracy_score(label_id, label, sample_weight=label_weight)
        f1 = f1_score(label_id, label, average="macro")
        accuracy = accuracy_score(label_id, label)

        print("test accuracy {} accuracy {} loss {} f1 {}".format(total_accuracy/i, 
            accuracy, total_loss/i, f1))
        return pred_prob 
Example #30
Source File: __init__.py    From PADME with MIT License
def balanced_accuracy_score(y, y_pred):
  """Computes balanced accuracy score."""
  num_positive = float(np.count_nonzero(y))
  num_negative = float(len(y) - num_positive)
  pos_weight = num_negative / num_positive
  weights = np.ones_like(y)
  weights[y != 0] = pos_weight
  return accuracy_score(y, y_pred, sample_weight=weights)
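For binary labels this weighting gives both classes equal total weight, which matches what sklearn.metrics.balanced_accuracy_score (available since scikit-learn 0.20) computes directly. A quick check sketch, assuming the function above is in scope:

import numpy as np
from sklearn.metrics import balanced_accuracy_score as sk_balanced

y      = np.array([0, 0, 0, 1])
y_pred = np.array([0, 1, 0, 1])

print(balanced_accuracy_score(y, y_pred))  # 0.8333... (mean of per-class recalls)
print(sk_balanced(y, y_pred))              # same value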