Python sklearn.svm.SVC Examples
The following are 30 code examples of sklearn.svm.SVC(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module sklearn.svm.
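
Before working through the project-specific examples below, here is a minimal, self-contained sketch of the typical SVC workflow. The dataset choice and parameter values here are illustrative, not tuned:

# A minimal SVC workflow: fit on a train split, score on a held-out split.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = SVC(kernel="rbf", gamma="scale", C=1.0)  # illustrative defaults, not tuned
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # mean accuracy on the held-out split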
Example #1
Source File: multi_class_classification.py From edge2vec with BSD 3-Clause "New" or "Revised" License
def multi_class_classification(data_X, data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''
    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # svm
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    # note: jaccard_similarity_score was renamed to jaccard_score in newer scikit-learn
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted))
Example #2
Source File: common_utils.py From interpret-text with MIT License
def create_pandas_only_svm_classifier(X, y, probability=True):
    class PandasOnlyEstimator(TransformerMixin):
        def fit(self, X, y=None, **fitparams):
            return self

        def transform(self, X, **transformparams):
            dataset_is_df = isinstance(X, pd.DataFrame)
            if not dataset_is_df:
                raise Exception("Dataset must be a pandas dataframe!")
            return X

    pandas_only = PandasOnlyEstimator()
    clf = svm.SVC(gamma=0.001, C=100.0, probability=probability, random_state=777)
    pipeline = Pipeline([("pandas_only", pandas_only), ("clf", clf)])
    return pipeline.fit(X, y)
Example #3
Source File: classifier.py From Video-Highlight-Detection with MIT License
def _build_model(self, model_name, params=None):
    if model_name == 'svm':
        # hoisted so it is defined for both branches below (the original code
        # only assigned it in the params-is-None branch, causing a NameError)
        kernel_function = chi2_kernel if not (self.model_kernel == 'linear' or self.model_kernel == 'rbf') else self.model_kernel
    if params is None:
        if model_name == 'xgb':
            self.model = XGBClassifier(n_estimators=100, learning_rate=0.02)
        elif model_name == 'svm':
            self.model = SVC(C=1, kernel=kernel_function, gamma=1, probability=True)
        elif model_name == 'lr':
            self.model = LR(C=1, penalty='l1', tol=1e-6)
    else:
        if model_name == 'xgb':
            self.model = XGBClassifier(n_estimators=1000, learning_rate=0.02, **params)
        elif model_name == 'svm':
            self.model = SVC(C=1, kernel=kernel_function, gamma=1, probability=True)
        elif model_name == 'lr':
            self.model = LR(C=1, penalty='l1', tol=1e-6)
    log.l.info('=======> built the model {} done'.format(self.model_name))
Example #4
Source File: example_fabolas.py From RoBO with BSD 3-Clause "New" or "Revised" License
def objective_function(x, s):
    # Start the clock to determine the cost of this function evaluation
    start_time = time.time()

    # Shuffle the data and split off the requested subset of the training data
    s_max = y_train.shape[0]
    shuffle = np.random.permutation(np.arange(s_max))
    train_subset = X_train[shuffle[:s]]
    train_targets_subset = y_train[shuffle[:s]]

    # Train the SVM on the subset
    C = np.exp(float(x[0]))
    gamma = np.exp(float(x[1]))
    clf = svm.SVC(gamma=gamma, C=C)
    clf.fit(train_subset, train_targets_subset)

    # Validate this hyperparameter configuration on the full validation data
    y = 1 - clf.score(X_val, y_val)
    c = time.time() - start_time

    return y, c


# Load the data
Example #5
Source File: test_bagging.py From Mastering-Elasticsearch-7.0 with MIT License
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC(gamma="scale")]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)
Example #6
Source File: test_svm.py From m2cgen with MIT License
def test_linear_kernel():
    estimator = svm.SVC(kernel="linear", random_state=1)

    estimator.fit([[1], [2]], [1, 2])

    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        return ast.BinNumExpr(
            ast.NumVal(sup_vec_value),
            ast.FeatureRef(0),
            ast.BinNumOpType.MUL)

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])

    assert utils.cmp_exprs(actual, expected)
Example #7
Source File: utils.py From m2cgen with MIT License
def __call__(self, estimator):
    fitted_estimator = estimator.fit(self.X_train, self.y_train)

    if isinstance(estimator, (LinearClassifierMixin, SVC, NuSVC,
                              LightBaseClassifier)):
        y_pred = estimator.decision_function(self.X_test)
    elif isinstance(estimator, DecisionTreeClassifier):
        y_pred = estimator.predict_proba(self.X_test.astype(np.float32))
    elif isinstance(
            estimator,
            (ForestClassifier, XGBClassifier, LGBMClassifier)):
        y_pred = estimator.predict_proba(self.X_test)
    else:
        y_pred = estimator.predict(self.X_test)

    return self.X_test, y_pred, fitted_estimator
Example #8
Source File: more_data.py From WannaPark with GNU General Public License v3.0
def run_svms():
    svm_training_data, svm_validation_data, svm_test_data \
        = mnist_loader.load_data()
    accuracies = []
    for size in SIZES:
        print("\n\nTraining SVM with data set size %s" % size)
        clf = svm.SVC()
        clf.fit(svm_training_data[0][:size], svm_training_data[1][:size])
        predictions = [int(a) for a in clf.predict(svm_validation_data[0])]
        accuracy = sum(int(a == y) for a, y in
                       zip(predictions, svm_validation_data[1])) / 100.0
        print("Accuracy was %s percent" % accuracy)
        accuracies.append(accuracy)
    f = open("more_data_svm.json", "w")
    json.dump(accuracies, f)
    f.close()
Example #9
Source File: models.py From aletheia with MIT License
def _prepare_classifier(self, params, n_jobs=1):
    X_train, y_train = params

    tuned_parameters = [{
        'kernel': ['rbf'],
        'gamma': [1e-4, 1e-3, 1e-2, 1e-1, 1e+0, 1e+1, 1e+2, 1e+3, 1e+4],
        'C': [1e+0, 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9]
    }]

    clf = RandomizedSearchCV(svm.SVC(random_state=self.random_state),
                             tuned_parameters[0],
                             n_iter=self.n_randomized_search_iter,
                             n_jobs=n_jobs, random_state=self.random_state)
    clf.fit(X_train, y_train)

    params = clf.best_params_
    clf = svm.SVC(kernel=params['kernel'], C=params['C'],
                  gamma=params['gamma'], probability=True,
                  random_state=self.random_state)
    clf.fit(X_train, y_train)

    return clf
Example #10
Source File: GRAM.py From MKLpy with GNU General Public License v3.0
def __init__(self,
             learner=SVC(C=1000),
             multiclass_strategy='ova',
             verbose=False,
             max_iter=1000,
             learning_rate=0.01,
             tolerance=1e-7,
             callbacks=[],
             scheduler=None):
    super().__init__(
        learner=learner,
        multiclass_strategy=multiclass_strategy,
        max_iter=max_iter,
        verbose=verbose,
        tolerance=tolerance,
        callbacks=callbacks,
        scheduler=scheduler,
        direction='min',
        learning_rate=learning_rate,
    )
    self.func_form = summation
Example #11
Source File: SVM_scikit-learn.py From MachineLearning_Python with MIT License
def SVM():
    '''data1 -- linear classification'''
    data1 = spio.loadmat('data1.mat')
    X = data1['X']
    y = data1['y']
    y = np.ravel(y)
    plot_data(X, y)

    model = svm.SVC(C=1.0, kernel='linear').fit(X, y)  # use a linear kernel
    plot_decisionBoundary(X, y, model)  # plot the decision boundary

    '''data2 -- nonlinear classification'''
    data2 = spio.loadmat('data2.mat')
    X = data2['X']
    y = data2['y']
    y = np.ravel(y)
    plt = plot_data(X, y)
    plt.show()

    model = svm.SVC(gamma=100).fit(X, y)  # gamma is the kernel coefficient; larger values fit the training data more tightly
    plot_decisionBoundary(X, y, model, class_='notLinear')  # plot the decision boundary


# plotting
Example #12
Source File: Sklearn_Classify_SVM.py From Machine-Learning-for-Beginner-by-Python3 with MIT License
def sk_svm_train(intr, labeltr, inte, labelte, kener):
    clf = svm.SVC(kernel=kener)
    # start training
    clf.fit(intr, labeltr)
    # label used when plotting
    figsign = kener
    # training accuracy
    acc_train = clf.score(intr, labeltr)
    # test accuracy
    acc_test = clf.score(inte, labelte)
    # number of support vectors
    vec_count = sum(clf.n_support_)
    # the support vectors themselves
    vectors = clf.support_vectors_

    return acc_train, acc_test, vec_count, vectors, figsign


# result output function
Example #13
Source File: annotation.py From scVI with MIT License
def compute_accuracy_svc(
    data_train,
    labels_train,
    data_test,
    labels_test,
    param_grid=None,
    verbose=0,
    max_iter=-1,
):
    if param_grid is None:
        param_grid = [
            {"C": [1, 10, 100, 1000], "kernel": ["linear"]},
            {"C": [1, 10, 100, 1000], "gamma": [0.001, 0.0001], "kernel": ["rbf"]},
        ]
    svc = SVC(max_iter=max_iter)
    clf = GridSearchCV(svc, param_grid, verbose=verbose, cv=3)
    return compute_accuracy_classifier(
        clf, data_train, labels_train, data_test, labels_test
    )
Example #14
Source File: maximum_margin_reduction.py From libact with BSD 2-Clause "Simplified" License
def __init__(self, *args, **kwargs):
    super(MaximumLossReductionMaximalConfidence, self).__init__(*args, **kwargs)

    # self.n_labels = len(self.dataset.get_labeled_entries()[0][1])
    self.n_labels = len(self.dataset.get_labeled_entries()[1][0])

    random_state = kwargs.pop('random_state', None)
    self.random_state_ = seed_random_state(random_state)

    self.logreg_param = kwargs.pop('logreg_param',
                                   {'multi_class': 'multinomial',
                                    'solver': 'newton-cg',
                                    'random_state': random_state})
    self.logistic_regression_ = LogisticRegression(**self.logreg_param)

    self.br_base = kwargs.pop('br_base',
                              SklearnProbaAdapter(SVC(kernel='linear',
                                                      probability=True,
                                                      gamma="auto",
                                                      random_state=random_state)))
Example #15
Source File: test_svm.py From libact with BSD 2-Clause "Simplified" License
def test_svm(self):
    svc_clf = SVC(gamma="auto")
    svc_clf.fit(self.X_train, self.y_train)
    svm = SVM()
    svm.train(Dataset(self.X_train, self.y_train))

    assert_array_equal(
        svc_clf.predict(self.X_train), svm.predict(self.X_train))
    assert_array_equal(
        svc_clf.predict(self.X_test), svm.predict(self.X_test))
    self.assertEqual(
        svc_clf.score(self.X_train, self.y_train),
        svm.score(Dataset(self.X_train, self.y_train)))
    self.assertEqual(
        svc_clf.score(self.X_test, self.y_test),
        svm.score(Dataset(self.X_test, self.y_test)))
Example #16
Source File: classifier.py From stock-price-prediction with MIT License
def buildModel(dataset, method, parameters):
    """
    Build final model for predicting real testing data
    """
    features = dataset.columns[0:-1]

    if method == 'RNN':
        clf = performRNNlass(dataset[features], dataset['UpDown'])
        return clf
    elif method == 'RF':
        clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
    elif method == 'KNN':
        clf = neighbors.KNeighborsClassifier()
    elif method == 'SVM':
        c = parameters[0]
        g = parameters[1]
        clf = SVC(C=c, gamma=g)
    elif method == 'ADA':
        clf = AdaBoostClassifier()

    return clf.fit(dataset[features], dataset['UpDown'])
Example #17
Source File: ros_svm.py From ROS-Programming-Building-Powerful-Robots with MIT License
def train(self):
    """
    This function will train the SVM
    """
    self.clf = svm.SVC(kernel='linear', C=1.0)
    self.clf.fit(self.x, self.y)
Example #18
Source File: svc.py From monasca-analytics with Apache License 2.0
def _get_best_detector(self, train, label):
    detector = svm.SVC(kernel='rbf')
    detector.fit(train, label)
    return detector
Example #19
Source File: test_svm.py From m2cgen with MIT License
def test_sigmoid_kernel():
    estimator = svm.SVC(kernel="sigmoid", random_state=1, gamma=2.0)

    estimator.fit([[1], [2]], [1, 2])

    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        return ast.TanhExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(estimator.gamma),
                    ast.BinNumExpr(
                        ast.NumVal(sup_vec_value),
                        ast.FeatureRef(0),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.MUL),
                ast.NumVal(0.0),
                ast.BinNumOpType.ADD))

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])

    assert utils.cmp_exprs(actual, expected)
Example #20
Source File: test_svm.py From m2cgen with MIT License
def test_poly_kernel():
    estimator = svm.SVC(kernel="poly", random_state=1, gamma=2.0, degree=2)

    estimator.fit([[1], [2]], [1, 2])

    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        return ast.PowExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(estimator.gamma),
                    ast.BinNumExpr(
                        ast.NumVal(sup_vec_value),
                        ast.FeatureRef(0),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.MUL),
                ast.NumVal(0.0),
                ast.BinNumOpType.ADD),
            ast.NumVal(estimator.degree))

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])

    assert utils.cmp_exprs(actual, expected)
Example #21
Source File: test_svm.py From m2cgen with MIT License
def test_rbf_kernel():
    estimator = svm.SVC(kernel="rbf", random_state=1, gamma=2.0)

    estimator.fit([[1], [2]], [1, 2])

    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    kernels = [_rbf_kernel_ast(estimator, 1.), _rbf_kernel_ast(estimator, 2.)]
    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_, kernels)

    assert utils.cmp_exprs(actual, expected)
Example #22
Source File: test_bagging.py From Mastering-Elasticsearch-7.0 with MIT License
def test_oob_score_classification():
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    for base_estimator in [DecisionTreeClassifier(), SVC(gamma="scale")]:
        clf = BaggingClassifier(base_estimator=base_estimator,
                                n_estimators=100,
                                bootstrap=True,
                                oob_score=True,
                                random_state=rng).fit(X_train, y_train)

        test_score = clf.score(X_test, y_test)

        assert_less(abs(test_score - clf.oob_score_), 0.1)

        # Test with few estimators
        assert_warns(UserWarning,
                     BaggingClassifier(base_estimator=base_estimator,
                                       n_estimators=1,
                                       bootstrap=True,
                                       oob_score=True,
                                       random_state=rng).fit,
                     X_train, y_train)
Example #23
Source File: test_stacking.py From PyShortTextCategorization with MIT License
def training_stacking(self):
    # loading NIH Reports
    nihdict = {'NCCAM': self.nihdict['NCCAM'], 'NCATS': self.nihdict['NCATS']}

    # maxent
    maxent_classifier = shorttext.classifiers.MaxEntClassifier()
    maxent_classifier.train(nihdict, nb_epochs=100)
    maxent_classifier.save_compact_model('./bio_maxent.bin')

    # SVM + LDA
    topicmodeler = shorttext.generators.LDAModeler()
    topicmodeler.train(nihdict, 8)
    topicdisclassifier = shorttext.classifiers.TopicVectorCosineDistanceClassifier(topicmodeler)
    topicmodeler.save_compact_model('./bio_lda.bin')
    svm_classifier = shorttext.classifiers.TopicVectorSkLearnClassifier(topicmodeler, SVC())
    svm_classifier.train(nihdict)
    svm_classifier.save_compact_model('./bio_svm.bin')

    # logistic
    stacked_classifier = LogisticStackedGeneralization({'maxent': maxent_classifier,
                                                        'svm': svm_classifier,
                                                        'topiccosine': topicdisclassifier})
    stacked_classifier.train(nihdict)
    stacked_classifier.save_compact_model('./bio_logistics.bin')

    return maxent_classifier, topicmodeler, svm_classifier, stacked_classifier
Example #24
Source File: class_w2v.py From 2016CCF-sougou with Apache License 2.0
def __init__(self, size=300):
    random_rate = 8240
    self.size = size
    self.svc = SVC(C=1, random_state=random_rate)
    self.LR = LogisticRegression(C=1.0, max_iter=100,
                                 class_weight='balanced',
                                 random_state=random_rate, n_jobs=-1)
    self.clf = LinearSVC(random_state=random_rate)
Example #25
Source File: ml.py From smrtsv2 with MIT License
def __init__(self, predictor, scaler):
    """
    Create an object for genotype predictions.

    :param predictor: Trained predictor. If string, then it must be a path to a `joblib` file
        containing the predictor object.
    :param scaler: Scaler fit to the data used for training. If string, then it must be a path
        to a `joblib` file containing the scaler object.
    """

    # Check arguments
    if predictor is None:
        raise ValueError('Cannot load genotyper predictor `None`')

    if scaler is None:
        raise ValueError('Cannot load feature scaler `None`')

    if isinstance(predictor, str):
        predictor = joblib.load(predictor)

    if isinstance(scaler, str):
        scaler = joblib.load(scaler)

    if not isinstance(predictor, SVC):
        raise ValueError('Predictor must be class sklearn.svm.SVC: Found "{}"'.format(type(predictor)))

    if not isinstance(scaler, StandardScaler):
        raise ValueError(
            'Scaler must be class sklearn.preprocessing.StandardScaler: Found "{}"'.format(type(scaler))
        )

    # Set fields
    self.predictor = predictor
    self.scaler = scaler
Example #26
Source File: classifier.py From stock-price-prediction with MIT License
def performSVMClass(X_train, y_train, X_test, y_test, parameters, savemodel):
    """
    SVM binary Classification
    """
    c = parameters[0]
    g = parameters[1]
    clf = SVC(C=c, gamma=g)
    clf.fit(X_train, y_train)
    accuracy = clf.score(X_test, y_test)
    return accuracy
Example #27
Source File: test_weight_boosting.py From Mastering-Elasticsearch-7.0 with MIT License
def test_base_estimator():
    # Test different base estimators.
    from sklearn.ensemble import RandomForestClassifier

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    clf = AdaBoostClassifier(RandomForestClassifier())
    clf.fit(X, y_regr)

    clf = AdaBoostClassifier(SVC(gamma="scale"), algorithm="SAMME")
    clf.fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor

    clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    clf.fit(X, y_regr)

    clf = AdaBoostRegressor(SVR(gamma='scale'), random_state=0)
    clf.fit(X, y_regr)

    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    clf = AdaBoostClassifier(SVC(gamma="scale"), algorithm="SAMME")
    assert_raises_regexp(ValueError, "worse than random",
                         clf.fit, X_fail, y_fail)
Example #28
Source File: audio_transfer_learning.py From sklearn-audio-transfer-learning with ISC License
def define_classification_model():
    """ Select and define the model you will use for the classifier. """
    if config['model_type'] == 'linearSVM':  # linearSVM can be faster than SVM
        return LinearSVC(C=1)
    elif config['model_type'] == 'SVM':  # non-linear SVM, we can use the kernel trick
        return SVC(C=1, kernel='rbf', gamma='scale')
    elif config['model_type'] == 'kNN':  # k-nearest neighbour
        return KNeighborsClassifier(n_neighbors=1, metric='cosine')
    elif config['model_type'] == 'perceptron':  # optimizes log-loss, also known as cross-entropy, with sgd
        return SGDClassifier(max_iter=600, verbose=0.5, loss='log', learning_rate='optimal')
    elif config['model_type'] == 'MLP':  # optimizes log-loss, also known as cross-entropy, with sgd
        return MLPClassifier(hidden_layer_sizes=(20,), max_iter=600, verbose=10,
                             solver='sgd', learning_rate='constant', learning_rate_init=0.001)
Example #29
Source File: 07_magic.py From sacred with MIT License
def get_model(C, gamma, kernel):
    return svm.SVC(C=C, kernel=kernel, gamma=gamma)
Example #30
Source File: test_voting.py From Mastering-Elasticsearch-7.0 with MIT License
def test_multilabel():
    """Check if error is raised for multilabel classification."""
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          random_state=123)
    clf = OneVsRestClassifier(SVC(kernel='linear'))

    eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')

    try:
        eclf.fit(X, y)
    except NotImplementedError:
        return