Python sklearn.svm.SVC Examples

The following are 30 code examples showing how to use sklearn.svm.SVC(). They are extracted from open source projects; you can go to the original project or source file by following the links above each example.
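
Before the project examples, here is a minimal, self-contained sketch of the basic SVC workflow; the toy dataset and parameter values are illustrative only, not taken from any of the projects below.

# Minimal SVC usage sketch on a bundled toy dataset; illustrative only.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X_train, X_test, y_train, y_test = train_test_split(
    *load_iris(return_X_y=True), random_state=0)
clf = SVC(kernel='rbf', C=1.0, gamma='scale')
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))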

Example 1
Project: Video-Highlight-Detection   Author: qijiezhao   File: classifier.py    License: MIT License
def _build_model(self, model_name, params=None):
        # Pick the kernel once so both branches below can use it: fall back
        # to the chi-squared kernel unless 'linear' or 'rbf' was requested.
        kernel_function = self.model_kernel if self.model_kernel in ('linear', 'rbf') else chi2_kernel
        if params is None:
            if model_name == 'xgb':
                self.model = XGBClassifier(n_estimators=100, learning_rate=0.02)
            elif model_name == 'svm':
                self.model = SVC(C=1, kernel=kernel_function, gamma=1, probability=True)
            elif model_name == 'lr':
                self.model = LR(C=1, penalty='l1', tol=1e-6)
        else:
            if model_name == 'xgb':
                self.model = XGBClassifier(n_estimators=1000, learning_rate=0.02, **params)
            elif model_name == 'svm':
                # pass the tuned parameters through; keys must not repeat
                # the explicitly set arguments
                self.model = SVC(kernel=kernel_function, probability=True, **params)
            elif model_name == 'lr':
                self.model = LR(penalty='l1', tol=1e-6, **params)

        log.l.info('=======> built the model {} done'.format(model_name))
Example 2
Project: interpret-text   Author: interpretml   File: common_utils.py    License: MIT License
def create_pandas_only_svm_classifier(X, y, probability=True):
    class PandasOnlyEstimator(TransformerMixin):
        def fit(self, X, y=None, **fitparams):
            return self

        def transform(self, X, **transformparams):
            dataset_is_df = isinstance(X, pd.DataFrame)
            if not dataset_is_df:
                raise Exception("Dataset must be a pandas dataframe!")
            return X

    pandas_only = PandasOnlyEstimator()

    clf = svm.SVC(gamma=0.001, C=100.0, probability=probability, random_state=777)
    pipeline = Pipeline([("pandas_only", pandas_only), ("clf", clf)])
    return pipeline.fit(X, y) 
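
A usage sketch (the dataset and imports below are illustrative, not part of the original excerpt): the returned pipeline accepts only pandas DataFrames.

# Hypothetical usage of create_pandas_only_svm_classifier above.
import pandas as pd
from sklearn.datasets import load_iris

iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)
model = create_pandas_only_svm_classifier(X, iris.target)
print(model.predict_proba(X.head()))   # fine: input is a DataFrame
# model.predict(iris.data) would raise "Dataset must be a pandas dataframe!"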
Example 3
Project: edge2vec   Author: RoyZhengGao   File: multi_class_classification.py    License: BSD 3-Clause "New" or "Revised" License
def multi_class_classification(data_X, data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''

    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # svm
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted))
Example 4
Project: MKLpy   Author: IvanoLauriola   File: GRAM.py    License: GNU General Public License v3.0
def __init__(self, 
        learner=SVC(C=1000), 
        multiclass_strategy='ova', 
        verbose=False,
        max_iter=1000, 
        learning_rate=0.01, 
        tolerance=1e-7, 
        callbacks=[], 
        scheduler=None ):

        super().__init__(
            learner=learner, 
            multiclass_strategy=multiclass_strategy, 
            max_iter=max_iter, 
            verbose=verbose, 
            tolerance=tolerance,
            callbacks=callbacks,
            scheduler=scheduler, 
            direction='min', 
            learning_rate=learning_rate, 
        )
        self.func_form = summation 
Example 5
Project: scVI   Author: YosefLab   File: annotation.py    License: MIT License
def compute_accuracy_svc(
    data_train,
    labels_train,
    data_test,
    labels_test,
    param_grid=None,
    verbose=0,
    max_iter=-1,
):
    if param_grid is None:
        param_grid = [
            {"C": [1, 10, 100, 1000], "kernel": ["linear"]},
            {"C": [1, 10, 100, 1000], "gamma": [0.001, 0.0001], "kernel": ["rbf"]},
        ]
    svc = SVC(max_iter=max_iter)
    clf = GridSearchCV(svc, param_grid, verbose=verbose, cv=3)
    return compute_accuracy_classifier(
        clf, data_train, labels_train, data_test, labels_test
    ) 
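
compute_accuracy_classifier is defined elsewhere in annotation.py; as a self-contained sketch of the same SVC-plus-GridSearchCV pattern (the digits dataset and split below are illustrative, not from scVI):

# Standalone sketch of the SVC + GridSearchCV pattern used above.
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

X_train, X_test, y_train, y_test = train_test_split(
    *load_digits(return_X_y=True), random_state=0)
param_grid = [
    {"C": [1, 10, 100, 1000], "kernel": ["linear"]},
    {"C": [1, 10, 100, 1000], "gamma": [0.001, 0.0001], "kernel": ["rbf"]},
]
clf = GridSearchCV(SVC(), param_grid, cv=3)
clf.fit(X_train, y_train)
print(clf.best_params_, clf.score(X_test, y_test))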
Example 6
Project: libact   Author: ntucllab   File: maximum_margin_reduction.py    License: BSD 2-Clause "Simplified" License
def __init__(self, *args, **kwargs):
        super(MaximumLossReductionMaximalConfidence, self).__init__(*args, **kwargs)

        # self.n_labels = len(self.dataset.get_labeled_entries()[0][1])
        self.n_labels = len(self.dataset.get_labeled_entries()[1][0])

        random_state = kwargs.pop('random_state', None)
        self.random_state_ = seed_random_state(random_state)

        self.logreg_param = kwargs.pop('logreg_param',
                                       {'multi_class': 'multinomial',
                                        'solver': 'newton-cg',
                                        'random_state': random_state})
        self.logistic_regression_ = LogisticRegression(**self.logreg_param)

        self.br_base = kwargs.pop('br_base',
              SklearnProbaAdapter(SVC(kernel='linear',
                                      probability=True,
                                      gamma="auto",
                                      random_state=random_state))) 
Example 7
Project: libact   Author: ntucllab   File: test_svm.py    License: BSD 2-Clause "Simplified" License
def test_svm(self):
        svc_clf = SVC(gamma="auto")
        svc_clf.fit(self.X_train, self.y_train)
        svm = SVM()
        svm.train(Dataset(self.X_train, self.y_train))

        assert_array_equal(
            svc_clf.predict(self.X_train), svm.predict(self.X_train))
        assert_array_equal(
            svc_clf.predict(self.X_test), svm.predict(self.X_test))
        self.assertEqual(
            svc_clf.score(self.X_train, self.y_train),
            svm.score(Dataset(self.X_train, self.y_train)))
        self.assertEqual(
            svc_clf.score(self.X_test, self.y_test),
            svm.score(Dataset(self.X_test, self.y_test))) 
Example 8
Project: RoBO   Author: automl   File: example_fabolas.py    License: BSD 3-Clause "New" or "Revised" License
def objective_function(x, s):
    # Start the clock to determine the cost of this function evaluation
    start_time = time.time()

    # Shuffle the data and split off the requested subset of the training data
    s_max = y_train.shape[0]
    shuffle = np.random.permutation(np.arange(s_max))
    train_subset = X_train[shuffle[:s]]
    train_targets_subset = y_train[shuffle[:s]]

    # Train the SVM on the training subset
    C = np.exp(float(x[0]))
    gamma = np.exp(float(x[1]))
    clf = svm.SVC(gamma=gamma, C=C)
    clf.fit(train_subset, train_targets_subset)
    
    # Validate this hyperparameter configuration on the full validation data
    y = 1 - clf.score(X_val, y_val)

    c = time.time() - start_time

    return y, c

# Load the data 
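
The excerpt is cut off before the data loading; as an illustrative call (assuming the globals X_train, y_train, X_val and y_val have been defined by that omitted code), FABOLAS-style evaluation passes log-space hyperparameters plus a subset size:

# Illustrative call; x holds log C and log gamma, s is the subset size.
import numpy as np

x = np.array([np.log(10.0), np.log(0.01)])  # C = 10, gamma = 0.01
validation_error, cost = objective_function(x, s=512)
print(validation_error, cost)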
Example 9
Project: stock-price-prediction   Author: chinuy   File: classifier.py    License: MIT License
def buildModel(dataset, method, parameters):
    """
    Build final model for predicting real testing data
    """
    features = dataset.columns[0:-1]

    if method == 'RNN':
        clf = performRNNlass(dataset[features], dataset['UpDown'])
        return clf

    elif method == 'RF':
        clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1)

    elif method == 'KNN':
        clf = neighbors.KNeighborsClassifier()

    elif method == 'SVM':
        c = parameters[0]
        g = parameters[1]
        clf = SVC(C=c, gamma=g)

    elif method == 'ADA':
        clf = AdaBoostClassifier()

    return clf.fit(dataset[features], dataset['UpDown']) 
Example 10
def sk_svm_train(intr, labeltr, inte, labelte, kener):
    clf = svm.SVC(kernel=kener)
    # Start training
    clf.fit(intr, labeltr)
    # Identifier used for plotting
    figsign = kener
    # Training accuracy
    acc_train = clf.score(intr, labeltr)
    # Test accuracy
    acc_test = clf.score(inte, labelte)
    # Number of support vectors
    vec_count = sum(clf.n_support_)
    # The support vectors themselves
    vectors = clf.support_vectors_

    return acc_train, acc_test, vec_count, vectors, figsign


# Result output function
Example 11
Project: aletheia   Author: daniellerch   File: models.py    License: MIT License
def _prepare_classifier(self, params, n_jobs=1):

        X_train, y_train = params

        tuned_parameters = [{
            'kernel': ['rbf'], 
            'gamma': [1e-4,1e-3,1e-2,1e-1,1e+0,1e+1,1e+2,1e+3,1e+4],
            'C': [1e+0,1e+1,1e+2,1e+3,1e+4,1e+5,1e+6,1e+7,1e+8,1e+9]
        }]

        clf=RandomizedSearchCV(svm.SVC(random_state=self.random_state), 
                               tuned_parameters[0], 
                               n_iter=self.n_randomized_search_iter, 
                               n_jobs=n_jobs, random_state=self.random_state)
        clf.fit(X_train, y_train)
              
        params=clf.best_params_
        clf=svm.SVC(kernel=params['kernel'], C=params['C'], 
            gamma=params['gamma'], probability=True, 
            random_state=self.random_state)
        clf.fit(X_train, y_train)

        return clf 
Example 12
Project: MachineLearning_Python   Author: lawlite19   File: SVM_scikit-learn.py    License: MIT License
def SVM():
    '''data1: linearly separable classification'''
    data1 = spio.loadmat('data1.mat')
    X = data1['X']
    y = data1['y']
    y = np.ravel(y)
    plot_data(X, y)

    model = svm.SVC(C=1.0, kernel='linear').fit(X, y)  # specify a linear kernel
    plot_decisionBoundary(X, y, model)  # plot the decision boundary
    '''data2: non-linear classification'''
    data2 = spio.loadmat('data2.mat')
    X = data2['X']
    y = data2['y']
    y = np.ravel(y)
    plt = plot_data(X, y)
    plt.show()

    model = svm.SVC(gamma=100).fit(X, y)  # gamma is the kernel coefficient; the larger it is, the tighter the fit to the training data
    plot_decisionBoundary(X, y, model, class_='notLinear')  # plot the decision boundary


# Plotting
Example 13
Project: WannaPark   Author: dalmia   File: more_data.py    License: GNU General Public License v3.0
def run_svms():
    svm_training_data, svm_validation_data, svm_test_data \
        = mnist_loader.load_data()
    accuracies = []
    for size in SIZES:
        print("\n\nTraining SVM with data set size %s" % size)
        clf = svm.SVC()
        clf.fit(svm_training_data[0][:size], svm_training_data[1][:size])
        predictions = [int(a) for a in clf.predict(svm_validation_data[0])]
        accuracy = sum(int(a == y) for a, y in
                       zip(predictions, svm_validation_data[1])) / 100.0
        print("Accuracy was %s percent" % accuracy)
        accuracies.append(accuracy)
    with open("more_data_svm.json", "w") as f:
        json.dump(accuracies, f)
Example 14
Project: m2cgen   Author: BayesWitnesses   File: utils.py    License: MIT License
def __call__(self, estimator):
        fitted_estimator = estimator.fit(self.X_train, self.y_train)

        if isinstance(estimator, (LinearClassifierMixin, SVC, NuSVC,
                                  LightBaseClassifier)):
            y_pred = estimator.decision_function(self.X_test)
        elif isinstance(estimator, DecisionTreeClassifier):
            y_pred = estimator.predict_proba(self.X_test.astype(np.float32))
        elif isinstance(
                estimator,
                (ForestClassifier, XGBClassifier, LGBMClassifier)):
            y_pred = estimator.predict_proba(self.X_test)
        else:
            y_pred = estimator.predict(self.X_test)

        return self.X_test, y_pred, fitted_estimator 
Example 15
Project: m2cgen   Author: BayesWitnesses   File: test_svm.py    License: MIT License
def test_linear_kernel():
    estimator = svm.SVC(kernel="linear", random_state=1)

    estimator.fit([[1], [2]], [1, 2])

    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        return ast.BinNumExpr(
            ast.NumVal(sup_vec_value),
            ast.FeatureRef(0),
            ast.BinNumOpType.MUL)

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])

    assert utils.cmp_exprs(actual, expected) 
Example 16
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_bagging.py    License: MIT License
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC(gamma="scale")]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test) 
Example 17
Project: sklearn-audio-transfer-learning   Author: jordipons   File: audio_transfer_learning.py    License: ISC License
def define_classification_model():
    """ Select and define the model you will use for the classifier.
    """
    if config['model_type'] == 'linearSVM': # linearSVM can be faster than SVM
        return LinearSVC(C=1)
    elif config['model_type'] == 'SVM': # non-linear SVM, we can use the kernel trick
        return SVC(C=1, kernel='rbf', gamma='scale')
    elif config['model_type'] == 'kNN': # k-nearest neighbour
        return KNeighborsClassifier(n_neighbors=1, metric='cosine')
    elif config['model_type'] == 'perceptron': # optimizes log-loss, also known as cross-entropy, with sgd
        return SGDClassifier(max_iter=600, verbose=0.5, loss='log', learning_rate='optimal')
    elif config['model_type'] == 'MLP': # optimizes log-loss, also known as cross-entropy, with sgd
        return MLPClassifier(hidden_layer_sizes=(20,), max_iter=600, verbose=10,
               solver='sgd', learning_rate='constant', learning_rate_init=0.001)
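
define_classification_model reads a module-level config dict; a hedged usage sketch follows (the config shape is inferred from the excerpt, not confirmed against the project):

# Illustrative; 'config' is a module-level dict in the original project.
config = {'model_type': 'SVM'}
model = define_classification_model()
print(model)  # prints the configured SVC estimator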
Example 18
Project: transferlearning   Author: jindongwang   File: SFA.py    License: MIT License
def __init__(self, l=500, K=100, base_classifer=svm.SVC()):
        self.l = l
        self.K = K
        self.m = 0
        self.ut = None
        self.phi = 1
        self.base_classifer = base_classifer
        self.ix = None
        self._ix = None
Example 19
Project: transferlearning   Author: jindongwang   File: proxy_a_distance.py    License: MIT License
def proxy_a_distance(source_X, target_X, verbose=False):
    """
    Compute the Proxy-A-Distance of a source/target representation
    """
    nb_source = np.shape(source_X)[0]
    nb_target = np.shape(target_X)[0]

    if verbose:
        print('PAD on', (nb_source, nb_target), 'examples')

    C_list = np.logspace(-5, 4, 10)

    half_source, half_target = int(nb_source/2), int(nb_target/2)
    train_X = np.vstack((source_X[0:half_source, :], target_X[0:half_target, :]))
    train_Y = np.hstack((np.zeros(half_source, dtype=int), np.ones(half_target, dtype=int)))

    test_X = np.vstack((source_X[half_source:, :], target_X[half_target:, :]))
    test_Y = np.hstack((np.zeros(nb_source - half_source, dtype=int), np.ones(nb_target - half_target, dtype=int)))

    best_risk = 1.0
    for C in C_list:
        clf = svm.SVC(C=C, kernel='linear', verbose=False)
        clf.fit(train_X, train_Y)

        train_risk = np.mean(clf.predict(train_X) != train_Y)
        test_risk = np.mean(clf.predict(test_X) != test_Y)

        if verbose:
            print('[ PAD C = %f ] train risk: %f  test risk: %f' % (C, train_risk, test_risk))

        if test_risk > .5:
            test_risk = 1. - test_risk

        best_risk = min(best_risk, test_risk)

    return 2 * (1. - 2 * best_risk) 
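
As a quick, purely illustrative check on synthetic data: identical domains should give a PAD near 0, while clearly shifted domains approach 2.

# Illustrative call on synthetic data; not part of the original project.
import numpy as np

rng = np.random.RandomState(0)
source = rng.normal(0.0, 1.0, size=(200, 5))
target = rng.normal(3.0, 1.0, size=(200, 5))  # clearly shifted domain
print(proxy_a_distance(source, source))  # near 0: domains indistinguishable
print(proxy_a_distance(source, target))  # near 2: domains easily separable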
Example 20
Project: interpret-text   Author: interpretml   File: common_utils.py    License: MIT License
def create_sklearn_svm_classifier(X, y, probability=True):
    clf = svm.SVC(gamma=0.001, C=100.0, probability=probability, random_state=777)
    model = clf.fit(X, y)
    return model 
Example 21
Project: razzy-spinner   Author: rafasashi   File: transitionparser.py    License: GNU General Public License v3.0
def train(self, depgraphs, modelfile):
        """
        :param depgraphs : list of DependencyGraph as the training data
        :type depgraphs : DependencyGraph
        :param modelfile : file name to save the trained model
        :type modelfile : str
        """

        try:
            input_file = tempfile.NamedTemporaryFile(
                prefix='transition_parse.train',
                dir=tempfile.gettempdir(),
                delete=False)

            if self._algorithm == self.ARC_STANDARD:
                self._create_training_examples_arc_std(depgraphs, input_file)
            else:
                self._create_training_examples_arc_eager(depgraphs, input_file)

            input_file.close()
            # Using the temporary file to train the libsvm classifier
            x_train, y_train = load_svmlight_file(input_file.name)
            # The parameters are set according to the paper:
            # "Algorithms for Deterministic Incremental Dependency Parsing" by Joakim Nivre
            # TODO: probability=True makes training very slow because SVC runs
            # an internal cross-validation; the speed here needs improving.
            model = svm.SVC(
                kernel='poly',
                degree=2,
                coef0=0,
                gamma=0.2,
                C=0.5,
                verbose=True,
                probability=True)

            model.fit(x_train, y_train)
            # Save the model to file name (as pickle)
            pickle.dump(model, open(modelfile, 'wb'))
        finally:
            remove(input_file.name) 
Example 22
Project: ConvLab   Author: ConvLab   File: Classifier.py    License: MIT License
def trainSVMwrapper(X,y):
    model = svm.SVC(kernel='linear', C=1)
    model.probability = True
    # model.class_weight = 'auto'
    model.fit(X, y)
    return model 
Example 23
Project: ConvLab   Author: ConvLab   File: Classifier.py    License: MIT License
def pickC(self, X, y):
        Cs = [1, 0.1, 5, 10, 50]  # 1 goes first as it should be preferred
        scores = []
        n = X.shape[0]
        dev_index = max([int(n*0.8), 1+y.index(1)])
        max_score = 0.0
        self.C = Cs[0]
        # NOTE: the early return below disables the validation search that
        # follows, so C stays fixed at Cs[0].
        print("Warning, not picking C from validation")
        return
        for i, C in enumerate(Cs):
            # svm.sparse.SVC was removed from modern scikit-learn;
            # plain svm.SVC accepts sparse input directly.
            this_model = svm.sparse.SVC(C=C, kernel='linear')
            this_model.probability = False
            this_model.class_weight = 'auto'
            
            this_model.fit(X[:dev_index,:],y[:dev_index])
            pred = this_model.predict(X)
            train_correct = 0.0
            dev_correct = 0.0
            for j, y_j in enumerate(y):
                if j < dev_index :
                    train_correct += int(y_j == pred[j])
                else :
                    dev_correct += int(y_j == pred[j])
            train_acc = train_correct/dev_index
            dev_acc = dev_correct/(n-dev_index)
            score = (0.1*train_acc + 0.9*dev_acc)
            print("\tfor C=%.2f;\n\t\t train_acc=%.4f, dev_acc=%.4f, score=%.4f" % (C, train_acc, dev_acc, score))
            if score > max_score :
                max_score = score
                self.C = C
            if score == 1.0 :
                break
        print("Selected C=%.2f"%self.C) 
Example 24
Project: ConvLab   Author: ConvLab   File: Classifier.py    License: MIT License
def train(self, X, y):
        # print('train')
        # print(X[0])
        # print(type(X[0]))
        # print(numpy.shape(X))
        # print(y[0])
        self.pickC(X, y)
        #model = svm.sparse.SVC(kernel='linear', C=self.C)
        model = svm.SVC(kernel='linear', C=self.C)
        model.probability=True
        # model.class_weight = 'auto'
        model.fit(X,y)
        self.model = model 
Example 25
Project: me-ica   Author: ME-ICA   File: select_model_fft20e.py    License: GNU Lesser General Public License v2.1
def do_svm(train_set,train_labs,test_set,svmtype=0):
	if svmtype==2: probability=True
	else: probability = False
	clf = svm.SVC(kernel='linear',probability=probability)
	if svmtype==1: clf = svm.LinearSVC(loss='squared_hinge',penalty='l1',dual=False)
	clf.fit(train_set,train_labs)
	return clf.predict(test_set),clf 
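
A hedged usage sketch with random stand-in arrays (not the ME-ICA feature sets):

# Illustrative call of do_svm; data are synthetic stand-ins.
import numpy as np

rng = np.random.RandomState(0)
train_set, train_labs = rng.rand(40, 6), rng.randint(0, 2, 40)
test_set = rng.rand(10, 6)
preds, clf = do_svm(train_set, train_labs, test_set, svmtype=1)  # L1-penalised LinearSVC
print(preds)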
Example 26
Project: me-ica   Author: ME-ICA   File: select_model_fft20d.py    License: GNU Lesser General Public License v2.1
def do_svm(train_set,train_labs,test_set,svmtype=0):
	if svmtype==2: probability=True
	else: probability = False
	clf = svm.SVC(kernel='linear',probability=probability)
	if svmtype==1: clf = svm.LinearSVC(loss='squared_hinge',penalty='l1',dual=False)
	clf.fit(train_set,train_labs)
	return clf.predict(test_set),clf 
Example 27
Project: fake-news-detection   Author: aldengolab   File: model_loop.py    License: MIT License
def define_clfs_params(self):
        '''
        Defines all relevant parameters and classes for classfier objects.
        Edit these if you wish to change parameters.
        '''
        # These are the classifiers
        self.clfs = {
            'RF': RandomForestClassifier(n_estimators = 50, n_jobs = -1),
            'ET': ExtraTreesClassifier(n_estimators = 10, n_jobs = -1, criterion = 'entropy'),
            'AB': AdaBoostClassifier(DecisionTreeClassifier(max_depth = 1), algorithm = "SAMME", n_estimators = 200),  # max_depth must be a single int, not a list
            'LR': LogisticRegression(penalty = 'l1', C = 1e5),
            'SVM': svm.SVC(kernel = 'linear', probability = True, random_state = 0),
            'GB': GradientBoostingClassifier(learning_rate = 0.05, subsample = 0.5, max_depth = 6, n_estimators = 10),
            'NB': GaussianNB(),
            'DT': DecisionTreeClassifier(),
            'SGD': SGDClassifier(loss = 'log', penalty = 'l2'),
            'KNN': KNeighborsClassifier(n_neighbors = 3)
            }
        # These are the parameters which will be run through
        self.params = {
             'RF':{'n_estimators': [1,10,100,1000], 'max_depth': [10, 15,20,30,40,50,60,70,100], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10], 'random_state': [1]},
             'LR': {'penalty': ['l1','l2'], 'C': [0.00001,0.0001,0.001,0.01,0.1,1,10], 'random_state': [1]},
             'SGD': {'loss': ['log'], 'penalty': ['l2','l1','elasticnet'], 'random_state': [1]},
             'ET': {'n_estimators': [1,10,100,1000], 'criterion' : ['gini', 'entropy'], 'max_depth': [1,3,5,10,15], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10], 'random_state': [1]},
             'AB': {'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [1,10,100,1000], 'random_state': [1]},
             'GB': {'n_estimators': [1,10,100,1000], 'learning_rate' : [0.001,0.01,0.05,0.1,0.5],'subsample' : [0.1,0.5,1.0], 'max_depth': [1,3,5,10,20,50,100], 'random_state': [1]},
             'NB': {},
             'DT': {'criterion': ['gini', 'entropy'], 'max_depth': [1,2,15,20,30,40,50], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10], 'random_state': [1]},
             'SVM' :{'C' :[0.00001,0.0001,0.001,0.01,0.1,1,10],'kernel':['linear'], 'random_state': [1]},
             'KNN' :{'n_neighbors': [1,5,10,25,50,100],'weights': ['uniform','distance'],'algorithm': ['auto','ball_tree','kd_tree']}
             } 
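
A sketch of how such clfs/params dictionaries are typically consumed; the loop below is illustrative, not the project's own model_loop code.

# Illustrative consumer of the clfs/params dictionaries above.
from sklearn.model_selection import GridSearchCV

def grid_search_all(clfs, params, X, y):
    for name, clf in clfs.items():
        search = GridSearchCV(clf, params[name], cv=3, n_jobs=-1)
        search.fit(X, y)
        print(name, search.best_score_, search.best_params_)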
Example 28
Project: OpenChem   Author: Mariewelt   File: vanilla_model.py    License: MIT License
def __init__(self, model_type='classifier', n_ensemble=5):
        super(SVMQSAR, self).__init__()
        self.n_ensemble = n_ensemble
        self.model = []
        self.model_type = model_type
        if self.model_type == 'classifier':
            for i in range(n_ensemble):
                self.model.append(SVC())
        elif self.model_type == 'regressor':
            for i in range(n_ensemble):
                self.model.append(SVR())
        else:
            raise ValueError('invalid value for argument') 
Example 29
Project: proxy-a-distance   Author: rpryzant   File: model.py    License: MIT License
def train_on_batch(self, domains, x, x_lens, y, y_lens, c=3000):
        """ Train svm on some data
        """
        self.model = svm.SVC(C=c, probability=True, verbose=2)
        examples = self.prepare_examples(x, x_lens, y, y_lens)
        labels = self.prepare_labels(domains)
        self.model.fit(examples, labels) 
Example 30
Project: MKLpy   Author: IvanoLauriola   File: MEMO.py    License: GNU General Public License v3.0
def __init__(self, learner=SVC(C=1000), multiclass_strategy='ova', verbose=False,
				theta=0.0, max_iter=1000, learning_rate=0.01, callbacks=[]):
		super().__init__(learner=learner, multiclass_strategy=multiclass_strategy,
			max_iter=max_iter, verbose=verbose, callbacks=callbacks)
		self.theta = theta
		self.func_form = summation
		print('warning: MEMO needs refactoring and a parameters check; please contact the author if you want to use MEMO')