org.apache.spark.ml.classification.LogisticRegressionModel Java Examples

The following examples show how to use org.apache.spark.ml.classification.LogisticRegressionModel. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: JavaMulticlassLogisticRegressionWithElasticNetExample.java    From SparkDemo with MIT License 6 votes vote down vote up
public static void main(String[] args) {
    // Obtain (or reuse) a SparkSession for this example application.
    SparkSession session = SparkSession.builder()
            .appName("JavaMulticlassLogisticRegressionWithElasticNetExample")
            .getOrCreate();

    // $example on$
    // Read the multiclass training set, stored in LIBSVM format.
    Dataset<Row> trainingData = session.read().format("libsvm")
            .load("data/mllib/sample_multiclass_classification_data.txt");

    // Elastic-net regularized logistic regression: regParam sets the overall
    // penalty strength; elasticNetParam mixes L1 (1.0) and L2 (0.0).
    LogisticRegression estimator = new LogisticRegression();
    estimator.setMaxIter(10);
    estimator.setRegParam(0.3);
    estimator.setElasticNetParam(0.8);

    // Train the multinomial model on the loaded data.
    LogisticRegressionModel fitted = estimator.fit(trainingData);

    // A multinomial model exposes a coefficient matrix (one row per class)
    // and an intercept vector rather than scalar weights.
    System.out.println("Coefficients: \n"
            + fitted.coefficientMatrix() + " \nIntercept: " + fitted.interceptVector());
    // $example off$

    session.stop();
}
 
Example #2
Source File: LogisticRegressionModelInfoAdapter1.java    From spark-transformers with Apache License 2.0 6 votes vote down vote up
@Override
public LogisticRegressionModelInfo getModelInfo(final LogisticRegressionModel sparkLRModel) {
    // Copy the learned parameters out of the Spark model into the
    // framework-independent info object.
    final LogisticRegressionModelInfo info = new LogisticRegressionModelInfo();
    info.setWeights(sparkLRModel.coefficients().toArray());
    info.setIntercept(sparkLRModel.intercept());
    info.setNumClasses(sparkLRModel.numClasses());
    info.setNumFeatures(sparkLRModel.numFeatures());
    info.setThreshold(sparkLRModel.getThreshold());

    // Record the column names the transformer reads and writes;
    // LinkedHashSet keeps the declaration order stable.
    final Set<String> inputs = new LinkedHashSet<String>();
    inputs.add(sparkLRModel.getFeaturesCol());
    info.setInputKeys(inputs);

    final Set<String> outputs = new LinkedHashSet<String>();
    outputs.add(sparkLRModel.getPredictionCol());
    outputs.add(sparkLRModel.getProbabilityCol());
    info.setOutputKeys(outputs);

    return info;
}
 
Example #3
Source File: LogisticRegressionModelInfoAdapter1.java    From spark-transformers with Apache License 2.0 6 votes vote down vote up
@Override
public LogisticRegressionModelInfo getModelInfo(final LogisticRegressionModel sparkLRModel, DataFrame df) {
    // Copy the learned parameters out of the Spark model into the
    // framework-independent info object. The DataFrame parameter is part of
    // the adapter interface but is not consulted here.
    final LogisticRegressionModelInfo info = new LogisticRegressionModelInfo();
    info.setWeights(sparkLRModel.coefficients().toArray());
    info.setIntercept(sparkLRModel.intercept());
    info.setNumClasses(sparkLRModel.numClasses());
    info.setNumFeatures(sparkLRModel.numFeatures());
    info.setThreshold(sparkLRModel.getThreshold());
    info.setProbabilityKey(sparkLRModel.getProbabilityCol());

    // Record the column names the transformer reads and writes;
    // LinkedHashSet keeps the declaration order stable.
    final Set<String> inputs = new LinkedHashSet<String>();
    inputs.add(sparkLRModel.getFeaturesCol());
    info.setInputKeys(inputs);

    final Set<String> outputs = new LinkedHashSet<String>();
    outputs.add(sparkLRModel.getPredictionCol());
    outputs.add(sparkLRModel.getProbabilityCol());
    info.setOutputKeys(outputs);

    return info;
}
 
Example #4
Source File: JavaLogisticRegressionWithElasticNetExample.java    From SparkDemo with MIT License 5 votes vote down vote up
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaLogisticRegressionWithElasticNetExample")
    .getOrCreate();

  // $example on$
  // Load training data in LIBSVM format.
  Dataset<Row> training = spark.read().format("libsvm")
    .load("data/mllib/sample_libsvm_data.txt");

  // Elastic-net regularized logistic regression: regParam is the overall
  // penalty strength; elasticNetParam mixes L1 (1.0) and L2 (0.0).
  LogisticRegression lr = new LogisticRegression()
    .setMaxIter(10)
    .setRegParam(0.3)
    .setElasticNetParam(0.8);

  // Fit the (binomial) model
  LogisticRegressionModel lrModel = lr.fit(training);

  // Print the coefficients and intercept for logistic regression
  System.out.println("Coefficients: "
    + lrModel.coefficients() + " Intercept: " + lrModel.intercept());

  // We can also use the multinomial family for binary classification
  LogisticRegression mlr = new LogisticRegression()
          .setMaxIter(10)
          .setRegParam(0.3)
          .setElasticNetParam(0.8)
          .setFamily("multinomial");

  // Fit the model
  LogisticRegressionModel mlrModel = mlr.fit(training);

  // Print the coefficients and intercepts for logistic regression with multinomial family.
  // BUG FIX: the original printed lrModel.coefficientMatrix() (the binomial model's
  // coefficients) alongside mlrModel.interceptVector(); both values must come from
  // the multinomial model mlrModel.
  System.out.println("Multinomial coefficients: " + mlrModel.coefficientMatrix()
    + "\nMultinomial intercepts: " + mlrModel.interceptVector());
  // $example off$

  spark.stop();
}
 
Example #5
Source File: WhitespaceClassifier.java    From vn.vitk with GNU General Public License v3.0 5 votes vote down vote up
public void printModel() {
	// The logistic regression model is the third stage of the fitted
	// pipeline; dump its key parameters to stdout for inspection.
	LogisticRegressionModel classifier = (LogisticRegressionModel) model.stages()[2];
	System.out.println("intercept = " + classifier.intercept());
	System.out.println("number of features = " + classifier.numFeatures());
	System.out.println("regularization parameter = " + classifier.getRegParam());
	System.out.println(classifier.explainParams());
}
 
Example #6
Source File: LogisticRegression1BridgeTest.java    From spark-transformers with Apache License 2.0 5 votes vote down vote up
@Test
public void testLogisticRegression() {
    // Train on the small LIBSVM fixture bundled with the test resources.
    String datapath = "src/test/resources/binary_classification_test.libsvm";
    Dataset<Row> trainingData = spark.read().format("libsvm").load(datapath);

    LogisticRegressionModel lrmodel = new LogisticRegression().fit(trainingData);

    // Round-trip the model through the exporter and importer.
    byte[] exportedModel = ModelExporter.export(lrmodel);
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    // The imported transformer must reproduce Spark's prediction for every
    // point in the fixture.
    List<LabeledPoint> testPoints = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD().collect();
    for (LabeledPoint point : testPoints) {
        Vector features = point.features().asML();
        double expected = lrmodel.predict(features);

        Map<String, Object> row = new HashMap<String, Object>();
        row.put("features", features.toArray());
        transformer.transform(row);
        double predicted = (double) row.get("prediction");

        assertEquals(expected, predicted, 0.01);
    }
}
 
Example #7
Source File: LogisticRegression1ExporterTest.java    From spark-transformers with Apache License 2.0 5 votes vote down vote up
@Test
public void shouldExportAndImportCorrectly() {
    // Train on the small LIBSVM fixture bundled with the test resources.
    String datapath = "src/test/resources/binary_classification_test.libsvm";
    Dataset<Row> trainingData = spark.read().format("libsvm").load(datapath);

    LogisticRegressionModel lrmodel = new LogisticRegression().fit(trainingData);

    // Export the model and import it back as a model-info object.
    byte[] exportedModel = ModelExporter.export(lrmodel);
    LogisticRegressionModelInfo importedModel =
            (LogisticRegressionModelInfo) ModelImporter.importModelInfo(exportedModel);

    // Field-by-field comparison of the round-tripped model.
    // NOTE(review): edge cases such as element ordering in collections are
    // covered only for the first element of each key set below.
    assertEquals(lrmodel.intercept(), importedModel.getIntercept(), 0.01);
    assertEquals(lrmodel.numClasses(), importedModel.getNumClasses(), 0.01);
    assertEquals(lrmodel.numFeatures(), importedModel.getNumFeatures(), 0.01);
    assertEquals(lrmodel.getThreshold(), importedModel.getThreshold(), 0.01);
    for (int i = 0; i < importedModel.getNumFeatures(); i++) {
        assertEquals(lrmodel.coefficients().toArray()[i], importedModel.getWeights()[i], 0.01);
    }

    assertEquals(lrmodel.getFeaturesCol(), importedModel.getInputKeys().iterator().next());
    assertEquals(lrmodel.getPredictionCol(), importedModel.getOutputKeys().iterator().next());
}
 
Example #8
Source File: LogisticRegression1BridgeTest.java    From spark-transformers with Apache License 2.0 5 votes vote down vote up
@Test
public void testLogisticRegression() {
    // Train on the small LIBSVM fixture bundled with the test resources.
    // This variant uses the pre-2.0 DataFrame/SQLContext API.
    String datapath = "src/test/resources/binary_classification_test.libsvm";
    DataFrame trainingData = sqlContext.read().format("libsvm").load(datapath);

    LogisticRegressionModel lrmodel = new LogisticRegression().fit(trainingData);

    // Round-trip the model through the exporter and importer.
    byte[] exportedModel = ModelExporter.export(lrmodel, trainingData);
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    // The imported transformer must reproduce Spark's prediction for every
    // point in the fixture.
    List<LabeledPoint> testPoints = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD().collect();
    for (LabeledPoint point : testPoints) {
        Vector features = point.features();
        double expected = lrmodel.predict(features);

        Map<String, Object> row = new HashMap<String, Object>();
        row.put("features", features.toArray());
        transformer.transform(row);
        double predicted = (double) row.get("prediction");

        assertEquals(expected, predicted, EPSILON);
    }
}
 
Example #9
Source File: LogisticRegression1ExporterTest.java    From spark-transformers with Apache License 2.0 5 votes vote down vote up
@Test
public void shouldExportAndImportCorrectly() {
    // Train on the small LIBSVM fixture bundled with the test resources.
    // This variant uses the pre-2.0 DataFrame/SQLContext API.
    String datapath = "src/test/resources/binary_classification_test.libsvm";
    DataFrame trainingData = sqlContext.read().format("libsvm").load(datapath);

    LogisticRegressionModel lrmodel = new LogisticRegression().fit(trainingData);

    // Export the model and import it back as a model-info object.
    byte[] exportedModel = ModelExporter.export(lrmodel, trainingData);
    LogisticRegressionModelInfo importedModel =
            (LogisticRegressionModelInfo) ModelImporter.importModelInfo(exportedModel);

    // Field-by-field comparison of the round-tripped model.
    // NOTE(review): edge cases such as element ordering in collections are
    // covered only for the first element of each key set below.
    assertEquals(lrmodel.intercept(), importedModel.getIntercept(), EPSILON);
    assertEquals(lrmodel.numClasses(), importedModel.getNumClasses(), EPSILON);
    assertEquals(lrmodel.numFeatures(), importedModel.getNumFeatures(), EPSILON);
    assertEquals(lrmodel.getThreshold(), importedModel.getThreshold(), EPSILON);
    for (int i = 0; i < importedModel.getNumFeatures(); i++) {
        assertEquals(lrmodel.weights().toArray()[i], importedModel.getWeights()[i], EPSILON);
    }

    assertEquals(lrmodel.getFeaturesCol(), importedModel.getInputKeys().iterator().next());
    assertEquals(lrmodel.getPredictionCol(), importedModel.getOutputKeys().iterator().next());
}
 
Example #10
Source File: JavaEstimatorTransformerParamExample.java    From Apache-Spark-2x-for-Java-Developers with MIT License 4 votes vote down vote up
public static void main(String[] args) {
  // Local-mode session. NOTE(review): the warehouse dir is hard-coded for the
  // original author's Windows machine — not portable; confirm before reuse.
  SparkSession spark = SparkSession
    .builder().master("local").config("spark.sql.warehouse.dir", "file:///C:/Users/sumit.kumar/Downloads/bin/warehouse")
    .appName("JavaEstimatorTransformerParamExample")
    .getOrCreate();
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.WARN);

  // $example on$
  // Hand-built training set: each row is (label, feature vector).
  List<Row> dataTraining = Arrays.asList(
      RowFactory.create(1.0, Vectors.dense(0.0, 1.1, 0.1)),
      RowFactory.create(0.0, Vectors.dense(2.0, 1.0, -1.0)),
      RowFactory.create(0.0, Vectors.dense(2.0, 1.3, 1.0)),
      RowFactory.create(1.0, Vectors.dense(0.0, 1.2, -0.5)));
  StructType schema = new StructType(new StructField[]{
      new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
      new StructField("features", new VectorUDT(), false, Metadata.empty())});
  Dataset<Row> training = spark.createDataFrame(dataTraining, schema);

  // A LogisticRegression instance is an Estimator; dump its parameters,
  // docs, and defaults before configuring it.
  LogisticRegression lr = new LogisticRegression();
  System.out.println("LogisticRegression parameters:\n" + lr.explainParams() + "\n");

  // Parameters can be set through setter methods...
  lr.setMaxIter(10).setRegParam(0.01);

  // fit() produces a Model (a Transformer) that remembers the parameters it
  // was trained with; names printed below are unique to this lr instance.
  LogisticRegressionModel model1 = lr.fit(training);
  System.out.println("Model 1 was fit using parameters: " + model1.parent().extractParamMap());

  // ...or through a ParamMap. The later maxIter put() overwrites the earlier
  // one, and a single put() can carry several params at once.
  ParamMap paramMap = new ParamMap()
    .put(lr.maxIter().w(20))
    .put(lr.maxIter(), 30)
    .put(lr.regParam().w(0.1), lr.threshold().w(0.55));

  // ParamMaps combine; here the second map renames the probability column.
  ParamMap paramMap2 = new ParamMap()
    .put(lr.probabilityCol().w("myProbability"));
  ParamMap paramMapCombined = paramMap.$plus$plus(paramMap2);

  // A map passed to fit() overrides everything set via lr.set* earlier.
  LogisticRegressionModel model2 = lr.fit(training, paramMapCombined);
  System.out.println("Model 2 was fit using parameters: " + model2.parent().extractParamMap());

  // Hand-built test documents with the same schema as the training set.
  List<Row> dataTest = Arrays.asList(
      RowFactory.create(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
      RowFactory.create(0.0, Vectors.dense(3.0, 2.0, -0.1)),
      RowFactory.create(1.0, Vectors.dense(0.0, 2.2, -1.5)));
  Dataset<Row> test = spark.createDataFrame(dataTest, schema);

  // transform() reads only the 'features' column. Because probabilityCol was
  // renamed above, model2 writes 'myProbability' instead of 'probability'.
  Dataset<Row> results = model2.transform(test);
  Dataset<Row> rows = results.select("features", "label", "myProbability", "prediction");
  for (Row r : rows.collectAsList()) {
    System.out.println("(" + r.get(0) + ", " + r.get(1) + ") -> prob=" + r.get(2)
      + ", prediction=" + r.get(3));
  }
  // $example off$

  spark.stop();
}
 
Example #11
Source File: JavaEstimatorTransformerParamExample.java    From SparkDemo with MIT License 4 votes vote down vote up
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaEstimatorTransformerParamExample")
    .getOrCreate();

  // $example on$
  // Small in-memory training set: each row is (label, feature vector).
  List<Row> dataTraining = Arrays.asList(
      RowFactory.create(1.0, Vectors.dense(0.0, 1.1, 0.1)),
      RowFactory.create(0.0, Vectors.dense(2.0, 1.0, -1.0)),
      RowFactory.create(0.0, Vectors.dense(2.0, 1.3, 1.0)),
      RowFactory.create(1.0, Vectors.dense(0.0, 1.2, -0.5)));
  StructType schema = new StructType(new StructField[]{
      new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
      new StructField("features", new VectorUDT(), false, Metadata.empty())});
  Dataset<Row> training = spark.createDataFrame(dataTraining, schema);

  // LogisticRegression is an Estimator; print its parameters, their
  // documentation, and default values before configuring anything.
  LogisticRegression lr = new LogisticRegression();
  System.out.println("LogisticRegression parameters:\n" + lr.explainParams() + "\n");

  // Configure via setter methods...
  lr.setMaxIter(10).setRegParam(0.01);

  // fit() yields a Model (a Transformer) that records the parameters used
  // during training; the printed names are unique IDs of this lr instance.
  LogisticRegressionModel model1 = lr.fit(training);
  System.out.println("Model 1 was fit using parameters: " + model1.parent().extractParamMap());

  // ...or via a ParamMap. The second maxIter put() overwrites the first,
  // and one put() call may specify several params.
  ParamMap paramMap = new ParamMap()
    .put(lr.maxIter().w(20))
    .put(lr.maxIter(), 30)
    .put(lr.regParam().w(0.1), lr.threshold().w(0.55));

  // ParamMaps can be combined; this one renames the probability column.
  ParamMap paramMap2 = new ParamMap()
    .put(lr.probabilityCol().w("myProbability"));
  ParamMap paramMapCombined = paramMap.$plus$plus(paramMap2);

  // A map given to fit() overrides everything set earlier via lr.set*.
  LogisticRegressionModel model2 = lr.fit(training, paramMapCombined);
  System.out.println("Model 2 was fit using parameters: " + model2.parent().extractParamMap());

  // Test documents sharing the training schema.
  List<Row> dataTest = Arrays.asList(
      RowFactory.create(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
      RowFactory.create(0.0, Vectors.dense(3.0, 2.0, -0.1)),
      RowFactory.create(1.0, Vectors.dense(0.0, 2.2, -1.5)));
  Dataset<Row> test = spark.createDataFrame(dataTest, schema);

  // transform() reads only 'features'. Since probabilityCol was renamed,
  // model2 emits a 'myProbability' column instead of 'probability'.
  Dataset<Row> results = model2.transform(test);
  Dataset<Row> rows = results.select("features", "label", "myProbability", "prediction");
  for (Row r : rows.collectAsList()) {
    System.out.println("(" + r.get(0) + ", " + r.get(1) + ") -> prob=" + r.get(2)
      + ", prediction=" + r.get(3));
  }
  // $example off$

  spark.stop();
}
 
Example #12
Source File: JavaLogisticRegressionSummaryExample.java    From SparkDemo with MIT License 4 votes vote down vote up
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaLogisticRegressionSummaryExample")
    .getOrCreate();

  // Read the binary LIBSVM training set and fit an elastic-net regularized
  // logistic regression model.
  Dataset<Row> training = spark.read().format("libsvm")
    .load("data/mllib/sample_libsvm_data.txt");
  LogisticRegression lr = new LogisticRegression()
    .setMaxIter(10)
    .setRegParam(0.3)
    .setElasticNetParam(0.8);
  LogisticRegressionModel lrModel = lr.fit(training);

  // $example on$
  // The fitted model carries a training summary with per-iteration metrics.
  LogisticRegressionTrainingSummary trainingSummary = lrModel.summary();

  // objectiveHistory() holds the loss value at each training iteration.
  for (double lossPerIteration : trainingSummary.objectiveHistory()) {
    System.out.println(lossPerIteration);
  }

  // The problem is binary, so the summary can be downcast to get
  // binary-classification metrics such as the ROC curve.
  BinaryLogisticRegressionSummary binarySummary =
    (BinaryLogisticRegressionSummary) trainingSummary;

  // ROC as a DataFrame, plus the scalar area under it.
  Dataset<Row> roc = binarySummary.roc();
  roc.show();
  roc.select("FPR").show();
  System.out.println(binarySummary.areaUnderROC());

  // Pick the decision threshold that maximizes F-measure and install it on
  // the model for subsequent predictions.
  Dataset<Row> fMeasure = binarySummary.fMeasureByThreshold();
  double maxFMeasure = fMeasure.select(functions.max("F-Measure")).head().getDouble(0);
  double bestThreshold = fMeasure.where(fMeasure.col("F-Measure").equalTo(maxFMeasure))
    .select("threshold").head().getDouble(0);
  lrModel.setThreshold(bestThreshold);
  // $example off$

  spark.stop();
}
 
Example #13
Source File: LogisticRegressionModelConverter.java    From jpmml-sparkml with GNU Affero General Public License v3.0 4 votes vote down vote up
/**
 * Creates a converter for a fitted Spark {@code LogisticRegressionModel};
 * all conversion state is handled by the superclass.
 *
 * @param model the trained Spark ML logistic regression model to convert
 */
public LogisticRegressionModelConverter(LogisticRegressionModel model){
	super(model);
}
 
Example #14
Source File: WhitespaceClassifier.java    From vn.vitk with GNU General Public License v3.0 4 votes vote down vote up
/**
 * Trains a whitespace classifier model and save the resulting pipeline model
 * to an external file. 
 * @param sentences a list of tokenized sentences.
 * @param pipelineModelFileName path where the fitted pipeline model is persisted
 * @param numFeatures number of hashing buckets for the term-frequency features
 */
public void train(List<String> sentences, String pipelineModelFileName, int numFeatures) {
	// Build one labeled example per ' ' or '_' character: label 0 for a real
	// space, 1 for an underscore (word-internal separator in the corpus).
	List<WhitespaceContext> contexts = new ArrayList<WhitespaceContext>(sentences.size());
	int id = 0;
	for (String sentence : sentences) {
		sentence = sentence.trim();
		for (int j = 0; j < sentence.length(); j++) {
			char c = sentence.charAt(j);
			if (c == ' ' || c == '_') {
				WhitespaceContext context = new WhitespaceContext();
				context.setId(id++);
				// Feature text is the character window around position j.
				context.setContext(extractContext(sentence, j));
				context.setLabel(c == ' ' ? 0d : 1d);
				contexts.add(context);
			}
		}
	}
	// Materialize the examples as a DataFrame and show basic statistics.
	JavaRDD<WhitespaceContext> jrdd = jsc.parallelize(contexts);
	DataFrame df = sqlContext.createDataFrame(jrdd, WhitespaceContext.class);
	df.show(false);
	System.out.println("N = " + df.count());
	df.groupBy("label").count().show();
	
	// Pipeline: tokenize the context string, hash tokens into a fixed-size
	// term-frequency vector, then fit logistic regression on it.
	org.apache.spark.ml.feature.Tokenizer tokenizer = new Tokenizer()
			.setInputCol("context").setOutputCol("words");
	HashingTF hashingTF = new HashingTF().setNumFeatures(numFeatures)
			.setInputCol(tokenizer.getOutputCol()).setOutputCol("features");
	LogisticRegression lr = new LogisticRegression().setMaxIter(100)
			.setRegParam(0.01);
	Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] {
			tokenizer, hashingTF, lr });
	// Fit and keep the model on this instance for later use.
	model = pipeline.fit(df);
	
	// Best-effort persistence: a failed save is reported but does not abort
	// training, since the in-memory model remains usable.
	try {
		model.write().overwrite().save(pipelineModelFileName);
	} catch (IOException e) {
		e.printStackTrace();
	}
	
	// Evaluate on the training data itself and report accuracy.
	DataFrame predictions = model.transform(df);
	predictions.show();
	MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator().setMetricName("precision");
	double accuracy = evaluator.evaluate(predictions);
	System.out.println("training accuracy = " + accuracy);
	
	// Print the optimizer's loss history from the fitted LR stage (index 2).
	LogisticRegressionModel lrModel = (LogisticRegressionModel) model.stages()[2];
	LogisticRegressionTrainingSummary trainingSummary = lrModel.summary();
	double[] objectiveHistory = trainingSummary.objectiveHistory();
	System.out.println("#(iterations) = " + objectiveHistory.length);
	for (double lossPerIteration : objectiveHistory) {
	  System.out.println(lossPerIteration);
	}
	
}
 
Example #15
Source File: LogisticRegressionModelInfoAdapter1.java    From spark-transformers with Apache License 2.0 4 votes vote down vote up
@Override
public Class<LogisticRegressionModel> getSource() {
    // This adapter handles Spark ML logistic regression models.
    final Class<LogisticRegressionModel> sourceType = LogisticRegressionModel.class;
    return sourceType;
}
 
Example #16
Source File: LogisticRegressionModelInfoAdapter1.java    From spark-transformers with Apache License 2.0 4 votes vote down vote up
@Override
public Class<LogisticRegressionModel> getSource() {
    // Identifies the Spark model type this adapter converts.
    return org.apache.spark.ml.classification.LogisticRegressionModel.class;
}