Java Code Examples for org.apache.spark.sql.types.StructField

The following examples show how to use org.apache.spark.sql.types.StructField. They are extracted from open source projects; the project, source file, and license are noted above each example.
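Most of the examples below construct a schema in one of two equivalent styles: the DataTypes factory methods or the StructField constructor. A minimal sketch of both (column names here are illustrative):

import java.util.Arrays;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

// Factory style: name, type, and nullability.
StructField id = DataTypes.createStructField("id", DataTypes.LongType, false);
StructField name = DataTypes.createStructField("name", DataTypes.StringType, true);
StructType factorySchema = DataTypes.createStructType(Arrays.asList(id, name));

// Constructor style: same result, but Metadata is passed explicitly.
StructType ctorSchema = new StructType(new StructField[] {
    new StructField("id", DataTypes.LongType, false, Metadata.empty()),
    new StructField("name", DataTypes.StringType, true, Metadata.empty())
});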
Example 1
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testDataFrameSumDMLVectorWithIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum DML, vector with ID column, no format specified");

	List<Tuple2<Double, Vector>> list = new ArrayList<>();
	list.add(new Tuple2<>(1.0, Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<>(2.0, Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<>(3.0, Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
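The DoubleVectorRow mapper used above is not shown on this page. A minimal equivalent, assuming it simply unpacks the (id, vector) tuple into a two-column Row matching the declared schema, might look like this:

import org.apache.spark.api.java.function.Function;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import scala.Tuple2;

// Hypothetical reconstruction of the mapper referenced in the test above.
public class DoubleVectorRow implements Function<Tuple2<Double, Vector>, Row> {
	private static final long serialVersionUID = 1L;

	@Override
	public Row call(Tuple2<Double, Vector> tuple) {
		return RowFactory.create(tuple._1(), tuple._2());
	}
}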
 
Example 2
Source Project: ambiverse-nlu   Source File: UnaryTransformer.java    License: Apache License 2.0
@Override
public StructType transformSchema(StructType structType) {
    String inputCol = getInputCol();
    String outputCol = getOutputCol();
    DataType inputType = structType.apply(inputCol).dataType();
    this.validateInputType(inputType);
    List<String> names = Arrays.asList(structType.fieldNames());
    Cond.require(!names.contains(outputCol), "The output column " + outputCol + " already exists in this schema!");
    List<StructField> fields = new ArrayList<>();
    for (int i = 0; i < structType.fields().length; i++) {
        fields.add(structType.fields()[i]);
    }
    DataType dt = getOutputDataType();
    fields.add(DataTypes.createStructField(outputCol, dt, isOutputDataTypeNullable()));
    return DataTypes.createStructType(fields);
}
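Copying every existing field into a fresh list before appending, as above, keeps the input schema untouched. Spark's StructType also offers add(...) for the same immutable append; a one-line sketch (not this project's code):

// Equivalent append: returns a new StructType, leaving structType unchanged.
StructType withOutput = structType.add(outputCol, getOutputDataType(), isOutputDataTypeNullable());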
 
Example 3
Source Project: envelope   Source File: TestRowUtils.java    License: Apache License 2.0
@Test
public void testRemoveOneField() {
  StructField field1 = DataTypes.createStructField("field1", DataTypes.StringType, true);
  StructField field2 = DataTypes.createStructField("field2", DataTypes.IntegerType, true);
  StructField field3 = DataTypes.createStructField("field3", DataTypes.FloatType, true);
  StructType removeSchema = DataTypes.createStructType(Lists.newArrayList(field1, field2, field3));
  Row remove = new RowWithSchema(removeSchema, "hello", 1, 1.0);

  Row removed = RowUtils.remove(remove, "field2");

  Row expected = new RowWithSchema(
      DataTypes.createStructType(Lists.newArrayList(field1, field3)),
      "hello", 1.0);

  assertEquals(expected, removed);
}
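RowUtils.remove is an Envelope helper. With plain Spark types, dropping a field from a schema can be sketched by filtering the field array (a hypothetical helper, not Envelope's implementation):

import java.util.Arrays;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

// Builds a copy of the schema without the named field.
public static StructType removeField(StructType schema, String name) {
  StructField[] kept = Arrays.stream(schema.fields())
      .filter(f -> !f.name().equals(name))
      .toArray(StructField[]::new);
  return new StructType(kept);
}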
 
Example 4
Source Project: systemds   Source File: MLContextUtil.java    License: Apache License 2.0
/**
 * Examine the DataFrame schema to determine whether the data appears to be
 * a matrix.
 *
 * @param df
 *            the DataFrame
 * @return {@code true} if the DataFrame appears to be a matrix,
 *         {@code false} otherwise
 */
public static boolean doesDataFrameLookLikeMatrix(Dataset<Row> df) {
	StructType schema = df.schema();
	StructField[] fields = schema.fields();
	if (fields == null) {
		return true;
	}
	for (StructField field : fields) {
		DataType dataType = field.dataType();
		if ((dataType != DataTypes.DoubleType) && (dataType != DataTypes.IntegerType)
				&& (dataType != DataTypes.LongType) && (!(dataType instanceof org.apache.spark.ml.linalg.VectorUDT))
				&& (!(dataType instanceof org.apache.spark.mllib.linalg.VectorUDT))) {
			// uncomment if we support arrays of doubles for matrices
			// if (dataType instanceof ArrayType) {
			// ArrayType arrayType = (ArrayType) dataType;
			// if (arrayType.elementType() == DataTypes.DoubleType) {
			// continue;
			// }
			// }
			return false;
		}
	}
	return true;
}
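A short usage sketch, assuming a live SparkSession named spark and MLContextUtil on the classpath: numeric and vector columns pass the check, any other column type fails it.

List<Row> rows = Arrays.asList(RowFactory.create(1.0, 2.0));
StructType numeric = DataTypes.createStructType(Arrays.asList(
		DataTypes.createStructField("C1", DataTypes.DoubleType, true),
		DataTypes.createStructField("C2", DataTypes.DoubleType, true)));
Dataset<Row> df = spark.createDataFrame(rows, numeric);
boolean looksLikeMatrix = MLContextUtil.doesDataFrameLookLikeMatrix(df); // true
// Adding a StringType column to the schema would make the same call return false.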
 
Example 5
Source Project: indexr   Source File: IndexRUtil.java    License: Apache License 2.0
public static SegmentSchema sparkSchemaToIndexRSchema(List<StructField> sparkSchema, IsIndexed isIndexed) {
    List<ColumnSchema> columns = new ArrayList<>();
    for (StructField f : sparkSchema) {
        SQLType type;
        if (f.dataType() instanceof IntegerType) {
            type = SQLType.INT;
        } else if (f.dataType() instanceof LongType) {
            type = SQLType.BIGINT;
        } else if (f.dataType() instanceof FloatType) {
            type = SQLType.FLOAT;
        } else if (f.dataType() instanceof DoubleType) {
            type = SQLType.DOUBLE;
        } else if (f.dataType() instanceof StringType) {
            type = SQLType.VARCHAR;
        } else if (f.dataType() instanceof DateType) {
            type = SQLType.DATE;
        } else if (f.dataType() instanceof TimestampType) {
            type = SQLType.DATETIME;
        } else {
            throw new IllegalStateException("Unsupported type: " + f.dataType());
        }
        columns.add(new ColumnSchema(f.name(), type, isIndexed.apply(f.name())));
    }
    return new SegmentSchema(columns);
}
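A usage sketch, assuming IsIndexed is a functional interface over the column name (its definition is not shown on this page):

List<StructField> sparkSchema = Arrays.asList(
        DataTypes.createStructField("user_id", DataTypes.LongType, false),
        DataTypes.createStructField("event_time", DataTypes.TimestampType, false));

// Hypothetical call: index only the user_id column.
SegmentSchema indexrSchema =
        IndexRUtil.sparkSchemaToIndexRSchema(sparkSchema, name -> name.equals("user_id"));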
 
Example 6
Source Project: systemds   Source File: RDDConverterUtilsExtTest.java    License: Apache License 2.0
@Test
public void testStringDataFrameToVectorDataFrameNull() {
	List<String> list = new ArrayList<>();
	list.add("[1.2, 3.4]");
	list.add(null);
	JavaRDD<String> javaRddString = sc.parallelize(list);
	JavaRDD<Row> javaRddRow = javaRddString.map(new StringToRow());
	SparkSession sparkSession = SparkSession.builder().sparkContext(sc.sc()).getOrCreate();
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField("C1", DataTypes.StringType, true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> inDF = sparkSession.createDataFrame(javaRddRow, schema);
	Dataset<Row> outDF = RDDConverterUtilsExt.stringDataFrameToVectorDataFrame(sparkSession, inDF);

	List<String> expectedResults = new ArrayList<>();
	expectedResults.add("[[1.2,3.4]]");
	expectedResults.add("[null]");

	List<Row> outputList = outDF.collectAsList();
	for (Row row : outputList) {
		assertTrue("Expected results don't contain: " + row, expectedResults.contains(row.toString()));
	}
}
 
Example 7
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testDataFrameSumDMLVectorWithNoIDColumn() {
	System.out.println("MLContextTest - DataFrame sum DML, vector with no ID column");

	List<Vector> list = new ArrayList<>();
	list.add(Vectors.dense(1.0, 2.0, 3.0));
	list.add(Vectors.dense(4.0, 5.0, 6.0));
	list.add(Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Example 8
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testInputMatrixBlockDML() {
	System.out.println("MLContextTest - input MatrixBlock DML");

	List<String> list = new ArrayList<>();
	list.add("10,20,30");
	list.add("40,50,60");
	list.add("70,80,90");
	JavaRDD<String> javaRddString = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField("C1", DataTypes.StringType, true));
	fields.add(DataTypes.createStructField("C2", DataTypes.StringType, true));
	fields.add(DataTypes.createStructField("C3", DataTypes.StringType, true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Matrix m = new Matrix(dataFrame);
	MatrixBlock matrixBlock = m.toMatrixBlock();
	Script script = dml("avg = avg(M);").in("M", matrixBlock).out("avg");
	double avg = ml.execute(script).getDouble("avg");
	Assert.assertEquals(50.0, avg, 0.0);
}
 
Example 9
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testDataFrameSumDMLDoublesWithNoIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum DML, doubles with no ID column, no format specified");

	List<String> list = new ArrayList<>();
	list.add("2,2,2");
	list.add("3,3,3");
	list.add("4,4,4");
	JavaRDD<String> javaRddString = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame);
	setExpectedStdOut("sum: 27.0");
	ml.execute(script);
}
 
Example 10
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testDataFrameSumDMLDoublesWithIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum DML, doubles with ID column, no format specified");

	List<String> list = new ArrayList<>();
	list.add("1,2,2,2");
	list.add("2,3,3,3");
	list.add("3,4,4,4");
	JavaRDD<String> javaRddString = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame);
	setExpectedStdOut("sum: 27.0");
	ml.execute(script);
}
 
Example 11
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testDataFrameSumPYDMLVectorWithIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum PYDML, vector with ID column, no format specified");

	List<Tuple2<Double, Vector>> list = new ArrayList<>();
	list.add(new Tuple2<>(1.0, Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<>(2.0, Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<>(3.0, Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("print('sum: ' + sum(M))").in("M", dataFrame);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Example 12
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@Test
public void testGetTuple1DML() {
	System.out.println("MLContextTest - Get Tuple1<Matrix> DML");
	JavaRDD<String> javaRddString = sc
			.parallelize(Stream.of("1,2,3", "4,5,6", "7,8,9").collect(Collectors.toList()));
	JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> df = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("N=M*2").in("M", df).out("N");
	Tuple1<Matrix> tuple = ml.execute(script).getTuple("N");
	double[][] n = tuple._1().to2DDoubleArray();
	Assert.assertEquals(2.0, n[0][0], 0);
	Assert.assertEquals(4.0, n[0][1], 0);
	Assert.assertEquals(6.0, n[0][2], 0);
	Assert.assertEquals(8.0, n[1][0], 0);
	Assert.assertEquals(10.0, n[1][1], 0);
	Assert.assertEquals(12.0, n[1][2], 0);
	Assert.assertEquals(14.0, n[2][0], 0);
	Assert.assertEquals(16.0, n[2][1], 0);
	Assert.assertEquals(18.0, n[2][2], 0);
}
 
Example 13
Source Project: envelope   Source File: TestRangeRowRule.java    License: Apache License 2.0
@Test
public void testDontIgnoreNulls() {
  StructType schema = new StructType(new StructField[] {
      new StructField("name", DataTypes.StringType, false, Metadata.empty()),
      new StructField("nickname", DataTypes.StringType, false, Metadata.empty()),
      new StructField("age", DataTypes.IntegerType, false, Metadata.empty()),
      new StructField("candycrushscore", DataTypes.createDecimalType(), false, Metadata.empty())
  });

  Map<String, Object> configMap = new HashMap<>();
  configMap.put(RangeRowRule.FIELDS_CONFIG, Lists.newArrayList("age"));
  configMap.put(RangeRowRule.FIELD_TYPE_CONFIG, "int");
  configMap.put(RangeRowRule.RANGE_CONFIG, Lists.newArrayList(0,105));
  Config config = ConfigFactory.parseMap(configMap);

  RangeRowRule rule = new RangeRowRule();
  assertNoValidationFailures(rule, config);
  rule.configure(config);
  rule.configureName("agerange");

  Row row1 = new RowWithSchema(schema, "Ian", "Ian", null, new BigDecimal("0.00"));
  assertFalse("Row should not pass rule", rule.check(row1));
}
 
Example 14
Source Project: geowave   Source File: SimpleFeatureMapper.java    License: Apache License 2.0
@Override
public Row call(final SimpleFeature feature) throws Exception {
  final Object[] fields = new Serializable[schema.size()];

  for (int i = 0; i < schema.size(); i++) {
    final Object fieldObj = feature.getAttribute(i);
    if (fieldObj != null) {
      final StructField structField = schema.apply(i);
      if (structField.name().equals("geom")) {
        fields[i] = fieldObj;
      } else if (structField.dataType() == DataTypes.TimestampType) {
        fields[i] = new Timestamp(((Date) fieldObj).getTime());
      } else if (structField.dataType() != null) {
        fields[i] = fieldObj;
      } else {
        LOGGER.error("Unexpected attribute in field(" + structField.name() + "): " + fieldObj);
      }
    }
  }

  return new GenericRowWithSchema(fields, schema);
}
 
Example 15
Source Project: envelope   Source File: TestDecisionStep.java    License: Apache License 2.0
@Test
public void testPruneByStepValueFalse() {
  StructType schema = new StructType(new StructField[] {
      new StructField("outcome", DataTypes.BooleanType, false, Metadata.empty())
  });
  List<Row> rows = Lists.newArrayList(
      RowFactory.create(false)
  );
  Dataset<Row> ds = Contexts.getSparkSession().createDataFrame(rows, schema);
  step1.setData(ds);

  Map<String, Object> step2ConfigMap = Maps.newHashMap();
  step2ConfigMap.put(Step.DEPENDENCIES_CONFIG, Lists.newArrayList("step1"));
  step2ConfigMap.put(DecisionStep.IF_TRUE_STEP_NAMES_PROPERTY, Lists.newArrayList("step3", "step7"));
  step2ConfigMap.put(DecisionStep.DECISION_METHOD_PROPERTY, DecisionStep.STEP_BY_VALUE_DECISION_METHOD);
  step2ConfigMap.put(DecisionStep.STEP_BY_VALUE_STEP_PROPERTY, "step1");
  Config step2Config = ConfigFactory.parseMap(step2ConfigMap);
  RefactorStep step2 = new DecisionStep("step2");
  step2.configure(step2Config);
  steps.add(step2);

  Set<Step> refactored = step2.refactor(steps);

  assertEquals(refactored, Sets.newHashSet(step1, step2, step5, step6));
}
 
Example 16
Source Project: geowave   Source File: SchemaConverter.java    License: Apache License 2.0
public static SimpleFeatureType schemaToFeatureType(
    final StructType schema,
    final String typeName) {
  final SimpleFeatureTypeBuilder typeBuilder = new SimpleFeatureTypeBuilder();
  typeBuilder.setName(typeName);
  typeBuilder.setNamespaceURI(BasicFeatureTypes.DEFAULT_NAMESPACE);
  try {
    typeBuilder.setCRS(CRS.decode("EPSG:4326", true));
  } catch (final FactoryException e) {
    LOGGER.error(e.getMessage(), e);
  }

  final AttributeTypeBuilder attrBuilder = new AttributeTypeBuilder();

  for (final StructField field : schema.fields()) {
    final AttributeDescriptor attrDesc = attrDescFromStructField(attrBuilder, field);

    typeBuilder.add(attrDesc);
  }

  return typeBuilder.buildFeatureType();
}
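The attrDescFromStructField helper is not reproduced on this page. A rough equivalent using GeoTools' AttributeTypeBuilder, assuming it only maps the Spark data type to a Java binding, might be:

// Hypothetical sketch: bind a Java class for the Spark type and build a
// descriptor carrying the field's name and nullability.
private static AttributeDescriptor attrDescFromStructField(
    final AttributeTypeBuilder attrBuilder,
    final StructField field) {
  Class<?> binding = String.class; // fallback binding
  if (field.dataType() == DataTypes.DoubleType) {
    binding = Double.class;
  } else if (field.dataType() == DataTypes.LongType) {
    binding = Long.class;
  } else if (field.dataType() == DataTypes.TimestampType) {
    binding = java.util.Date.class;
  }
  return attrBuilder.binding(binding).nillable(field.nullable()).buildDescriptor(field.name());
}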
 
Example 17
Source Project: hudi   Source File: TestChainedTransformer.java    License: Apache License 2.0
@Test
public void testChainedTransformation() {
  StructType schema = DataTypes.createStructType(
      new StructField[] {
          createStructField("foo", StringType, false)
      });
  Row r1 = RowFactory.create("100");
  Row r2 = RowFactory.create("200");
  Dataset<Row> original = sparkSession.sqlContext().createDataFrame(Arrays.asList(r1, r2), schema);

  Transformer t1 = (jsc, sparkSession, dataset, properties) -> dataset.withColumnRenamed("foo", "bar");
  Transformer t2 = (jsc, sparkSession, dataset, properties) -> dataset.withColumn("bar", dataset.col("bar").cast(IntegerType));
  ChainedTransformer transformer = new ChainedTransformer(Arrays.asList(t1, t2));
  Dataset<Row> transformed = transformer.apply(jsc, sparkSession, original, null);

  assertEquals(2, transformed.count());
  assertArrayEquals(new String[] {"bar"}, transformed.columns());
  List<Row> rows = transformed.collectAsList();
  assertEquals(100, rows.get(0).getInt(0));
  assertEquals(200, rows.get(1).getInt(0));
}
 
Example 18
Source Project: envelope   Source File: TestRangeRowRule.java    License: Apache License 2.0
@Test
public void testAgeRangeInt() {
  StructType schema = new StructType(new StructField[] {
      new StructField("name", DataTypes.StringType, false, Metadata.empty()),
      new StructField("nickname", DataTypes.StringType, false, Metadata.empty()),
      new StructField("age", DataTypes.IntegerType, false, Metadata.empty()),
      new StructField("candycrushscore", DataTypes.createDecimalType(), false, Metadata.empty())
  });

  Map<String, Object> configMap = new HashMap<>();
  configMap.put(RangeRowRule.FIELDS_CONFIG, Lists.newArrayList("age"));
  configMap.put(RangeRowRule.FIELD_TYPE_CONFIG, "int");
  configMap.put(RangeRowRule.RANGE_CONFIG, Lists.newArrayList(0,105));
  Config config = ConfigFactory.parseMap(configMap);

  RangeRowRule rule = new RangeRowRule();
  assertNoValidationFailures(rule, config);
  rule.configure(config);
  rule.configureName("agerange");

  Row row1 = new RowWithSchema(schema, "Ian", "Ian", 34, new BigDecimal("0.00"));
  assertTrue("Row should pass rule", rule.check(row1));

  Row row2 = new RowWithSchema(schema, "Webster1", "Websta1", 110, new BigDecimal("450.10"));
  assertFalse("Row should not pass rule", rule.check(row2));

  Row row3 = new RowWithSchema(schema, "", "Ian1", 106, new BigDecimal("450.10"));
  assertFalse("Row should not pass rule", rule.check(row3));

  Row row4 = new RowWithSchema(schema, "First Last", "Ian Last", 105, new BigDecimal("450.10"));
  assertTrue("Row should pass rule", rule.check(row4));
}
 
Example 19
Source Project: sparkResearch   Source File: CustomDataFrame.java    License: Apache License 2.0
public static void main(String[] args) {
    SparkSession sparkSession = SparkSession.builder()
            .master("local")
            .appName("spark app")
            .getOrCreate();

    // Create a plain JavaRDD
    JavaRDD<String> javaRDD = sparkSession.sparkContext().textFile("URL", 1).toJavaRDD();
    // The schema, encoded as a string
    String schema = "name age";

    // Generate the schema from the schema string
    List<StructField> structFieldList = new ArrayList<>();
    for (String fieldName : schema.split(" ")) {
        StructField structField = DataTypes.createStructField(fieldName, DataTypes.StringType, true);
        structFieldList.add(structField);
    }
    StructType structType = DataTypes.createStructType(structFieldList);

    JavaRDD<Row> rowJavaRDD = javaRDD.map(new Function<String, Row>() {
        @Override
        public Row call(String v1) {
            String[] attributes = v1.split(",");
            return RowFactory.create(attributes[0], attributes[1].trim());
        }
    });

    // Apply the schema to the RDD
    Dataset<Row> dataset = sparkSession.createDataFrame(rowJavaRDD, structType);

    // Create a temporary view
    dataset.createOrReplaceTempView("user");
    Dataset<Row> result = sparkSession.sql("select * from user");
    result.show();
}
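Since Spark 2.3, the same schema can also be derived from a DDL string, avoiding the manual loop (a small sketch; the column types here are illustrative):

// Equivalent schema built from a DDL string instead of a hand-assembled field list.
StructType structType = StructType.fromDDL("name STRING, age STRING");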
 
Example 20
/**
 * Returns a Spark type for the column. The returned StructField does not
 * represent an actual value; it only describes the Spark type that all
 * values in the column will have.
 *
 * @return    A StructField describing the type of the column.
 */
public StructField getStructField() {
    try {
        return getType().getNull().getStructField(getName());
    } catch (StandardException e) {
        return null;
    }
}
 
Example 21
Source Project: deeplearning4j   Source File: DataFrames.java    License: Apache License 2.0
/**
 * Convert the DataVec sequence schema to a StructType for Spark, for example for use in
 * {@link #toDataFrameSequence(Schema, JavaRDD)}.
 * <b>Note</b>: as per {@link #toDataFrameSequence(Schema, JavaRDD)}, the StructType has two additional columns added to it:<br>
 * - Column 0: Sequence UUID (name: {@link #SEQUENCE_UUID_COLUMN}) - a UUID for the original sequence<br>
 * - Column 1: Sequence index (name: {@link #SEQUENCE_INDEX_COLUMN}) - an index (integer, starting at 0) for the position
 * of this record in the original time series.<br>
 * These two columns are required if the data is to be converted back into a sequence at a later point, for example
 * using {@link #toRecordsSequence(Dataset)}.
 *
 * @param schema Schema to convert
 * @return StructType for the schema
 */
public static StructType fromSchemaSequence(Schema schema) {
    StructField[] structFields = new StructField[schema.numColumns() + 2];

    structFields[0] = new StructField(SEQUENCE_UUID_COLUMN, DataTypes.StringType, false, Metadata.empty());
    structFields[1] = new StructField(SEQUENCE_INDEX_COLUMN, DataTypes.IntegerType, false, Metadata.empty());

    for (int i = 0; i < schema.numColumns(); i++) {
        switch (schema.getColumnTypes().get(i)) {
            case Double:
                structFields[i + 2] =
                                new StructField(schema.getName(i), DataTypes.DoubleType, false, Metadata.empty());
                break;
            case Integer:
                structFields[i + 2] =
                                new StructField(schema.getName(i), DataTypes.IntegerType, false, Metadata.empty());
                break;
            case Long:
                structFields[i + 2] =
                                new StructField(schema.getName(i), DataTypes.LongType, false, Metadata.empty());
                break;
            case Float:
                structFields[i + 2] =
                                new StructField(schema.getName(i), DataTypes.FloatType, false, Metadata.empty());
                break;
            default:
                throw new IllegalStateException(
                                "This api should not be used with strings , binary data or ndarrays. This is only for columnar data");
        }
    }
    return new StructType(structFields);
}
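A usage sketch, assuming a DataVec Schema assembled with its standard builder (column names are illustrative):

Schema schema = new Schema.Builder()
        .addColumnDouble("sensorValue")
        .addColumnInteger("deviceId")
        .build();

// Four columns result: the sequence UUID and index columns, then sensorValue and deviceId.
StructType structType = DataFrames.fromSchemaSequence(schema);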
 
Example 22
Source Project: kylin-on-parquet-v2   Source File: CsvSourceTest.java    License: Apache License 2.0
@Test
public void testGetFlatTable() throws IOException {
    System.out.println(getTestConfig().getMetadataUrl());
    CubeManager cubeMgr = CubeManager.getInstance(getTestConfig());
    CubeInstance cube = cubeMgr.getCube(CUBE_NAME);
    cleanupSegments(CUBE_NAME);
    DataModelDesc model = cube.getModel();
    CubeSegment segment = cubeMgr.appendSegment(cube, new SegmentRange.TSRange(dateToLong("2010-01-01"), dateToLong("2013-01-01")));
    Dataset<Row> ds = initFlatTable(segment);
    ds.show(10);
    StructType schema = ds.schema();

    SegmentInfo segmentInfo = MetadataConverter.getSegmentInfo(segment.getCubeInstance(), segment.getUuid(),
            segment.getName(), segment.getStorageLocationIdentifier());
    scala.collection.immutable.Map<String, String> map = BuildUtils.getColumnIndexMap(segmentInfo);
    for (StructField field : schema.fields()) {
        Assert.assertNotNull(model.findColumn(map.apply(field.name())));
    }

    for (LayoutEntity layoutEntity : MetadataConverter.extractEntityList2JavaList(cube)) {
        Set<Integer> dims = layoutEntity.getOrderedDimensions().keySet();
        Column[] modelCols = new Column[dims.size()];
        int index = 0;
        for (int id : dims) {
            modelCols[index] = new Column(String.valueOf(id));
            index++;
        }
        ds.select(modelCols).show(10);
    }
}
 
Example 23
Source Project: envelope   Source File: LoopStep.java    License: Apache License 2.0
private List<Row> toRowList(List<? extends Object> substitutionValues) {
  String columnName = hasParameter() ? getParameter() : DEFAULT_SUFFIX_PROPERTY;
  StructType schema = DataTypes.createStructType(
    new StructField[] {
      DataTypes.createStructField(columnName, DataTypes.StringType, false)
    });
  List<Row> substitutionRows = new ArrayList<Row>();
  for(Object value : substitutionValues) {
    substitutionRows.add(new RowWithSchema(schema, new Object[] {value}));
  }
  return substitutionRows;
}
 
Example 24
Source Project: spark-data-sources   Source File: DBClientWrapper.java    License: MIT License
public static Schema sparkToDbSchema(StructType st) {
    Schema schema = new Schema();
    for (StructField sf: st.fields()) {
        if (sf.dataType() == DataTypes.StringType) {
            schema.addColumn(sf.name(), Schema.ColumnType.STRING);
        } else if (sf.dataType() == DataTypes.DoubleType) {
            schema.addColumn(sf.name(), Schema.ColumnType.DOUBLE);
        } else if (sf.dataType() == DataTypes.LongType) {
            schema.addColumn(sf.name(), Schema.ColumnType.INT64);
        } else {
            // TODO: type leakage
        }
    }
    return schema;
}
 
Example 25
Source Project: net.jgp.labs.spark   Source File: FieldUtils.java    License: Apache License 2.0
public static String explain(StructField field) {
  StringBuilder sb = new StringBuilder();

  sb.append("Name ....... ");
  sb.append(field.name());
  sb.append("\nMetadata ... ");
  sb.append(field.metadata());
  sb.append("\nType ....... ");
  sb.append(field.dataType());

  return sb.toString();
}
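A short usage example; for a string field with empty metadata the output would look roughly like the trailing comments (a sketch, not project documentation):

StructField field = DataTypes.createStructField("C1", DataTypes.StringType, true);
System.out.println(FieldUtils.explain(field));
// Name ....... C1
// Metadata ... {}
// Type ....... StringType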
 
Example 26
Source Project: systemds   Source File: MLContextConversionUtil.java    License: Apache License 2.0
/**
 * If the MatrixFormat of the DataFrame has not been explicitly specified,
 * attempt to determine the proper MatrixFormat.
 *
 * @param dataFrame
 *            the Spark {@code DataFrame}
 * @param matrixMetadata
 *            the matrix metadata, if available
 */
public static void determineMatrixFormatIfNeeded(Dataset<Row> dataFrame, MatrixMetadata matrixMetadata) {
	if (matrixMetadata == null) {
		return;
	}
	MatrixFormat matrixFormat = matrixMetadata.getMatrixFormat();
	if (matrixFormat != null) {
		return;
	}
	StructType schema = dataFrame.schema();
	boolean hasID = false;
	try {
		schema.fieldIndex(RDDConverterUtils.DF_ID_COLUMN);
		hasID = true;
	} catch (IllegalArgumentException iae) {
		// fieldIndex throws if the ID column is absent; hasID stays false
	}

	StructField[] fields = schema.fields();
	MatrixFormat mf = null;
	if (hasID) {
		if (fields[1].dataType() instanceof VectorUDT) {
			mf = MatrixFormat.DF_VECTOR_WITH_INDEX;
		} else {
			mf = MatrixFormat.DF_DOUBLES_WITH_INDEX;
		}
	} else {
		if (fields[0].dataType() instanceof VectorUDT) {
			mf = MatrixFormat.DF_VECTOR;
		} else {
			mf = MatrixFormat.DF_DOUBLES;
		}
	}

	if (mf == null) {
		throw new MLContextException("DataFrame format not recognized as an accepted SystemDS MatrixFormat");
	}
	matrixMetadata.setMatrixFormat(mf);
}
 
Example 27
Source Project: systemds   Source File: RDDConverterUtilsExtTest.java    License: Apache License 2.0
@Test
public void testStringDataFrameToVectorDataFrame() {
	List<String> list = new ArrayList<>();
	list.add("((1.2, 4.3, 3.4))");
	list.add("(1.2, 3.4, 2.2)");
	list.add("[[1.2, 34.3, 1.2, 1.25]]");
	list.add("[1.2, 3.4]");
	JavaRDD<String> javaRddString = sc.parallelize(list);
	JavaRDD<Row> javaRddRow = javaRddString.map(new StringToRow());
	SparkSession sparkSession = SparkSession.builder().sparkContext(sc.sc()).getOrCreate();
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField("C1", DataTypes.StringType, true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> inDF = sparkSession.createDataFrame(javaRddRow, schema);
	Dataset<Row> outDF = RDDConverterUtilsExt.stringDataFrameToVectorDataFrame(sparkSession, inDF);

	List<String> expectedResults = new ArrayList<>();
	expectedResults.add("[[1.2,4.3,3.4]]");
	expectedResults.add("[[1.2,3.4,2.2]]");
	expectedResults.add("[[1.2,34.3,1.2,1.25]]");
	expectedResults.add("[[1.2,3.4]]");

	List<Row> outputList = outDF.collectAsList();
	for (Row row : outputList) {
		assertTrue("Expected results don't contain: " + row, expectedResults.contains(row.toString()));
	}
}