Java Code Examples for org.apache.flink.api.common.typeinfo.BasicTypeInfo#DOUBLE_TYPE_INFO

The following examples show how to use org.apache.flink.api.common.typeinfo.BasicTypeInfo#DOUBLE_TYPE_INFO. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source Project: flink   File: OrcTableSource.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Maps a Flink type to the ORC {@link PredicateLeaf.Type} used for predicate push-down.
 *
 * <p>{@code BasicTypeInfo}/{@code SqlTimeTypeInfo} constants are singletons, so
 * identity comparison ({@code ==}) is used throughout.
 *
 * @param type the Flink type information of the column
 * @return the corresponding ORC predicate type, or {@code null} if the type is unsupported
 */
private PredicateLeaf.Type toOrcType(TypeInformation<?> type) {
	// All integral types share ORC's LONG predicate type.
	if (type == BasicTypeInfo.BYTE_TYPE_INFO
			|| type == BasicTypeInfo.SHORT_TYPE_INFO
			|| type == BasicTypeInfo.INT_TYPE_INFO
			|| type == BasicTypeInfo.LONG_TYPE_INFO) {
		return PredicateLeaf.Type.LONG;
	}
	// Both floating-point widths map to ORC's FLOAT predicate type.
	if (type == BasicTypeInfo.FLOAT_TYPE_INFO || type == BasicTypeInfo.DOUBLE_TYPE_INFO) {
		return PredicateLeaf.Type.FLOAT;
	}
	if (type == BasicTypeInfo.BOOLEAN_TYPE_INFO) {
		return PredicateLeaf.Type.BOOLEAN;
	}
	if (type == BasicTypeInfo.STRING_TYPE_INFO) {
		return PredicateLeaf.Type.STRING;
	}
	if (type == SqlTimeTypeInfo.TIMESTAMP) {
		return PredicateLeaf.Type.TIMESTAMP;
	}
	if (type == SqlTimeTypeInfo.DATE) {
		return PredicateLeaf.Type.DATE;
	}
	if (type == BasicTypeInfo.BIG_DEC_TYPE_INFO) {
		return PredicateLeaf.Type.DECIMAL;
	}
	// unsupported type
	return null;
}
 
Example 2
@SuppressWarnings("unchecked")
@Test
public void testStringDoubleEither() {

	// Test data covers both Either variants, including the empty string and
	// the extreme Double values.
	Either<String, Double>[] testData = new Either[] {
			Left("banana"),
			Left(""),
			Right(32.0),
			Right(Double.MIN_VALUE),
			Right(Double.MAX_VALUE)};

	// Fix: dropped the redundant cast on the constructor call — the `new`
	// expression already has the exact declared type. The serializer cast
	// below is still required because createSerializer() returns the
	// TypeSerializer supertype.
	EitherTypeInfo<String, Double> eitherTypeInfo = new EitherTypeInfo<>(
			BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.DOUBLE_TYPE_INFO);
	EitherSerializer<String, Double> eitherSerializer =
			(EitherSerializer<String, Double>) eitherTypeInfo.createSerializer(new ExecutionConfig());
	SerializerTestInstance<Either<String, Double>> testInstance =
			new EitherSerializerTestInstance<Either<String, Double>>(eitherSerializer, eitherTypeInfo.getTypeClass(), -1, testData);
	testInstance.testAll();
}
 
Example 3
@SuppressWarnings("unchecked")
@Test
public void testEitherWithTuple() {

	// Mix the different ways of constructing Left/Right values, plus the
	// extreme Double values on the Right side.
	Either<Tuple2<Long, Long>, Double>[] testData = new Either[] {
			Either.Left(new Tuple2<>(2L, 9L)),
			new Left<>(new Tuple2<>(Long.MIN_VALUE, Long.MAX_VALUE)),
			new Right<>(32.0),
			Right(Double.MIN_VALUE),
			Right(Double.MAX_VALUE)};

	// Fix: removed the redundant cast on the constructor call — the `new`
	// expression already has the exact declared type.
	EitherTypeInfo<Tuple2<Long, Long>, Double> eitherTypeInfo = new EitherTypeInfo<>(
			new TupleTypeInfo<Tuple2<Long, Long>>(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO),
			BasicTypeInfo.DOUBLE_TYPE_INFO);
	EitherSerializer<Tuple2<Long, Long>, Double> eitherSerializer =
			(EitherSerializer<Tuple2<Long, Long>, Double>) eitherTypeInfo.createSerializer(new ExecutionConfig());
	SerializerTestInstance<Either<Tuple2<Long, Long>, Double>> testInstance =
			new EitherSerializerTestInstance<Either<Tuple2<Long, Long>, Double>>(
					eitherSerializer, eitherTypeInfo.getTypeClass(), -1, testData);
	testInstance.testAll();
}
 
Example 4
/**
 * Maps an HCatalog field schema to the corresponding Flink TypeInformation.
 *
 * <p>Primitive HCat types map to their {@code BasicTypeInfo} counterparts and
 * BINARY maps to a primitive byte array. ARRAY, MAP and STRUCT are returned as
 * generic (black-box) List/Map types rather than typed collections.
 *
 * @param fieldSchema the HCatalog field schema to convert
 * @return the Flink type information for the field
 * @throws IllegalArgumentException if the HCat type is none of the handled cases
 */
private TypeInformation getFieldType(HCatFieldSchema fieldSchema) {

		switch(fieldSchema.getType()) {
			case INT:
				return BasicTypeInfo.INT_TYPE_INFO;
			case TINYINT:
				return BasicTypeInfo.BYTE_TYPE_INFO;
			case SMALLINT:
				return BasicTypeInfo.SHORT_TYPE_INFO;
			case BIGINT:
				return BasicTypeInfo.LONG_TYPE_INFO;
			case BOOLEAN:
				return BasicTypeInfo.BOOLEAN_TYPE_INFO;
			case FLOAT:
				return BasicTypeInfo.FLOAT_TYPE_INFO;
			case DOUBLE:
				return BasicTypeInfo.DOUBLE_TYPE_INFO;
			case STRING:
				return BasicTypeInfo.STRING_TYPE_INFO;
			case BINARY:
				return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
			case ARRAY:
				return new GenericTypeInfo(List.class);
			case MAP:
				return new GenericTypeInfo(Map.class);
			case STRUCT:
				// NOTE(review): structs appear to be represented as a List of
				// field values — confirm against the record reader.
				return new GenericTypeInfo(List.class);
			default:
				throw new IllegalArgumentException("Unknown data type \"" + fieldSchema.getType() + "\" encountered.");
		}
	}
 
Example 5
Source Project: flink   File: HCatInputFormatBase.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Translates an HCatalog field schema into the Flink type information used to
 * read the field.
 *
 * @param fieldSchema the HCatalog field schema to translate
 * @return the matching Flink type information
 * @throws IllegalArgumentException if the HCat type is not handled
 */
private TypeInformation getFieldType(HCatFieldSchema fieldSchema) {

	// Numeric types first (narrowest to widest), then the remaining
	// primitives, then the complex types.
	switch (fieldSchema.getType()) {
		case TINYINT:
			return BasicTypeInfo.BYTE_TYPE_INFO;
		case SMALLINT:
			return BasicTypeInfo.SHORT_TYPE_INFO;
		case INT:
			return BasicTypeInfo.INT_TYPE_INFO;
		case BIGINT:
			return BasicTypeInfo.LONG_TYPE_INFO;
		case FLOAT:
			return BasicTypeInfo.FLOAT_TYPE_INFO;
		case DOUBLE:
			return BasicTypeInfo.DOUBLE_TYPE_INFO;
		case BOOLEAN:
			return BasicTypeInfo.BOOLEAN_TYPE_INFO;
		case STRING:
			return BasicTypeInfo.STRING_TYPE_INFO;
		case BINARY:
			return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
		case ARRAY:
		case STRUCT:
			// both are exposed as a generic (black-box) List
			return new GenericTypeInfo(List.class);
		case MAP:
			return new GenericTypeInfo(Map.class);
		default:
			throw new IllegalArgumentException("Unknown data type \"" + fieldSchema.getType() + "\" encountered.");
	}
}
 
Example 6
/**
 * Parses two pipe-delimited records of five doubles each and verifies every
 * parsed field, then checks that the input is exhausted.
 */
@Test
public void testDoubleFields() throws Exception {
	// Note the trailing delimiter on the second record and "00.0", which
	// parses to 0.0.
	String fileContent = "11.1|22.2|33.3|44.4|55.5\n66.6|77.7|88.8|99.9|00.0|\n";

	FileInputSplit split = createTempFile(fileContent);

	// all five columns are doubles
	TypeInformation[] fieldTypes = new TypeInformation[5];
	for (int i = 0; i < fieldTypes.length; i++) {
		fieldTypes[i] = BasicTypeInfo.DOUBLE_TYPE_INFO;
	}

	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes);
	format.setFieldDelimiter("|");
	format.configure(new Configuration());
	format.open(split);

	double[][] expected = {
		{11.1, 22.2, 33.3, 44.4, 55.5},
		{66.6, 77.7, 88.8, 99.9, 0.0}};

	Row row = new Row(5);
	for (double[] expectedRow : expected) {
		row = format.nextRecord(row);
		assertNotNull(row);
		for (int i = 0; i < expectedRow.length; i++) {
			assertEquals(expectedRow[i], row.getField(i));
		}
	}

	// a third read must signal end of input
	row = format.nextRecord(row);
	assertNull(row);
	assertTrue(format.reachedEnd());
}
 
Example 7
Source Project: flink   File: HCatInputFormatBase.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Maps an HCatalog field schema to the corresponding Flink TypeInformation.
 *
 * <p>Primitive HCat types map to their {@code BasicTypeInfo} counterparts and
 * BINARY maps to a primitive byte array. ARRAY, MAP and STRUCT are returned as
 * generic (black-box) List/Map types rather than typed collections.
 *
 * @param fieldSchema the HCatalog field schema to convert
 * @return the Flink type information for the field
 * @throws IllegalArgumentException if the HCat type is none of the handled cases
 */
private TypeInformation getFieldType(HCatFieldSchema fieldSchema) {

		switch(fieldSchema.getType()) {
			case INT:
				return BasicTypeInfo.INT_TYPE_INFO;
			case TINYINT:
				return BasicTypeInfo.BYTE_TYPE_INFO;
			case SMALLINT:
				return BasicTypeInfo.SHORT_TYPE_INFO;
			case BIGINT:
				return BasicTypeInfo.LONG_TYPE_INFO;
			case BOOLEAN:
				return BasicTypeInfo.BOOLEAN_TYPE_INFO;
			case FLOAT:
				return BasicTypeInfo.FLOAT_TYPE_INFO;
			case DOUBLE:
				return BasicTypeInfo.DOUBLE_TYPE_INFO;
			case STRING:
				return BasicTypeInfo.STRING_TYPE_INFO;
			case BINARY:
				return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
			case ARRAY:
				return new GenericTypeInfo(List.class);
			case MAP:
				return new GenericTypeInfo(Map.class);
			case STRUCT:
				// NOTE(review): structs appear to be represented as a List of
				// field values — confirm against the record reader.
				return new GenericTypeInfo(List.class);
			default:
				throw new IllegalArgumentException("Unknown data type \"" + fieldSchema.getType() + "\" encountered.");
		}
	}
 
Example 8
Source Project: flink   File: RowCsvInputFormatTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Parses two pipe-delimited records of five doubles each, checks every parsed
 * field value, and verifies the input is exhausted afterwards.
 */
@Test
public void testDoubleFields() throws Exception {
	// Note the trailing delimiter on the second record and "00.0" -> 0.0.
	String fileContent = "11.1|22.2|33.3|44.4|55.5\n66.6|77.7|88.8|99.9|00.0|\n";

	FileInputSplit split = createTempFile(fileContent);

	TypeInformation[] fieldTypes = new TypeInformation[]{
		BasicTypeInfo.DOUBLE_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO};

	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes);
	format.setFieldDelimiter("|");
	format.configure(new Configuration());
	format.open(split);

	// the same Row instance is reused for every record
	Row result = new Row(5);

	result = format.nextRecord(result);
	assertNotNull(result);
	assertEquals(11.1, result.getField(0));
	assertEquals(22.2, result.getField(1));
	assertEquals(33.3, result.getField(2));
	assertEquals(44.4, result.getField(3));
	assertEquals(55.5, result.getField(4));

	result = format.nextRecord(result);
	assertNotNull(result);
	assertEquals(66.6, result.getField(0));
	assertEquals(77.7, result.getField(1));
	assertEquals(88.8, result.getField(2));
	assertEquals(99.9, result.getField(3));
	assertEquals(0.0, result.getField(4));

	// a third read must signal end of input
	result = format.nextRecord(result);
	assertNull(result);
	assertTrue(format.reachedEnd());
}
 
Example 9
Source Project: flink   File: RowCsvInputFormatTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that lines starting with the single-character comment prefix '#'
 * are skipped, while the data lines in between parse normally.
 */
@Test
public void ignoreSingleCharPrefixComments() throws Exception {
	// two leading comment lines, two data lines, one trailing comment line
	String fileContent =
		"#description of the data\n" +
			"#successive commented line\n" +
			"this is|1|2.0|\n" +
			"a test|3|4.0|\n" +
			"#next|5|6.0|\n";

	FileInputSplit split = createTempFile(fileContent);

	TypeInformation[] fieldTypes = new TypeInformation[]{
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.INT_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO};

	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes, "\n", "|");
	format.setCommentPrefix("#");
	format.configure(new Configuration());
	format.open(split);

	Object[][] expected = {
		{"this is", 1, 2.0},
		{"a test", 3, 4.0}};

	Row row = new Row(3);
	for (Object[] expectedFields : expected) {
		row = format.nextRecord(row);
		assertNotNull(row);
		for (int i = 0; i < expectedFields.length; i++) {
			assertEquals(expectedFields[i], row.getField(i));
		}
	}

	// the trailing comment line is skipped, so the input is exhausted
	assertNull(format.nextRecord(row));
}
 
Example 10
Source Project: flink   File: RowCsvInputFormatTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that lines starting with the single-character comment prefix '#'
 * are skipped, while the data lines in between parse normally.
 */
@Test
public void ignoreSingleCharPrefixComments() throws Exception {
	// two leading comment lines, two data lines, one trailing comment line
	String fileContent =
		"#description of the data\n" +
			"#successive commented line\n" +
			"this is|1|2.0|\n" +
			"a test|3|4.0|\n" +
			"#next|5|6.0|\n";

	FileInputSplit split = createTempFile(fileContent);

	TypeInformation[] fieldTypes = new TypeInformation[]{
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.INT_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO};

	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes, "\n", "|");
	format.setCommentPrefix("#");
	format.configure(new Configuration());
	format.open(split);

	Row result = new Row(3);

	result = format.nextRecord(result);
	assertNotNull(result);
	assertEquals("this is", result.getField(0));
	assertEquals(1, result.getField(1));
	assertEquals(2.0, result.getField(2));

	result = format.nextRecord(result);
	assertNotNull(result);
	assertEquals("a test", result.getField(0));
	assertEquals(3, result.getField(1));
	assertEquals(4.0, result.getField(2));

	// the trailing comment line is skipped, so the input is exhausted
	result = format.nextRecord(result);
	assertNull(result);
}
 
Example 11
Source Project: flink   File: ParquetTableSource.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Extracts the Parquet filter column and the comparison literal from a binary
 * comparison predicate.
 *
 * @param comp the binary comparison expression to translate
 * @return a (column, literal) pair for the Parquet FilterApi, or {@code null}
 *         if the column is not in the schema, the literal is not comparable,
 *         or the column type is not supported for push-down
 */
@Nullable
private Tuple2<Column, Comparable> extractColumnAndLiteral(BinaryComparison comp) {
	String columnName = getColumnName(comp);
	ColumnPath columnPath = ColumnPath.fromDotString(columnName);
	TypeInformation<?> typeInfo = null;
	try {
		Type type = parquetSchema.getType(columnPath.toArray());
		typeInfo = ParquetSchemaConverter.convertParquetTypeToTypeInfo(type);
	} catch (InvalidRecordException e) {
		LOG.error("Pushed predicate on undefined field name {} in schema", columnName);
		return null;
	}

	// fetch literal and ensure it is comparable
	Object value = getLiteral(comp);
	// validate that literal is comparable
	if (!(value instanceof Comparable)) {
		// Fix: added the missing spaces between the concatenated message parts
		// (the old message rendered as "...type X.Cannot push predicate...")
		// and corrected the "ParquetTablesource" spelling.
		LOG.warn("Encountered a non-comparable literal of type {}. " +
			"Cannot push predicate [{}] into ParquetTableSource. " +
			"This is a bug and should be reported.", value.getClass().getCanonicalName(), comp);
		return null;
	}

	// BYTE/SHORT/INT all use the int column; other supported types map 1:1.
	if (typeInfo == BasicTypeInfo.BYTE_TYPE_INFO ||
		typeInfo == BasicTypeInfo.SHORT_TYPE_INFO ||
		typeInfo == BasicTypeInfo.INT_TYPE_INFO) {
		return new Tuple2<>(FilterApi.intColumn(columnName), ((Number) value).intValue());
	} else if (typeInfo == BasicTypeInfo.LONG_TYPE_INFO) {
		return new Tuple2<>(FilterApi.longColumn(columnName), ((Number) value).longValue());
	} else if (typeInfo == BasicTypeInfo.FLOAT_TYPE_INFO) {
		return new Tuple2<>(FilterApi.floatColumn(columnName), ((Number) value).floatValue());
	} else if (typeInfo == BasicTypeInfo.BOOLEAN_TYPE_INFO) {
		return new Tuple2<>(FilterApi.booleanColumn(columnName), (Boolean) value);
	} else if (typeInfo == BasicTypeInfo.DOUBLE_TYPE_INFO) {
		return new Tuple2<>(FilterApi.doubleColumn(columnName), ((Number) value).doubleValue());
	} else if (typeInfo == BasicTypeInfo.STRING_TYPE_INFO) {
		return new Tuple2<>(FilterApi.binaryColumn(columnName), Binary.fromString((String) value));
	} else {
		// unsupported type
		return null;
	}
}
 
Example 12
Source Project: flink   File: MinWithRetractAggFunction.java    License: Apache License 2.0 4 votes vote down vote up
/** Returns the type information of the aggregated value: a boxed {@link Double}. */
@Override
protected TypeInformation<Double> getValueTypeInfo() {
	return BasicTypeInfo.DOUBLE_TYPE_INFO;
}
 
Example 13
/**
 * Reads lines consisting only of delimiters with lenient parsing enabled and
 * checks that empty fields come back as null.
 *
 * <p>NOTE(review): the loop asserts only field i of row i — one field per
 * line, not all eight — presumably to cover each column exactly once across
 * the eight input lines; confirm this is intentional.
 */
@Test
public void testEmptyFields() throws Exception {
	// eight delimiter-only lines; some contain 8 commas, some only 7
	String fileContent =
		",,,,,,,,\n" +
			",,,,,,,\n" +
			",,,,,,,,\n" +
			",,,,,,,\n" +
			",,,,,,,,\n" +
			",,,,,,,,\n" +
			",,,,,,,\n" +
			",,,,,,,,\n";

	FileInputSplit split = createTempFile(fileContent);

	// one column of each basic type
	TypeInformation[] fieldTypes = new TypeInformation[]{
		BasicTypeInfo.BOOLEAN_TYPE_INFO,
		BasicTypeInfo.BYTE_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO,
		BasicTypeInfo.FLOAT_TYPE_INFO,
		BasicTypeInfo.INT_TYPE_INFO,
		BasicTypeInfo.LONG_TYPE_INFO,
		BasicTypeInfo.SHORT_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO};

	// the boolean flag enables empty-field support
	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes, true);
	format.setFieldDelimiter(",");
	format.configure(new Configuration());
	format.open(split);

	Row result = new Row(8);
	int linesCnt = fileContent.split("\n").length;

	for (int i = 0; i < linesCnt; i++) {
		result = format.nextRecord(result);
		assertNull(result.getField(i));
	}

	// ensure no more rows
	assertNull(format.nextRecord(result));
	assertTrue(format.reachedEnd());
}
 
Example 14
/**
 * RFC 4180 compliance check: parses CSV with quoted fields, embedded commas
 * and double-quote escaped quotes, and compares each parsed Row against the
 * expected one.
 *
 * <p>Currently disabled (see the {@code @Ignore} reason); kept for when
 * escaped-quote support is added.
 */
@Test
@Ignore("Test disabled because we do not support double-quote escaped quotes right now.")
public void testParserCorrectness() throws Exception {
	// RFC 4180 Compliance Test content
	// Taken from http://en.wikipedia.org/wiki/Comma-separated_values#Example
	String fileContent = "Year,Make,Model,Description,Price\n" +
		"1997,Ford,E350,\"ac, abs, moon\",3000.00\n" +
		"1999,Chevy,\"Venture \"\"Extended Edition\"\"\",\"\",4900.00\n" +
		"1996,Jeep,Grand Cherokee,\"MUST SELL! air, moon roof, loaded\",4799.00\n" +
		"1999,Chevy,\"Venture \"\"Extended Edition, Very Large\"\"\",,5000.00\n" +
		",,\"Venture \"\"Extended Edition\"\"\",\"\",4900.00";

	FileInputSplit split = createTempFile(fileContent);

	TypeInformation[] fieldTypes = new TypeInformation[]{
		BasicTypeInfo.INT_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO};

	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes);
	format.setSkipFirstLineAsHeader(true);
	format.setFieldDelimiter(",");
	format.configure(new Configuration());
	format.open(split);

	Row result = new Row(5);
	Row r1 = new Row(5);
	r1.setField(0, 1997);
	r1.setField(1, "Ford");
	r1.setField(2, "E350");
	r1.setField(3, "ac, abs, moon");
	r1.setField(4, 3000.0);

	Row r2 = new Row(5);
	r2.setField(0, 1999);
	r2.setField(1, "Chevy");
	r2.setField(2, "Venture \"Extended Edition\"");
	r2.setField(3, "");
	r2.setField(4, 4900.0);

	Row r3 = new Row(5);
	r3.setField(0, 1996);
	r3.setField(1, "Jeep");
	r3.setField(2, "Grand Cherokee");
	r3.setField(3, "MUST SELL! air, moon roof, loaded");
	r3.setField(4, 4799.0);

	Row r4 = new Row(5);
	r4.setField(0, 1999);
	r4.setField(1, "Chevy");
	r4.setField(2, "Venture \"Extended Edition, Very Large\"");
	r4.setField(3, "");
	r4.setField(4, 5000.0);

	// last line has empty Year and Make fields, expected to parse as 0 and ""
	Row r5 = new Row(5);
	r5.setField(0, 0);
	r5.setField(1, "");
	r5.setField(2, "Venture \"Extended Edition\"");
	r5.setField(3, "");
	r5.setField(4, 4900.0);

	Row[] expectedLines = new Row[]{r1, r2, r3, r4, r5};
	for (Row expected : expectedLines) {
		result = format.nextRecord(result);
		assertEquals(expected, result);
	}
	assertNull(format.nextRecord(result));
	assertTrue(format.reachedEnd());
}
 
Example 15
Source Project: flink   File: OrcBatchReader.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Converts an ORC schema to a Flink TypeInformation.
 *
 * @param schema The ORC schema.
 * @return The TypeInformation that corresponds to the ORC schema.
 */
/**
 * Converts an ORC schema to a Flink TypeInformation.
 *
 * @param schema The ORC schema.
 * @return The TypeInformation that corresponds to the ORC schema.
 * @throws UnsupportedOperationException for UNION schemas (not supported yet)
 * @throws IllegalArgumentException for any unrecognized category
 */
static TypeInformation schemaToTypeInfo(TypeDescription schema) {
	switch (schema.getCategory()) {
		case BOOLEAN:
			return BasicTypeInfo.BOOLEAN_TYPE_INFO;
		case BYTE:
			return BasicTypeInfo.BYTE_TYPE_INFO;
		case SHORT:
			return BasicTypeInfo.SHORT_TYPE_INFO;
		case INT:
			return BasicTypeInfo.INT_TYPE_INFO;
		case LONG:
			return BasicTypeInfo.LONG_TYPE_INFO;
		case FLOAT:
			return BasicTypeInfo.FLOAT_TYPE_INFO;
		case DOUBLE:
			return BasicTypeInfo.DOUBLE_TYPE_INFO;
		case DECIMAL:
			return BasicTypeInfo.BIG_DEC_TYPE_INFO;
		// all three character types fall through to STRING
		case STRING:
		case CHAR:
		case VARCHAR:
			return BasicTypeInfo.STRING_TYPE_INFO;
		case DATE:
			return SqlTimeTypeInfo.DATE;
		case TIMESTAMP:
			return SqlTimeTypeInfo.TIMESTAMP;
		case BINARY:
			return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO;
		case STRUCT:
			// recursively convert every child field and keep the field names
			List<TypeDescription> fieldSchemas = schema.getChildren();
			TypeInformation[] fieldTypes = new TypeInformation[fieldSchemas.size()];
			for (int i = 0; i < fieldSchemas.size(); i++) {
				fieldTypes[i] = schemaToTypeInfo(fieldSchemas.get(i));
			}
			String[] fieldNames = schema.getFieldNames().toArray(new String[]{});
			return new RowTypeInfo(fieldTypes, fieldNames);
		case LIST:
			TypeDescription elementSchema = schema.getChildren().get(0);
			TypeInformation<?> elementType = schemaToTypeInfo(elementSchema);
			// arrays of primitive types are handled as object arrays to support null values
			return ObjectArrayTypeInfo.getInfoFor(elementType);
		case MAP:
			// children are [key schema, value schema]
			TypeDescription keySchema = schema.getChildren().get(0);
			TypeDescription valSchema = schema.getChildren().get(1);
			TypeInformation<?> keyType = schemaToTypeInfo(keySchema);
			TypeInformation<?> valType = schemaToTypeInfo(valSchema);
			return new MapTypeInfo<>(keyType, valType);
		case UNION:
			throw new UnsupportedOperationException("UNION type is not supported yet.");
		default:
			throw new IllegalArgumentException("Unknown type " + schema);
	}
}
 
Example 16
/** Returns the type information of the produced elements: a boxed {@link Double}. */
@Override
public TypeInformation<Double> getProducedType() {
	return BasicTypeInfo.DOUBLE_TYPE_INFO;
}
 
Example 17
/** Returns the type information of the produced elements: a boxed {@link Double}. */
@Override
public TypeInformation<Double> getProducedType() {
	return BasicTypeInfo.DOUBLE_TYPE_INFO;
}
 
Example 18
Source Project: Quicksql   File: FlinkJdbcGenerator.java    License: MIT License 4 votes vote down vote up
/**
 * Resolves an SQL/JDBC column type name to the Flink type information used
 * when reading that column.
 *
 * <p>Matching is case-sensitive on the exact type-name strings below; any
 * unrecognized name falls back to {@code STRING_TYPE_INFO}.
 *
 * @param type the upper-case SQL type name
 * @return the matching Flink type information
 */
public static TypeInformation<?> getTypeInformation(String type) {
    // integral family (incl. BOOLEAN and MEDIUMINT) -> int
    if (type.equals("INT") || type.equals("TINYINT") || type.equals("SMALLINT")
            || type.equals("MEDIUMINT") || type.equals("BOOLEAN") || type.equals("INTEGER")) {
        return BasicTypeInfo.INT_TYPE_INFO;
    }
    if (type.equals("BIGINT") || type.equals("INT UNSIGNED")) {
        return BasicTypeInfo.LONG_TYPE_INFO;
    }
    if (type.equals("DOUBLE")) {
        return BasicTypeInfo.DOUBLE_TYPE_INFO;
    }
    if (type.equals("FLOAT")) {
        return BasicTypeInfo.FLOAT_TYPE_INFO;
    }
    if (type.equals("DATE") || type.equals("YEAR")) {
        return SqlTimeTypeInfo.DATE;
    }
    if (type.equals("BIGDECIMAL") || type.equals("DECIMAL")) {
        return BasicTypeInfo.BIG_DEC_TYPE_INFO;
    }
    if (type.equals("BIT")) {
        return BasicTypeInfo.BOOLEAN_TYPE_INFO;
    }
    // NOTE(review): blobs map to a single byte, not a byte array — looks
    // suspicious but preserved as-is; confirm against the reader.
    if (type.equals("BLOB") || type.equals("LONGBLOB")) {
        return BasicTypeInfo.BYTE_TYPE_INFO;
    }
    // VARCHAR, TEXT, TIMESTAMP, DATETIME, LONGTEXT, VARCHAR2, STRING, CHAR
    // and every unknown type name are treated as strings.
    return BasicTypeInfo.STRING_TYPE_INFO;
}
 
Example 19
Source Project: flink   File: MaxWithRetractAggFunction.java    License: Apache License 2.0 4 votes vote down vote up
/** Returns the type information of the aggregated value: a boxed {@link Double}. */
@Override
protected TypeInformation<Double> getValueTypeInfo() {
	return BasicTypeInfo.DOUBLE_TYPE_INFO;
}
 
Example 20
Source Project: flink   File: RowCsvInputFormatTest.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * RFC 4180 compliance check: parses CSV with quoted fields, embedded commas
 * and double-quote escaped quotes, comparing each parsed Row against the
 * expected one. Disabled until escaped-quote support lands.
 */
@Test
@Ignore("Test disabled because we do not support double-quote escaped quotes right now.")
public void testParserCorrectness() throws Exception {
	// RFC 4180 Compliance Test content
	// Taken from http://en.wikipedia.org/wiki/Comma-separated_values#Example
	String fileContent = "Year,Make,Model,Description,Price\n" +
		"1997,Ford,E350,\"ac, abs, moon\",3000.00\n" +
		"1999,Chevy,\"Venture \"\"Extended Edition\"\"\",\"\",4900.00\n" +
		"1996,Jeep,Grand Cherokee,\"MUST SELL! air, moon roof, loaded\",4799.00\n" +
		"1999,Chevy,\"Venture \"\"Extended Edition, Very Large\"\"\",,5000.00\n" +
		",,\"Venture \"\"Extended Edition\"\"\",\"\",4900.00";

	FileInputSplit split = createTempFile(fileContent);

	TypeInformation[] fieldTypes = new TypeInformation[]{
		BasicTypeInfo.INT_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.STRING_TYPE_INFO,
		BasicTypeInfo.DOUBLE_TYPE_INFO};

	RowCsvInputFormat format = new RowCsvInputFormat(PATH, fieldTypes);
	format.setSkipFirstLineAsHeader(true);
	format.setFieldDelimiter(",");
	format.configure(new Configuration());
	format.open(split);

	// expected field values per data line; the empty INT field of the last
	// line parses to 0 and the empty strings to ""
	Object[][] expectedFields = {
		{1997, "Ford", "E350", "ac, abs, moon", 3000.0},
		{1999, "Chevy", "Venture \"Extended Edition\"", "", 4900.0},
		{1996, "Jeep", "Grand Cherokee", "MUST SELL! air, moon roof, loaded", 4799.0},
		{1999, "Chevy", "Venture \"Extended Edition, Very Large\"", "", 5000.0},
		{0, "", "Venture \"Extended Edition\"", "", 4900.0}};

	Row result = new Row(5);
	for (Object[] fields : expectedFields) {
		Row expected = new Row(5);
		for (int i = 0; i < fields.length; i++) {
			expected.setField(i, fields[i]);
		}
		result = format.nextRecord(result);
		assertEquals(expected, result);
	}
	assertNull(format.nextRecord(result));
	assertTrue(format.reachedEnd());
}