Java Code Examples for org.apache.flink.api.common.typeinfo.Types#LONG

The following examples show how to use org.apache.flink.api.common.typeinfo.Types#LONG. They are taken from open-source projects; the source file, project, and license are noted above each example.
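Before the examples, a minimal sketch of the most common pattern: Types.LONG is simply the TypeInformation<Long> constant that Flink's state descriptors, schemas, and serialization schemas expect. The class name CountPerKey below is illustrative only and does not come from any of the projects referenced on this page.

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.configuration.Configuration;

// Illustrative sketch: counts elements per key on a keyed stream.
// Types.LONG supplies the TypeInformation<Long> Flink needs to serialize the state.
public class CountPerKey extends RichMapFunction<String, Long> {

	private transient ValueState<Long> countState;

	@Override
	public void open(Configuration parameters) {
		// Only valid when the function runs on a keyed stream (e.g. after keyBy).
		ValueStateDescriptor<Long> descriptor =
			new ValueStateDescriptor<>("count", Types.LONG);
		countState = getRuntimeContext().getState(descriptor);
	}

	@Override
	public Long map(String value) throws Exception {
		Long current = countState.value();
		long next = (current == null) ? 1L : current + 1L;
		countState.update(next);
		return next;
	}
}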
Example 1
Source File: OneHotModelMapperTest.java    From Alink with Apache License 2.0
@Test
public void testDropLast() throws Exception {
	TableSchema dataSchema = new TableSchema(
		new String[] {"docid", "word", "cnt"},
		new TypeInformation <?>[] {Types.STRING, Types.STRING, Types.LONG}
	);
	Params params = new Params()
		.set(OneHotPredictParams.ENCODE, HasEncodeWithoutWoe.Encode.VECTOR)
		.set(OneHotPredictParams.SELECTED_COLS, new String[]{"cnt", "word", "docid"})
		.set(OneHotPredictParams.DROP_LAST, true);

	OneHotModelMapper mapper = new OneHotModelMapper(modelSchema, dataSchema, params);
	mapper.loadModel(model);

	assertEquals(mapper.map(Row.of("doc0", "天", 4L)), Row.of(new SparseVector(4),
		new SparseVector(8, new int[]{5}, new double[]{1.0}),
		new SparseVector(6, new int[]{2}, new double[]{1.0})));
	assertEquals(mapper.map(nullElseRow), Row.of(
		new SparseVector(4, new int[]{2}, new double[]{1.0}),
		new SparseVector(8, new int[]{7}, new double[]{1.0}),
		new SparseVector(6, new int[]{2}, new double[]{1.0})));
}
 
Example 2
Source File: ProcTimeRangeBoundedPrecedingFunction.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
	function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
	function.open(new PerKeyStateDataViewStore(getRuntimeContext()));

	output = new JoinedRowData();

	// input elements are all binary rows as they come from the network
	RowDataTypeInfo inputType = new RowDataTypeInfo(inputFieldTypes);
	// we keep the elements received in a map state indexed based on their ingestion time
	ListTypeInfo<RowData> rowListTypeInfo = new ListTypeInfo<>(inputType);
	MapStateDescriptor<Long, List<RowData>> mapStateDescriptor = new MapStateDescriptor<>(
		"inputState", BasicTypeInfo.LONG_TYPE_INFO, rowListTypeInfo);
	inputState = getRuntimeContext().getMapState(mapStateDescriptor);

	RowDataTypeInfo accTypeInfo = new RowDataTypeInfo(accTypes);
	ValueStateDescriptor<RowData> stateDescriptor =
		new ValueStateDescriptor<RowData>("accState", accTypeInfo);
	accState = getRuntimeContext().getState(stateDescriptor);

	ValueStateDescriptor<Long> cleanupTsStateDescriptor = new ValueStateDescriptor<>(
		"cleanupTsState",
		Types.LONG
	);
	this.cleanupTsState = getRuntimeContext().getState(cleanupTsStateDescriptor);
}
 
Example 3
Source File: FirstValueWithRetractAggFunction.java    From flink with Apache License 2.0
@Override
public TypeInformation<GenericRowData> getAccumulatorType() {
	LogicalType[] fieldTypes = new LogicalType[] {
			fromTypeInfoToLogicalType(getResultType()),
			new BigIntType(),
			new TypeInformationRawType<>(new MapViewTypeInfo<>(getResultType(), new ListTypeInfo<>(Types.LONG), false, false)),
			new TypeInformationRawType<>(new MapViewTypeInfo<>(Types.LONG, new ListTypeInfo<>(getResultType()), false, false))
	};

	String[] fieldNames = new String[] {
			"firstValue",
			"firstOrder",
			"valueToOrderMapView",
			"orderToValueMapView"
	};

	return (TypeInformation) new RowDataTypeInfo(fieldTypes, fieldNames);
}
 
Example 4
Source File: ImputerMapperTest.java    From Alink with Apache License 2.0
@Test
public void testMax() throws Exception {
    Row[] rows = new Row[]{
        Row.of(0L, "{\"selectedCols\":\"[\\\"f_double\\\",\\\"f_long\\\",\\\"f_int\\\"]\",\"strategy\":\"\\\"min\\\"\"}", null, null, null),
        Row.of(1048576L, "[2.0, 2.0, 2.0]", null, null, null)
    };

    List<Row> model = Arrays.asList(rows);

    TableSchema dataSchema = new TableSchema(
        new String[]{"f_string", "f_long", "f_int", "f_double", "f_boolean"},
        new TypeInformation<?>[]{Types.STRING, Types.LONG, Types.INT, Types.DOUBLE, Types.BOOLEAN}
    );
    Params params = new Params();

    ImputerModelMapper mapper = new ImputerModelMapper(modelSchema, dataSchema, params);
    mapper.loadModel(model);

    assertEquals(mapper.map(Row.of("a", null, null, null, true)).getField(1), 2L);
    assertEquals(mapper.map(Row.of("a", null, null, null, true)).getField(2), 2);
    assertEquals((double) mapper.map(Row.of("a", null, null, null, true)).getField(3), 2.0, 10e-4);
}
 
Example 5
Source File: ReplicateRows.java    From flink with Apache License 2.0
@Override
public TypeInformation<?>[] getParameterTypes(Class<?>[] signature) {
	TypeInformation[] paraTypes = new TypeInformation[1 + fieldTypes.length];
	paraTypes[0] = Types.LONG;
	System.arraycopy(fieldTypes, 0, paraTypes, 1, fieldTypes.length);
	return paraTypes;
}
 
Example 6
Source File: OrcTableSourceTest.java    From flink with Apache License 2.0
private TypeInformation[] getNestedFieldTypes() {
	return new TypeInformation[]{
		Types.BOOLEAN, Types.BYTE, Types.SHORT, Types.INT, Types.LONG, Types.FLOAT, Types.DOUBLE,
		PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO, Types.STRING,
		Types.ROW_NAMED(
			new String[]{"list"},
			ObjectArrayTypeInfo.getInfoFor(
				Types.ROW_NAMED(
					new String[]{"int1", "string1"},
					Types.INT, Types.STRING
				)
			)
		),
		ObjectArrayTypeInfo.getInfoFor(
			Types.ROW_NAMED(
				new String[]{"int1", "string1"},
				Types.INT, Types.STRING
			)
		),
		new MapTypeInfo<>(
			Types.STRING,
			Types.ROW_NAMED(
				new String[]{"int1", "string1"},
				Types.INT, Types.STRING
			)
		)
	};
}
 
Example 7
Source File: JsonRowSerializationSchema.java    From flink with Apache License 2.0
private Optional<SerializationRuntimeConverter> createConverterForSimpleType(TypeInformation<?> simpleTypeInfo) {
	if (simpleTypeInfo == Types.VOID) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().nullNode());
	} else if (simpleTypeInfo == Types.BOOLEAN) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().booleanNode((Boolean) object));
	} else if (simpleTypeInfo == Types.STRING) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().textNode((String) object));
	} else if (simpleTypeInfo == Types.INT) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().numberNode((Integer) object));
	} else if (simpleTypeInfo == Types.LONG) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().numberNode((Long) object));
	} else if (simpleTypeInfo == Types.DOUBLE) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().numberNode((Double) object));
	} else if (simpleTypeInfo == Types.FLOAT) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().numberNode((Float) object));
	} else if (simpleTypeInfo == Types.SHORT) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().numberNode((Short) object));
	} else if (simpleTypeInfo == Types.BYTE) {
		return Optional.of((mapper, reuse, object) -> mapper.getNodeFactory().numberNode((Byte) object));
	} else if (simpleTypeInfo == Types.BIG_DEC) {
		return Optional.of(createBigDecimalConverter());
	} else if (simpleTypeInfo == Types.BIG_INT) {
		return Optional.of(createBigIntegerConverter());
	} else if (simpleTypeInfo == Types.SQL_DATE) {
		return Optional.of(this::convertDate);
	} else if (simpleTypeInfo == Types.SQL_TIME) {
		return Optional.of(this::convertTime);
	} else if (simpleTypeInfo == Types.SQL_TIMESTAMP) {
		return Optional.of(this::convertTimestamp);
	} else if (simpleTypeInfo == Types.LOCAL_DATE) {
		return Optional.of(this::convertLocalDate);
	} else if (simpleTypeInfo == Types.LOCAL_TIME) {
		return Optional.of(this::convertLocalTime);
	} else if (simpleTypeInfo == Types.LOCAL_DATE_TIME) {
		return Optional.of(this::convertLocalDateTime);
	} else {
		return Optional.empty();
	}
}
 
Example 8
Source File: CountSlidingWindowAssigner.java    From flink with Apache License 2.0
@Override
public void open(InternalWindowProcessFunction.Context<?, CountWindow> ctx) throws Exception {
	String descriptorName = "slide-count-assigner";
	ValueStateDescriptor<Long> countDescriptor = new ValueStateDescriptor<>(
		descriptorName,
		Types.LONG);
	this.count = ctx.getPartitionedState(countDescriptor);
}
 
Example 9
Source File: IsotonicRegressionModelMapperTest.java    From Alink with Apache License 2.0
@Test
public void testIsoReg() throws Exception {
	Row[] rows = new Row[] {
		Row.of(0L, "{\"vectorColName\":null,\"modelName\":\"\\\"IsotonicRegressionModel\\\"\","
			+ "\"featureColName\":\"\\\"feature\\\"\",\"featureIndex\":\"0\",\"modelSchema\":\"\\\"model_id "
			+ "bigint,"
			+ "model_info string\\\"\",\"isNewFormat\":\"true\"}"),
		Row.of(1048576L, "[0.02,0.1,0.2,0.27,0.3,0.35,0.45,0.5,0.7,0.8,0.9]"),
		Row.of(2097152L,
			"[0.0,0.3333333333333333,0.3333333333333333,0.5,0.5,0.6666666666666666,0.6666666666666666,0.75,0.75,"
				+ "1.0,1.0]")
	};
	List <Row> model = Arrays.asList(rows);
	TableSchema modelSchema = new TableSchema(new String[] {"model_id", "model_info"},
		new TypeInformation[] {Types.LONG, Types.STRING});

	TableSchema dataSchema = new TableSchema(new String[] {"feature"}, new TypeInformation <?>[] {Types.DOUBLE});

	Params params = new Params()
		.set(IsotonicRegPredictParams.PREDICTION_COL, "pred");

	IsotonicRegressionModelMapper mapper = new IsotonicRegressionModelMapper(modelSchema, dataSchema, params);
	mapper.loadModel(model);

	assertEquals(Double.parseDouble(mapper.map(Row.of(0.35)).getField(1).toString()), 0.66, 0.01);
	assertEquals(mapper.getOutputSchema(), new TableSchema(new String[] {"feature", "pred"},
		new TypeInformation <?>[] {Types.DOUBLE, Types.DOUBLE}));
}
 
Example 10
Source File: ReplicateRows.java    From flink with Apache License 2.0
@Override
public TypeInformation<?>[] getParameterTypes(Class<?>[] signature) {
	TypeInformation[] paraTypes = new TypeInformation[1 + fieldTypes.length];
	paraTypes[0] = Types.LONG;
	System.arraycopy(fieldTypes, 0, paraTypes, 1, fieldTypes.length);
	return paraTypes;
}
 
Example 11
Source File: StreamSortOperator.java    From flink with Apache License 2.0
@Override
public void initializeState(StateInitializationContext context) throws Exception {
	super.initializeState(context);
	TupleTypeInfo<Tuple2<RowData, Long>> tupleType = new TupleTypeInfo<>(inputRowType, Types.LONG);
	this.bufferState = context.getOperatorStateStore()
			.getListState(new ListStateDescriptor<>("localBufferState", tupleType));
}
 
Example 12
Source File: ProcTimeRowsBoundedPrecedingFunction.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
	function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
	function.open(new PerKeyStateDataViewStore(getRuntimeContext()));

	output = new JoinedRowData();

	// input elements are all binary rows as they come from the network
	RowDataTypeInfo inputType = new RowDataTypeInfo(inputFieldTypes);
	// We keep the elements received in a MapState keyed
	// by their ingestion time in the operator.
	// We also keep a counter of processed elements
	// and the timestamp of the oldest element.
	ListTypeInfo<RowData> rowListTypeInfo = new ListTypeInfo<RowData>(inputType);
	MapStateDescriptor<Long, List<RowData>> mapStateDescriptor = new MapStateDescriptor<Long, List<RowData>>(
		"inputState", BasicTypeInfo.LONG_TYPE_INFO, rowListTypeInfo);
	inputState = getRuntimeContext().getMapState(mapStateDescriptor);

	RowDataTypeInfo accTypeInfo = new RowDataTypeInfo(accTypes);
	ValueStateDescriptor<RowData> stateDescriptor =
		new ValueStateDescriptor<RowData>("accState", accTypeInfo);
	accState = getRuntimeContext().getState(stateDescriptor);

	ValueStateDescriptor<Long> processedCountDescriptor = new ValueStateDescriptor<Long>(
		"processedCountState",
		Types.LONG);
	counterState = getRuntimeContext().getState(processedCountDescriptor);

	ValueStateDescriptor<Long> smallestTimestampDescriptor = new ValueStateDescriptor<Long>(
		"smallestTSState",
		Types.LONG);
	smallestTsState = getRuntimeContext().getState(smallestTimestampDescriptor);

	initCleanupTimeState("ProcTimeBoundedRowsOverCleanupTime");
}
 
Example 13
Source File: TableUtilTest.java    From flink with Apache License 2.0
@Test
public void formatTest() {
	TableSchema tableSchema = new TableSchema(new String[] {"f0", "f1", "F2", "f3"},
		new TypeInformation[] {Types.INT, Types.LONG, Types.STRING, Types.BOOLEAN});
	Row row = Row.of(1, 2L, "3", true);

	String format = TableUtil.format(tableSchema.getFieldNames(), Collections.singletonList(row));
	Assert.assertTrue(("f0|f1|F2|f3\r\n" + "--|--|--|--\n" + "1|2|3|true").equalsIgnoreCase(format));
}
 
Example 14
Source File: Kafka010ITCase.java    From flink with Apache License 2.0
/**
 * Kafka 0.10 specific test, ensuring Timestamps are properly written to and read from Kafka.
 */
@Ignore("This test is disabled because of: https://issues.apache.org/jira/browse/FLINK-9217")
@Test(timeout = 60000)
public void testTimestamps() throws Exception {

	final String topic = "tstopic";
	createTestTopic(topic, 3, 1);

	// ---------- Produce an event time stream into Kafka -------------------

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	DataStream<Long> streamWithTimestamps = env.addSource(new SourceFunction<Long>() {
		private static final long serialVersionUID = -2255105836471289626L;
		boolean running = true;

		@Override
		public void run(SourceContext<Long> ctx) throws Exception {
			long i = 0;
			while (running) {
				ctx.collectWithTimestamp(i, i * 2);
				if (i++ == 1000L) {
					running = false;
				}
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	});

	final TypeInformationSerializationSchema<Long> longSer = new TypeInformationSerializationSchema<>(Types.LONG, env.getConfig());
	FlinkKafkaProducer010.FlinkKafkaProducer010Configuration prod = FlinkKafkaProducer010.writeToKafkaWithTimestamps(streamWithTimestamps, topic, new KeyedSerializationSchemaWrapper<>(longSer), standardProps, new FlinkKafkaPartitioner<Long>() {
		private static final long serialVersionUID = -6730989584364230617L;

		@Override
		public int partition(Long next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
			return (int) (next % 3);
		}
	});
	prod.setParallelism(3);
	prod.setWriteTimestampToKafka(true);
	env.execute("Produce some");

	// ---------- Consume stream from Kafka -------------------

	env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	FlinkKafkaConsumer010<Long> kafkaSource = new FlinkKafkaConsumer010<>(topic, new LimitedLongDeserializer(), standardProps);
	kafkaSource.assignTimestampsAndWatermarks(new AssignerWithPunctuatedWatermarks<Long>() {
		private static final long serialVersionUID = -4834111073247835189L;

		@Nullable
		@Override
		public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
			if (lastElement % 10 == 0) {
				return new Watermark(lastElement);
			}
			return null;
		}

		@Override
		public long extractTimestamp(Long element, long previousElementTimestamp) {
			return previousElementTimestamp;
		}
	});

	DataStream<Long> stream = env.addSource(kafkaSource);
	GenericTypeInfo<Object> objectTypeInfo = new GenericTypeInfo<>(Object.class);
	stream.transform("timestamp validating operator", objectTypeInfo, new TimestampValidatingOperator()).setParallelism(1);

	env.execute("Consume again");

	deleteTestTopic(topic);
}
 
Example 15
Source File: AvroSchemaConverter.java    From Flink-CEPplus with Apache License 2.0
private static TypeInformation<?> convertToTypeInfo(Schema schema) {
	switch (schema.getType()) {
		case RECORD:
			final List<Schema.Field> fields = schema.getFields();

			final TypeInformation<?>[] types = new TypeInformation<?>[fields.size()];
			final String[] names = new String[fields.size()];
			for (int i = 0; i < fields.size(); i++) {
				final Schema.Field field = fields.get(i);
				types[i] = convertToTypeInfo(field.schema());
				names[i] = field.name();
			}
			return Types.ROW_NAMED(names, types);
		case ENUM:
			return Types.STRING;
		case ARRAY:
			// result type might either be ObjectArrayTypeInfo or BasicArrayTypeInfo for Strings
			return Types.OBJECT_ARRAY(convertToTypeInfo(schema.getElementType()));
		case MAP:
			return Types.MAP(Types.STRING, convertToTypeInfo(schema.getValueType()));
		case UNION:
			final Schema actualSchema;
			if (schema.getTypes().size() == 2 && schema.getTypes().get(0).getType() == Schema.Type.NULL) {
				actualSchema = schema.getTypes().get(1);
			} else if (schema.getTypes().size() == 2 && schema.getTypes().get(1).getType() == Schema.Type.NULL) {
				actualSchema = schema.getTypes().get(0);
			} else if (schema.getTypes().size() == 1) {
				actualSchema = schema.getTypes().get(0);
			} else {
				// use Kryo for serialization
				return Types.GENERIC(Object.class);
			}
			return convertToTypeInfo(actualSchema);
		case FIXED:
			// logical decimal type
			if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
				return Types.BIG_DEC;
			}
			// convert fixed size binary data to primitive byte arrays
			return Types.PRIMITIVE_ARRAY(Types.BYTE);
		case STRING:
			// convert Avro's Utf8/CharSequence to String
			return Types.STRING;
		case BYTES:
			// logical decimal type
			if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
				return Types.BIG_DEC;
			}
			return Types.PRIMITIVE_ARRAY(Types.BYTE);
		case INT:
			// logical date and time type
			final LogicalType logicalType = schema.getLogicalType();
			if (logicalType == LogicalTypes.date()) {
				return Types.SQL_DATE;
			} else if (logicalType == LogicalTypes.timeMillis()) {
				return Types.SQL_TIME;
			}
			return Types.INT;
		case LONG:
			// logical timestamp type
			if (schema.getLogicalType() == LogicalTypes.timestampMillis()) {
				return Types.SQL_TIMESTAMP;
			}
			return Types.LONG;
		case FLOAT:
			return Types.FLOAT;
		case DOUBLE:
			return Types.DOUBLE;
		case BOOLEAN:
			return Types.BOOLEAN;
		case NULL:
			return Types.VOID;
	}
	throw new IllegalArgumentException("Unsupported Avro type '" + schema.getType() + "'.");
}
 
Example 16
Source File: KafkaITCase.java    From flink with Apache License 2.0
/**
 * Kafka 2.0 specific test, ensuring Timestamps are properly written to and read from Kafka.
 */
@Test(timeout = 60000)
public void testTimestamps() throws Exception {

	final String topic = "tstopic";
	createTestTopic(topic, 3, 1);

	// ---------- Produce an event time stream into Kafka -------------------

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	DataStream<Long> streamWithTimestamps = env.addSource(new SourceFunction<Long>() {
		private static final long serialVersionUID = -2255115836471289626L;
		boolean running = true;

		@Override
		public void run(SourceContext<Long> ctx) throws Exception {
			long i = 0;
			while (running) {
				ctx.collectWithTimestamp(i, i * 2);
				if (i++ == 1110L) {
					running = false;
				}
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	});

	final TypeInformationSerializationSchema<Long> longSer = new TypeInformationSerializationSchema<>(Types.LONG, env.getConfig());
	FlinkKafkaProducer<Long> prod = new FlinkKafkaProducer<>(topic, new KeyedSerializationSchemaWrapper<>(longSer), standardProps, Optional.of(new FlinkKafkaPartitioner<Long>() {
		private static final long serialVersionUID = -6730989584364230617L;

		@Override
		public int partition(Long next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
			return (int) (next % 3);
		}
	}));
	prod.setWriteTimestampToKafka(true);

	streamWithTimestamps.addSink(prod).setParallelism(3);

	env.execute("Produce some");

	// ---------- Consume stream from Kafka -------------------

	env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	FlinkKafkaConsumer<Long> kafkaSource = new FlinkKafkaConsumer<>(topic, new KafkaITCase.LimitedLongDeserializer(), standardProps);
	kafkaSource.assignTimestampsAndWatermarks(new AssignerWithPunctuatedWatermarks<Long>() {
		private static final long serialVersionUID = -4834111173247835189L;

		@Nullable
		@Override
		public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
			if (lastElement % 11 == 0) {
				return new Watermark(lastElement);
			}
			return null;
		}

		@Override
		public long extractTimestamp(Long element, long previousElementTimestamp) {
			return previousElementTimestamp;
		}
	});

	DataStream<Long> stream = env.addSource(kafkaSource);
	GenericTypeInfo<Object> objectTypeInfo = new GenericTypeInfo<>(Object.class);
	stream.transform("timestamp validating operator", objectTypeInfo, new TimestampValidatingOperator()).setParallelism(1);

	env.execute("Consume again");

	deleteTestTopic(topic);
}
 
Example 17
Source File: KeyedProcessFunctionWithCleanupState.java    From flink with Apache License 2.0
protected void initCleanupTimeState(String stateName) {
	if (stateCleaningEnabled) {
		ValueStateDescriptor<Long> inputCntDescriptor = new ValueStateDescriptor<>(stateName, Types.LONG);
		cleanupTimeState = getRuntimeContext().getState(inputCntDescriptor);
	}
}
 
Example 18
Source File: LastValueWithRetractAggFunction.java    From flink with Apache License 2.0
@Override
public TypeInformation<Long> getResultType() {
	return Types.LONG;
}
 
Example 19
Source File: CsvParserTest.java    From Alink with Apache License 2.0
@Test
public void testMalFormatString1() throws Exception {
    CsvParser parser = new CsvParser(new TypeInformation[]{Types.STRING, Types.LONG}, ",", '"');
    Assert.assertTrue(parser.parse("\"hello\" world,1").f0);
}
 
Example 20
Source File: KafkaITCase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Kafka 2.0 specific test, ensuring Timestamps are properly written to and read from Kafka.
 */
@Test(timeout = 60000)
public void testTimestamps() throws Exception {

	final String topic = "tstopic";
	createTestTopic(topic, 3, 1);

	// ---------- Produce an event time stream into Kafka -------------------

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().disableSysoutLogging();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	DataStream<Long> streamWithTimestamps = env.addSource(new SourceFunction<Long>() {
		private static final long serialVersionUID = -2255115836471289626L;
		boolean running = true;

		@Override
		public void run(SourceContext<Long> ctx) throws Exception {
			long i = 0;
			while (running) {
				ctx.collectWithTimestamp(i, i * 2);
				if (i++ == 1110L) {
					running = false;
				}
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	});

	final TypeInformationSerializationSchema<Long> longSer = new TypeInformationSerializationSchema<>(Types.LONG, env.getConfig());
	FlinkKafkaProducer<Long> prod = new FlinkKafkaProducer<>(topic, new KeyedSerializationSchemaWrapper<>(longSer), standardProps, Optional.of(new FlinkKafkaPartitioner<Long>() {
		private static final long serialVersionUID = -6730989584364230617L;

		@Override
		public int partition(Long next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
			return (int) (next % 3);
		}
	}));
	prod.setWriteTimestampToKafka(true);

	streamWithTimestamps.addSink(prod).setParallelism(3);

	env.execute("Produce some");

	// ---------- Consume stream from Kafka -------------------

	env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().disableSysoutLogging();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	FlinkKafkaConsumer<Long> kafkaSource = new FlinkKafkaConsumer<>(topic, new KafkaITCase.LimitedLongDeserializer(), standardProps);
	kafkaSource.assignTimestampsAndWatermarks(new AssignerWithPunctuatedWatermarks<Long>() {
		private static final long serialVersionUID = -4834111173247835189L;

		@Nullable
		@Override
		public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
			if (lastElement % 11 == 0) {
				return new Watermark(lastElement);
			}
			return null;
		}

		@Override
		public long extractTimestamp(Long element, long previousElementTimestamp) {
			return previousElementTimestamp;
		}
	});

	DataStream<Long> stream = env.addSource(kafkaSource);
	GenericTypeInfo<Object> objectTypeInfo = new GenericTypeInfo<>(Object.class);
	stream.transform("timestamp validating operator", objectTypeInfo, new TimestampValidatingOperator()).setParallelism(1);

	env.execute("Consume again");

	deleteTestTopic(topic);
}