org.apache.flink.graph.utils.Tuple3ToEdgeMap Java Examples

The following examples show how to use org.apache.flink.graph.utils.Tuple3ToEdgeMap. Each example notes the source file and the open source project it was taken from.
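Tuple3ToEdgeMap is a MapFunction<Tuple3<K, K, EV>, Edge<K, EV>>: it turns each Tuple3 into an Edge, reading the source ID from the first field, the target ID from the second, and the edge value from the third. A minimal self-contained sketch of that conversion (the class name and sample values are made up for illustration):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.graph.Edge;
import org.apache.flink.graph.utils.Tuple3ToEdgeMap;

public class Tuple3ToEdgeMapExample {

	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// Each Tuple3 is (source ID, target ID, edge value).
		DataSet<Tuple3<Long, Long, Double>> tuples = env.fromElements(
			new Tuple3<>(1L, 2L, 0.5),
			new Tuple3<>(2L, 3L, 1.5));

		// Tuple3ToEdgeMap converts each tuple into an Edge<Long, Double>.
		DataSet<Edge<Long, Double>> edges = tuples.map(new Tuple3ToEdgeMap<>());

		edges.print();
	}
}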
Example #1
Source File: GSASingleSourceShortestPaths.java    From Flink-CEPplus with Apache License 2.0
private static DataSet<Edge<Long, Double>> getEdgeDataSet(ExecutionEnvironment env) {
	if (fileOutput) {
		return env.readCsvFile(edgesInputPath)
				.fieldDelimiter("\t")
				.lineDelimiter("\n")
				.types(Long.class, Long.class, Double.class)
				.map(new Tuple3ToEdgeMap<>());
	} else {
		return SingleSourceShortestPathsData.getDefaultEdgeDataSet(env);
	}
}
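Here types(Long.class, Long.class, Double.class) produces a DataSet<Tuple3<Long, Long, Double>>, which Tuple3ToEdgeMap converts into a DataSet<Edge<Long, Double>>. A hedged sketch of the same pattern feeding Graph.fromDataSet (the input path is made up; imports as in the sketch above):

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

// One tab-separated edge per line, e.g. "1\t2\t0.5".
DataSet<Edge<Long, Double>> edges = env.readCsvFile("/tmp/edges.tsv")
		.fieldDelimiter("\t")
		.lineDelimiter("\n")
		.types(Long.class, Long.class, Double.class)
		.map(new Tuple3ToEdgeMap<>());

// Vertices are derived from the edge endpoints; their values default to NullValue.
Graph<Long, NullValue, Double> graph = Graph.fromDataSet(edges, env);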
 
Example #2
Source File: PregelSSSP.java    From Flink-CEPplus with Apache License 2.0
private static DataSet<Edge<Long, Double>> getEdgesDataSet(ExecutionEnvironment env) {
	if (fileOutput) {
		return env.readCsvFile(edgesInputPath)
				.lineDelimiter("\n")
				.fieldDelimiter("\t")
				.ignoreComments("%")
				.types(Long.class, Long.class, Double.class)
				.map(new Tuple3ToEdgeMap<>());
	} else {
		return SingleSourceShortestPathsData.getDefaultEdgeDataSet(env);
	}
}
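Relative to Example #1, the only addition is ignoreComments("%"), which makes the CSV reader skip any input line that starts with the % prefix.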
 
Example #3
Source File: SingleSourceShortestPaths.java    From Flink-CEPplus with Apache License 2.0
private static DataSet<Edge<Long, Double>> getEdgesDataSet(ExecutionEnvironment env) {
	if (fileOutput) {
		return env.readCsvFile(edgesInputPath)
				.lineDelimiter("\n")
				.fieldDelimiter("\t")
				.types(Long.class, Long.class, Double.class)
				.map(new Tuple3ToEdgeMap<>());
	} else {
		return SingleSourceShortestPathsData.getDefaultEdgeDataSet(env);
	}
}
 
Example #4
Source File: PregelSSSP.java    From flink with Apache License 2.0
private static DataSet<Edge<Long, Double>> getEdgesDataSet(ExecutionEnvironment env) {
	if (fileOutput) {
		return env.readCsvFile(edgesInputPath)
				.lineDelimiter("\n")
				.fieldDelimiter("\t")
				.ignoreComments("%")
				.types(Long.class, Long.class, Double.class)
				.map(new Tuple3ToEdgeMap<>());
	} else {
		return SingleSourceShortestPathsData.getDefaultEdgeDataSet(env);
	}
}
 
Example #5
Source File: GSASingleSourceShortestPaths.java    From flink with Apache License 2.0
private static DataSet<Edge<Long, Double>> getEdgeDataSet(ExecutionEnvironment env) {
	if (fileOutput) {
		return env.readCsvFile(edgesInputPath)
				.fieldDelimiter("\t")
				.lineDelimiter("\n")
				.types(Long.class, Long.class, Double.class)
				.map(new Tuple3ToEdgeMap<>());
	} else {
		return SingleSourceShortestPathsData.getDefaultEdgeDataSet(env);
	}
}
 
Example #6
Source File: SingleSourceShortestPaths.java    From flink with Apache License 2.0
private static DataSet<Edge<Long, Double>> getEdgesDataSet(ExecutionEnvironment env) {
	if (fileOutput) {
		return env.readCsvFile(edgesInputPath)
				.lineDelimiter("\n")
				.fieldDelimiter("\t")
				.types(Long.class, Long.class, Double.class)
				.map(new Tuple3ToEdgeMap<>());
	} else {
		return SingleSourceShortestPathsData.getDefaultEdgeDataSet(env);
	}
}
 
Example #7
Source File: GSACompilerTest.java    From flink with Apache License 2.0
@Test
public void testGSACompiler() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(DEFAULT_PARALLELISM);

	// compose test program

	DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(
		1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<>());

	Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);

	DataSet<Vertex<Long, Long>> result = graph.runGatherSumApplyIteration(
		new GatherNeighborIds(), new SelectMinId(),
		new UpdateComponentId(), 100).getVertices();

	result.output(new DiscardingOutputFormat<>());

	Plan p = env.createProgramPlan("GSA Connected Components");
	OptimizedPlan op = compileNoStats(p);

	// check the sink
	SinkPlanNode sink = op.getDataSinks().iterator().next();
	assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
	assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
	assertEquals(PartitioningProperty.HASH_PARTITIONED, sink.getGlobalProperties().getPartitioning());

	// check the iteration
	WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
	assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());

	// check the solution set join and the delta
	PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
	assertTrue(ssDelta instanceof DualInputPlanNode); // this is only true if the update function preserves the partitioning

	DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
	assertEquals(DEFAULT_PARALLELISM, ssJoin.getParallelism());
	assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());
	assertEquals(new FieldList(0), ssJoin.getInput1().getShipStrategyKeys());

	// check the workset join
	SingleInputPlanNode sumReducer = (SingleInputPlanNode) ssJoin.getInput1().getSource();
	SingleInputPlanNode gatherMapper = (SingleInputPlanNode) sumReducer.getInput().getSource();
	DualInputPlanNode edgeJoin = (DualInputPlanNode) gatherMapper.getInput().getSource();
	assertEquals(DEFAULT_PARALLELISM, edgeJoin.getParallelism());
	// input1 is the workset
	assertEquals(ShipStrategyType.FORWARD, edgeJoin.getInput1().getShipStrategy());
	// input2 is the edges
	assertEquals(ShipStrategyType.PARTITION_HASH, edgeJoin.getInput2().getShipStrategy());
	assertTrue(edgeJoin.getInput2().getTempMode().isCached());

	assertEquals(new FieldList(0), edgeJoin.getInput2().getShipStrategyKeys());
}
 
Example #8
Source File: GSATranslationTest.java    From flink with Apache License 2.0
@Test
public void testTranslation() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Long> bcGather = env.fromElements(1L);
	DataSet<Long> bcSum = env.fromElements(1L);
	DataSet<Long> bcApply = env.fromElements(1L);

	DataSet<Vertex<Long, Long>> result;

	// ------------ construct the test program ------------------

	DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(
		1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<>());

	Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);

	GSAConfiguration parameters = new GSAConfiguration();

	parameters.registerAggregator(AGGREGATOR_NAME, new LongSumAggregator());
	parameters.setName(ITERATION_NAME);
	parameters.setParallelism(ITERATION_parallelism);
	parameters.addBroadcastSetForGatherFunction(BC_SET_GATHER_NAME, bcGather);
	parameters.addBroadcastSetForSumFunction(BC_SET_SUM_NAME, bcSum);
	parameters.addBroadcastSetForApplyFunction(BC_SET_APLLY_NAME, bcApply);

	result = graph.runGatherSumApplyIteration(
		new GatherNeighborIds(), new SelectMinId(),
		new UpdateComponentId(), NUM_ITERATIONS, parameters).getVertices();

	result.output(new DiscardingOutputFormat<>());

	// ------------- validate the java program ----------------

	assertTrue(result instanceof DeltaIterationResultSet);

	DeltaIterationResultSet<?, ?> resultSet = (DeltaIterationResultSet<?, ?>) result;
	DeltaIteration<?, ?> iteration = resultSet.getIterationHead();

	// check the basic iteration properties
	assertEquals(NUM_ITERATIONS, resultSet.getMaxIterations());
	assertArrayEquals(new int[]{0}, resultSet.getKeyPositions());
	assertEquals(ITERATION_parallelism, iteration.getParallelism());
	assertEquals(ITERATION_NAME, iteration.getName());

	assertEquals(AGGREGATOR_NAME, iteration.getAggregators().getAllRegisteredAggregators().iterator().next().getName());

	// validate that the semantic properties are set as they should
	TwoInputUdfOperator<?, ?, ?, ?> solutionSetJoin = (TwoInputUdfOperator<?, ?, ?, ?>) resultSet.getNextWorkset();
	assertTrue(solutionSetJoin.getSemanticProperties().getForwardingTargetFields(0, 0).contains(0));
	assertTrue(solutionSetJoin.getSemanticProperties().getForwardingTargetFields(1, 0).contains(0));

	SingleInputUdfOperator<?, ?, ?> sumReduce = (SingleInputUdfOperator<?, ?, ?>) solutionSetJoin.getInput1();
	SingleInputUdfOperator<?, ?, ?> gatherMap = (SingleInputUdfOperator<?, ?, ?>) sumReduce.getInput();

	// validate that the broadcast sets are forwarded
	assertEquals(bcGather, gatherMap.getBroadcastSets().get(BC_SET_GATHER_NAME));
	assertEquals(bcSum, sumReduce.getBroadcastSets().get(BC_SET_SUM_NAME));
	assertEquals(bcApply, solutionSetJoin.getBroadcastSets().get(BC_SET_APLLY_NAME));
}
 
Example #9
Source File: GSACompilerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testGSACompiler() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(DEFAULT_PARALLELISM);

	// compose test program

	DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(
		1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<>());

	Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);

	DataSet<Vertex<Long, Long>> result = graph.runGatherSumApplyIteration(
		new GatherNeighborIds(), new SelectMinId(),
		new UpdateComponentId(), 100).getVertices();

	result.output(new DiscardingOutputFormat<>());

	Plan p = env.createProgramPlan("GSA Connected Components");
	OptimizedPlan op = compileNoStats(p);

	// check the sink
	SinkPlanNode sink = op.getDataSinks().iterator().next();
	assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
	assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
	assertEquals(PartitioningProperty.HASH_PARTITIONED, sink.getGlobalProperties().getPartitioning());

	// check the iteration
	WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
	assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());

	// check the solution set join and the delta
	PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
	assertTrue(ssDelta instanceof DualInputPlanNode); // this is only true if the update function preserves the partitioning

	DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
	assertEquals(DEFAULT_PARALLELISM, ssJoin.getParallelism());
	assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());
	assertEquals(new FieldList(0), ssJoin.getInput1().getShipStrategyKeys());

	// check the workset join
	SingleInputPlanNode sumReducer = (SingleInputPlanNode) ssJoin.getInput1().getSource();
	SingleInputPlanNode gatherMapper = (SingleInputPlanNode) sumReducer.getInput().getSource();
	DualInputPlanNode edgeJoin = (DualInputPlanNode) gatherMapper.getInput().getSource();
	assertEquals(DEFAULT_PARALLELISM, edgeJoin.getParallelism());
	// input1 is the workset
	assertEquals(ShipStrategyType.FORWARD, edgeJoin.getInput1().getShipStrategy());
	// input2 is the edges
	assertEquals(ShipStrategyType.PARTITION_HASH, edgeJoin.getInput2().getShipStrategy());
	assertTrue(edgeJoin.getInput2().getTempMode().isCached());

	assertEquals(new FieldList(0), edgeJoin.getInput2().getShipStrategyKeys());
}
 
Example #10
Source File: GSATranslationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testTranslation() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Long> bcGather = env.fromElements(1L);
	DataSet<Long> bcSum = env.fromElements(1L);
	DataSet<Long> bcApply = env.fromElements(1L);

	DataSet<Vertex<Long, Long>> result;

	// ------------ construct the test program ------------------

	DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(
		1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<>());

	Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);

	GSAConfiguration parameters = new GSAConfiguration();

	parameters.registerAggregator(AGGREGATOR_NAME, new LongSumAggregator());
	parameters.setName(ITERATION_NAME);
	parameters.setParallelism(ITERATION_parallelism);
	parameters.addBroadcastSetForGatherFunction(BC_SET_GATHER_NAME, bcGather);
	parameters.addBroadcastSetForSumFunction(BC_SET_SUM_NAME, bcSum);
	parameters.addBroadcastSetForApplyFunction(BC_SET_APLLY_NAME, bcApply);

	result = graph.runGatherSumApplyIteration(
		new GatherNeighborIds(), new SelectMinId(),
		new UpdateComponentId(), NUM_ITERATIONS, parameters).getVertices();

	result.output(new DiscardingOutputFormat<>());

	// ------------- validate the java program ----------------

	assertTrue(result instanceof DeltaIterationResultSet);

	DeltaIterationResultSet<?, ?> resultSet = (DeltaIterationResultSet<?, ?>) result;
	DeltaIteration<?, ?> iteration = resultSet.getIterationHead();

	// check the basic iteration properties
	assertEquals(NUM_ITERATIONS, resultSet.getMaxIterations());
	assertArrayEquals(new int[]{0}, resultSet.getKeyPositions());
	assertEquals(ITERATION_parallelism, iteration.getParallelism());
	assertEquals(ITERATION_NAME, iteration.getName());

	assertEquals(AGGREGATOR_NAME, iteration.getAggregators().getAllRegisteredAggregators().iterator().next().getName());

	// validate that the semantic properties are set as they should
	TwoInputUdfOperator<?, ?, ?, ?> solutionSetJoin = (TwoInputUdfOperator<?, ?, ?, ?>) resultSet.getNextWorkset();
	assertTrue(solutionSetJoin.getSemanticProperties().getForwardingTargetFields(0, 0).contains(0));
	assertTrue(solutionSetJoin.getSemanticProperties().getForwardingTargetFields(1, 0).contains(0));

	SingleInputUdfOperator<?, ?, ?> sumReduce = (SingleInputUdfOperator<?, ?, ?>) solutionSetJoin.getInput1();
	SingleInputUdfOperator<?, ?, ?> gatherMap = (SingleInputUdfOperator<?, ?, ?>) sumReduce.getInput();

	// validate that the broadcast sets are forwarded
	assertEquals(bcGather, gatherMap.getBroadcastSets().get(BC_SET_GATHER_NAME));
	assertEquals(bcSum, sumReduce.getBroadcastSets().get(BC_SET_SUM_NAME));
	assertEquals(bcApply, solutionSetJoin.getBroadcastSets().get(BC_SET_APLLY_NAME));
}
 
Example #11
Source File: Graph.java    From flink with Apache License 2.0
/**
 * Creates a graph from a DataSet of Tuple3 objects for edges.
 *
 * <p>Each Tuple3 will become one Edge, where the source ID will be the first field of the Tuple3,
 * the target ID will be the second field of the Tuple3
 * and the Edge value will be the third field of the Tuple3.
 *
 * <p>Vertices are created automatically and their values are initialized
 * by applying the provided vertexValueInitializer map function to the vertex IDs.
 *
 * @param edges a DataSet of Tuple3.
 * @param vertexValueInitializer the mapper function that initializes the vertex values.
 * It allows applying a map transformation on the vertex ID to produce an initial vertex value.
 * @param context the flink execution environment.
 * @return the newly created graph.
 */
public static <K, VV, EV> Graph<K, VV, EV> fromTupleDataSet(DataSet<Tuple3<K, K, EV>> edges,
		final MapFunction<K, VV> vertexValueInitializer, ExecutionEnvironment context) {

	DataSet<Edge<K, EV>> edgeDataSet = edges
		.map(new Tuple3ToEdgeMap<>())
			.name("Type conversion");

	return fromDataSet(edgeDataSet, vertexValueInitializer, context);
}
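A hedged usage sketch for this overload, initializing each vertex value from its ID (the edge tuples and the initializer are made up for illustration):

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

DataSet<Tuple3<Long, Long, Double>> edgeTuples = env.fromElements(
	new Tuple3<>(1L, 2L, 0.5),
	new Tuple3<>(2L, 3L, 1.5));

// Every vertex starts with its own ID as its value.
Graph<Long, Long, Double> graph = Graph.fromTupleDataSet(
	edgeTuples,
	new MapFunction<Long, Long>() {
		@Override
		public Long map(Long id) {
			return id;
		}
	},
	env);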
 
Example #12
Source File: Graph.java    From flink with Apache License 2.0
/**
 * Creates a graph from a DataSet of Tuple3 objects for edges.
 *
 * <p>The first field of the Tuple3 object will become the source ID,
 * the second field will become the target ID, and the third field will become
 * the edge value.
 *
 * <p>Vertices are created automatically and their values are set to NullValue.
 *
 * @param edges a DataSet of Tuple3 representing the edges.
 * @param context the flink execution environment.
 * @return the newly created graph.
 */
public static <K, EV> Graph<K, NullValue, EV> fromTupleDataSet(DataSet<Tuple3<K, K, EV>> edges,
		ExecutionEnvironment context) {

	DataSet<Edge<K, EV>> edgeDataSet = edges
		.map(new Tuple3ToEdgeMap<>())
			.name("Type conversion");

	return fromDataSet(edgeDataSet, context);
}
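A hedged usage sketch for the NullValue overload (the sample tuples are made up):

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

DataSet<Tuple3<String, String, Integer>> edgeTuples = env.fromElements(
	new Tuple3<>("a", "b", 7),
	new Tuple3<>("b", "c", 3));

// Vertex values are initialized to NullValue.getInstance().
Graph<String, NullValue, Integer> graph = Graph.fromTupleDataSet(edgeTuples, env);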
 
Example #13
Source File: Graph.java    From flink with Apache License 2.0
/**
 * Creates a graph from a DataSet of Tuple2 objects for vertices and
 * Tuple3 objects for edges.
 *
 * <p>The first field of the Tuple2 vertex object will become the vertex ID
 * and the second field will become the vertex value.
 * The first field of the Tuple3 object for edges will become the source ID,
 * the second field will become the target ID, and the third field will become
 * the edge value.
 *
 * @param vertices a DataSet of Tuple2 representing the vertices.
 * @param edges a DataSet of Tuple3 representing the edges.
 * @param context the flink execution environment.
 * @return the newly created graph.
 */
public static <K, VV, EV> Graph<K, VV, EV> fromTupleDataSet(DataSet<Tuple2<K, VV>> vertices,
		DataSet<Tuple3<K, K, EV>> edges, ExecutionEnvironment context) {

	DataSet<Vertex<K, VV>> vertexDataSet = vertices
		.map(new Tuple2ToVertexMap<>())
			.name("Type conversion");

	DataSet<Edge<K, EV>> edgeDataSet = edges
		.map(new Tuple3ToEdgeMap<>())
			.name("Type conversion");

	return fromDataSet(vertexDataSet, edgeDataSet, context);
}
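A hedged usage sketch for the vertices-plus-edges overload (the sample data is made up):

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

DataSet<Tuple2<Long, String>> vertexTuples = env.fromElements(
	new Tuple2<>(1L, "alice"),
	new Tuple2<>(2L, "bob"));

DataSet<Tuple3<Long, Long, Double>> edgeTuples = env.fromElements(
	new Tuple3<>(1L, 2L, 0.5));

// Tuple2s become Vertex<Long, String> records, Tuple3s become Edge<Long, Double> records.
Graph<Long, String, Double> graph = Graph.fromTupleDataSet(vertexTuples, edgeTuples, env);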
 
Example #14
Source File: Graph.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a graph from a DataSet of Tuple3 objects for edges.
 *
 * <p>Each Tuple3 will become one Edge, where the source ID will be the first field of the Tuple3,
 * the target ID will be the second field of the Tuple3
 * and the Edge value will be the third field of the Tuple3.
 *
 * <p>Vertices are created automatically and their values are initialized
 * by applying the provided vertexValueInitializer map function to the vertex IDs.
 *
 * @param edges a DataSet of Tuple3.
 * @param vertexValueInitializer the mapper function that initializes the vertex values.
 * It allows applying a map transformation on the vertex ID to produce an initial vertex value.
 * @param context the flink execution environment.
 * @return the newly created graph.
 */
public static <K, VV, EV> Graph<K, VV, EV> fromTupleDataSet(DataSet<Tuple3<K, K, EV>> edges,
		final MapFunction<K, VV> vertexValueInitializer, ExecutionEnvironment context) {

	DataSet<Edge<K, EV>> edgeDataSet = edges
		.map(new Tuple3ToEdgeMap<>())
			.name("Type conversion");

	return fromDataSet(edgeDataSet, vertexValueInitializer, context);
}
 
Example #15
Source File: Graph.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a graph from a DataSet of Tuple3 objects for edges.
 *
 * <p>The first field of the Tuple3 object will become the source ID,
 * the second field will become the target ID, and the third field will become
 * the edge value.
 *
 * <p>Vertices are created automatically and their values are set to NullValue.
 *
 * @param edges a DataSet of Tuple3 representing the edges.
 * @param context the flink execution environment.
 * @return the newly created graph.
 */
public static <K, EV> Graph<K, NullValue, EV> fromTupleDataSet(DataSet<Tuple3<K, K, EV>> edges,
		ExecutionEnvironment context) {

	DataSet<Edge<K, EV>> edgeDataSet = edges
		.map(new Tuple3ToEdgeMap<>())
			.name("Type conversion");

	return fromDataSet(edgeDataSet, context);
}
 
Example #16
Source File: Graph.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a graph from a DataSet of Tuple2 objects for vertices and
 * Tuple3 objects for edges.
 *
 * <p>The first field of the Tuple2 vertex object will become the vertex ID
 * and the second field will become the vertex value.
 * The first field of the Tuple3 object for edges will become the source ID,
 * the second field will become the target ID, and the third field will become
 * the edge value.
 *
 * @param vertices a DataSet of Tuple2 representing the vertices.
 * @param edges a DataSet of Tuple3 representing the edges.
 * @param context the flink execution environment.
 * @return the newly created graph.
 */
public static <K, VV, EV> Graph<K, VV, EV> fromTupleDataSet(DataSet<Tuple2<K, VV>> vertices,
		DataSet<Tuple3<K, K, EV>> edges, ExecutionEnvironment context) {

	DataSet<Vertex<K, VV>> vertexDataSet = vertices
		.map(new Tuple2ToVertexMap<>())
			.name("Type conversion");

	DataSet<Edge<K, EV>> edgeDataSet = edges
		.map(new Tuple3ToEdgeMap<>())
			.name("Type conversion");

	return fromDataSet(vertexDataSet, edgeDataSet, context);
}