Java Code Examples for org.apache.flink.optimizer.dataproperties.PartitioningProperty#HASH_PARTITIONED

The following examples show how to use org.apache.flink.optimizer.dataproperties.PartitioningProperty#HASH_PARTITIONED. The examples are taken from open-source projects; you can go to the original project or source file by following the links above each example.
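Before the examples, here is a minimal, self-contained sketch of how PartitioningProperty.HASH_PARTITIONED typically appears: a GlobalProperties object reports the constant from getPartitioning() once hash partitioning has been set on it. The sketch only relies on calls that also appear in the examples below (setHashPartitioned, getPartitioning, getPartitioningFields); the class name and the FieldList varargs constructor are assumptions made for illustration, not code from the listed projects.

import org.apache.flink.api.common.operators.util.FieldList;
import org.apache.flink.optimizer.dataproperties.GlobalProperties;
import org.apache.flink.optimizer.dataproperties.PartitioningProperty;

// Hypothetical, illustrative class; not part of Flink.
public class HashPartitionedSketch {

	public static void main(String[] args) {
		GlobalProperties props = new GlobalProperties();

		// Declare a hash partitioning on fields 0 and 2
		// (FieldList varargs constructor assumed here for brevity).
		props.setHashPartitioned(new FieldList(0, 2));

		// getPartitioning() now reports HASH_PARTITIONED, which is exactly
		// the condition the descriptors in the examples below test for.
		boolean isHashPartitioned =
				props.getPartitioning() == PartitioningProperty.HASH_PARTITIONED;

		System.out.println("hash partitioned: " + isHashPartitioned
				+ ", fields: " + props.getPartitioningFields());
	}
}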
Example 1
Source File: CoGroupDescriptor.java    From Flink-CEPplus with Apache License 2.0
@Override
public boolean areCompatible(RequestedGlobalProperties requested1, RequestedGlobalProperties requested2,
		GlobalProperties produced1, GlobalProperties produced2)
{

	if(produced1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
			produced2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

		// both are hash partitioned, check that partitioning fields are equivalently chosen
		return checkEquivalentFieldPositionsInKeyFields(
				produced1.getPartitioningFields(), produced2.getPartitioningFields());

	}
	else if(produced1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
			produced2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
			produced1.getDataDistribution() != null && produced2.getDataDistribution() != null) {

		return produced1.getPartitioningFields().size() == produced2.getPartitioningFields().size() &&
				checkSameOrdering(produced1, produced2, produced1.getPartitioningFields().size()) &&
				produced1.getDataDistribution().equals(produced2.getDataDistribution());
		
	}
	else if(produced1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
			produced2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) {

		// both use a custom partitioner. Check that both keys are exactly as specified and that both use the same partitioner
		return produced1.getPartitioningFields().isExactMatch(this.keys1) &&
				produced2.getPartitioningFields().isExactMatch(this.keys2) &&
				produced1.getCustomPartitioner() != null && produced2.getCustomPartitioner() != null &&
				produced1.getCustomPartitioner().equals(produced2.getCustomPartitioner());

	}
	else {

		// no other partitioning valid, incl. ANY_PARTITIONING.
		//   For co-groups we must ensure that both sides are exactly identically partitioned, ANY is not good enough.
		return false;
	}

}
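The first branch above defers to checkEquivalentFieldPositionsInKeyFields to decide whether the two hash partitionings are "equivalently chosen". As a rough standalone sketch of that idea (a hypothetical helper, not the Flink implementation; the FieldList varargs constructor is also an assumption), the check requires that the i-th partitioning field of each input occupies the same position within that input's key list:

import org.apache.flink.api.common.operators.util.FieldList;

// Hypothetical standalone helper mirroring the idea of
// checkEquivalentFieldPositionsInKeyFields; it is not the Flink code.
public class EquivalentPartitioningSketch {

	static boolean equivalentlyPartitioned(FieldList pFields1, FieldList keys1,
			FieldList pFields2, FieldList keys2) {
		if (pFields1.size() != pFields2.size()) {
			return false;
		}
		for (int i = 0; i < pFields1.size(); i++) {
			int pos1 = positionOf(pFields1.get(i), keys1);
			int pos2 = positionOf(pFields2.get(i), keys2);
			// both fields must be key fields and sit at the same key position
			if (pos1 < 0 || pos1 != pos2) {
				return false;
			}
		}
		return true;
	}

	static int positionOf(int field, FieldList keys) {
		for (int i = 0; i < keys.size(); i++) {
			if (keys.get(i) == field) {
				return i;
			}
		}
		return -1;
	}

	public static void main(String[] args) {
		// input 1: partitioned on field 0, keyed on (0, 1)
		// input 2: partitioned on field 3, keyed on (3, 4)
		// -> equivalent, because both partitioning fields sit at key position 0
		System.out.println(equivalentlyPartitioned(
				new FieldList(0), new FieldList(0, 1),
				new FieldList(3), new FieldList(3, 4)));
	}
}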
 
Example 2
Source File: CoGroupDescriptor.java    From flink with Apache License 2.0
@Override
public boolean areCompatible(RequestedGlobalProperties requested1, RequestedGlobalProperties requested2,
		GlobalProperties produced1, GlobalProperties produced2)
{

	if(produced1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
			produced2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

		// both are hash partitioned, check that partitioning fields are equivalently chosen
		return checkEquivalentFieldPositionsInKeyFields(
				produced1.getPartitioningFields(), produced2.getPartitioningFields());

	}
	else if(produced1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
			produced2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
			produced1.getDataDistribution() != null && produced2.getDataDistribution() != null) {

		return produced1.getPartitioningFields().size() == produced2.getPartitioningFields().size() &&
				checkSameOrdering(produced1, produced2, produced1.getPartitioningFields().size()) &&
				produced1.getDataDistribution().equals(produced2.getDataDistribution());
		
	}
	else if(produced1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
			produced2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) {

		// both use a custom partitioner. Check that both keys are exactly as specified and that both use the same partitioner
		return produced1.getPartitioningFields().isExactMatch(this.keys1) &&
				produced2.getPartitioningFields().isExactMatch(this.keys2) &&
				produced1.getCustomPartitioner() != null && produced2.getCustomPartitioner() != null &&
				produced1.getCustomPartitioner().equals(produced2.getCustomPartitioner());

	}
	else {

		// no other partitioning valid, incl. ANY_PARTITIONING.
		//   For co-groups we must ensure that both sides are exactly identically partitioned, ANY is not good enough.
		return false;
	}

}
 
Example 3
Source File: CoGroupDescriptor.java    From flink with Apache License 2.0
@Override
public boolean areCompatible(RequestedGlobalProperties requested1, RequestedGlobalProperties requested2,
		GlobalProperties produced1, GlobalProperties produced2)
{

	if(produced1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
			produced2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

		// both are hash partitioned, check that partitioning fields are equivalently chosen
		return checkEquivalentFieldPositionsInKeyFields(
				produced1.getPartitioningFields(), produced2.getPartitioningFields());

	}
	else if(produced1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
			produced2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
			produced1.getDataDistribution() != null && produced2.getDataDistribution() != null) {

		return produced1.getPartitioningFields().size() == produced2.getPartitioningFields().size() &&
				checkSameOrdering(produced1, produced2, produced1.getPartitioningFields().size()) &&
				produced1.getDataDistribution().equals(produced2.getDataDistribution());
		
	}
	else if(produced1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
			produced2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) {

		// both use a custom partitioner. Check that both keys are exactly as specified and that both use the same partitioner
		return produced1.getPartitioningFields().isExactMatch(this.keys1) &&
				produced2.getPartitioningFields().isExactMatch(this.keys2) &&
				produced1.getCustomPartitioner() != null && produced2.getCustomPartitioner() != null &&
				produced1.getCustomPartitioner().equals(produced2.getCustomPartitioner());

	}
	else {

		// no other partitioning valid, incl. ANY_PARTITIONING.
		//   For co-groups we must ensure that both sides are exactly identically partitioned, ANY is not good enough.
		return false;
	}

}
 
Example 4
Source File: AbstractJoinDescriptor.java    From Flink-CEPplus with Apache License 2.0
@Override
public boolean areCompatible(RequestedGlobalProperties requested1, RequestedGlobalProperties requested2,
		GlobalProperties produced1, GlobalProperties produced2)
{
	if (requested1.getPartitioning().isPartitionedOnKey() && requested2.getPartitioning().isPartitionedOnKey()) {

		if(produced1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				produced2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// both are hash partitioned, check that partitioning fields are equivalently chosen
			return checkEquivalentFieldPositionsInKeyFields(
					produced1.getPartitioningFields(), produced2.getPartitioningFields());

		}
		else if(produced1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				produced2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				produced1.getDataDistribution() != null && produced2.getDataDistribution() != null) {

			return produced1.getPartitioningFields().size() == produced2.getPartitioningFields().size() &&
					checkSameOrdering(produced1, produced2, produced1.getPartitioningFields().size()) &&
					produced1.getDataDistribution().equals(produced2.getDataDistribution());

		}
		else if(produced1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				produced2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) {

			// both use a custom partitioner. Check that both keys are exactly as specified and that both use the same partitioner
			return produced1.getPartitioningFields().isExactMatch(this.keys1) &&
					produced2.getPartitioningFields().isExactMatch(this.keys2) &&
					produced1.getCustomPartitioner() != null && produced2.getCustomPartitioner() != null &&
					produced1.getCustomPartitioner().equals(produced2.getCustomPartitioner());

		}
		else {

			// no other partitioning valid, incl. ANY_PARTITIONING.
			//   For joins we must ensure that both sides are exactly identically partitioned, ANY is not good enough.
			return false;
		}

	} else {
		return true;
	}

}
 
Example 5
Source File: BinaryUnionOpDescriptor.java    From Flink-CEPplus with Apache License 2.0
@Override
public GlobalProperties computeGlobalProperties(GlobalProperties in1, GlobalProperties in2) {
	GlobalProperties newProps = new GlobalProperties();
	
	if (in1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
		in2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
		in1.getPartitioningFields().equals(in2.getPartitioningFields())) {
		newProps.setHashPartitioned(in1.getPartitioningFields());
	}
	else if (in1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				in2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				in1.getPartitioningOrdering().equals(in2.getPartitioningOrdering()) &&
				(
					in1.getDataDistribution() == null && in2.getDataDistribution() == null ||
					in1.getDataDistribution() != null && in1.getDataDistribution().equals(in2.getDataDistribution())
				)
			) {
		if (in1.getDataDistribution() == null) {
			newProps.setRangePartitioned(in1.getPartitioningOrdering());
		}
		else {
			newProps.setRangePartitioned(in1.getPartitioningOrdering(), in1.getDataDistribution());
		}
	}
	else if (in1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				in2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				in1.getPartitioningFields().equals(in2.getPartitioningFields()) &&
				in1.getCustomPartitioner().equals(in2.getCustomPartitioner())) {
		newProps.setCustomPartitioned(in1.getPartitioningFields(), in1.getCustomPartitioner());
	}
	else if (in1.getPartitioning() == PartitioningProperty.FORCED_REBALANCED &&
				in2.getPartitioning() == PartitioningProperty.FORCED_REBALANCED) {
		newProps.setForcedRebalanced();
	}
	else if (in1.getPartitioning() == PartitioningProperty.FULL_REPLICATION &&
		in2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) {
		newProps.setFullyReplicated();
	}

	return newProps;
}
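Note that this union descriptor's hash branch is stricter than the positional-equivalence check used by the join and co-group descriptors: it requires the two partitioning field lists to be equal. A compact sketch of that first branch in isolation is shown below; the class name is hypothetical and only API calls that appear in the example above are used.

import org.apache.flink.api.common.operators.util.FieldList;
import org.apache.flink.optimizer.dataproperties.GlobalProperties;
import org.apache.flink.optimizer.dataproperties.PartitioningProperty;

// Hypothetical illustration of the first branch of computeGlobalProperties above.
public class UnionHashPropertySketch {

	public static void main(String[] args) {
		GlobalProperties in1 = new GlobalProperties();
		in1.setHashPartitioned(new FieldList(1));

		GlobalProperties in2 = new GlobalProperties();
		in2.setHashPartitioned(new FieldList(1));

		GlobalProperties unionProps = new GlobalProperties();
		if (in1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED
				&& in2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED
				&& in1.getPartitioningFields().equals(in2.getPartitioningFields())) {
			// the union preserves the hash partitioning common to both inputs
			unionProps.setHashPartitioned(in1.getPartitioningFields());
		}

		System.out.println(unionProps.getPartitioning());
	}
}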
 
Example 6
Source File: PartitioningReusageTest.java    From Flink-CEPplus with Apache License 2.0
private void checkValidJoinInputProperties(DualInputPlanNode join) {

		GlobalProperties inProps1 = join.getInput1().getGlobalProperties();
		GlobalProperties inProps2 = join.getInput2().getGlobalProperties();

		if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// check that both inputs are hash partitioned on the same fields
			FieldList pFields1 = inProps1.getPartitioningFields();
			FieldList pFields2 = inProps2.getPartitioningFields();

			assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2,
					pFields1.size() == pFields2.size());

			FieldList reqPFields1 = join.getKeysForInput1();
			FieldList reqPFields2 = join.getKeysForInput2();

			for(int i=0; i<pFields1.size(); i++) {

				// get fields
				int f1 = pFields1.get(i);
				int f2 = pFields2.get(i);

				// check that field positions in original key field list are identical
				int pos1 = getPosInFieldList(f1, reqPFields1);
				int pos2 = getPosInFieldList(f2, reqPFields2);

				if(pos1 < 0) {
					fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1);
				}
				if(pos2 < 0) {
					fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2);
				}
				if(pos1 != pos2) {
					fail("Inputs are not partitioned on the same key fields");
				}
			}

		}
		else if(inProps1.getPartitioning() == PartitioningProperty.FULL_REPLICATION &&
				inProps2.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) {
			// we are good. No need to check for fields
		}
		else if(inProps1.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) {
			// we are good. No need to check for fields
		}
		else {
			throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned join inputs");
		}

	}
 
Example 7
Source File: PartitioningReusageTest.java    From Flink-CEPplus with Apache License 2.0
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) {

		GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties();
		GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties();

		if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// check that both inputs are hash partitioned on the same fields
			FieldList pFields1 = inProps1.getPartitioningFields();
			FieldList pFields2 = inProps2.getPartitioningFields();

			assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2,
					pFields1.size() == pFields2.size());

			FieldList reqPFields1 = coGroup.getKeysForInput1();
			FieldList reqPFields2 = coGroup.getKeysForInput2();

			for(int i=0; i<pFields1.size(); i++) {

				// get fields
				int f1 = pFields1.get(i);
				int f2 = pFields2.get(i);

				// check that field positions in original key field list are identical
				int pos1 = getPosInFieldList(f1, reqPFields1);
				int pos2 = getPosInFieldList(f2, reqPFields2);

				if(pos1 < 0) {
					fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1);
				}
				if(pos2 < 0) {
					fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2);
				}
				if(pos1 != pos2) {
					fail("Inputs are not partitioned on the same key fields");
				}
			}

		}
		else {
			throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroup inputs");
		}

	}
 
Example 8
Source File: AbstractJoinDescriptor.java    From flink with Apache License 2.0
@Override
public boolean areCompatible(RequestedGlobalProperties requested1, RequestedGlobalProperties requested2,
		GlobalProperties produced1, GlobalProperties produced2)
{
	if (requested1.getPartitioning().isPartitionedOnKey() && requested2.getPartitioning().isPartitionedOnKey()) {

		if(produced1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				produced2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// both are hash partitioned, check that partitioning fields are equivalently chosen
			return checkEquivalentFieldPositionsInKeyFields(
					produced1.getPartitioningFields(), produced2.getPartitioningFields());

		}
		else if(produced1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				produced2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				produced1.getDataDistribution() != null && produced2.getDataDistribution() != null) {

			return produced1.getPartitioningFields().size() == produced2.getPartitioningFields().size() &&
					checkSameOrdering(produced1, produced2, produced1.getPartitioningFields().size()) &&
					produced1.getDataDistribution().equals(produced2.getDataDistribution());

		}
		else if(produced1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				produced2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) {

			// both use a custom partitioner. Check that both keys are exactly as specified and that both use the same partitioner
			return produced1.getPartitioningFields().isExactMatch(this.keys1) &&
					produced2.getPartitioningFields().isExactMatch(this.keys2) &&
					produced1.getCustomPartitioner() != null && produced2.getCustomPartitioner() != null &&
					produced1.getCustomPartitioner().equals(produced2.getCustomPartitioner());

		}
		else {

			// no other partitioning valid, incl. ANY_PARTITIONING.
			//   For joins we must ensure that both sides are exactly identically partitioned, ANY is not good enough.
			return false;
		}

	} else {
		return true;
	}

}
 
Example 9
Source File: BinaryUnionOpDescriptor.java    From flink with Apache License 2.0
@Override
public GlobalProperties computeGlobalProperties(GlobalProperties in1, GlobalProperties in2) {
	GlobalProperties newProps = new GlobalProperties();
	
	if (in1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
		in2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
		in1.getPartitioningFields().equals(in2.getPartitioningFields())) {
		newProps.setHashPartitioned(in1.getPartitioningFields());
	}
	else if (in1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				in2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				in1.getPartitioningOrdering().equals(in2.getPartitioningOrdering()) &&
				(
					in1.getDataDistribution() == null && in2.getDataDistribution() == null ||
					in1.getDataDistribution() != null && in1.getDataDistribution().equals(in2.getDataDistribution())
				)
			) {
		if (in1.getDataDistribution() == null) {
			newProps.setRangePartitioned(in1.getPartitioningOrdering());
		}
		else {
			newProps.setRangePartitioned(in1.getPartitioningOrdering(), in1.getDataDistribution());
		}
	}
	else if (in1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				in2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				in1.getPartitioningFields().equals(in2.getPartitioningFields()) &&
				in1.getCustomPartitioner().equals(in2.getCustomPartitioner())) {
		newProps.setCustomPartitioned(in1.getPartitioningFields(), in1.getCustomPartitioner());
	}
	else if (in1.getPartitioning() == PartitioningProperty.FORCED_REBALANCED &&
				in2.getPartitioning() == PartitioningProperty.FORCED_REBALANCED) {
		newProps.setForcedRebalanced();
	}
	else if (in1.getPartitioning() == PartitioningProperty.FULL_REPLICATION &&
		in2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) {
		newProps.setFullyReplicated();
	}

	return newProps;
}
 
Example 10
Source File: PartitioningReusageTest.java    From flink with Apache License 2.0
private void checkValidJoinInputProperties(DualInputPlanNode join) {

		GlobalProperties inProps1 = join.getInput1().getGlobalProperties();
		GlobalProperties inProps2 = join.getInput2().getGlobalProperties();

		if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// check that both inputs are hash partitioned on the same fields
			FieldList pFields1 = inProps1.getPartitioningFields();
			FieldList pFields2 = inProps2.getPartitioningFields();

			assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2,
					pFields1.size() == pFields2.size());

			FieldList reqPFields1 = join.getKeysForInput1();
			FieldList reqPFields2 = join.getKeysForInput2();

			for(int i=0; i<pFields1.size(); i++) {

				// get fields
				int f1 = pFields1.get(i);
				int f2 = pFields2.get(i);

				// check that field positions in original key field list are identical
				int pos1 = getPosInFieldList(f1, reqPFields1);
				int pos2 = getPosInFieldList(f2, reqPFields2);

				if(pos1 < 0) {
					fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1);
				}
				if(pos2 < 0) {
					fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2);
				}
				if(pos1 != pos2) {
					fail("Inputs are not partitioned on the same key fields");
				}
			}

		}
		else if(inProps1.getPartitioning() == PartitioningProperty.FULL_REPLICATION &&
				inProps2.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) {
			// we are good. No need to check for fields
		}
		else if(inProps1.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) {
			// we are good. No need to check for fields
		}
		else {
			throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned join inputs");
		}

	}
 
Example 11
Source File: PartitioningReusageTest.java    From flink with Apache License 2.0
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) {

		GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties();
		GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties();

		if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// check that both inputs are hash partitioned on the same fields
			FieldList pFields1 = inProps1.getPartitioningFields();
			FieldList pFields2 = inProps2.getPartitioningFields();

			assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2,
					pFields1.size() == pFields2.size());

			FieldList reqPFields1 = coGroup.getKeysForInput1();
			FieldList reqPFields2 = coGroup.getKeysForInput2();

			for(int i=0; i<pFields1.size(); i++) {

				// get fields
				int f1 = pFields1.get(i);
				int f2 = pFields2.get(i);

				// check that field positions in original key field list are identical
				int pos1 = getPosInFieldList(f1, reqPFields1);
				int pos2 = getPosInFieldList(f2, reqPFields2);

				if(pos1 < 0) {
					fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1);
				}
				if(pos2 < 0) {
					fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2);
				}
				if(pos1 != pos2) {
					fail("Inputs are not partitioned on the same key fields");
				}
			}

		}
		else {
			throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroup inputs");
		}

	}
 
Example 12
Source File: AbstractJoinDescriptor.java    From flink with Apache License 2.0
@Override
public boolean areCompatible(RequestedGlobalProperties requested1, RequestedGlobalProperties requested2,
		GlobalProperties produced1, GlobalProperties produced2)
{
	if (requested1.getPartitioning().isPartitionedOnKey() && requested2.getPartitioning().isPartitionedOnKey()) {

		if(produced1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				produced2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// both are hash partitioned, check that partitioning fields are equivalently chosen
			return checkEquivalentFieldPositionsInKeyFields(
					produced1.getPartitioningFields(), produced2.getPartitioningFields());

		}
		else if(produced1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				produced2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				produced1.getDataDistribution() != null && produced2.getDataDistribution() != null) {

			return produced1.getPartitioningFields().size() == produced2.getPartitioningFields().size() &&
					checkSameOrdering(produced1, produced2, produced1.getPartitioningFields().size()) &&
					produced1.getDataDistribution().equals(produced2.getDataDistribution());

		}
		else if(produced1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				produced2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) {

			// both use a custom partitioner. Check that both keys are exactly as specified and that both use the same partitioner
			return produced1.getPartitioningFields().isExactMatch(this.keys1) &&
					produced2.getPartitioningFields().isExactMatch(this.keys2) &&
					produced1.getCustomPartitioner() != null && produced2.getCustomPartitioner() != null &&
					produced1.getCustomPartitioner().equals(produced2.getCustomPartitioner());

		}
		else {

			// no other partitioning valid, incl. ANY_PARTITIONING.
			//   For joins we must ensure that both sides are exactly identically partitioned, ANY is not good enough.
			return false;
		}

	} else {
		return true;
	}

}
 
Example 13
Source File: BinaryUnionOpDescriptor.java    From flink with Apache License 2.0
@Override
public GlobalProperties computeGlobalProperties(GlobalProperties in1, GlobalProperties in2) {
	GlobalProperties newProps = new GlobalProperties();
	
	if (in1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
		in2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
		in1.getPartitioningFields().equals(in2.getPartitioningFields())) {
		newProps.setHashPartitioned(in1.getPartitioningFields());
	}
	else if (in1.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				in2.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED &&
				in1.getPartitioningOrdering().equals(in2.getPartitioningOrdering()) &&
				(
					in1.getDataDistribution() == null && in2.getDataDistribution() == null ||
					in1.getDataDistribution() != null && in1.getDataDistribution().equals(in2.getDataDistribution())
				)
			) {
		if (in1.getDataDistribution() == null) {
			newProps.setRangePartitioned(in1.getPartitioningOrdering());
		}
		else {
			newProps.setRangePartitioned(in1.getPartitioningOrdering(), in1.getDataDistribution());
		}
	}
	else if (in1.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				in2.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING &&
				in1.getPartitioningFields().equals(in2.getPartitioningFields()) &&
				in1.getCustomPartitioner().equals(in2.getCustomPartitioner())) {
		newProps.setCustomPartitioned(in1.getPartitioningFields(), in1.getCustomPartitioner());
	}
	else if (in1.getPartitioning() == PartitioningProperty.FORCED_REBALANCED &&
				in2.getPartitioning() == PartitioningProperty.FORCED_REBALANCED) {
		newProps.setForcedRebalanced();
	}
	else if (in1.getPartitioning() == PartitioningProperty.FULL_REPLICATION &&
		in2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) {
		newProps.setFullyReplicated();
	}

	return newProps;
}
 
Example 14
Source File: PartitioningReusageTest.java    From flink with Apache License 2.0
private void checkValidJoinInputProperties(DualInputPlanNode join) {

		GlobalProperties inProps1 = join.getInput1().getGlobalProperties();
		GlobalProperties inProps2 = join.getInput2().getGlobalProperties();

		if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// check that both inputs are hash partitioned on the same fields
			FieldList pFields1 = inProps1.getPartitioningFields();
			FieldList pFields2 = inProps2.getPartitioningFields();

			assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2,
					pFields1.size() == pFields2.size());

			FieldList reqPFields1 = join.getKeysForInput1();
			FieldList reqPFields2 = join.getKeysForInput2();

			for(int i=0; i<pFields1.size(); i++) {

				// get fields
				int f1 = pFields1.get(i);
				int f2 = pFields2.get(i);

				// check that field positions in original key field list are identical
				int pos1 = getPosInFieldList(f1, reqPFields1);
				int pos2 = getPosInFieldList(f2, reqPFields2);

				if(pos1 < 0) {
					fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1);
				}
				if(pos2 < 0) {
					fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2);
				}
				if(pos1 != pos2) {
					fail("Inputs are not partitioned on the same key fields");
				}
			}

		}
		else if(inProps1.getPartitioning() == PartitioningProperty.FULL_REPLICATION &&
				inProps2.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) {
			// we are good. No need to check for fields
		}
		else if(inProps1.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) {
			// we are good. No need to check for fields
		}
		else {
			throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned join inputs");
		}

	}
 
Example 15
Source File: PartitioningReusageTest.java    From flink with Apache License 2.0
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) {

		GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties();
		GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties();

		if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
				inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {

			// check that both inputs are hash partitioned on the same fields
			FieldList pFields1 = inProps1.getPartitioningFields();
			FieldList pFields2 = inProps2.getPartitioningFields();

			assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2,
					pFields1.size() == pFields2.size());

			FieldList reqPFields1 = coGroup.getKeysForInput1();
			FieldList reqPFields2 = coGroup.getKeysForInput2();

			for(int i=0; i<pFields1.size(); i++) {

				// get fields
				int f1 = pFields1.get(i);
				int f2 = pFields2.get(i);

				// check that field positions in original key field list are identical
				int pos1 = getPosInFieldList(f1, reqPFields1);
				int pos2 = getPosInFieldList(f2, reqPFields2);

				if(pos1 < 0) {
					fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1);
				}
				if(pos2 < 0) {
					fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2);
				}
				if(pos1 != pos2) {
					fail("Inputs are not partitioned on the same key fields");
				}
			}

		}
		else {
			throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroup inputs");
		}

	}