org.apache.flink.runtime.state.KeyGroupRangeAssignment Java Examples

The following examples show how to use org.apache.flink.runtime.state.KeyGroupRangeAssignment. They are collected from several open-source projects; the source file, originating project, and license are noted above each example.
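Before the individual examples, the following minimal sketch (a standalone illustration, not taken from any of the projects below) shows the core mappings that most of the examples rely on: a key is assigned to a key group bounded by the maximum parallelism, and a key group is mapped to the index of a parallel operator instance. The method names are the ones used in the examples on this page; the wrapper class, the key, and the parallelism values are illustrative only.

import org.apache.flink.runtime.state.KeyGroupRangeAssignment;

public class KeyGroupAssignmentSketch {

	public static void main(String[] args) {
		int parallelism = 4;

		// Default max parallelism derived from the operator parallelism (used when none is configured).
		int maxParallelism = KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism);

		Object key = "user-42"; // illustrative key

		// Key -> key group, in the range [0, maxParallelism).
		int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism);

		// Key group -> parallel operator (subtask) index, in the range [0, parallelism).
		int operatorIndex = KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(
				maxParallelism, parallelism, keyGroup);

		// Convenience method combining both steps.
		int sameIndex = KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, parallelism);

		System.out.printf("key group = %d, operator index = %d / %d%n", keyGroup, operatorIndex, sameIndex);
	}
}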
Example #1
Source File: KvStateClientProxyHandler.java    From flink with Apache License 2.0
private CompletableFuture<KvStateResponse> getState(
		final KvStateRequest request,
		final boolean forceUpdate) {

	return getKvStateLookupInfo(request.getJobId(), request.getStateName(), forceUpdate)
			.thenComposeAsync((Function<KvStateLocation, CompletableFuture<KvStateResponse>>) location -> {
				final int keyGroupIndex = KeyGroupRangeAssignment.computeKeyGroupForKeyHash(
						request.getKeyHashCode(), location.getNumKeyGroups());

				final InetSocketAddress serverAddress = location.getKvStateServerAddress(keyGroupIndex);
				if (serverAddress == null) {
					return FutureUtils.completedExceptionally(new UnknownKvStateKeyGroupLocationException(getServerName()));
				} else {
					// Query server
					final KvStateID kvStateId = location.getKvStateID(keyGroupIndex);
					final KvStateInternalRequest internalRequest = new KvStateInternalRequest(
							kvStateId, request.getSerializedKeyAndNamespace());
					return kvStateClient.sendRequest(serverAddress, internalRequest);
				}
			}, queryExecutor);
}
 
Example #2
Source File: ResultPartitionDeploymentDescriptor.java    From Flink-CEPplus with Apache License 2.0
public ResultPartitionDeploymentDescriptor(
		IntermediateDataSetID resultId,
		IntermediateResultPartitionID partitionId,
		ResultPartitionType partitionType,
		int numberOfSubpartitions,
		int maxParallelism,
		boolean lazyScheduling) {

	this.resultId = checkNotNull(resultId);
	this.partitionId = checkNotNull(partitionId);
	this.partitionType = checkNotNull(partitionType);

	KeyGroupRangeAssignment.checkParallelismPreconditions(maxParallelism);
	checkArgument(numberOfSubpartitions >= 1);
	this.numberOfSubpartitions = numberOfSubpartitions;
	this.maxParallelism = maxParallelism;
	this.sendScheduleOrUpdateConsumersMessage = lazyScheduling;
}
 
Example #3
Source File: KvStateClientProxyHandler.java    From Flink-CEPplus with Apache License 2.0
private CompletableFuture<KvStateResponse> getState(
		final KvStateRequest request,
		final boolean forceUpdate) {

	return getKvStateLookupInfo(request.getJobId(), request.getStateName(), forceUpdate)
			.thenComposeAsync((Function<KvStateLocation, CompletableFuture<KvStateResponse>>) location -> {
				final int keyGroupIndex = KeyGroupRangeAssignment.computeKeyGroupForKeyHash(
						request.getKeyHashCode(), location.getNumKeyGroups());

				final InetSocketAddress serverAddress = location.getKvStateServerAddress(keyGroupIndex);
				if (serverAddress == null) {
					return FutureUtils.completedExceptionally(new UnknownKvStateKeyGroupLocationException(getServerName()));
				} else {
					// Query server
					final KvStateID kvStateId = location.getKvStateID(keyGroupIndex);
					final KvStateInternalRequest internalRequest = new KvStateInternalRequest(
							kvStateId, request.getSerializedKeyAndNamespace());
					return kvStateClient.sendRequest(serverAddress, internalRequest);
				}
			}, queryExecutor);
}
 
Example #4
Source File: AbstractRocksDBState.java    From flink with Apache License 2.0
@Override
public byte[] getSerializedValue(
		final byte[] serializedKeyAndNamespace,
		final TypeSerializer<K> safeKeySerializer,
		final TypeSerializer<N> safeNamespaceSerializer,
		final TypeSerializer<V> safeValueSerializer) throws Exception {

	//TODO make KvStateSerializer key-group aware to save this round trip and key-group computation
	Tuple2<K, N> keyAndNamespace = KvStateSerializer.deserializeKeyAndNamespace(
			serializedKeyAndNamespace, safeKeySerializer, safeNamespaceSerializer);

	int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(keyAndNamespace.f0, backend.getNumberOfKeyGroups());

	RocksDBSerializedCompositeKeyBuilder<K> keyBuilder =
					new RocksDBSerializedCompositeKeyBuilder<>(
						safeKeySerializer,
						backend.getKeyGroupPrefixBytes(),
						32
					);
	keyBuilder.setKeyAndKeyGroup(keyAndNamespace.f0, keyGroup);
	byte[] key = keyBuilder.buildCompositeKeyNamespace(keyAndNamespace.f1, namespaceSerializer);
	return backend.db.get(columnFamily, key);
}
 
Example #5
Source File: KvStateClientProxyHandler.java    From flink with Apache License 2.0
private CompletableFuture<KvStateResponse> getState(
		final KvStateRequest request,
		final boolean forceUpdate) {

	return getKvStateLookupInfo(request.getJobId(), request.getStateName(), forceUpdate)
			.thenComposeAsync((Function<KvStateLocation, CompletableFuture<KvStateResponse>>) location -> {
				final int keyGroupIndex = KeyGroupRangeAssignment.computeKeyGroupForKeyHash(
						request.getKeyHashCode(), location.getNumKeyGroups());

				final InetSocketAddress serverAddress = location.getKvStateServerAddress(keyGroupIndex);
				if (serverAddress == null) {
					return FutureUtils.completedExceptionally(new UnknownKvStateKeyGroupLocationException(getServerName()));
				} else {
					// Query server
					final KvStateID kvStateId = location.getKvStateID(keyGroupIndex);
					final KvStateInternalRequest internalRequest = new KvStateInternalRequest(
							kvStateId, request.getSerializedKeyAndNamespace());
					return kvStateClient.sendRequest(serverAddress, internalRequest);
				}
			}, queryExecutor);
}
 
Example #6
Source File: FlinkUtils.java    From flink-crawler with Apache License 2.0
/**
 * Return a String key that will get partitioned to the target <operatorIndex>, given the workflow's
 * <maxParallelism> (for key groups) and the operator <parallelism>.
 * 
 * @param format
 *            - format string for the key (must contain one %d placeholder for the candidate index)
 * @param maxParallelism
 *            - the workflow's max parallelism (number of key groups)
 * @param parallelism
 *            - the operator parallelism
 * @param operatorIndex
 *            - the target operator (subtask) index
 * @return String key suitable for use in a record as the key.
 */
public static String makeKeyForOperatorIndex(String format, int maxParallelism, int parallelism,
        int operatorIndex) {
    if (!format.contains("%d")) {
        throw new IllegalArgumentException("Format string must contain %d");
    }

    if (maxParallelism == ExecutionJobVertex.VALUE_NOT_SET) {
        maxParallelism = KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism);
    }

    for (int i = 0; i < maxParallelism * 2; i++) {
        String key = String.format(format, i);
        int index = getOperatorIndexForKey(key, maxParallelism, parallelism);
        if (index == operatorIndex) {
            return key;
        }
    }

    throw new RuntimeException(String.format(
            "Unable to find key for target operator index %d (max parallelism = %d, parallelism = %d",
            operatorIndex, maxParallelism, parallelism));
}
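A hypothetical call site for this helper might look like the following; the format string and the parallelism settings are made up for illustration.

// Illustrative usage only: produce a key of the form "page-<n>" that a keyBy(...) with these
// settings would route to subtask index 2.
int maxParallelism = 128;
int parallelism = 4;
String keyForSubtask2 = FlinkUtils.makeKeyForOperatorIndex("page-%d", maxParallelism, parallelism, 2);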
 
Example #7
Source File: KafkaShuffleTestBase.java    From flink with Apache License 2.0
@Override
public void processElement(
		Tuple3<Integer, Long, Integer> in,
		Context ctx,
		Collector<Tuple3<Integer, Long, Integer>> out) throws Exception {
	int expectedPartition = KeyGroupRangeAssignment
		.assignKeyToParallelOperator(keySelector.getKey(in), numberOfPartitions, numberOfPartitions);
	int indexOfThisSubtask = getRuntimeContext().getIndexOfThisSubtask();
	KafkaTopicPartition partition = new KafkaTopicPartition(topic, expectedPartition);

	// This is how Kafka assigns partitions to subtasks.
	boolean rightAssignment =
		KafkaTopicPartitionAssigner.assign(partition, numberOfPartitions) == indexOfThisSubtask;
	boolean samePartition = (previousPartition == expectedPartition) || (previousPartition == -1);
	previousPartition = expectedPartition;

	if (!(rightAssignment && samePartition)) {
		throw new Exception("Error: Kafka partition assignment error ");
	}
	out.collect(in);
}
 
Example #8
Source File: AbstractRocksDBState.java    From flink with Apache License 2.0
@Override
public byte[] getSerializedValue(
		final byte[] serializedKeyAndNamespace,
		final TypeSerializer<K> safeKeySerializer,
		final TypeSerializer<N> safeNamespaceSerializer,
		final TypeSerializer<V> safeValueSerializer) throws Exception {

	//TODO make KvStateSerializer key-group aware to save this round trip and key-group computation
	Tuple2<K, N> keyAndNamespace = KvStateSerializer.deserializeKeyAndNamespace(
			serializedKeyAndNamespace, safeKeySerializer, safeNamespaceSerializer);

	int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(keyAndNamespace.f0, backend.getNumberOfKeyGroups());

	RocksDBSerializedCompositeKeyBuilder<K> keyBuilder =
					new RocksDBSerializedCompositeKeyBuilder<>(
						safeKeySerializer,
						backend.getKeyGroupPrefixBytes(),
						32
					);
	keyBuilder.setKeyAndKeyGroup(keyAndNamespace.f0, keyGroup);
	byte[] key = keyBuilder.buildCompositeKeyNamespace(keyAndNamespace.f1, namespaceSerializer);
	return backend.db.get(columnFamily, key);
}
 
Example #9
Source File: FlinkUtils.java    From flink-crawler with Apache License 2.0
/**
 * Return an integer value that will get partitioned to the target <operatorIndex>, given the workflow's
 * <maxParallelism> (for key groups) and the operator <parallelism>.
 * 
 * @param maxParallelism
 *            - the workflow's max parallelism (number of key groups)
 * @param parallelism
 *            - the operator parallelism
 * @param operatorIndex
 *            - the target operator (subtask) index
 * @return Integer suitable for use in a record as the key.
 */
public static Integer makeKeyForOperatorIndex(int maxParallelism, int parallelism,
        int operatorIndex) {
    if (maxParallelism == ExecutionJobVertex.VALUE_NOT_SET) {
        maxParallelism = KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism);
    }

    for (int i = 0; i < maxParallelism * 2; i++) {
        Integer key = Integer.valueOf(i);
        int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism);
        int index = KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(maxParallelism,
                parallelism, keyGroup);
        if (index == operatorIndex) {
            return key;
        }
    }

    throw new RuntimeException(String.format(
            "Unable to find key for target operator index %d (max parallelism = %d, parallelism = %d",
            operatorIndex, maxParallelism, parallelism));
}
 
Example #10
Source File: AbstractRocksDBState.java    From Flink-CEPplus with Apache License 2.0
@Override
public byte[] getSerializedValue(
		final byte[] serializedKeyAndNamespace,
		final TypeSerializer<K> safeKeySerializer,
		final TypeSerializer<N> safeNamespaceSerializer,
		final TypeSerializer<V> safeValueSerializer) throws Exception {

	//TODO make KvStateSerializer key-group aware to save this round trip and key-group computation
	Tuple2<K, N> keyAndNamespace = KvStateSerializer.deserializeKeyAndNamespace(
			serializedKeyAndNamespace, safeKeySerializer, safeNamespaceSerializer);

	int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(keyAndNamespace.f0, backend.getNumberOfKeyGroups());

	RocksDBSerializedCompositeKeyBuilder<K> keyBuilder =
					new RocksDBSerializedCompositeKeyBuilder<>(
						safeKeySerializer,
						backend.getKeyGroupPrefixBytes(),
						32
					);
	keyBuilder.setKeyAndKeyGroup(keyAndNamespace.f0, keyGroup);
	byte[] key = keyBuilder.buildCompositeKeyNamespace(keyAndNamespace.f1, namespaceSerializer);
	return backend.db.get(columnFamily, key);
}
 
Example #11
Source File: ExecutionJobVertex.java    From flink with Apache License 2.0
private void setMaxParallelismInternal(int maxParallelism) {
	if (maxParallelism == ExecutionConfig.PARALLELISM_AUTO_MAX) {
		maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
	}

	Preconditions.checkArgument(maxParallelism > 0
					&& maxParallelism <= KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM,
			"Overriding max parallelism is not in valid bounds (1..%s), found: %s",
			KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM, maxParallelism);

	this.maxParallelism = maxParallelism;
}
 
Example #12
Source File: ValueStateToKeyedStateRow.java    From bravo with Apache License 2.0
@Override
public KeyedStateRow map(Tuple2<K, V> t) throws Exception {
	int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(t.f0, maxParallelism);
	ByteArrayOutputStreamWithPos os = new ByteArrayOutputStreamWithPos();
	DataOutputViewStreamWrapper ov = new DataOutputViewStreamWrapper(os);

	RocksDBUtils.writeKeyGroup(keyGroup, keygroupPrefixBytes, ov);
	RocksDBUtils.writeKey(t.f0, keySerializer, os, ov, false);
	RocksDBUtils.writeNameSpace(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, os,
			ov, false);

	os.close();
	return new KeyedStateRow(stateName, os.toByteArray(),
			InstantiationUtil.serializeToByteArray(valueSerializer, t.f1));
}
 
Example #13
Source File: RocksDBSerializedCompositeKeyBuilderTest.java    From Flink-CEPplus with Apache License 2.0
private <K> int setKeyAndReturnKeyGroup(
	RocksDBSerializedCompositeKeyBuilder<K> compositeKeyBuilder,
	K key,
	int maxParallelism) {

	int keyGroup = KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, maxParallelism);
	compositeKeyBuilder.setKeyAndKeyGroup(key, keyGroup);
	return keyGroup;
}
 
Example #14
Source File: KeyGroupRangePartitioner.java    From flink with Apache License 2.0
@Override
public int partition(Integer key, int numPartitions) {
	return KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(
		maxParallelism,
		numPartitions,
		KeyGroupRangeAssignment.computeKeyGroupForKeyHash(key, maxParallelism));
}
 
Example #15
Source File: FlinkStreamingPipelineTranslator.java    From beam with Apache License 2.0
FlinkAutoBalancedShardKeyShardingFunction(
    int parallelism, int maxParallelism, Coder<DestinationT> destinationCoder) {
  this.parallelism = parallelism;
  // keep resolution of maxParallelism to sharding functions, as it relies on Flink's
  //  state API at KeyGroupRangeAssignment
  this.maxParallelism =
      maxParallelism > 0
          ? maxParallelism
          : KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism);

  this.destinationCoder = destinationCoder;
}
 
Example #16
Source File: FlinkStreamingPipelineTranslator.java    From beam with Apache License 2.0
private Map<Integer, ShardedKey<Integer>> generateShardedKeys(int key, int shardCount) {

      Map<Integer, ShardedKey<Integer>> shardedKeys = new HashMap<>();

      for (int shard = 0; shard < shardCount; shard++) {

        int salt = -1;
        while (true) {
          if (salt++ == Integer.MAX_VALUE) {
            throw new RuntimeException(
                "Failed to find sharded key in [ " + Integer.MAX_VALUE + " ] iterations");
          }
          ShardedKey<Integer> shk = ShardedKey.of(Objects.hash(key, salt), shard);
          int targetPartition = shard % parallelism;

          // create effective key in the same way Beam/Flink will do so we can see if it gets
          // allocated to the partition we want
          ByteBuffer effectiveKey = FlinkKeyUtils.encodeKey(shk, shardedKeyCoder);

          int partition =
              KeyGroupRangeAssignment.assignKeyToParallelOperator(
                  effectiveKey, maxParallelism, parallelism);

          if (partition == targetPartition) {
            shardedKeys.put(shard, shk);
            break;
          }
        }
      }

      return shardedKeys;
    }
 
Example #17
Source File: FlinkStreamingPipelineTranslatorTest.java    From beam with Apache License 2.0
@Test
public void testAutoBalanceShardKeyResolvesMaxParallelism() {

  int parallelism = 3;
  assertThat(
      new FlinkAutoBalancedShardKeyShardingFunction<>(parallelism, -1, StringUtf8Coder.of())
          .getMaxParallelism(),
      equalTo(KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism)));
  assertThat(
      new FlinkAutoBalancedShardKeyShardingFunction<>(parallelism, 0, StringUtf8Coder.of())
          .getMaxParallelism(),
      equalTo(KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism)));
}
 
Example #18
Source File: FlinkKafkaShuffleProducer.java    From flink with Apache License 2.0
/**
 * This is the function invoked to handle each element.
 *
 * @param transaction Transaction state;
 *                    elements are written to Kafka in transactions to guarantee different levels of data consistency
 * @param next Element to handle
 * @param context Context needed to handle the element
 * @throws FlinkKafkaException on Kafka errors
 */
@Override
public void invoke(KafkaTransactionState transaction, IN next, Context context) throws FlinkKafkaException {
	checkErroneous();

	// write timestamp to Kafka if timestamp is available
	Long timestamp = context.timestamp();

	int[] partitions = getPartitions(transaction);
	int partitionIndex;
	try {
		partitionIndex = KeyGroupRangeAssignment
			.assignKeyToParallelOperator(keySelector.getKey(next), partitions.length, partitions.length);
	} catch (Exception e) {
		throw new RuntimeException("Fail to assign a partition number to record", e);
	}

	ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
		defaultTopicId,
		partitionIndex,
		timestamp,
		null,
		kafkaSerializer.serializeRecord(next, timestamp));

	pendingRecords.incrementAndGet();
	transaction.getProducer().send(record, callback);
}
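Because the producer above passes partitions.length as both the maximum parallelism and the parallelism, each key group should map to the Kafka partition with the same index. The short sketch below (with a made-up partition count and key) illustrates the expected relationship.

// Illustrative sketch: with maxParallelism == parallelism == numberOfPartitions, the key group of a
// key and the partition index assigned to it are expected to be equal.
int numberOfPartitions = 8; // made-up value
Object key = "some-key";
int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, numberOfPartitions);
int partition = KeyGroupRangeAssignment.assignKeyToParallelOperator(key, numberOfPartitions, numberOfPartitions);
// Expected: partition == keyGroup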
 
Example #19
Source File: RocksIncrementalCheckpointRescalingTest.java    From Flink-CEPplus with Apache License 2.0
@Before
public void initRecords() throws Exception {
	records = new String[10];
	records[0] = "8";
	Assert.assertEquals(0, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[0]), maxParallelism)); // group 0

	records[1] = "5";
	Assert.assertEquals(1, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[1]), maxParallelism)); // group 1

	records[2] = "25";
	Assert.assertEquals(2, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[2]), maxParallelism)); // group 2

	records[3] = "13";
	Assert.assertEquals(3, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[3]), maxParallelism)); // group 3

	records[4] = "4";
	Assert.assertEquals(4, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[4]), maxParallelism)); // group 4

	records[5] = "7";
	Assert.assertEquals(5, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[5]), maxParallelism)); // group 5

	records[6] = "1";
	Assert.assertEquals(6, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[6]), maxParallelism)); // group 6

	records[7] = "6";
	Assert.assertEquals(7, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[7]), maxParallelism)); // group 7

	records[8] = "9";
	Assert.assertEquals(8, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[8]), maxParallelism)); // group 8

	records[9] = "3";
	Assert.assertEquals(9, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[9]), maxParallelism)); // group 9
}
 
Example #20
Source File: InternalTimerServiceImplTest.java    From flink with Apache License 2.0
private static int getKeyInKeyGroupRange(KeyGroupRange range, int maxParallelism) {
	Random rand = new Random(System.currentTimeMillis());
	int result = rand.nextInt();
	while (!range.contains(KeyGroupRangeAssignment.assignToKeyGroup(result, maxParallelism))) {
		result = rand.nextInt();
	}
	return result;
}
 
Example #21
Source File: InternalTimerServiceImplTest.java    From flink with Apache License 2.0
private static int getKeyInKeyGroup(int keyGroup, int maxParallelism) {
	Random rand = new Random(System.currentTimeMillis());
	int result = rand.nextInt();
	while (KeyGroupRangeAssignment.assignToKeyGroup(result, maxParallelism) != keyGroup) {
		result = rand.nextInt();
	}
	return result;
}
 
Example #22
Source File: RocksIncrementalCheckpointRescalingTest.java    From flink with Apache License 2.0
@Before
public void initRecords() throws Exception {
	records = new String[10];
	records[0] = "8";
	Assert.assertEquals(0, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[0]), maxParallelism)); // group 0

	records[1] = "5";
	Assert.assertEquals(1, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[1]), maxParallelism)); // group 1

	records[2] = "25";
	Assert.assertEquals(2, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[2]), maxParallelism)); // group 2

	records[3] = "13";
	Assert.assertEquals(3, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[3]), maxParallelism)); // group 3

	records[4] = "4";
	Assert.assertEquals(4, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[4]), maxParallelism)); // group 4

	records[5] = "7";
	Assert.assertEquals(5, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[5]), maxParallelism)); // group 5

	records[6] = "1";
	Assert.assertEquals(6, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[6]), maxParallelism)); // group 6

	records[7] = "6";
	Assert.assertEquals(7, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[7]), maxParallelism)); // group 7

	records[8] = "9";
	Assert.assertEquals(8, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[8]), maxParallelism)); // group 8

	records[9] = "3";
	Assert.assertEquals(9, KeyGroupRangeAssignment.assignToKeyGroup(keySelector.getKey(records[9]), maxParallelism)); // group 9
}
 
Example #23
Source File: AbstractStreamOperatorTest.java    From flink with Apache License 2.0
private static int getKeyInKeyGroupRange(KeyGroupRange range, int maxParallelism) {
	Random rand = new Random(System.currentTimeMillis());
	int result = rand.nextInt();
	while (!range.contains(KeyGroupRangeAssignment.assignToKeyGroup(result, maxParallelism))) {
		result = rand.nextInt();
	}
	return result;
}
 
Example #24
Source File: ResultPartitionDeploymentDescriptor.java    From flink with Apache License 2.0
public ResultPartitionDeploymentDescriptor(
		PartitionDescriptor partitionDescriptor,
		ShuffleDescriptor shuffleDescriptor,
		int maxParallelism,
		boolean sendScheduleOrUpdateConsumersMessage) {
	this.partitionDescriptor = checkNotNull(partitionDescriptor);
	this.shuffleDescriptor = checkNotNull(shuffleDescriptor);
	KeyGroupRangeAssignment.checkParallelismPreconditions(maxParallelism);
	this.maxParallelism = maxParallelism;
	this.sendScheduleOrUpdateConsumersMessage = sendScheduleOrUpdateConsumersMessage;
}
 
Example #25
Source File: KeyGroupStreamPartitioner.java    From flink with Apache License 2.0
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
	K key;
	try {
		key = keySelector.getKey(record.getInstance().getValue());
	} catch (Exception e) {
		throw new RuntimeException("Could not extract key from " + record.getInstance().getValue(), e);
	}
	return KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, numberOfChannels);
}
 
Example #26
Source File: CheckpointCoordinatorTest.java    From flink with Apache License 2.0
private void testCreateKeyGroupPartitions(int maxParallelism, int parallelism) {
	List<KeyGroupRange> ranges = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism, parallelism);
	for (int i = 0; i < maxParallelism; ++i) {
		KeyGroupRange range = ranges.get(KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(maxParallelism, parallelism, i));
		if (!range.contains(i)) {
			Assert.fail("Could not find expected key-group " + i + " in range " + range);
		}
	}
}
 
Example #27
Source File: ExecutionJobVertex.java    From flink with Apache License 2.0
private void setMaxParallelismInternal(int maxParallelism) {
	if (maxParallelism == ExecutionConfig.PARALLELISM_AUTO_MAX) {
		maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
	}

	Preconditions.checkArgument(maxParallelism > 0
					&& maxParallelism <= KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM,
			"Overriding max parallelism is not in valid bounds (1..%s), found: %s",
			KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM, maxParallelism);

	this.maxParallelism = maxParallelism;
}
 
Example #28
Source File: Execution.java    From flink with Apache License 2.0
private static int getPartitionMaxParallelism(IntermediateResultPartition partition) {
	// TODO consumers.isEmpty() only exists for test, currently there has to be exactly one consumer in real jobs!
	final List<List<ExecutionEdge>> consumers = partition.getConsumers();
	int maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
	if (!consumers.isEmpty()) {
		List<ExecutionEdge> consumer = consumers.get(0);
		ExecutionJobVertex consumerVertex = consumer.get(0).getTarget().getJobVertex();
		maxParallelism = consumerVertex.getMaxParallelism();
	}
	return maxParallelism;
}
 
Example #29
Source File: CheckpointCoordinatorTest.java    From flink with Apache License 2.0 5 votes vote down vote up
private void testCreateKeyGroupPartitions(int maxParallelism, int parallelism) {
	List<KeyGroupRange> ranges = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism, parallelism);
	for (int i = 0; i < maxParallelism; ++i) {
		KeyGroupRange range = ranges.get(KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(maxParallelism, parallelism, i));
		if (!range.contains(i)) {
			Assert.fail("Could not find expected key-group " + i + " in range " + range);
		}
	}
}
 
Example #30
Source File: RocksDBSerializedCompositeKeyBuilderTest.java    From flink with Apache License 2.0
private <K> int setKeyAndReturnKeyGroup(
	RocksDBSerializedCompositeKeyBuilder<K> compositeKeyBuilder,
	K key,
	int maxParallelism) {

	int keyGroup = KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, maxParallelism);
	compositeKeyBuilder.setKeyAndKeyGroup(key, keyGroup);
	return keyGroup;
}