Java Code Examples for org.apache.kafka.common.protocol.Errors#NONE
The following examples show how to use
org.apache.kafka.common.protocol.Errors#NONE.
You can go to the original project or source file by following the links above each example.
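Errors.NONE is the enum constant Kafka uses to signal a successful, error-free result, so the examples below either attach it to responses they build or compare a response's error against it. As a minimal sketch of that comparison pattern (the checkError helper is illustrative and not taken from any example below; Errors.forCode and Errors.exception are the enum's own methods):

// Illustrative helper: translate a wire error code into an Errors value and
// treat Errors.NONE as success; any other value maps to an ApiException.
static void checkError(short errorCode) {
    Errors error = Errors.forCode(errorCode);
    if (error != Errors.NONE) {
        // exception() returns the ApiException mapped to this error (null for NONE)
        throw error.exception();
    }
}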
Example 1
Source File: BootstrapServerParam.java From kafka_book_demo with Apache License 2.0
public static MetadataResponse getMetadata(int throttleTimeMs) {
    Node node = new Node(0, "localhost", 9093);
    List<Node> brokers = Collections.singletonList(node);
    int controllerId = 0;
    String clusterId = "64PniqfkRHa4ASFUisNXrw";
    List<Node> empty = new ArrayList<>();
    PartitionMetadata pMeta1 = new PartitionMetadata(Errors.NONE, 0, node, brokers, brokers, empty);
    PartitionMetadata pMeta2 = new PartitionMetadata(Errors.NONE, 1, node, brokers, brokers, empty);
    PartitionMetadata pMeta3 = new PartitionMetadata(Errors.NONE, 2, node, brokers, brokers, empty);
    PartitionMetadata pMeta4 = new PartitionMetadata(Errors.NONE, 3, node, brokers, brokers, empty);
    List<PartitionMetadata> pMetaList = new ArrayList<>();
    pMetaList.add(pMeta1);
    pMetaList.add(pMeta2);
    pMetaList.add(pMeta3);
    pMetaList.add(pMeta4);
    TopicMetadata tMeta1 = new TopicMetadata(Errors.NONE, topic, false, pMetaList);
    List<TopicMetadata> tMetaList = new ArrayList<>();
    tMetaList.add(tMeta1);
    return new MetadataResponse(throttleTimeMs, brokers, clusterId, controllerId, tMetaList);
}
Example 2
Source File: KafkaRequestHandler.java From kop with Apache License 2.0
protected ApiVersionsResponse overloadDefaultApiVersionsResponse() {
    List<ApiVersionsResponse.ApiVersion> versionList = new ArrayList<>();
    for (ApiKeys apiKey : ApiKeys.values()) {
        if (apiKey.minRequiredInterBrokerMagic <= RecordBatch.CURRENT_MAGIC_VALUE) {
            switch (apiKey) {
                case FETCH:
                    // V4 added MessageSets responses. We need to make sure RecordBatch format is not used
                    versionList.add(new ApiVersionsResponse.ApiVersion((short) 1, (short) 4,
                        apiKey.latestVersion()));
                    break;
                case LIST_OFFSETS:
                    // V0 is needed for librdkafka
                    versionList.add(new ApiVersionsResponse.ApiVersion((short) 2, (short) 0,
                        apiKey.latestVersion()));
                    break;
                default:
                    versionList.add(new ApiVersionsResponse.ApiVersion(apiKey));
            }
        }
    }
    return new ApiVersionsResponse(0, Errors.NONE, versionList);
}
Example 3
Source File: KafkaRequestHandler.java From kop with Apache License 2.0
static PartitionMetadata newPartitionMetadata(TopicName topicName, Node node) {
    int pulsarPartitionIndex = topicName.getPartitionIndex();
    int kafkaPartitionIndex = pulsarPartitionIndex == -1 ? 0 : pulsarPartitionIndex;

    if (log.isDebugEnabled()) {
        log.debug("Return PartitionMetadata node: {}, topicName: {}", node, topicName);
    }

    return new PartitionMetadata(
        Errors.NONE,
        kafkaPartitionIndex,
        node,                      // leader
        Lists.newArrayList(node),  // replicas
        Lists.newArrayList(node),  // isr
        Collections.emptyList()    // offline replicas
    );
}
Example 4
Source File: GroupCoordinator.java From kop with Apache License 2.0
public KeyValue<Errors, List<GroupOverview>> handleListGroups() {
    if (!isActive.get()) {
        return new KeyValue<>(Errors.COORDINATOR_NOT_AVAILABLE, new ArrayList<>());
    } else {
        Errors errors;
        if (groupManager.isLoading()) {
            errors = Errors.COORDINATOR_LOAD_IN_PROGRESS;
        } else {
            errors = Errors.NONE;
        }
        List<GroupOverview> overviews = new ArrayList<>();
        groupManager.currentGroups().forEach(group -> overviews.add(group.overview()));
        return new KeyValue<>(
            errors,
            overviews
        );
    }
}
Example 5
Source File: ExecutorAdminUtils.java From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Check whether there is ongoing intra-broker replica movement.
 * @param brokersToCheck List of brokers to check.
 * @param adminClient The AdminClient used to send the describeLogDirs request.
 * @param config The config object that holds all the Cruise Control related configs.
 * @return True if there is ongoing intra-broker replica movement.
 */
static boolean hasOngoingIntraBrokerReplicaMovement(Collection<Integer> brokersToCheck, AdminClient adminClient,
                                                    KafkaCruiseControlConfig config)
        throws InterruptedException, ExecutionException, TimeoutException {
    Map<Integer, KafkaFuture<Map<String, LogDirInfo>>> logDirsByBrokerId =
            adminClient.describeLogDirs(brokersToCheck).values();
    for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirInfo>>> entry : logDirsByBrokerId.entrySet()) {
        Map<String, LogDirInfo> logInfos =
                entry.getValue().get(config.getLong(LOGDIR_RESPONSE_TIMEOUT_MS_CONFIG), TimeUnit.MILLISECONDS);
        for (LogDirInfo info : logInfos.values()) {
            if (info.error == Errors.NONE) {
                if (info.replicaInfos.values().stream().anyMatch(i -> i.isFuture)) {
                    return true;
                }
            }
        }
    }
    return false;
}
Example 6
Source File: TopicPartitionsOffsetInfo.java From kafka-utilities with Apache License 2.0
public Map<TopicPartition, OffsetFetchResponse.PartitionData> getCommitedOffsets(final String groupName,
        final List<TopicPartition> topicPartitions, final long responseWaitTime) throws OffsetFetchException {
    if (this.coordinator == null) {
        throw new OffsetFetchException("Missing Group Coordinator for group:" + groupName);
    }
    OffsetFetchRequest.Builder offsetRequestBuilder = new OffsetFetchRequest.Builder(groupName, topicPartitions);
    this.kafkaApiRequest.sendApiRequest(this.coordinator, offsetRequestBuilder);
    OffsetFetchResponse offsetFetchResponse =
            (OffsetFetchResponse) this.kafkaApiRequest.getLastApiResponse(responseWaitTime);
    if (offsetFetchResponse.error() == Errors.NONE) {
        return offsetFetchResponse.responseData();
    } else {
        throw new OffsetFetchException(offsetFetchResponse.error().message());
    }
}
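A caller of getCommitedOffsets still has to check each partition individually, because every OffsetFetchResponse.PartitionData carries its own error alongside the top-level response error checked above. A rough usage sketch (the offsetInfo instance, group name, and timeout value are illustrative; offset and error are the public fields of PartitionData in these classic protocol classes):

// Illustrative caller: fetch committed offsets and keep only partitions without errors.
Map<TopicPartition, OffsetFetchResponse.PartitionData> committed =
        offsetInfo.getCommitedOffsets(groupName, topicPartitions, 5000L);
for (Map.Entry<TopicPartition, OffsetFetchResponse.PartitionData> entry : committed.entrySet()) {
    OffsetFetchResponse.PartitionData data = entry.getValue();
    // Per-partition errors are reported separately from the top-level response error.
    if (data.error == Errors.NONE) {
        System.out.println(entry.getKey() + " committed offset: " + data.offset);
    }
}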
Example 7
Source File: TopicPartitionsOffsetInfo.java From kafka-utilities with Apache License 2.0
private Map<TopicPartition, Long> processListOffsetResponse(final ListOffsetResponse listOffsetResponse,
        final Map<TopicPartition, Long> requiredTimestamp) {
    Map<TopicPartition, Long> processTopicPartitionOffsets = new HashMap<>();
    processTopicPartitionOffsets.putAll(requiredTimestamp);
    for (Map.Entry<TopicPartition, Long> entry : requiredTimestamp.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        ListOffsetResponse.PartitionData partitionData = listOffsetResponse.responseData().get(topicPartition);
        Errors error = partitionData.error;
        if (error == Errors.NONE) {
            // supporting kafka version greater than 10 only
            if (partitionData.offset != ListOffsetResponse.UNKNOWN_OFFSET) {
                processTopicPartitionOffsets.put(topicPartition, partitionData.offset);
            }
        }
    }
    return processTopicPartitionOffsets;
}
Example 8
Source File: OffsetMetadataAndError.java From kop with Apache License 2.0
public static OffsetMetadataAndError apply(long offset) {
    return new OffsetMetadataAndError(
        new OffsetMetadata(offset, OffsetMetadata.NO_METADATA),
        Errors.NONE
    );
}
Example 9
Source File: OffsetMetadataAndError.java From kop with Apache License 2.0
private OffsetMetadataAndError(OffsetMetadata offsetMetadata) {
    this(offsetMetadata, Errors.NONE);
}
Example 10
Source File: KafkaRequestHandler.java From kop with Apache License 2.0
private SaslHandshakeResponse checkSaslMechanism(String mechanism) {
    if (getKafkaConfig().getSaslAllowedMechanisms().contains(mechanism)) {
        return new SaslHandshakeResponse(Errors.NONE, getKafkaConfig().getSaslAllowedMechanisms());
    }
    return new SaslHandshakeResponse(Errors.UNSUPPORTED_SASL_MECHANISM, new HashSet<>());
}