Java Code Examples for org.apache.kafka.common.Node#host()

The following examples show how to use org.apache.kafka.common.Node#host(). Each example is taken from an open-source project; the source file and license are noted above the code.
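Before the project examples, here is a minimal, self-contained sketch of where Node instances typically come from and how host() is read from them. It is only an illustration: the bootstrap address localhost:9092 and the class name NodeHostExample are placeholders, and the cluster is assumed to be reachable.

import java.util.Collection;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.Node;

public class NodeHostExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address; point this at a real cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient adminClient = AdminClient.create(props)) {
            // describeCluster().nodes() resolves to the brokers currently known to the cluster.
            Collection<Node> nodes = adminClient.describeCluster().nodes().get();
            for (Node node : nodes) {
                // host() returns the broker's advertised host name, port() the advertised port.
                System.out.println(node.id() + " -> " + node.host() + ":" + node.port());
            }
        }
    }
}

The examples below use host() in the same way, typically together with id() and port(), to build connect strings, probe broker reachability, or label metric samples.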
Example 1
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
/**
 *  Remove the under-replicated partitions that are in the middle of partition reassignment.
 */
public List<PartitionInfo> filterOutInReassignmentUrps(List<PartitionInfo> urps,
                                                       Map<String, Integer> replicationFactors) {
  List<PartitionInfo> result = new ArrayList<>();
  for (PartitionInfo urp : urps) {
    if (urp.replicas().length <= replicationFactors.get(urp.topic())) {
      // # of replicas <= replication factor
      result.add(urp);
    } else {
      // # of replicas > replication factor. this can happen after
      // a failed partition reassignment
      Set<Integer> liveReplicas = new HashSet<>();
      for (Node node : urp.replicas()) {
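        // Probe the broker host on port 9092 to check whether the replica is still reachable.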
        if (node.host() != null && OperatorUtil.pingKafkaBroker(node.host(), 9092, 5000)) {
          liveReplicas.add(node.id());
        }
      }
      if (liveReplicas.size() < replicationFactors.get(urp.topic())) {
        result.add(urp);
      }
    }
  }
  return result;
}
 
Example 2
Source File: BrokerNodeFunction.java    From data-highway with Apache License 2.0
public BrokerNode apply(Predicate<String> hostNamePredicate) {
  Collection<Node> nodes = KafkaFutures.join(client.describeCluster().nodes());

  try {
    Node node = find(nodes, n -> hostNamePredicate.test(n.host()));
    log.debug("Using broker {}", node);
    return new BrokerNode(node.id(), ofNullable(node.rack()).orElse("none"), node.host());
  } catch (NoSuchElementException e) {
    throw new RuntimeException("No broker found on localhost!");
  }
}
 
Example 3
Source File: KafkaTopicPartitionLeader.java    From Flink-CEPplus with Apache License 2.0
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
	this.topicPartition = topicPartition;
	if (leader == null) {
		this.leaderId = -1;
		this.leaderHost = null;
		this.leaderPort = -1;
	} else {
		this.leaderId = leader.id();
		this.leaderPort = leader.port();
		this.leaderHost = leader.host();
	}
	int cachedHash = (leader == null) ? 14 : leader.hashCode();
	this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
 
Example 4
Source File: KafkaTopicPartitionLeader.java    From flink with Apache License 2.0
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
	this.topicPartition = topicPartition;
	if (leader == null) {
		this.leaderId = -1;
		this.leaderHost = null;
		this.leaderPort = -1;
	} else {
		this.leaderId = leader.id();
		this.leaderPort = leader.port();
		this.leaderHost = leader.host();
	}
	int cachedHash = (leader == null) ? 14 : leader.hashCode();
	this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
 
Example 5
Source File: DefaultKafkaClusterProxy.java    From kafka-message-tool with MIT License
private void throwIfInvalidConfigMakesClusterUnusable() throws ClusterConfigurationError {
    try {
        Logger.trace("calling kafkaAdminClient.findAllBrokers() ");
        final List<Node> nodes = seqAsJavaList(kafkaAdminClient.findAllBrokers());
        final List<String> advertisedListeners = new ArrayList<>();
        for (Node node : nodes) {
            final String host1 = node.host();
            final int port = node.port();
            final String advertisedListener = String.format("%s:%d", host1, port);
            Logger.debug("Found advertised listener: " + advertisedListener);
            advertisedListeners.add(advertisedListener);

            Logger.trace(String.format("Checking if advertised listener '%s' is reachable", host1));
            if (HostnameUtils.isHostnameReachable(host1, ApplicationConstants.HOSTNAME_REACHABLE_TIMEOUT_MS)) {
                Logger.trace("Yes");
                return;
            }
            Logger.trace("No");
        }
        final String msg = String.format("Cluster config for 'advertised.listeners' is invalid.%n%n" +
                                             "* None of advertised listeners '%s' are reachable from outside world.%n" +
                                             "* Producers/consumers will be unable to use this kafka cluster " +
                                             "(e.g. will not connect properly).%n" +
                                             "* This application (%s) cannot fetch broker configuration", advertisedListeners,
                                         APPLICATION_NAME);
        throw new ClusterConfigurationError(msg);
    } catch (RuntimeException e) {
        Logger.trace(e);
        e.printStackTrace();
        throw e;

    }
}
 
Example 6
Source File: SamplingUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Create a {@link BrokerMetricSample}, record the relevant metrics for the given broker, and return the sample.
 *
 * @param node Node hosting the broker.
 * @param brokerLoadById Load information for brokers by the broker id.
 * @param maxMetricTimestamp Maximum timestamp of the sampled metric during the sampling process.
 * @return Metric sample populated with broker metrics, or {@code null} if sample generation is skipped.
 */
static BrokerMetricSample buildBrokerMetricSample(Node node,
                                                  Map<Integer, BrokerLoad> brokerLoadById,
                                                  long maxMetricTimestamp) throws UnknownVersionException {
  BrokerLoad brokerLoad = brokerLoadById.get(node.id());
  if (skipBuildingBrokerMetricSample(brokerLoad, node.id())) {
    return null;
  }
  MetricDef brokerMetricDef = KafkaMetricDef.brokerMetricDef();
  BrokerMetricSample bms = new BrokerMetricSample(node.host(), node.id(), brokerLoad.brokerSampleDeserializationVersion());
  for (Map.Entry<Byte, Set<RawMetricType>> entry : RawMetricType.brokerMetricTypesDiffByVersion().entrySet()) {
    for (RawMetricType rawBrokerMetricType : entry.getValue()) {
      // We require the broker to report all the metric types (including nullable values). Otherwise we skip the broker.
      if (!brokerLoad.brokerMetricAvailable(rawBrokerMetricType)) {
        LOG.warn("{}broker {} because it does not have {} metrics (serde version {}) or the metrics are inconsistent.",
                 SKIP_BUILDING_SAMPLE_PREFIX, node.id(), rawBrokerMetricType, entry.getKey());
        return null;
      } else {
        MetricInfo metricInfo = brokerMetricDef.metricInfo(KafkaMetricDef.forRawMetricType(rawBrokerMetricType).name());
        double metricValue = brokerLoad.brokerMetric(rawBrokerMetricType);
        bms.record(metricInfo, metricValue);
      }
    }
  }

  // Disk usage is not one of the broker raw metric types.
  bms.record(brokerMetricDef.metricInfo(KafkaMetricDef.DISK_USAGE.name()), brokerLoad.diskUsage());
  bms.close(maxMetricTimestamp);
  return bms;
}
 
Example 7
Source File: KafkaTestClusterTest.java    From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * This test calls getKafkaBrokers() after the cluster has been properly started. It is expected
 * to return proper connect strings for each of the brokers.
 */
@Test
void testGetKafkaConnectString() throws Exception {
    final int numberOfBrokers = 3;

    try (final KafkaTestCluster kafkaTestCluster = new KafkaTestCluster(numberOfBrokers)) {
        // Start cluster
        kafkaTestCluster.start();

        // Create test Utils
        final KafkaTestUtils kafkaTestUtils = new KafkaTestUtils(kafkaTestCluster);

        // Ask for the connect string
        final String resultStr = kafkaTestCluster.getKafkaConnectString();
        Assertions.assertNotNull(resultStr, "Should have non-null result");

        // Split the result by commas to get individual hosts.
        final Set<String> hosts = new HashSet<>(Arrays.asList(resultStr.split(",")));
        Assertions.assertEquals(numberOfBrokers, hosts.size(), "Should contain 3 entries.");

        // Ask for which nodes exist in the cluster
        final List<Node> nodes = kafkaTestUtils.describeClusterNodes();

        // Sanity test
        Assertions.assertEquals(numberOfBrokers, nodes.size(), "Should have 3 brokers in the cluster");

        // Make sure each node is represented properly.
        for (final Node node: nodes) {
            final String calculatedConnectString = "PLAINTEXT://" + node.host() + ":" + node.port();
            Assertions.assertTrue(hosts.contains(calculatedConnectString), "Should contain " + calculatedConnectString);
        }
    }
}
 
Example 8
Source File: KafkaPool.java    From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License
@Override
public boolean startup() {
	Collection<Node> nodes = discoverNodes();
	if (nodes == null || nodes.isEmpty()) {
		return false;
	}

	int poolType = poolCfg.getType();
	String poolName = poolCfg.getName();
	int minCon = poolCfg.getMinCon();
	int maxCon = poolCfg.getMaxCon();

	availableHostList.clear();
	backupHostList.clear();
	for (Node node : nodes) {
		PhysicalNode physicalNode = new PhysicalNode(backendConFactory, 
				poolType, poolName, minCon, maxCon, node.host(), node.port() );
		physicalNode.initConnections();
		physicalNodes.put(node.id(), physicalNode);

		// Guard against the case where the config file has not been updated yet
		availableHostList.add(node.host() + ":" + node.port());
		backupHostList.add(node.host() + ":" + node.port());
	}
	
	// Load the ApiVersion information
	loadKafkaVersion();
	
	return true;
}
 
Example 9
Source File: KafkaTopicPartitionLeader.java    From flink with Apache License 2.0
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
	this.topicPartition = topicPartition;
	if (leader == null) {
		this.leaderId = -1;
		this.leaderHost = null;
		this.leaderPort = -1;
	} else {
		this.leaderId = leader.id();
		this.leaderPort = leader.port();
		this.leaderHost = leader.host();
	}
	int cachedHash = (leader == null) ? 14 : leader.hashCode();
	this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
 
Example 10
Source File: KafkaPoolCfg.java    From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License
/**
 * Initializes the configured topics against the Kafka cluster. Example broker-side failure when the
 * requested replication factor exceeds the number of available brokers:
 *
 * <pre>
 * [2018-05-28 15:26:41,394] INFO [Admin Manager on Broker 0]:
 * Error processing create topic request for topic test01 with arguments (numPartitions=3, replicationFactor=2, replicasAssignments={}, configs={}) (kafka.server.AdminManager)
 * org.apache.kafka.common.errors.InvalidReplicationFactorException: Replication factor: 2 larger than available brokers: 1.
 * </pre>
 */
private void initializeOfKafka(Map<String, TopicCfg> topicCfgMap) throws Exception, org.apache.kafka.common.errors.TimeoutException {
	
	if (topicCfgMap == null || topicCfgMap.isEmpty()) {
		return;
	}
	
	// Get server address for kafka
	StringBuffer servers = new StringBuffer();
	List<String> nodes = this.getNodes();
	for (int i = 0; i < nodes.size(); i++) {
		String str = nodes.get(i);
		String[] node = str.split(":");
		servers.append(node[0]).append(":").append(node[1]);
		if (i < nodes.size() - 1) {
			servers.append(",");
		}
	}
	
	KafkaAdmin kafkaAdmin = null;
	try {
		// Obtain the Kafka admin client
		kafkaAdmin = KafkaAdmin.create( servers.toString() );
		
		// Fetch the topics and their descriptions that currently exist in Kafka
		Map<String, TopicDescription> remoteTopics = kafkaAdmin.getTopicAndDescriptions();
		Collection<Node> clusterNodes = kafkaAdmin.getClusterNodes();
		for (TopicCfg topicCfg : topicCfgMap.values()) {

			String topicName = topicCfg.getName();
			short replicationFactor = topicCfg.getReplicationFactor();
			int partitionNum = topicCfg.getPartitions();

			TopicDescription topicDescription = remoteTopics.get(topicName);
			if (topicDescription != null) {
				int oldPartitionNum = topicDescription.partitions().size();
				if (partitionNum > oldPartitionNum) {
					// add partition
					kafkaAdmin.addPartitionsForTopic(topicName, partitionNum);
					topicDescription = kafkaAdmin.getDescriptionByTopicName(topicName);
				}
				
			} else {
				//verify
				if(clusterNodes == null || replicationFactor > clusterNodes.size()) {
					throw new Exception( "kafka topicName="+ topicName + ", no enough alive physical nodes for replication");
				}
				
				// create topic
				kafkaAdmin.createTopic(topicName, partitionNum, replicationFactor);
				topicDescription = kafkaAdmin.getDescriptionByTopicName(topicName);
			}
			
			// 
			if ( topicDescription == null) {
				throw new Exception( " kafka topicName=" + topicName  + ", description is null.");
			} 

			//
			String name = topicDescription.name();
			boolean internal = topicDescription.isInternal();
			int partitionSize = topicDescription.partitions().size();
			
			//
			BrokerPartition[] newPartitions = new BrokerPartition[ partitionSize ];
			for (int i = 0; i < partitionSize; i++) {
				
				TopicPartitionInfo partitionInfo =  topicDescription.partitions().get(i);
				int partition = partitionInfo.partition();
				
				Node leader = partitionInfo.leader();
				BrokerNode newLeader = new BrokerNode(leader.id(), leader.host(), leader.port());
				
				List<Node> replicas = partitionInfo.replicas();
				BrokerNode[] newReplicas = new BrokerNode[replicas.size()];
				for (int j = 0; j < replicas.size(); j++) {
					newReplicas[j] = new BrokerNode(replicas.get(j).id(), replicas.get(j).host(), replicas.get(j).port());
				}
				
				BrokerPartition newPartition = new BrokerPartition(partition, newLeader, newReplicas);
				newPartitions[i] = newPartition;
			}

			topicCfg.setRunningInfo( new BrokerRunningInfo(name, internal, newPartitions) );
		}
		
	} catch(Throwable e) {
		throw new Exception("kafka pool init err: " + servers.toString(), e);
		
	} finally {
		if (kafkaAdmin != null)
			kafkaAdmin.close();
	}
}
 
Example 11
Source File: MonitorUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * @param node The node whose rack is requested.
 * @return Rack of the given node if the corresponding value is not null and not empty, the host of the node otherwise.
 */
public static String getRackHandleNull(Node node) {
  return node.rack() == null || node.rack().isEmpty() ? node.host() : node.rack();
}