kafka.cluster.Broker Java Examples

The following examples show how to use kafka.cluster.Broker. Each example is drawn from an open-source project; the source file, originating project, and license are noted above the code.
Example #1
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static Set<Integer> brokerHostnamesToBrokerIds(
        ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Set<Integer> brokerIdSet = Sets.newHashSet();
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        if (brokerHostnameSet.contains(endpoint.host())) {
            brokerIdSet.add(broker.id());
        }
    }
    Preconditions.checkArgument(!checkPresence ||
            brokerHostnameSet.size() == brokerIdSet.size(),
            "Some hostnames could not be found! We found: " + brokerIdSet);

    return brokerIdSet;
}
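A call site for this helper might look like the following sketch; the connect string and hostnames are hypothetical, and ZkUtils.apply is the standard factory from the same kafka.utils package the example already uses:

// Hypothetical usage: resolve two broker hostnames to broker ids,
// failing if either hostname is not registered in the cluster.
ZkUtils zkUtils = ZkUtils.apply("zk1:2181", 30000, 30000, false);
Set<Integer> ids = brokerHostnamesToBrokerIds(
        zkUtils, Sets.newHashSet("kafka-1.example.com", "kafka-2.example.com"), true);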
 
Example #2
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printCurrentBrokers(ZkUtils zkUtils) throws JSONException {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    JSONArray json = new JSONArray();
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        JSONObject brokerJson = new JSONObject();
        brokerJson.put("id", broker.id());
        brokerJson.put("host", endpoint.host());
        brokerJson.put("port", endpoint.port());
        if (broker.rack().isDefined()) {
            brokerJson.put("rack", broker.rack().get());
        }
        json.put(brokerJson);
    }
    System.out.println("CURRENT BROKERS:");
    System.out.println(json.toString());
}
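With rack awareness configured, the printed array looks along the lines of [{"id":1,"host":"kafka-1","port":9092,"rack":"us-east-1a"}] (illustrative values; the "rack" key is simply absent for brokers without a rack).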
 
Example #3
Source File: KafkaProducerPartitionLeaderChangeBugUnitTest.java    From Scribengin with GNU Affero General Public License v3.0
private void info(List<PartitionMetadata> holder) {
  String[] header = { 
      "Partition Id", "Leader", "Replicas"
  };
  TabularFormater formater = new TabularFormater(header);
  formater.setTitle("Partitions");
  for(PartitionMetadata sel : holder) {
    StringBuilder replicas = new StringBuilder();
    for(Broker broker : sel.replicas()) {
      if(replicas.length() > 0) replicas.append(",");
      replicas.append(broker.port());
    }
    formater.addRow(sel.partitionId(), sel.leader().port(), replicas.toString());
  }
  System.out.println(formater.getFormatText());
}
 
Example #4
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
    long retryCnt = 0;
    while (true) {
        PartitionMetadata metadata = findLeader();
        logger.debug("findNewLeader - meta leader {}, previous leader {}", metadata, oldLeader);
        if (metadata != null && metadata.leader() != null && (oldLeader == null ||
                (!(oldLeader.host().equalsIgnoreCase(metadata.leader().host()) &&
                  (oldLeader.port() == metadata.leader().port())) || retryCnt != 0))) {
            // first time through, if the leader hasn't changed, give ZooKeeper a second to recover
            // second time, assume the broker did recover before failover, or it was a non-Broker issue
            logger.info("findNewLeader - using new leader {} from meta data, previous leader {}", metadata.leader(), oldLeader);
            return metadata.leader();
        }
        //TODO: backoff retry
        Thread.sleep(1000L);
        retryCnt ++;
        // if could not find the leader for current replicaBrokers, let's try to find one via allBrokers
        if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
            logger.warn("can nof find leader for {} - {} after {} retries", topic, partitionId, retryCnt);
            replicaBrokers.clear();
            replicaBrokers.addAll(allBrokers);
        }
    }
}
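The TODO in the loop above calls for backoff between retries; a minimal sketch, assuming a capped exponential delay is acceptable, would replace the fixed Thread.sleep(1000L):

// Hypothetical capped exponential backoff: 1s, 2s, 4s, ... up to 30s.
long backoffMs = Math.min(1000L << Math.min(retryCnt, 5), 30000L);
Thread.sleep(backoffMs);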
 
Example #5
Source File: AckKafkaWriterTestRunner.java    From Scribengin with GNU Affero General Public License v3.0
public void run() {
  try {
    while (!exit) {
      KafkaTool kafkaTool = new KafkaTool(topic, cluster.getZKConnect());
      kafkaTool.connect();
      TopicMetadata topicMeta = kafkaTool.findTopicMetadata(topic);
      PartitionMetadata partitionMeta = findPartition(topicMeta, partition);
      Broker partitionLeader = partitionMeta.leader();
      Server kafkaServer = cluster.findKafkaServerByPort(partitionLeader.port());
      System.out.println("Shutdown kafka server " + kafkaServer.getPort());
      kafkaServer.shutdown();
      failureCount++;
      Thread.sleep(sleepBeforeRestart);
      kafkaServer.start();
      kafkaTool.close();
      Thread.sleep(10000); //wait to make sure that the kafka server starts
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  synchronized (this) {
    notify();
  }
}
 
Example #6
Source File: KafkaClusterTool.java    From Scribengin with GNU Affero General Public License v3.0
Server findLeader(String topic, int partition) throws Exception {
  KafkaTool kafkaTool = new KafkaTool("KafkaPartitionLeaderKiller", cluster.getZKConnect());
  kafkaTool.connect();
  TopicMetadata topicMeta = kafkaTool.findTopicMetadata(topic);
  PartitionMetadata partitionMeta = findPartition(topicMeta, partition);
  Broker partitionLeader = partitionMeta.leader();
  Server kafkaServer = cluster.findKafkaServerByPort(partitionLeader.port());
  System.out.println("Shutdown kafka server " + kafkaServer.getPort());
  kafkaTool.close();
  return kafkaServer;
}
 
Example #7
Source File: SimpleKafkaConsumer.java    From attic-apex-malhar with Apache License 2.0
private ConsumerThread(Broker broker, Set<KafkaPartition> kpl, SimpleKafkaConsumer consumer)
{
  this.broker = broker;
  this.clientName = consumer.getClientName(broker.host() + "_" + broker.port());
  this.consumer = consumer;
  this.kpS = Collections.newSetFromMap(new ConcurrentHashMap<KafkaPartition, Boolean>());
  this.kpS.addAll(kpl);
}
 
Example #8
Source File: KafkaMetadataUtil.java    From attic-apex-malhar with Apache License 2.0
/**
 * Fetches the connection strings of all brokers in the cluster. There is always exactly one
 * ZooKeeper connect string in zkHost.
 * @param zkHost a set holding the single ZooKeeper connect string
 * @return the connection strings of all brokers registered in ZooKeeper
 */
public static Set<String> getBrokers(Set<String> zkHost)
{
  ZkClient zkclient = new ZkClient(zkHost.iterator().next(), 30000, 30000, ZKStringSerializer$.MODULE$);
  Set<String> brokerHosts = new HashSet<String>();
  for (Broker b : JavaConversions.asJavaIterable(ZkUtils.getAllBrokersInCluster(zkclient))) {
    brokerHosts.add(b.connectionString());
  }
  zkclient.close();
  return brokerHosts;
}
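A hypothetical call site passes a singleton set, honoring the one-string contract in the javadoc:

// Hypothetical usage: the set carries the single ZooKeeper connect string.
Set<String> brokerHosts = KafkaMetadataUtil.getBrokers(Collections.singleton("zk1:2181"));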
 
Example #9
Source File: KafkaInfos.java    From DCMonitor with MIT License
public List<BrokerInfo> getCluster() {
  return Lists.transform(
    JavaConversions.asJavaList(ZkUtils.getAllBrokersInCluster(zkClient)), new Function<Broker, BrokerInfo>() {
      @Override
      public BrokerInfo apply(Broker input) {
        BrokerInfo info = new BrokerInfo();
        info.host = input.host();
        info.port = input.port();
        info.id = input.id();
        return info;
      }
    }
  );
}
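Note that Lists.transform returns a lazy view, so the Function runs again on every access. A caller wanting a stable snapshot can copy the result; this is an optional variation, not the project's code:

// Materialize the lazy Guava view into an immutable snapshot.
List<BrokerInfo> snapshot = ImmutableList.copyOf(getCluster());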
 
Example #10
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private Map<Integer, String> getRackAssignment(ZkUtils zkUtils) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Map<Integer, String> rackAssignment = Maps.newHashMap();
    if (!disableRackAwareness) {
        for (Broker broker : brokers) {
            scala.Option<String> rack = broker.rack();
            if (rack.isDefined()) {
                rackAssignment.put(broker.id(), rack.get());
            }
        }
    }
    return rackAssignment;
}
 
Example #11
Source File: KafkaBrokerMonitor.java    From data-highway with Apache License 2.0
List<String> getBrokerIds() {
  return JavaConversions
      .seqAsJavaList(zkUtils.getAllBrokersInCluster())
      .stream()
      .map(Broker::id)
      .map(Object::toString)
      .collect(toList());
}
 
Example #12
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
public long getLastOffset() throws InterruptedException {
    OffsetResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);

        ensureConsumer(previousLeader);
        try {
            response = consumer.getOffsetsBefore(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in getLastOffset for {} - {}",
                        e.getClass().getName(), topic, partitionId);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in getLastOffset {} - {}", topic, partitionId, e);
            response = null;
        }
        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();

            logger.warn("Error fetching data Offset for {} - {}, the Broker. Reason: {}",
                    topic, partitionId, errorCode);

            stopConsumer();
            previousLeader = leaderBroker;
            leaderBroker = null;
            continue;
        }
        break;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return offsets[offsets.length - 1];
}
 
Example #13
Source File: ZkConsumerCommand.java    From jeesuite-libs with Apache License 2.0
public List<BrokerInfo> fetchAllBrokers(){
	List<BrokerInfo> result = new ArrayList<>();
	Seq<Broker> brokers = zkUtils.getAllBrokersInCluster();
	Iterator<Broker> iterator = brokers.toList().iterator();
	while(iterator.hasNext()){
		Broker broker = iterator.next();
		Node node = broker.getNode(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).get();
		result.add(new BrokerInfo(node.idString(), node.host(), node.port()));
	}
	return result;
}
 
Example #14
Source File: Email.java    From doctorkafka with Apache License 2.0
public static void alertOnNoStatsBrokers(String[] emails,
                                         String clusterName,
                                         List<Broker> noStatsBrokers) {

  if (noStatsBrokerEmails.containsKey(clusterName) &&
      System.currentTimeMillis() - noStatsBrokerEmails.get(clusterName) < COOLOFF_INTERVAL) {
    return;
  }
  noStatsBrokerEmails.put(clusterName, System.currentTimeMillis());
  String title = clusterName + " : " + noStatsBrokers.size() + " brokers do not have stats";
  StringBuilder sb = new StringBuilder();
  sb.append("No stats brokers : \n");
  noStatsBrokers.stream().forEach(broker -> sb.append(broker + "\n"));
  sendTo(emails, title, sb.toString());
}
 
Example #15
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
/**
 *   return the list of brokers that do not have stats
 */
public List<Broker> getNoStatsBrokers() {
  Seq<Broker> brokerSeq = zkUtils.getAllBrokersInCluster();
  List<Broker> brokers = scala.collection.JavaConverters.seqAsJavaList(brokerSeq);
  List<Broker> noStatsBrokers = new ArrayList<>();

  brokers.stream().forEach(broker -> {
    if (kafkaCluster.getBroker(broker.id()) == null) {
      noStatsBrokers.add(broker);
    }
  });
  return noStatsBrokers;
}
 
Example #16
Source File: OperatorUtil.java    From doctorkafka with Apache License 2.0
public static String getBrokers(String zkUrl, SecurityProtocol securityProtocol) {
  ZkUtils zkUtils = getZkUtils(zkUrl);
  Seq<Broker> brokersSeq = zkUtils.getAllBrokersInCluster();
  Broker[] brokers = new Broker[brokersSeq.size()];
  brokersSeq.copyToArray(brokers);

  String brokersStr = Arrays.stream(brokers)
      .map(b -> b.brokerEndPoint(
          ListenerName.forSecurityProtocol(securityProtocol)).connectionString())
      .reduce(null, (a, b) -> (a == null) ? b : a + "," + b);
  return brokersStr;
}
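The null-seeded reduce assembles a comma-separated connection string; a joining collector expresses the same thing more directly (an equivalent rewrite, assuming a java.util.stream.Collectors import):

// Equivalent to the reduce above, using Collectors.joining.
String brokersStr = Arrays.stream(brokers)
    .map(b -> b.brokerEndPoint(
        ListenerName.forSecurityProtocol(securityProtocol)).connectionString())
    .collect(Collectors.joining(","));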
 
Example #17
Source File: OffsetGetter.java    From kmanager with Apache License 2.0
public Node getClusterViz() {
	Node rootNode = new Node("KafkaCluster");
	List<Node> childNodes = new ArrayList<Node>();
	List<Broker> brokers = JavaConversions.seqAsJavaList(ZKUtils.getZKUtilsFromKafka().getAllBrokersInCluster());
	brokers.forEach(broker -> {
		List<EndPoint> endPoints = JavaConversions.seqAsJavaList(broker.endPoints().seq());
		childNodes.add(new Node(broker.id() + ":" + endPoints.get(0).host() + ":" + endPoints.get(0).port(), null));
	});
	rootNode.setChildren(childNodes);
	return rootNode;
}
 
Example #18
Source File: ZkConsumerCommand.java    From azeroth with Apache License 2.0
public List<BrokerInfo> fetchAllBrokers() {
    List<BrokerInfo> result = new ArrayList<>();
    Seq<Broker> brokers = zkUtils.getAllBrokersInCluster();
    Iterator<Broker> iterator = brokers.toList().iterator();
    while (iterator.hasNext()) {
        Broker broker = iterator.next();
        Node node = broker.getNode(SecurityProtocol.PLAINTEXT);
        result.add(new BrokerInfo(node.idString(), node.host(), node.port()));
    }
    return result;
}
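Compare Example #13: there getNode takes a ListenerName and unwraps a scala.Option, while here an older Kafka client API accepts the SecurityProtocol directly and returns the Node itself. Which variant compiles depends on the Kafka version on the classpath.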
 
Example #19
Source File: KafkaProduceOffsetFetcher.java    From DDMQ with Apache License 2.0
private SimpleConsumer getSimpleConsumer(int bid) {
    if (!brokerIndex.containsKey(bid)) {
        return null;
    }
    Broker broker = brokerIndex.get(bid);
    if (consumerMap.containsKey(broker)) {
        return consumerMap.get(broker);
    } else {
        SimpleConsumer consumer = new SimpleConsumer(broker.host(), broker.port(),
                ConsumerConfig.SocketTimeout(), ConsumerConfig.SocketBufferSize(), "ConsumerOffsetChecker");
        consumerMap.put(broker, consumer);
        return consumer;
    }
}
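The containsKey/get/put sequence can be collapsed with Map.computeIfAbsent, assuming consumerMap is a java.util.Map; a compacting variation, not the original code:

// Equivalent caching logic using computeIfAbsent.
Broker broker = brokerIndex.get(bid);
if (broker == null) {
    return null;
}
return consumerMap.computeIfAbsent(broker, b -> new SimpleConsumer(
        b.host(), b.port(), ConsumerConfig.SocketTimeout(),
        ConsumerConfig.SocketBufferSize(), "ConsumerOffsetChecker"));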
 
Example #20
Source File: KafkaProduceOffsetFetcher.java    From DDMQ with Apache License 2.0
public Map<Integer, Map<String, List<Integer>>> getMetadata(Set<String> topics) {
    if (CollectionUtils.isEmpty(topics)) {
        return Collections.emptyMap();
    }
    Seq<Broker> brokers = ZkUtils.getAllBrokersInCluster(zkClient);
    for (Broker broker : JavaConverters.asJavaListConverter(brokers).asJava()) {
        brokerIndex.put(broker.id(), broker);
    }
    TopicMetadataResponse response = ClientUtils.fetchTopicMetadata(JavaConverters.asScalaSetConverter(topics).asScala(), brokers, "GetMetadataClient", 10000, 0);
    return parseMetadataResponse(response);
}
 
Example #21
Source File: PulsarPartitionMetadata.java    From pulsar with Apache License 2.0
@Override
public List<Broker> replicas() {
    return hosts;
}
 
Example #22
Source File: ScribeConsumer.java    From Scribengin with GNU Affero General Public License v3.0
private void storeReplicaBrokers(PartitionMetadata metadata) {
  replicaBrokers.clear();
  for (Broker replica : metadata.replicas()) {
    replicaBrokers.add(new HostPort(replica.host(), replica.port()));
  }
}
 
Example #23
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset max long, fetch from latest in kafka {}", offset);
        }

        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earilset offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) :
        (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
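A hypothetical caller inside the same class drains the iterable; process is an assumed application-side handler, not part of the original source:

// Hypothetical: fetch from the latest offset, waiting up to one second.
for (BytesMessageWithOffset msg : fetch(Long.MAX_VALUE, 1000)) {
    process(msg); // assumed handler
}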
 
Example #24
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public Broker getLeaderBroker() {
    return leaderBroker;
}
 
Example #25
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public void setLeaderBroker(Broker leaderBroker) {
    this.leaderBroker = leaderBroker;
}
 
Example #26
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printLeastDisruptiveReassignment(
        ZkUtils zkUtils, List<String> specifiedTopics, Set<Integer> specifiedBrokers,
        Set<Integer> excludedBrokers, Map<Integer, String> rackAssignment, int desiredReplicationFactor)
        throws JSONException {
    // We need three inputs for rebalancing: the brokers, the topics, and the current assignment
    // of topics to brokers.
    Set<Integer> brokerSet = specifiedBrokers;
    if (brokerSet == null || brokerSet.isEmpty()) {
        brokerSet = Sets.newHashSet(Lists.transform(
                JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster()),
                new Function<Broker, Integer>() {
                    @Override
                    public Integer apply(Broker broker) {
                        return broker.id();
                    }
                }));
    }

    // Exclude brokers that we want to decommission
    Set<Integer> brokers = Sets.difference(brokerSet, excludedBrokers);
    rackAssignment.keySet().retainAll(brokers);

    // The most common use case is to rebalance all topics, but explicit topic addition is also
    // supported.
    Seq<String> topics = specifiedTopics != null ?
            JavaConversions.collectionAsScalaIterable(specifiedTopics).toSeq() :
            zkUtils.getAllTopics();

    // Print the current assignment in case a rollback is needed
    printCurrentAssignment(zkUtils, JavaConversions.seqAsJavaList(topics));

    Map<String, Map<Integer, List<Integer>>> initialAssignments =
            KafkaTopicAssigner.topicMapToJavaMap(zkUtils.getPartitionAssignmentForTopics(
                    topics));

    // Assign topics one at a time. This is slightly suboptimal from a packing standpoint, but
    // it's close enough to work in practice. We can also always follow it up with a Kafka
    // leader election rebalance if necessary.
    JSONObject json = new JSONObject();
    json.put("version", KAFKA_FORMAT_VERSION);
    JSONArray partitionsJson = new JSONArray();
    KafkaTopicAssigner assigner = new KafkaTopicAssigner();
    for (String topic : JavaConversions.seqAsJavaList(topics)) {
        Map<Integer, List<Integer>> partitionAssignment = initialAssignments.get(topic);
        Map<Integer, List<Integer>> finalAssignment = assigner.generateAssignment(
                topic, partitionAssignment, brokers, rackAssignment, desiredReplicationFactor);
        for (Map.Entry<Integer, List<Integer>> e : finalAssignment.entrySet()) {
            JSONObject partitionJson = new JSONObject();
            partitionJson.put("topic", topic);
            partitionJson.put("partition", e.getKey());
            partitionJson.put("replicas", new JSONArray(e.getValue()));
            partitionsJson.put(partitionJson);
        }
    }
    json.put("partitions", partitionsJson);
    System.out.println("NEW ASSIGNMENT:\n" + json.toString());
}
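The emitted object follows the {"version":1,"partitions":[...]} layout consumed by Kafka's kafka-reassign-partitions.sh tool (assuming KAFKA_FORMAT_VERSION is 1, as the constant name suggests), so the printed assignment can be fed to that tool directly.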
 
Example #27
Source File: PulsarPartitionMetadata.java    From pulsar with Apache License 2.0
@Override
public Broker leader() {
    return hosts.get(0);
}
 
Example #28
Source File: PulsarPartitionMetadata.java    From pulsar with Apache License 2.0
public PulsarPartitionMetadata(String hostUrl, int port) {
    super(null);
    this.hosts = Collections.singletonList(new Broker(0, hostUrl, port));
}
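Together with Examples #21 and #27, this constructor completes Pulsar's Kafka compatibility shim for partition metadata: a single Pulsar host is wrapped in a Broker with id 0 and reported as both the partition leader and its only replica.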