Java Code Examples for org.apache.helix.model.InstanceConfig#getHostName()

The following examples show how to use org.apache.helix.model.InstanceConfig#getHostName(). They are taken from open-source projects; the original project and source file are noted above each example.
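Before the project examples, here is a minimal, self-contained sketch of the call itself. It assumes an InstanceConfig built directly via its constructor rather than fetched from ZooKeeper; the instance name, host, and port values ("localhost_12918", "localhost", "12918") are illustrative placeholders only.

import org.apache.helix.model.InstanceConfig;

public class GetHostNameSketch {
  public static void main(String[] args) {
    // Build a standalone InstanceConfig; in a running cluster this object is
    // usually read from ZooKeeper via HelixAdmin or a HelixDataAccessor.
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918"); // placeholder instance name
    instanceConfig.setHostName("localhost"); // placeholder host
    instanceConfig.setPort("12918");         // placeholder port

    // getHostName() returns the host stored in the config's simple fields.
    String hostname = instanceConfig.getHostName();
    System.out.println("host=" + hostname + ", port=" + instanceConfig.getPort());
  }
}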
Example 1
Source File: Replicator.java    From helix with Apache License 2.0
public void startReplication(InstanceConfig masterInstanceConfig) throws Exception {
  String remoteHost = masterInstanceConfig.getHostName();
  String remoteChangeLogDir = masterInstanceConfig.getRecord().getSimpleField("change_log_dir");
  String remoteFilestoreDir = masterInstanceConfig.getRecord().getSimpleField("file_store_dir");

  String localChangeLogDir = localInstanceConfig.getRecord().getSimpleField("change_log_dir");
  String localFilestoreDir = localInstanceConfig.getRecord().getSimpleField("file_store_dir");
  String localcheckpointDir = localInstanceConfig.getRecord().getSimpleField("check_point_dir");
  // setup rsync for the change log directory
  setupRsync(remoteHost, remoteChangeLogDir, localChangeLogDir);
  reader = new ChangeLogReader(localChangeLogDir);
  watchService = new FileSystemWatchService(localChangeLogDir, reader);
  processor =
      new ChangeLogProcessor(reader, remoteHost, remoteFilestoreDir, localFilestoreDir,
          localcheckpointDir);
  watchService.start();
  processor.start();
  isReplicationStarted.set(true);
}
 
Example 2
Source File: ServerInstance.java    From incubator-pinot with Apache License 2.0
/**
 * By default (auto joined instances), server instance name is of format: {@code Server_<hostname>_<port>}, e.g.
 * {@code Server_localhost_12345}, hostname is of format: {@code Server_<hostname>}, e.g. {@code Server_localhost}.
 */
public ServerInstance(InstanceConfig instanceConfig) {
  String hostname = instanceConfig.getHostName();
  if (hostname != null) {
    if (hostname.startsWith(Helix.PREFIX_OF_SERVER_INSTANCE)) {
      _hostname = hostname.substring(SERVER_INSTANCE_PREFIX_LENGTH);
    } else {
      _hostname = hostname;
    }
    _port = Integer.parseInt(instanceConfig.getPort());
  } else {
    // Hostname might be null in some tests (InstanceConfig created by calling the constructor instead of fetching
    // from ZK), directly parse the instance name
    String instanceName = instanceConfig.getInstanceName();
    String[] hostnameAndPort = instanceName.split(Helix.PREFIX_OF_SERVER_INSTANCE)[1].split(HOSTNAME_PORT_DELIMITER);
    _hostname = hostnameAndPort[0];
    _port = Integer.parseInt(hostnameAndPort[1]);
  }
}
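The naming convention described in the Javadoc above can be illustrated with plain string handling. The following is a minimal sketch, not Pinot code: SERVER_PREFIX and HOSTNAME_PORT_DELIMITER are hypothetical stand-ins for Helix.PREFIX_OF_SERVER_INSTANCE ("Server_") and the underscore delimiter used in the example.

public class ServerInstanceNameSketch {
  // Hypothetical stand-ins for the constants referenced in Example 2.
  private static final String SERVER_PREFIX = "Server_";
  private static final String HOSTNAME_PORT_DELIMITER = "_";

  public static void main(String[] args) {
    // Case 1: the hostname field is populated and carries the instance-type prefix.
    String hostnameField = "Server_localhost";
    String hostname = hostnameField.startsWith(SERVER_PREFIX)
        ? hostnameField.substring(SERVER_PREFIX.length())
        : hostnameField;
    System.out.println(hostname); // prints "localhost"

    // Case 2: the hostname is null, so host and port are parsed from the instance name.
    String instanceName = "Server_localhost_12345";
    String[] hostnameAndPort = instanceName.split(SERVER_PREFIX)[1].split(HOSTNAME_PORT_DELIMITER);
    System.out.println(hostnameAndPort[0] + ":" + hostnameAndPort[1]); // prints "localhost:12345"
  }
}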
 
Example 3
Source File: CloudToStoreReplicationManager.java    From ambry with Apache License 2.0
@Override
public void onInstanceConfigChange(List<InstanceConfig> instanceConfigs, NotificationContext context) {
  logger.info("Instance config change notification received with instanceConfigs: {}", instanceConfigs);
  Set<CloudDataNode> newVcrNodes = new HashSet<>();
  ConcurrentHashMap<String, CloudDataNode> newInstanceNameToCloudDataNode = new ConcurrentHashMap<>();

  // create a new list of available vcr nodes.
  for (InstanceConfig instanceConfig : instanceConfigs) {
    String instanceName = instanceConfig.getInstanceName();
    Port sslPort =
        getSslPortStr(instanceConfig) == null ? null : new Port(getSslPortStr(instanceConfig), PortType.SSL);
    Port http2Port =
        getHttp2PortStr(instanceConfig) == null ? null : new Port(getHttp2PortStr(instanceConfig), PortType.HTTP2);
    CloudDataNode cloudDataNode = new CloudDataNode(instanceConfig.getHostName(),
        new Port(Integer.parseInt(instanceConfig.getPort()), PortType.PLAINTEXT), sslPort, http2Port,
        clusterMapConfig.clustermapVcrDatacenterName, clusterMapConfig);
    newInstanceNameToCloudDataNode.put(instanceName, cloudDataNode);
    newVcrNodes.add(cloudDataNode);
  }

  synchronized (notificationLock) {
    instanceNameToCloudDataNode.set(newInstanceNameToCloudDataNode);
    handleChangeInVcrNodes(newVcrNodes);
  }
}
 
Example 4
Source File: ZKHelixAdmin.java    From helix with Apache License 2.0
@Override
public boolean setInstanceConfig(String clusterName, String instanceName,
    InstanceConfig newInstanceConfig) {
  logger.info("Set instance config for instance {} to cluster {} with new InstanceConfig {}.",
      instanceName, clusterName,
      newInstanceConfig == null ? "NULL" : newInstanceConfig.toString());
  String instanceConfigPath = PropertyPathBuilder.getPath(PropertyType.CONFIGS, clusterName,
      HelixConfigScope.ConfigScopeProperty.PARTICIPANT.toString(), instanceName);
  if (!_zkClient.exists(instanceConfigPath)) {
    throw new HelixException(
        "instance " + instanceName + " does not exist in cluster " + clusterName);
  }

  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey instanceConfigPropertyKey = accessor.keyBuilder().instanceConfig(instanceName);
  InstanceConfig currentInstanceConfig = accessor.getProperty(instanceConfigPropertyKey);
  if (!newInstanceConfig.getHostName().equals(currentInstanceConfig.getHostName())
      || !newInstanceConfig.getPort().equals(currentInstanceConfig.getPort())) {
    throw new HelixException(
        "Hostname and port cannot be changed, current hostname: " + currentInstanceConfig
            .getHostName() + " and port: " + currentInstanceConfig.getPort()
            + " is different from new hostname: " + newInstanceConfig.getHostName()
            + " and new port: " + newInstanceConfig.getPort());
  }
  return accessor.setProperty(instanceConfigPropertyKey, newInstanceConfig);
}
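A hedged usage sketch for the method above: read the current config, change something that is allowed to change (here a tag), and write it back. Changing the hostname or port would instead trigger the HelixException shown in the example. The ZooKeeper address, cluster name, instance name, and tag are placeholders.

import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixException;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.InstanceConfig;

public class SetInstanceConfigSketch {
  public static void main(String[] args) {
    HelixAdmin admin = new ZKHelixAdmin("localhost:2181"); // placeholder ZK address
    String clusterName = "MyCluster";                      // placeholder cluster
    String instanceName = "localhost_12918";               // placeholder instance

    try {
      // Read the current config, keep hostname/port unchanged, and add a tag.
      InstanceConfig config = admin.getInstanceConfig(clusterName, instanceName);
      config.addTag("serving"); // placeholder tag
      admin.setInstanceConfig(clusterName, instanceName, config);
    } catch (HelixException e) {
      // Thrown if the instance does not exist or if hostname/port were modified.
      System.err.println("Failed to update instance config: " + e.getMessage());
    } finally {
      admin.close();
    }
  }
}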
 
Example 5
Source File: PinotQueryResource.java    From incubator-pinot with Apache License 2.0
public String getQueryResponse(String query, String traceEnabled, String queryOptions, HttpHeaders httpHeaders,
    String querySyntax) {
  // Get resource table name.
  BrokerRequest brokerRequest;
  try {
    switch (querySyntax) {
      case CommonConstants.Broker.Request.SQL:
        brokerRequest = SQL_QUERY_COMPILER.compileToBrokerRequest(query);
        break;
      case CommonConstants.Broker.Request.PQL:
        brokerRequest = PQL_QUERY_COMPILER.compileToBrokerRequest(query);
        break;
      default:
        throw new UnsupportedOperationException("Unsupported query syntax - " + querySyntax);
    }
    String inputTableName = brokerRequest.getQuerySource().getTableName();
    brokerRequest.getQuerySource().setTableName(_pinotHelixResourceManager.getActualTableName(inputTableName));
  } catch (Exception e) {
    LOGGER.error("Caught exception while compiling {} query: {}", querySyntax.toUpperCase(), query, e);
    return QueryException.getException(QueryException.PQL_PARSING_ERROR, e).toString();
  }
  String tableName = TableNameBuilder.extractRawTableName(brokerRequest.getQuerySource().getTableName());

  // Validate data access
  AccessControl accessControl = _accessControlFactory.create();
  if (!accessControl.hasDataAccess(httpHeaders, tableName)) {
    return QueryException.ACCESS_DENIED_ERROR.toString();
  }

  // Get brokers for the resource table.
  List<String> instanceIds = _pinotHelixResourceManager.getBrokerInstancesFor(tableName);
  if (instanceIds.isEmpty()) {
    return QueryException.BROKER_RESOURCE_MISSING_ERROR.toString();
  }

  // Retain only online brokers.
  instanceIds.retainAll(_pinotHelixResourceManager.getOnlineInstanceList());
  if (instanceIds.isEmpty()) {
    return QueryException.BROKER_INSTANCE_MISSING_ERROR.toString();
  }

  // Send query to a random broker.
  String instanceId = instanceIds.get(RANDOM.nextInt(instanceIds.size()));
  InstanceConfig instanceConfig = _pinotHelixResourceManager.getHelixInstanceConfig(instanceId);
  if (instanceConfig == null) {
    LOGGER.error("Instance {} not found", instanceId);
    return QueryException.INTERNAL_ERROR.toString();
  }
  // The host name is stored with an instance-type prefix (e.g. "Broker_<host>"),
  // so strip everything up to and including the first underscore before building the URL.
  String hostNameWithPrefix = instanceConfig.getHostName();
  String url =
      getQueryURL(hostNameWithPrefix.substring(hostNameWithPrefix.indexOf("_") + 1), instanceConfig.getPort(),
          querySyntax);
  ObjectNode requestJson = getRequestJson(query, traceEnabled, queryOptions, querySyntax);
  return sendRequestRaw(url, query, requestJson);
}
 
Example 6
Source File: InstanceConfigToDataNodeConfigAdapter.java    From ambry with Apache License 2.0
/**
 * Exposed for testing.
 * @param instanceConfig the {@link InstanceConfig} to convert to a {@link DataNodeConfig} object.
 * @param clusterMapConfig the {@link ClusterMapConfig} containing any default values that may be needed.
 * @return the {@link DataNodeConfig}, or {@code null} if the {@link InstanceConfig} provided has an unsupported schema
 *         version.
 */
static DataNodeConfig convert(InstanceConfig instanceConfig, ClusterMapConfig clusterMapConfig) {
  int schemaVersion = getSchemaVersion(instanceConfig);
  if (schemaVersion != 0) {
    LOGGER.warn("Unknown InstanceConfig schema version {} in {}. Ignoring.", schemaVersion, instanceConfig);
    return null;
  }
  DataNodeConfig dataNodeConfig = new DataNodeConfig(instanceConfig.getInstanceName(), instanceConfig.getHostName(),
      Integer.parseInt(instanceConfig.getPort()), getDcName(instanceConfig), getSslPortStr(instanceConfig),
      getHttp2PortStr(instanceConfig), getRackId(instanceConfig), getXid(instanceConfig));
  dataNodeConfig.getSealedReplicas().addAll(getSealedReplicas(instanceConfig));
  dataNodeConfig.getStoppedReplicas().addAll(getStoppedReplicas(instanceConfig));
  // TODO uncomment this line once 1534 is merged
  // dataNodeConfig.getDisabledReplicas().addAll(getDisabledReplicas(instanceConfig));
  instanceConfig.getRecord().getMapFields().forEach((mountPath, diskProps) -> {
    if (diskProps.get(DISK_STATE) == null) {
      // Check if this map field actually holds disk properties, since we can't tell from just the field key (the
      // mount path with no special prefix). There may be extra fields when Helix controller adds partitions in ERROR
      // state to InstanceConfig.
      LOGGER.warn("{} field does not contain disk info on {}. Skip it and continue on next one.", mountPath,
          instanceConfig.getInstanceName());
    } else {
      DataNodeConfig.DiskConfig disk = new DataNodeConfig.DiskConfig(
          diskProps.get(DISK_STATE).equals(AVAILABLE_STR) ? HardwareState.AVAILABLE : HardwareState.UNAVAILABLE,
          Long.parseLong(diskProps.get(DISK_CAPACITY_STR)));
      String replicasStr = diskProps.get(REPLICAS_STR);
      if (!replicasStr.isEmpty()) {
        for (String replicaStr : replicasStr.split(REPLICAS_DELIM_STR)) {
          String[] replicaStrParts = replicaStr.split(REPLICAS_STR_SEPARATOR);
          // partition name and replica name are the same.
          String partitionName = replicaStrParts[0];
          long replicaCapacity = Long.parseLong(replicaStrParts[1]);
          String partitionClass =
              replicaStrParts.length > 2 ? replicaStrParts[2] : clusterMapConfig.clusterMapDefaultPartitionClass;
          disk.getReplicaConfigs()
              .put(partitionName, new DataNodeConfig.ReplicaConfig(replicaCapacity, partitionClass));
        }
      }
      dataNodeConfig.getDiskConfigs().put(mountPath, disk);
    }
  });
  return dataNodeConfig;
}