Java Code Examples for org.apache.hadoop.conf.Configuration.getTrimmedStringCollection()

The following are Java code examples showing how to use the getTrimmedStringCollection() method of the org.apache.hadoop.conf.Configuration class. You can vote up the examples you find useful; your votes help our system surface more good examples.
Example 1
Project: hadoop   File: HAUtil.java   Source Code and License Vote up 6 votes
/**
 * Verify configuration that there are at least two RM-ids
 * and RPC addresses are specified for each RM-id.
 * Then set the RM-ids.
 *
 * @param conf configuration to validate and normalize in place
 */
private static void verifyAndSetRMHAIdsList(Configuration conf) {
  Collection<String> ids =
    conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
  if (ids.size() < 2) {
    // Fixed typo in the original message ("atleast" -> "at least").
    throwBadConfigurationException(
      getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
        conf.get(YarnConfiguration.RM_HA_IDS) +
        "\nHA mode requires at least two RMs"));
  }

  // Verify the RM service address configurations for every RM-id.
  for (String id : ids) {
    for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
      checkAndSetRMRPCAddress(prefix, id, conf);
    }
  }
  // Write back the trimmed ids as a comma-separated list; String.join
  // replaces the manual StringBuilder + substring trailing-comma trim.
  conf.set(YarnConfiguration.RM_HA_IDS, String.join(",", ids));
}
 
Example 2
Project: hadoop   File: HAUtil.java   Source Code and License Vote up 6 votes
/**
 * Resolves the current RM-id, verifies it is one of the configured HA ids,
 * and stores it under {@code YarnConfiguration.RM_HA_ID}.
 */
private static void verifyAndSetCurrentRMHAId(Configuration conf) {
  String rmId = getRMHAId(conf);
  if (rmId != null) {
    // The resolved id must appear in the configured HA id list.
    Collection<String> configuredIds = getRMHAIds(conf);
    if (!configuredIds.contains(rmId)) {
      throwBadConfigurationException(
        getRMHAIdNeedToBeIncludedMessage(configuredIds.toString(), rmId));
    }
  } else {
    // Build a diagnostic listing every candidate address key we checked.
    StringBuilder message = new StringBuilder();
    message.append("Can not find valid RM_HA_ID. None of ");
    for (String haId : conf
        .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
      message.append(addSuffix(YarnConfiguration.RM_ADDRESS, haId))
          .append(" ");
    }
    message.append(" are matching" +
        " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
        " specified in HA Configuration");
    throwBadConfigurationException(message.toString());
  }
  conf.set(YarnConfiguration.RM_HA_ID, rmId);
}
 
Example 3
Project: hadoop   File: DFSUtil.java   Source Code and License Vote up 5 votes
/**
 * Returns the map of InetSocketAddresses corresponding to the namenodes
 * that manage this cluster. Note this is to be used by datanodes to get
 * the list of namenode addresses to talk to.
 *
 * Returns namenode address specifically configured for datanodes (using
 * service ports), if found. If not, regular RPC address configured for other
 * clients is returned.
 *
 * @param conf configuration
 * @return nested map of addresses, one inner map per nameservice
 *         (presumably keyed nameservice-id -> namenode-id; produced by
 *         getAddressesForNsIds — confirm against that helper)
 * @throws IOException if an internal nameservice is not among the configured
 *         nameservices, or if no namenode address could be determined
 */
public static Map<String, Map<String, InetSocketAddress>>
  getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    // No usable default address in conf; rely purely on per-nameservice keys.
    defaultAddress = null;
  }

  // Prefer the explicitly-configured "internal" nameservices, if any.
  Collection<String> parentNameServices = conf.getTrimmedStringCollection
          (DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);

  if (parentNameServices.isEmpty()) {
    // No internal subset configured: fall back to all configured nameservices.
    parentNameServices = conf.getTrimmedStringCollection
            (DFSConfigKeys.DFS_NAMESERVICES);
  } else {
    // Ensure that the internal service is indeed in the list of all available
    // nameservices.
    Set<String> availableNameServices = Sets.newHashSet(conf
            .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
    for (String nsId : parentNameServices) {
      if (!availableNameServices.contains(nsId)) {
        throw new IOException("Unknown nameservice: " + nsId);
      }
    }
  }

  Map<String, Map<String, InetSocketAddress>> addressList =
          getAddressesForNsIds(conf, parentNameServices, defaultAddress,
                  DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    // Nothing resolved at all — the cluster configuration is unusable.
    throw new IOException("Incorrect configuration: namenode address "
            + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
            + DFS_NAMENODE_RPC_ADDRESS_KEY
            + " is not configured.");
  }
  return addressList;
}
 
Example 4
Project: hadoop   File: FSNamesystem.java   Source Code and License Vote up 5 votes
/**
 * Collects the storage directory URIs configured under {@code propertyName},
 * with special handling for a checkpoint-IMPORT startup.
 *
 * @param conf configuration to read directories from
 * @param propertyName configuration key listing the directories
 * @return the configured directories as URIs; for non-IMPORT startup an
 *         empty configuration falls back to the default edits dir
 */
private static Collection<URI> getStorageDirs(Configuration conf,
                                              String propertyName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if(startOpt == StartupOption.IMPORT) {
    // In case of IMPORT this will get rid of default directories 
    // but will retain directories specified in hdfs-site.xml
    // When importing image from a checkpoint, the name-node can
    // start with empty set of storage directories.
    // Load only the *default* resources so we can subtract the defaults
    // from the effective list, leaving site-specific entries.
    Configuration cE = new HdfsConfiguration(false);
    cE.addResource("core-default.xml");
    cE.addResource("core-site.xml");
    cE.addResource("hdfs-default.xml");
    Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
    dirNames.removeAll(dirNames2);
    if(dirNames.isEmpty())
      LOG.warn("!!! WARNING !!!" +
        "\n\tThe NameNode currently runs without persistent storage." +
        "\n\tAny changes to the file system meta-data may be lost." +
        "\n\tRecommended actions:" +
        "\n\t\t- shutdown and restart NameNode with configured \"" 
        + propertyName + "\" in hdfs-site.xml;" +
        "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
        "of the file system meta-data.");
  } else if (dirNames.isEmpty()) {
    // Normal startup with nothing configured: use the default edits dir.
    dirNames = Collections.singletonList(
        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
 
Example 5
Project: hadoop   File: FSNamesystem.java   Source Code and License Vote up 5 votes
/**
 * Returns edit directories that are shared between primary and secondary.
 *
 * @param conf configuration
 * @return collection of edit directories from {@code conf}
 */
public static List<URI> getSharedEditsDirs(Configuration conf) {
  // Deliberately bypasses getStorageDirs(): shared edits must default to
  // an empty list, not to the temp-dir fallback.
  return Util.stringCollectionAsURIs(
      conf.getTrimmedStringCollection(DFS_NAMENODE_SHARED_EDITS_DIR_KEY));
}
 
Example 6
Project: hadoop   File: FSImage.java   Source Code and License Vote up 5 votes
/**
 * Retrieves the checkpoint directories from the configuration.
 *
 * @param conf the Configuration
 * @param defaultValue fallback directory used when nothing is configured;
 *        may be null, in which case an empty collection is returned
 * @return a Collection of URIs representing the values of the
 *         dfs.namenode.checkpoint.dir configuration property
 */
static Collection<URI> getCheckpointDirs(Configuration conf,
    String defaultValue) {
  Collection<String> checkpointDirs = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  // Fall back to the supplied default only when nothing was configured.
  if (checkpointDirs.isEmpty() && defaultValue != null) {
    checkpointDirs.add(defaultValue);
  }
  return Util.stringCollectionAsURIs(checkpointDirs);
}
 
Example 7
Project: hadoop   File: FSImage.java   Source Code and License Vote up 5 votes
/**
 * Retrieves the checkpoint-edits directories from the configuration.
 *
 * @param conf the Configuration
 * @param defaultName fallback directory used when nothing is configured;
 *        may be null, in which case an empty list is returned
 * @return the configured checkpoint edits directories as URIs
 */
static List<URI> getCheckpointEditsDirs(Configuration conf,
    String defaultName) {
  Collection<String> editsDirs = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
  // Fall back to the supplied default only when nothing was configured.
  if (editsDirs.isEmpty() && defaultName != null) {
    editsDirs.add(defaultName);
  }
  return Util.stringCollectionAsURIs(editsDirs);
}
 
Example 8
Project: hadoop   File: DataNode.java   Source Code and License Vote up 5 votes
/**
 * Parses the configured datanode data directories into StorageLocations.
 * Directories that fail to parse are logged and skipped rather than
 * failing datanode startup.
 *
 * @param conf configuration holding {@code DFS_DATANODE_DATA_DIR_KEY}
 * @return the successfully parsed storage locations (possibly empty)
 */
public static List<StorageLocation> getStorageLocations(Configuration conf) {
  Collection<String> rawLocations =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
  List<StorageLocation> locations =
      new ArrayList<StorageLocation>(rawLocations.size());

  for (String locationString : rawLocations) {
    try {
      locations.add(StorageLocation.parse(locationString));
    } catch (IOException | SecurityException e) {
      // The two original catch blocks were byte-identical; a multi-catch
      // removes the duplication. The bad entry is deliberately skipped.
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + e);
    }
  }

  return locations;
}
 
Example 9
Project: ditb   File: FSHDFSUtils.java   Source Code and License Vote up 5 votes
/**
 * @param conf the Configuration of HBase
 * @param srcFs source filesystem
 * @param desFs destination filesystem
 * @return Whether srcFs and desFs are on same hdfs or not
 */
public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) {
  // By getCanonicalServiceName, we could make sure both srcFs and desFs
  // show a unified format which contains scheme, host and port.
  String srcServiceName = srcFs.getCanonicalServiceName();
  String desServiceName = desFs.getCanonicalServiceName();

  if (srcServiceName == null || desServiceName == null) {
    // Can't compare without canonical names; treat as different.
    return false;
  }
  if (srcServiceName.equals(desServiceName)) {
    return true;
  }
  if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) {
    Collection<String> internalNameServices =
        conf.getTrimmedStringCollection("dfs.internal.nameservices");
    if (!internalNameServices.isEmpty()) {
      // NOTE(review): only the *source* nameservice (the token after the
      // first ':') is checked against the internal list; desServiceName is
      // never consulted here even though the two names differ — confirm
      // this asymmetry is intentional.
      if (internalNameServices.contains(srcServiceName.split(":")[1])) {
        return true;
      } else {
        return false;
      }
    }
  }
  if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) {
    //If one serviceName is an HA format while the other is a non-HA format,
    // maybe they refer to the same FileSystem.
    //For example, srcFs is "ha-hdfs://nameservices" and desFs is "hdfs://activeNamenode:port"
    Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf);
    Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf);
    // Any shared namenode address means the two point at the same cluster.
    if (Sets.intersection(srcAddrs, desAddrs).size() > 0) {
      return true;
    }
  }

  return false;
}
 
Example 10
Project: hadoop-oss   File: NuCypherExtUtilClient.java   Source Code and License Vote up 2 votes
/**
 * Looks up the nameservice ids declared in the configuration.
 *
 * @param conf configuration
 * @return the configured nameservice ids; empty when none are specified
 */
public static Collection<String> getNameServiceIds(Configuration conf) {
  Collection<String> nameServiceIds =
      conf.getTrimmedStringCollection(DFS_NAMESERVICES);
  return nameServiceIds;
}
 
Example 11
Project: hadoop-oss   File: NuCypherExtUtilClient.java   Source Code and License Vote up 2 votes
/**
 * Namenode HighAvailability related configuration.
 * Looks up the namenode ids configured for one nameservice — one logical
 * id per namenode in the HA setup.
 *
 * @param conf configuration
 * @param nsId the nameservice ID to look at, or null for non-federated
 * @return collection of namenode Ids
 */
public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
  // Key is DFS_HA_NAMENODES_KEY_PREFIX, suffixed with the nameservice id.
  return conf.getTrimmedStringCollection(
      addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId));
}
 
Example 12
Project: hadoop   File: DFSUtil.java   Source Code and License Vote up 2 votes
/**
 * Reads the set of configured nameservice ids.
 *
 * @param conf configuration
 * @return the configured nameservice ids; empty when none are specified
 */
public static Collection<String> getNameServiceIds(Configuration conf) {
  Collection<String> serviceIds =
      conf.getTrimmedStringCollection(DFS_NAMESERVICES);
  return serviceIds;
}
 
Example 13
Project: hadoop   File: DFSUtil.java   Source Code and License Vote up 2 votes
/**
 * Namenode HighAvailability related configuration.
 * Resolves the namenode ids configured for the given nameservice — one
 * logical id per namenode in the HA setup.
 *
 * @param conf configuration
 * @param nsId the nameservice ID to look at, or null for non-federated
 * @return collection of namenode Ids
 */
public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
  // Per-nameservice key: DFS_HA_NAMENODES_KEY_PREFIX suffixed with nsId.
  return conf.getTrimmedStringCollection(
      addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId));
}