Java Code Examples for org.apache.hadoop.conf.Configuration#getTrimmedStringCollection()
The following examples show how to use
org.apache.hadoop.conf.Configuration#getTrimmedStringCollection().
The method reads a comma-separated configuration value and returns it as a collection of strings, with surrounding whitespace trimmed, empty entries dropped, and duplicates removed.
The examples are taken from open source projects; the source file and project are noted above each example.
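Before turning to the project examples, here is a minimal, self-contained sketch of the method's behavior. The property name demo.nameservices and its value are hypothetical, and the expected output assumes the trimming, empty-entry removal, and de-duplication behavior that the examples below rely on.

import java.util.Collection;

import org.apache.hadoop.conf.Configuration;

public class GetTrimmedStringCollectionDemo {
  public static void main(String[] args) {
    // false = do not load the default resources (core-default.xml, core-site.xml)
    Configuration conf = new Configuration(false);

    // Hypothetical property: a comma-separated list with stray whitespace,
    // an empty entry, and a duplicate.
    conf.set("demo.nameservices", " ns1 , ns2 ,, ns1, ns3 ");

    Collection<String> services = conf.getTrimmedStringCollection("demo.nameservices");

    // Expected (assuming trimming, empty-entry removal, and de-duplication):
    // [ns1, ns2, ns3]
    System.out.println(services);
  }
}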
Example 1
Source File: HAUtil.java From hadoop with Apache License 2.0
/**
 * Verify configuration that there are at least two RM-ids
 * and RPC addresses are specified for each RM-id.
 * Then set the RM-ids.
 */
private static void verifyAndSetRMHAIdsList(Configuration conf) {
  Collection<String> ids =
      conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
  if (ids.size() < 2) {
    throwBadConfigurationException(
        getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
            conf.get(YarnConfiguration.RM_HA_IDS)
            + "\nHA mode requires atleast two RMs"));
  }

  StringBuilder setValue = new StringBuilder();
  for (String id : ids) {
    // verify the RM service addresses configurations for every RMIds
    for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
      checkAndSetRMRPCAddress(prefix, id, conf);
    }
    setValue.append(id);
    setValue.append(",");
  }
  conf.set(YarnConfiguration.RM_HA_IDS,
      setValue.substring(0, setValue.length() - 1));
}
Example 2
Source File: HAUtil.java From hadoop with Apache License 2.0
private static void verifyAndSetCurrentRMHAId(Configuration conf) {
  String rmId = getRMHAId(conf);
  if (rmId == null) {
    StringBuilder msg = new StringBuilder();
    msg.append("Can not find valid RM_HA_ID. None of ");
    for (String id : conf
        .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
      msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
    }
    msg.append(" are matching" +
        " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
        " specified in HA Configuration");
    throwBadConfigurationException(msg.toString());
  } else {
    Collection<String> ids = getRMHAIds(conf);
    if (!ids.contains(rmId)) {
      throwBadConfigurationException(
          getRMHAIdNeedToBeIncludedMessage(ids.toString(), rmId));
    }
  }
  conf.set(YarnConfiguration.RM_HA_ID, rmId);
}
Example 3
Source File: KMSAudit.java From ranger with Apache License 2.0
/**
 * Read the KMSAuditLogger classes from configuration. If any loggers fail to
 * load, a RuntimeException will be thrown.
 *
 * @param conf The configuration.
 * @return Collection of KMSAuditLogger classes.
 */
private Set<Class<? extends KMSAuditLogger>> getAuditLoggerClasses(
    final Configuration conf) {
  Set<Class<? extends KMSAuditLogger>> result = new HashSet<>();
  // getTrimmedStringCollection will remove duplicates.
  Collection<String> classes =
      conf.getTrimmedStringCollection(KMSConfiguration.KMS_AUDIT_LOGGER_KEY);
  if (classes.isEmpty()) {
    LOG.info("No audit logger configured, using default.");
    result.add(SimpleKMSAuditLogger.class);
    return result;
  }

  for (String c : classes) {
    try {
      Class<?> cls = conf.getClassByName(c);
      result.add(cls.asSubclass(KMSAuditLogger.class));
    } catch (ClassNotFoundException cnfe) {
      throw new RuntimeException("Failed to load " + c + ", please check "
          + "configuration " + KMSConfiguration.KMS_AUDIT_LOGGER_KEY, cnfe);
    }
  }
  return result;
}
Example 4
Source File: HAUtil.java From big-c with Apache License 2.0
/**
 * Verify configuration that there are at least two RM-ids
 * and RPC addresses are specified for each RM-id.
 * Then set the RM-ids.
 */
private static void verifyAndSetRMHAIdsList(Configuration conf) {
  Collection<String> ids =
      conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
  if (ids.size() < 2) {
    throwBadConfigurationException(
        getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
            conf.get(YarnConfiguration.RM_HA_IDS)
            + "\nHA mode requires atleast two RMs"));
  }

  StringBuilder setValue = new StringBuilder();
  for (String id : ids) {
    // verify the RM service addresses configurations for every RMIds
    for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
      checkAndSetRMRPCAddress(prefix, id, conf);
    }
    setValue.append(id);
    setValue.append(",");
  }
  conf.set(YarnConfiguration.RM_HA_IDS,
      setValue.substring(0, setValue.length() - 1));
}
Example 5
Source File: HAUtil.java From big-c with Apache License 2.0
private static void verifyAndSetCurrentRMHAId(Configuration conf) {
  String rmId = getRMHAId(conf);
  if (rmId == null) {
    StringBuilder msg = new StringBuilder();
    msg.append("Can not find valid RM_HA_ID. None of ");
    for (String id : conf
        .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
      msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
    }
    msg.append(" are matching" +
        " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
        " specified in HA Configuration");
    throwBadConfigurationException(msg.toString());
  } else {
    Collection<String> ids = getRMHAIds(conf);
    if (!ids.contains(rmId)) {
      throwBadConfigurationException(
          getRMHAIdNeedToBeIncludedMessage(ids.toString(), rmId));
    }
  }
  conf.set(YarnConfiguration.RM_HA_ID, rmId);
}
Example 6
Source File: CubeHFileJob.java From kylin with Apache License 2.0
private String[] getAllServices(Configuration hbaseConf) {
  Collection<String> hbaseHdfsServices =
      hbaseConf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  Collection<String> mainNameServices =
      getConf().getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  mainNameServices.addAll(hbaseHdfsServices);
  return mainNameServices.toArray(new String[0]);
}
Example 7
Source File: CubeHFileJob.java From kylin-on-parquet-v2 with Apache License 2.0
private String[] getAllServices(Configuration hbaseConf) {
  Collection<String> hbaseHdfsServices =
      hbaseConf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  Collection<String> mainNameServices =
      getConf().getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  mainNameServices.addAll(hbaseHdfsServices);
  return mainNameServices.toArray(new String[0]);
}
Example 8
Source File: TezCommonUtils.java From tez with Apache License 2.0
public static String getSystemPropertiesToLog(Configuration conf) {
  Collection<String> keys = conf.getTrimmedStringCollection(
      TezConfiguration.TEZ_JVM_SYSTEM_PROPERTIES_TO_LOG);
  if (keys.isEmpty()) {
    keys = TezConfiguration.TEZ_JVM_SYSTEM_PROPERTIES_TO_LOG_DEFAULT;
  }
  StringBuilder sb = new StringBuilder();
  sb.append("\n/************************************************************\n");
  sb.append("[system properties]\n");
  for (String key : keys) {
    sb.append(key).append(": ").append(System.getProperty(key)).append('\n');
  }
  sb.append("************************************************************/");
  return sb.toString();
}
Example 9
Source File: DataNode.java From big-c with Apache License 2.0
public static List<StorageLocation> getStorageLocations(Configuration conf) {
  Collection<String> rawLocations =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
  List<StorageLocation> locations =
      new ArrayList<StorageLocation>(rawLocations.size());

  for (String locationString : rawLocations) {
    final StorageLocation location;
    try {
      location = StorageLocation.parse(locationString);
    } catch (IOException ioe) {
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + ioe);
      // Ignore the exception.
      continue;
    } catch (SecurityException se) {
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + se);
      // Ignore the exception.
      continue;
    }

    locations.add(location);
  }

  return locations;
}
Example 10
Source File: FSImage.java From big-c with Apache License 2.0
static List<URI> getCheckpointEditsDirs(Configuration conf,
    String defaultName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
  if (dirNames.size() == 0 && defaultName != null) {
    dirNames.add(defaultName);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example 11
Source File: FSImage.java From big-c with Apache License 2.0
/**
 * Retrieve checkpoint dirs from configuration.
 *
 * @param conf the Configuration
 * @param defaultValue a default value for the attribute, if null
 * @return a Collection of URIs representing the values in
 *         dfs.namenode.checkpoint.dir configuration property
 */
static Collection<URI> getCheckpointDirs(Configuration conf,
    String defaultValue) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  if (dirNames.size() == 0 && defaultValue != null) {
    dirNames.add(defaultValue);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example 12
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Returns list of InetSocketAddresses corresponding to the namenode
 * that manages this cluster. Note this is to be used by datanodes to get
 * the list of namenode addresses to talk to.
 *
 * Returns namenode address specifically configured for datanodes (using
 * service ports), if found. If not, regular RPC address configured for other
 * clients is returned.
 *
 * @param conf configuration
 * @return list of InetSocketAddress
 * @throws IOException on error
 */
public static Map<String, Map<String, InetSocketAddress>>
    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  Collection<String> parentNameServices = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);

  if (parentNameServices.isEmpty()) {
    parentNameServices = conf.getTrimmedStringCollection(
        DFSConfigKeys.DFS_NAMESERVICES);
  } else {
    // Ensure that the internal service is indeed in the list of all available
    // nameservices.
    Set<String> availableNameServices = Sets.newHashSet(conf
        .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
    for (String nsId : parentNameServices) {
      if (!availableNameServices.contains(nsId)) {
        throw new IOException("Unknown nameservice: " + nsId);
      }
    }
  }

  Map<String, Map<String, InetSocketAddress>> addressList =
      getAddressesForNsIds(conf, parentNameServices, defaultAddress,
          DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    throw new IOException("Incorrect configuration: namenode address "
        + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
        + DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
Example 13
Source File: DataNode.java From hadoop with Apache License 2.0
public static List<StorageLocation> getStorageLocations(Configuration conf) {
  Collection<String> rawLocations =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
  List<StorageLocation> locations =
      new ArrayList<StorageLocation>(rawLocations.size());

  for (String locationString : rawLocations) {
    final StorageLocation location;
    try {
      location = StorageLocation.parse(locationString);
    } catch (IOException ioe) {
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + ioe);
      // Ignore the exception.
      continue;
    } catch (SecurityException se) {
      LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + se);
      // Ignore the exception.
      continue;
    }

    locations.add(location);
  }

  return locations;
}
Example 14
Source File: FSImage.java From hadoop with Apache License 2.0
static List<URI> getCheckpointEditsDirs(Configuration conf,
    String defaultName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
  if (dirNames.size() == 0 && defaultName != null) {
    dirNames.add(defaultName);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example 15
Source File: HBaseConnection.java From kylin with Apache License 2.0
public static void addHBaseClusterNNHAConfiguration(Configuration conf) {
  String hdfsConfigFile = KylinConfig.getInstanceFromEnv().getHBaseClusterHDFSConfigFile();
  if (hdfsConfigFile == null || hdfsConfigFile.isEmpty()) {
    return;
  }
  Configuration hdfsConf = new Configuration(false);
  hdfsConf.addResource(hdfsConfigFile);

  Collection<String> nameServices =
      hdfsConf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  Collection<String> mainNameServices =
      conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  for (String serviceId : nameServices) {
    mainNameServices.add(serviceId);

    String serviceConfKey = DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + serviceId;
    String proxyConfKey =
        DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + serviceId;
    conf.set(serviceConfKey, hdfsConf.get(serviceConfKey, ""));
    conf.set(proxyConfKey, hdfsConf.get(proxyConfKey, ""));

    Collection<String> nameNodes = hdfsConf.getTrimmedStringCollection(serviceConfKey);
    for (String nameNode : nameNodes) {
      String rpcConfKey =
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + serviceId + "." + nameNode;
      conf.set(rpcConfKey, hdfsConf.get(rpcConfKey, ""));
    }
  }
  conf.setStrings(DFSConfigKeys.DFS_NAMESERVICES, mainNameServices.toArray(new String[0]));
  // See YARN-3021, instruct RM skip renew token of hbase cluster name services
  conf.setStrings(JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE, nameServices.toArray(new String[0]));
}
Example 16
Source File: FSUtils.java From hbase with Apache License 2.0
/**
 * @param conf the Configuration of HBase
 * @return Whether srcFs and desFs are on same hdfs or not
 */
public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) {
  // By getCanonicalServiceName, we could make sure both srcFs and desFs
  // show a unified format which contains scheme, host and port.
  String srcServiceName = srcFs.getCanonicalServiceName();
  String desServiceName = desFs.getCanonicalServiceName();

  if (srcServiceName == null || desServiceName == null) {
    return false;
  }
  if (srcServiceName.equals(desServiceName)) {
    return true;
  }
  if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) {
    Collection<String> internalNameServices =
        conf.getTrimmedStringCollection("dfs.internal.nameservices");
    if (!internalNameServices.isEmpty()) {
      if (internalNameServices.contains(srcServiceName.split(":")[1])) {
        return true;
      } else {
        return false;
      }
    }
  }
  if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) {
    // If one serviceName is an HA format while the other is a non-HA format,
    // maybe they refer to the same FileSystem.
    // For example, srcFs is "ha-hdfs://nameservices" and desFs is "hdfs://activeNamenode:port"
    Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf);
    Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf);
    if (Sets.intersection(srcAddrs, desAddrs).size() > 0) {
      return true;
    }
  }

  return false;
}
Example 17
Source File: HBaseConnection.java From kylin-on-parquet-v2 with Apache License 2.0
public static void addHBaseClusterNNHAConfiguration(Configuration conf) {
  String hdfsConfigFile = KylinConfig.getInstanceFromEnv().getHBaseClusterHDFSConfigFile();
  if (hdfsConfigFile == null || hdfsConfigFile.isEmpty()) {
    return;
  }
  Configuration hdfsConf = new Configuration(false);
  hdfsConf.addResource(hdfsConfigFile);

  Collection<String> nameServices =
      hdfsConf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  Collection<String> mainNameServices =
      conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
  for (String serviceId : nameServices) {
    mainNameServices.add(serviceId);

    String serviceConfKey = DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + serviceId;
    String proxyConfKey =
        DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + serviceId;
    conf.set(serviceConfKey, hdfsConf.get(serviceConfKey, ""));
    conf.set(proxyConfKey, hdfsConf.get(proxyConfKey, ""));

    Collection<String> nameNodes = hdfsConf.getTrimmedStringCollection(serviceConfKey);
    for (String nameNode : nameNodes) {
      String rpcConfKey =
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + serviceId + "." + nameNode;
      conf.set(rpcConfKey, hdfsConf.get(rpcConfKey, ""));
    }
  }
  conf.setStrings(DFSConfigKeys.DFS_NAMESERVICES, mainNameServices.toArray(new String[0]));
  // See YARN-3021, instruct RM skip renew token of hbase cluster name services
  conf.setStrings(JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE, nameServices.toArray(new String[0]));
}
Example 18
Source File: DiskUtil.java From tajo with Apache License 2.0
public static List<URI> getDataNodeStorageDirs() {
  Configuration conf = new HdfsConfiguration();
  Collection<String> dirNames =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
  return Util.stringCollectionAsURIs(dirNames);
}
Example 19
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Returns collection of nameservice Ids from the configuration.
 * @param conf configuration
 * @return collection of nameservice Ids, or null if not specified
 */
public static Collection<String> getNameServiceIds(Configuration conf) {
  return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
}
Example 20
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Namenode HighAvailability related configuration.
 * Returns collection of namenode Ids from the configuration. One logical id
 * for each namenode in the HA setup.
 *
 * @param conf configuration
 * @param nsId the nameservice ID to look at, or null for non-federated
 * @return collection of namenode Ids
 */
public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
  String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
  return conf.getTrimmedStringCollection(key);
}
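To close, the two DFSUtil helpers in Examples 19 and 20 can be combined to walk an HA or federated configuration, listing every configured namenode id per nameservice. This is a hypothetical sketch, not code from either project; it assumes the public helpers shown above and an HdfsConfiguration in which dfs.nameservices and the dfs.ha.namenodes.<nsId> keys are already set.

import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ListNameNodeIds {
  public static void main(String[] args) {
    // Picks up hdfs-default.xml / hdfs-site.xml from the classpath.
    Configuration conf = new HdfsConfiguration();

    // dfs.nameservices -> each nameservice's dfs.ha.namenodes.<nsId> list,
    // both read via getTrimmedStringCollection under the hood.
    for (String nsId : DFSUtil.getNameServiceIds(conf)) {
      Collection<String> nnIds = DFSUtil.getNameNodeIds(conf, nsId);
      System.out.println(nsId + " -> " + nnIds);
    }
  }
}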