Java Code Examples for org.apache.hadoop.conf.Configuration#getBoolean()
The following examples show how to use org.apache.hadoop.conf.Configuration#getBoolean(). Each example is taken from an open source project; the source file, originating project, and license are noted above the code.
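Before the project examples, here is a minimal, self-contained sketch of the call itself. The key name example.feature.enabled and the demo class are made up for illustration; getBoolean(name, defaultValue) returns the supplied default when the property is unset or its value cannot be parsed as a boolean.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The key has not been set, so the supplied default (false) is returned.
        boolean before = conf.getBoolean("example.feature.enabled", false);

        // After setting the property to "true", getBoolean returns true;
        // unrecognized values fall back to the supplied default.
        conf.set("example.feature.enabled", "true");
        boolean after = conf.getBoolean("example.feature.enabled", false);

        System.out.println(before + " -> " + after); // prints: false -> true
    }
}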
Example 1
Source File: ControlledJob.java From big-c with Apache License 2.0
/**
 * Submit this job to mapred. The state becomes RUNNING if submission
 * is successful, FAILED otherwise.
 */
protected synchronized void submit() {
  try {
    Configuration conf = job.getConfiguration();
    if (conf.getBoolean(CREATE_DIR, false)) {
      FileSystem fs = FileSystem.get(conf);
      Path inputPaths[] = FileInputFormat.getInputPaths(job);
      for (int i = 0; i < inputPaths.length; i++) {
        if (!fs.exists(inputPaths[i])) {
          try {
            fs.mkdirs(inputPaths[i]);
          } catch (IOException e) {
          }
        }
      }
    }
    job.submit();
    this.state = State.RUNNING;
  } catch (Exception ioe) {
    LOG.info(getJobName() + " got an error while submitting ", ioe);
    this.state = State.FAILED;
    this.message = StringUtils.stringifyException(ioe);
  }
}
Example 2
Source File: RyaOutputFormat.java From rya with Apache License 2.0
private static TemporalIndexer getTemporalIndexer(final Configuration conf) throws IOException {
  if (!conf.getBoolean(ENABLE_TEMPORAL, true)) {
    return null;
  }
  final AccumuloTemporalIndexer temporal = new AccumuloTemporalIndexer();
  temporal.setConf(conf);
  Connector connector;
  try {
    connector = ConfigUtils.getConnector(conf);
  } catch (AccumuloException | AccumuloSecurityException e) {
    throw new IOException("Error when attempting to create a connection for writing the temporal index.", e);
  }
  final MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
  temporal.setConnector(connector);
  temporal.setMultiTableBatchWriter(mtbw);
  temporal.init();
  return temporal;
}
Example 3
Source File: RecoveryService.java From tez with Apache License 2.0
@Override
public void serviceInit(Configuration conf) throws Exception {
  recoveryPath = appContext.getCurrentRecoveryDir();
  recoveryDirFS = FileSystem.get(recoveryPath.toUri(), conf);
  bufferSize = conf.getInt(TezConfiguration.DAG_RECOVERY_FILE_IO_BUFFER_SIZE,
      TezConfiguration.DAG_RECOVERY_FILE_IO_BUFFER_SIZE_DEFAULT);
  flushInterval = conf.getInt(TezConfiguration.DAG_RECOVERY_FLUSH_INTERVAL_SECS,
      TezConfiguration.DAG_RECOVERY_FLUSH_INTERVAL_SECS_DEFAULT);
  maxUnflushedEvents = conf.getInt(TezConfiguration.DAG_RECOVERY_MAX_UNFLUSHED_EVENTS,
      TezConfiguration.DAG_RECOVERY_MAX_UNFLUSHED_EVENTS_DEFAULT);
  drainEventsFlag = conf.getBoolean(
      TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED,
      TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED_DEFAULT);
  LOG.info("RecoveryService initialized with "
      + "recoveryPath=" + recoveryPath
      + ", bufferSize(bytes)=" + bufferSize
      + ", flushInterval(s)=" + flushInterval
      + ", maxUnflushedEvents=" + maxUnflushedEvents);
}
Example 4
Source File: ResourceManager.java From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.shouldExitOnError =
      conf.getBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY,
          Dispatcher.DEFAULT_DISPATCHER_EXIT_ON_ERROR);
  super.serviceInit(conf);
}
Example 5
Source File: DataNode.java From big-c with Apache License 2.0
private void initDataXceiver(Configuration conf) throws IOException {
  // find free port or use privileged port provided
  TcpPeerServer tcpPeerServer;
  if (secureResources != null) {
    tcpPeerServer = new TcpPeerServer(secureResources);
  } else {
    tcpPeerServer = new TcpPeerServer(dnConf.socketWriteTimeout,
        DataNode.getStreamingAddr(conf));
  }
  tcpPeerServer.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  streamingAddr = tcpPeerServer.getStreamingAddr();
  LOG.info("Opened streaming server at " + streamingAddr);
  this.threadGroup = new ThreadGroup("dataXceiverServer");
  xserver = new DataXceiverServer(tcpPeerServer, conf, this);
  this.dataXceiverServer = new Daemon(threadGroup, xserver);
  this.threadGroup.setDaemon(true); // auto destroy when empty
  if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT) ||
      conf.getBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
    DomainPeerServer domainPeerServer =
        getDomainPeerServer(conf, streamingAddr.getPort());
    if (domainPeerServer != null) {
      this.localDataXceiverServer = new Daemon(threadGroup,
          new DataXceiverServer(domainPeerServer, conf, this));
      LOG.info("Listening on UNIX domain socket: " +
          domainPeerServer.getBindPath());
    }
  }
  this.shortCircuitRegistry = new ShortCircuitRegistry(conf);
}
Example 6
Source File: ACLManager.java From attic-apex-core with Apache License 2.0
public static boolean areACLsRequired(Configuration conf) {
  logger.debug("Check ACLs required");
  if (conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE, YarnConfiguration.DEFAULT_YARN_ACL_ENABLE)) {
    logger.debug("Admin ACL {}", conf.get(YarnConfiguration.YARN_ADMIN_ACL));
    if (!YarnConfiguration.DEFAULT_YARN_ADMIN_ACL.equals(conf.get(YarnConfiguration.YARN_ADMIN_ACL))) {
      logger.debug("Non default admin ACL");
      return true;
    }
  }
  return false;
}
Example 7
Source File: BlockCacheFactory.java From hbase with Apache License 2.0
public static BlockCache createBlockCache(Configuration conf) {
  if (conf.get(DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY) != null) {
    LOG.warn("The config key {} is deprecated now, instead please use {}. In future release "
        + "we will remove the deprecated config.", DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY,
        BLOCKCACHE_BLOCKSIZE_KEY);
  }
  FirstLevelBlockCache l1Cache = createFirstLevelCache(conf);
  if (l1Cache == null) {
    return null;
  }
  boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
  if (useExternal) {
    BlockCache l2CacheInstance = createExternalBlockcache(conf);
    return l2CacheInstance == null ? l1Cache :
        new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance);
  } else {
    // otherwise use the bucket cache.
    BucketCache bucketCache = createBucketCache(conf);
    if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) {
      // Non combined mode is off from 2.0
      LOG.warn(
          "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available");
    }
    return bucketCache == null ? l1Cache : new CombinedBlockCache(l1Cache, bucketCache);
  }
}
Example 8
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0
private boolean getAllowEmptyValues(RegionCoprocessorEnvironment env, TableDescriptor htd) {
  String allowEmptyValuesFromTableDesc = htd.getValue(TxConstants.ALLOW_EMPTY_VALUES_KEY);
  Configuration conf = getConfiguration(env);
  boolean allowEmptyValuesFromConfig = (conf != null) ?
      conf.getBoolean(TxConstants.ALLOW_EMPTY_VALUES_KEY, TxConstants.ALLOW_EMPTY_VALUES_DEFAULT) :
      TxConstants.ALLOW_EMPTY_VALUES_DEFAULT;

  // If the property is not present in the tableDescriptor, get it from the Configuration
  return (allowEmptyValuesFromTableDesc != null) ?
      Boolean.valueOf(allowEmptyValuesFromTableDesc) : allowEmptyValuesFromConfig;
}
Example 9
Source File: SleepJob.java From big-c with Apache License 2.0
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0) ? new Selector(hosts.length,
      (float) fakeLocations / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME, Long.MAX_VALUE);
}
Example 10
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0
private boolean getAllowEmptyValues(RegionCoprocessorEnvironment env, HTableDescriptor htd) {
  String allowEmptyValuesFromTableDesc = htd.getValue(TxConstants.ALLOW_EMPTY_VALUES_KEY);
  Configuration conf = getConfiguration(env);
  boolean allowEmptyValuesFromConfig = (conf != null) ?
      conf.getBoolean(TxConstants.ALLOW_EMPTY_VALUES_KEY, TxConstants.ALLOW_EMPTY_VALUES_DEFAULT) :
      TxConstants.ALLOW_EMPTY_VALUES_DEFAULT;

  // If the property is not present in the tableDescriptor, get it from the Configuration
  return (allowEmptyValuesFromTableDesc != null) ?
      Boolean.valueOf(allowEmptyValuesFromTableDesc) : allowEmptyValuesFromConfig;
}
Example 11
Source File: HistoryClientService.java From hadoop with Apache License 2.0
protected void serviceStart() throws Exception {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  initializeWebApp(conf);
  InetSocketAddress address = conf.getSocketAddr(
      JHAdminConfig.MR_HISTORY_BIND_HOST,
      JHAdminConfig.MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_PORT);

  server = rpc.getServer(HSClientProtocol.class, protocolHandler, address,
      conf, jhsDTSecretManager,
      conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT,
          JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
  }

  server.start();
  this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_BIND_HOST,
      JHAdminConfig.MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
      server.getListenerAddress());
  LOG.info("Instantiated HistoryClientService at " + this.bindAddress);

  super.serviceStart();
}
Example 12
Source File: NativeAzureFileSystem.java From hadoop with Apache License 2.0
@Override
public void initialize(URI uri, Configuration conf)
    throws IOException, IllegalArgumentException {
  // Check authority for the URI to guarantee that it is non-null.
  uri = reconstructAuthorityIfNeeded(uri, conf);
  if (null == uri.getAuthority()) {
    final String errMsg = String
        .format("Cannot initialize WASB file system, URI authority not recognized.");
    throw new IllegalArgumentException(errMsg);
  }
  super.initialize(uri, conf);

  if (store == null) {
    store = createDefaultStore(conf);
  }

  instrumentation = new AzureFileSystemInstrumentation(conf);
  if (!conf.getBoolean(SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
    // Make sure the metrics system is available before interacting with Azure
    AzureFileSystemMetricsSystem.fileSystemStarted();
    metricsSourceName = newMetricsSourceName();
    String sourceDesc = "Azure Storage Volume File System metrics";
    AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
        instrumentation);
  }

  store.initialize(uri, conf, instrumentation);
  setConf(conf);

  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.workingDir = new Path("/user", UserGroupInformation.getCurrentUser()
      .getShortUserName()).makeQualified(getUri(), getWorkingDirectory());
  this.blockSize = conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME,
      MAX_AZURE_BLOCK_SIZE);

  if (LOG.isDebugEnabled()) {
    LOG.debug("NativeAzureFileSystem. Initializing.");
    LOG.debug(" blockSize = "
        + conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE));
  }
}
Example 13
Source File: HadoopTrainWorker.java From ytk-learn with MIT License
@Override
protected void setup(Context context) {
  Configuration conf = context.getConfiguration();
  workThread = new Thread() {
    @Override
    public void run() {
      HadoopTrainWorkerCore trainWorkerCore = null;
      try {
        trainWorkerCore = new HadoopTrainWorkerCore(
            conf.get("modelName"),
            conf.get("configPath"),
            conf.get("configFile"),
            conf.get("pyTransformScript"),
            conf.getBoolean("needPyTransform", false),
            conf.get("loginName"),
            conf.get("hostName"),
            conf.getInt("hostPort", -1),
            conf.getInt("threadNum", -1)
        );

        Map<String, Object> customParamsMap = decodeMap(conf.get("customParamsMap"));
        for (Map.Entry<String, Object> entry : customParamsMap.entrySet()) {
          trainWorkerCore.setCustomParam(entry.getKey(), entry.getValue());
          LOG.info("hadoop custom params:" + entry.getKey() + "=" + entry.getValue());
        }

        trainWorkerCore.train(Arrays.asList(new ReducerIterator()), null);
      } catch (Exception e) {
        e.printStackTrace();
        System.exit(1);
      }
    }
  };
  workThread.start();
}
Example 14
Source File: NameNode.java From big-c with Apache License 2.0
public static void checkAllowFormat(Configuration conf) throws IOException {
  if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,
      DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
    throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
        + " is set to false for this filesystem, so it "
        + "cannot be formatted. You will need to set "
        + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY + " parameter "
        + "to true in order to format this filesystem");
  }
}
Example 15
Source File: StochasticLoadBalancer.java From hbase with Apache License 2.0
@Override
public synchronized void setConf(Configuration conf) {
  super.setConf(conf);
  maxSteps = conf.getInt(MAX_STEPS_KEY, maxSteps);
  stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
  maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
  runMaxSteps = conf.getBoolean(RUN_MAX_STEPS_KEY, runMaxSteps);

  numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember);
  minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance);
  if (localityCandidateGenerator == null) {
    localityCandidateGenerator = new LocalityBasedCandidateGenerator(services);
  }
  localityCost = new ServerLocalityCostFunction(conf, services);
  rackLocalityCost = new RackLocalityCostFunction(conf, services);

  if (this.candidateGenerators == null) {
    candidateGenerators = Lists.newArrayList();
    candidateGenerators.add(new RandomCandidateGenerator());
    candidateGenerators.add(new LoadCandidateGenerator());
    candidateGenerators.add(localityCandidateGenerator);
    candidateGenerators.add(new RegionReplicaRackCandidateGenerator());
  }
  regionLoadFunctions = new CostFromRegionLoadFunction[] {
    new ReadRequestCostFunction(conf),
    new CPRequestCostFunction(conf),
    new WriteRequestCostFunction(conf),
    new MemStoreSizeCostFunction(conf),
    new StoreFileCostFunction(conf)
  };
  regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf);
  regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf);

  costFunctions = new ArrayList<>();
  costFunctions.add(new RegionCountSkewCostFunction(conf));
  costFunctions.add(new PrimaryRegionCountSkewCostFunction(conf));
  costFunctions.add(new MoveCostFunction(conf));
  costFunctions.add(localityCost);
  costFunctions.add(rackLocalityCost);
  costFunctions.add(new TableSkewCostFunction(conf));
  costFunctions.add(regionReplicaHostCostFunction);
  costFunctions.add(regionReplicaRackCostFunction);
  costFunctions.add(regionLoadFunctions[0]);
  costFunctions.add(regionLoadFunctions[1]);
  costFunctions.add(regionLoadFunctions[2]);
  costFunctions.add(regionLoadFunctions[3]);
  costFunctions.add(regionLoadFunctions[4]);
  loadCustomCostFunctions(conf);

  curFunctionCosts = new Double[costFunctions.size()];
  tempFunctionCosts = new Double[costFunctions.size()];
  LOG.info("Loaded config; maxSteps=" + maxSteps + ", stepsPerRegion=" + stepsPerRegion
      + ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable
      + ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + " etc.");
}
Example 16
Source File: CCParseFilter.java From anthelion with Apache License 2.0
/** Scan the document adding attributes to metadata. */
public static void walk(Node doc, URL base, Metadata metadata, Configuration conf)
    throws ParseException {

  // walk the DOM tree, scanning for license data
  Walker walker = new Walker(base);
  walker.walk(doc);

  // interpret results of walk
  String licenseUrl = null;
  String licenseLocation = null;
  if (walker.rdfLicense != null) {           // 1st choice: subject in RDF
    licenseLocation = "rdf";
    licenseUrl = walker.rdfLicense;
  } else if (walker.relLicense != null) {    // 2nd: anchor w/ rel=license
    licenseLocation = "rel";
    licenseUrl = walker.relLicense.toString();
  } else if (walker.anchorLicense != null) { // 3rd: anchor w/ CC license
    licenseLocation = "a";
    licenseUrl = walker.anchorLicense.toString();
  } else if (conf.getBoolean("creativecommons.exclude.unlicensed", false)) {
    throw new ParseException("No CC license. Excluding.");
  }

  // add license to metadata
  if (licenseUrl != null) {
    if (LOG.isInfoEnabled()) {
      LOG.info("CC: found " + licenseUrl + " in " + licenseLocation + " of " + base);
    }
    metadata.add(CreativeCommons.LICENSE_URL, licenseUrl);
    metadata.add(CreativeCommons.LICENSE_LOCATION, licenseLocation);
  }

  if (walker.workType != null) {
    if (LOG.isInfoEnabled()) {
      LOG.info("CC: found " + walker.workType + " in " + base);
    }
    metadata.add(CreativeCommons.WORK_TYPE, walker.workType);
  }
}
Example 17
Source File: HalvadeConf.java From halvade with GNU General Public License v3.0
public static boolean getRefDirIsSet(Configuration conf) {
  return conf.getBoolean(refDirIsSet, false);
}
Example 18
Source File: ConfigUtil.java From zerowing with MIT License
public static boolean getSkipDeletes(Configuration conf) {
  return conf.getBoolean(SKIP_DELETES, false);
}
Example 19
Source File: GroupedAggregateRegionObserver.java From phoenix with Apache License 2.0
/**
 * Used for an aggregate query in which the key order does not necessarily match the group by
 * key order. In this case, we must collect all distinct groups within a region into a map,
 * aggregating as we go.
 * @param limit TODO
 */
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
    final RegionScanner scanner, final List<Expression> expressions,
    final ServerAggregators aggregators, long limit) throws IOException {
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug(LogUtil.addCustomAnnotations(
        "Grouped aggregation over unordered rows with scan " + scan
            + ", group by " + expressions + ", aggregators " + aggregators,
        ScanUtil.getCustomAnnotations(scan)));
  }
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Configuration conf = env.getConfiguration();
  int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB,
      DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES);
  byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
  if (estDistValsBytes != null) {
    // Allocate 1.5x estimation
    estDistVals = Math.max(MIN_DISTINCT_VALUES,
        (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
  }

  Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
  boolean useQualifierAsIndex =
      EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
  final boolean spillableEnabled = conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
  final PTable.QualifierEncodingScheme encodingScheme =
      EncodedColumnsUtil.getQualifierEncodingScheme(scan);

  GroupByCache groupByCache = GroupByCacheFactory.INSTANCE.newCache(
      env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan),
      aggregators, estDistVals);
  boolean success = false;
  try {
    boolean hasMore;
    Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple()
        : new MultiKeyValueTuple();
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug(LogUtil.addCustomAnnotations(
          "Spillable groupby enabled: " + spillableEnabled,
          ScanUtil.getCustomAnnotations(scan)));
    }
    Region region = c.getEnvironment().getRegion();
    boolean acquiredLock = false;
    try {
      region.startRegionOperation();
      acquiredLock = true;
      synchronized (scanner) {
        do {
          List<Cell> results = useQualifierAsIndex
              ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(),
                  minMaxQualifiers.getSecond(), encodingScheme)
              : new ArrayList<Cell>();
          // Results are potentially returned even when the return value of s.next is false
          // since this is an indication of whether or not there are more values after the
          // ones returned
          hasMore = scanner.nextRaw(results);
          if (!results.isEmpty()) {
            result.setKeyValues(results);
            ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, expressions);
            Aggregator[] rowAggregators = groupByCache.cache(key);
            // Aggregate values here
            aggregators.aggregate(rowAggregators, result);
          }
        } while (hasMore && groupByCache.size() < limit);
      }
    } finally {
      if (acquiredLock) region.closeRegionOperation();
    }

    RegionScanner regionScanner = groupByCache.getScanner(scanner);

    // Do not sort here, but sort back on the client instead
    // The reason is that if the scan ever extends beyond a region
    // (which can happen if we're basing our parallelization split
    // points on old metadata), we'll get incorrect query results.
    success = true;
    return regionScanner;
  } finally {
    if (!success) {
      Closeables.closeQuietly(groupByCache);
    }
  }
}
Example 20
Source File: BulkInputFormat.java From datawave with Apache License 2.0
/**
 * Determines whether a configuration uses local iterators.
 *
 * @param conf
 *            the Hadoop configuration object
 * @return true if uses local iterators, false otherwise
 * @see #setLocalIterators(Configuration, boolean)
 */
protected static boolean usesLocalIterators(Configuration conf) {
  return conf.getBoolean(LOCAL_ITERATORS, false);
}