Java Code Examples for org.apache.hadoop.conf.Configuration.getBoolean()

The following are Java code examples showing how to use the getBoolean() method of the org.apache.hadoop.conf.Configuration class. The examples are drawn from open-source projects such as hadoop, ditb, and angel.
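Before the examples, here is a minimal sketch of the pattern they all share: getBoolean(name, defaultValue) looks up the named property, parses it as a boolean, and returns the supplied default when the property is unset or does not parse as a boolean. Both keys below are hypothetical and used only for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
  public static void main(String[] args) {
    // An empty Configuration; in a real job this usually comes from the
    // framework (e.g. a JobConf or the server's site configuration).
    Configuration conf = new Configuration();

    // "my.feature.enabled" is a hypothetical key, used only for illustration.
    conf.setBoolean("my.feature.enabled", true);

    // The property is set, so its parsed value is returned.
    boolean enabled = conf.getBoolean("my.feature.enabled", false);  // true

    // The property is unset, so the supplied default is returned.
    boolean missing = conf.getBoolean("my.other.flag", false);       // false

    System.out.println("enabled = " + enabled + ", missing = " + missing);
  }
}
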
Example 1
Project: hadoop   File: FSImageCompression.java
/**
 * Create a compression instance based on the user's configuration in the given
 * Configuration object.
 * @throws IOException if the specified codec is not available.
 */
static FSImageCompression createCompression(Configuration conf)
  throws IOException {
  boolean compressImage = conf.getBoolean(
    DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,
    DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT);

  if (!compressImage) {
    return createNoopCompression();
  }

  String codecClassName = conf.get(
    DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
    DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
  return createCompression(conf, codecClassName);
}
 
Example 2
Project: ditb   File: DefaultMemStore.java
/**
 * Constructor.
 * @param conf configuration
 * @param c Comparator
 */
public DefaultMemStore(final Configuration conf,
                final KeyValue.KVComparator c) {
  this.conf = conf;
  this.comparator = c;
  this.cellSet = new CellSkipListSet(c);
  this.snapshot = new CellSkipListSet(c);
  timeRangeTracker = new TimeRangeTracker();
  snapshotTimeRangeTracker = new TimeRangeTracker();
  this.size = new AtomicLong(DEEP_OVERHEAD);
  this.snapshotSize = 0;
  if (conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT)) {
    String className = conf.get(MSLAB_CLASS_NAME, HeapMemStoreLAB.class.getName());
    this.allocator = ReflectionUtils.instantiateWithCustomCtor(className,
        new Class[] { Configuration.class }, new Object[] { conf });
  } else {
    this.allocator = null;
  }
}
 
Example 3
Project: ditb   File: CacheConfig.java
/**
 * Create a cache configuration using the specified configuration object and
 * family descriptor.
 * @param conf hbase configuration
 * @param family column family configuration
 */
public CacheConfig(Configuration conf, HColumnDescriptor family) {
  this(CacheConfig.instantiateBlockCache(conf),
      family.isBlockCacheEnabled(),
      family.isInMemory(),
      // For the following flags we enable them regardless of per-schema settings
      // if they are enabled in the global configuration.
      conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
      conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
      conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
      conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
          DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
      conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
          DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
      conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
          HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
   );
}
 
Example 4
Project: angel   File: WorkerJVM.java
private static String generateDefaultJVMParameters(Configuration conf, ApplicationId appid,
    WorkerAttemptId workerAttemptId) {
  int workerMemSizeInMB =
      conf.getInt(AngelConf.ANGEL_WORKER_MEMORY_GB,
          AngelConf.DEFAULT_ANGEL_WORKER_MEMORY_GB) * 1024;

  if (workerMemSizeInMB < 2048) {
    workerMemSizeInMB = 2048;
  }

  boolean isUseDirect = conf.getBoolean(AngelConf.ANGEL_NETTY_MATRIXTRANSFER_CLIENT_USEDIRECTBUFFER,
    AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_CLIENT_USEDIRECTBUFFER);
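  // Leave 512 MB of the worker's memory unassigned, then split the remainder
  // between a direct-buffer region (30% when Netty direct buffers are enabled,
  // 20% otherwise) and the JVM heap.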
  int maxUse = workerMemSizeInMB - 512;
  int directRegionSize = 0;
  if (isUseDirect) {
    directRegionSize = (int) (maxUse * 0.3);
  } else {
    directRegionSize = (int) (maxUse * 0.2);
  }
  int heapMax = maxUse - directRegionSize;
  int youngRegionSize = (int) (heapMax * 0.4);
  int survivorRatio = 4;

  String ret =
      new StringBuilder().append(" -Xmx").append(heapMax).append("M").append(" -Xmn")
          .append(youngRegionSize).append("M").append(" -XX:MaxDirectMemorySize=")
          .append(directRegionSize).append("M").append(" -XX:SurvivorRatio=").append(survivorRatio)
          .append(" -XX:PermSize=100M -XX:MaxPermSize=200M").append(" -XX:+AggressiveOpts")
          .append(" -XX:+UseLargePages").append(" -XX:+UseConcMarkSweepGC")
          .append(" -XX:CMSInitiatingOccupancyFraction=70")
          .append(" -XX:+UseCMSInitiatingOccupancyOnly").append(" -XX:+CMSScavengeBeforeRemark")
          .append(" -XX:+UseCMSCompactAtFullCollection").append(" -verbose:gc")
          .append(" -XX:+PrintGCDateStamps").append(" -XX:+PrintGCDetails")
          .append(" -XX:+PrintCommandLineFlags").append(" -XX:+PrintTenuringDistribution")
          .append(" -XX:+PrintAdaptiveSizePolicy").append(" -Xloggc:/tmp/").append("angelgc-")
          .append(appid).append("-").append(workerAttemptId).append(".log").toString();

  return ret;
}
 
Example 5
Project: hadoop   File: Bzip2Factory.java
/**
 * Check if native-bzip2 code is loaded & initialized correctly and 
 * can be loaded for this job.
 * 
 * @param conf configuration
 * @return <code>true</code> if native-bzip2 is loaded & initialized 
 *         and can be loaded for this job, else <code>false</code>
 */
public static boolean isNativeBzip2Loaded(Configuration conf) {
  String libname = conf.get("io.compression.codec.bzip2.library", 
                            "system-native");
  if (!bzip2LibraryName.equals(libname)) {
    nativeBzip2Loaded = false;
    bzip2LibraryName = libname;
    if (libname.equals("java-builtin")) {
      LOG.info("Using pure-Java version of bzip2 library");
    } else if (conf.getBoolean(
              CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, 
              CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT) &&
        NativeCodeLoader.isNativeCodeLoaded()) {
      try {
        // Initialize the native library.
        Bzip2Compressor.initSymbols(libname);
        Bzip2Decompressor.initSymbols(libname);
        nativeBzip2Loaded = true;
        LOG.info("Successfully loaded & initialized native-bzip2 library " +
                 libname);
      } catch (Throwable t) {
        LOG.warn("Failed to load/initialize native-bzip2 library " + 
                 libname + ", will use pure-Java version");
      }
    }
  }
  return nativeBzip2Loaded;
}
 
Example 6
Project: hadoop   File: FileSystem.java
/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
  statistics = getStatistics(name.getScheme(), getClass());    
  resolveSymlinks = conf.getBoolean(
      CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY,
      CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT);
}
 
Example 7
Project: ditb   File: RegionStateStore.java
RegionStateStore(final Server server) {
  Configuration conf = server.getConfiguration();
  // No need to persist if using ZK but not migrating
  noPersistence = ConfigUtil.useZKForAssignment(conf)
    && !conf.getBoolean("hbase.assignment.usezk.migrating", false);
  this.server = server;
  initialized = false;
}
 
Example 8
Project: hadoop   File: NameNode.java
public static void checkAllowFormat(Configuration conf) throws IOException {
  if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
      DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
    throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
              + " is set to false for this filesystem, so it "
              + "cannot be formatted. You will need to set "
              + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
              + "to true in order to format this filesystem");
  }
}
 
Example 9
Project: ditb   File: RpcRetryingCallerFactory.java
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
}
 
Example 10
Project: ditb   File: HttpServer.java
/**
 * Add default apps.
 * @param parent the collection of context handlers to add the contexts to
 * @param appDir the application directory
 * @param conf the configuration
 * @throws IOException
 */
protected void addDefaultApps(ContextHandlerCollection parent,
    final String appDir, Configuration conf) throws IOException {
  // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
  String logDir = this.logDir;
  if (logDir == null) {
    logDir = System.getProperty("hadoop.log.dir");
  }
  if (logDir != null) {
    Context logContext = new Context(parent, "/logs");
    logContext.setResourceBase(logDir);
    logContext.addServlet(AdminAuthorizedServlet.class, "/*");
    if (conf.getBoolean(
        ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES,
        ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) {
      @SuppressWarnings("unchecked")
      Map<String, String> params = logContext.getInitParams();
      params.put(
          "org.mortbay.jetty.servlet.Default.aliases", "true");
    }
    logContext.setDisplayName("logs");
    setContextAttributes(logContext, conf);
    addNoCacheFilter(webAppContext);
    defaultContexts.put(logContext, true);
  }
  // set up the context for "/static/*"
  Context staticContext = new Context(parent, "/static");
  staticContext.setResourceBase(appDir + "/static");
  staticContext.addServlet(DefaultServlet.class, "/*");
  staticContext.setDisplayName("static");
  setContextAttributes(staticContext, conf);
  defaultContexts.put(staticContext, true);
}
 
Example 11
Project: hadoop   File: QueueManager.java
/**
 * Dumps the configuration of the hierarchy of queues defined in the given
 * xml file path. It is to be used directly ONLY FOR TESTING.
 * @param out the writer to which the dump is written
 * @param configFile the filename of the xml file
 * @param conf the configuration
 * @throws IOException
 */
static void dumpConfiguration(Writer out, String configFile,
    Configuration conf) throws IOException {
  if (conf != null && conf.get(DeprecatedQueueConfigurationParser.
      MAPRED_QUEUE_NAMES_KEY) != null) {
    return;
  }
  
  JsonFactory dumpFactory = new JsonFactory();
  JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
  QueueConfigurationParser parser;
  boolean aclsEnabled = false;
  if (conf != null) {
    aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
  }
  if (configFile != null && !"".equals(configFile)) {
    parser = new QueueConfigurationParser(configFile, aclsEnabled);
  }
  else {
    parser = getQueueConfigurationParser(null, false, aclsEnabled);
  }
  dumpGenerator.writeStartObject();
  dumpGenerator.writeFieldName("queues");
  dumpGenerator.writeStartArray();
  dumpConfiguration(dumpGenerator, parser.getRoot().getChildren());
  dumpGenerator.writeEndArray();
  dumpGenerator.writeEndObject();
  dumpGenerator.flush();
}
 
Example 12
Project: ditb   File: StripeStoreConfig.java
public StripeStoreConfig(Configuration config, StoreConfigInformation sci) {
  this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4);
  this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false);
  int minMinFiles = flushIntoL0 ? 3 : 4; // make sure not to compact tiny files too often.
  int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1);
  this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles));
  this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY,
      config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10));
  this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true);

  float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true);
  if (Math.abs(splitPartCount - 1.0) < EPSILON) {
    LOG.error("Split part count cannot be 1 (" + splitPartCount + "), using the default");
    splitPartCount = 2f;
  }
  this.splitPartCount = splitPartCount;
  // Arbitrary default split size - 4 times the size of one L0 compaction.
  // If we flush into L0 there's no split compaction, but for default value it is ok.
  double flushSize = sci.getMemstoreFlushSize();
  if (flushSize == 0) {
    flushSize = 128 * 1024 * 1024;
  }
  long defaultSplitSize = (long)(flushSize * getLevel0MinFiles() * 4 * splitPartCount);
  this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize);
  int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1);
  if (initialCount == 0) {
    LOG.error("Initial stripe count is 0, using the default");
    initialCount = 1;
  }
  this.initialCount = initialCount;
  this.splitPartSize = (long)(this.sizeToSplitAt / this.splitPartCount);
}
 
Example 13
Project: ditb   File: RpcServer.java
private void initReconfigurable(Configuration confToLoad) {
  this.allowFallbackToSimpleAuth = confToLoad.getBoolean(FALLBACK_TO_INSECURE_CLIENT_AUTH, false);
  if (isSecurityEnabled && allowFallbackToSimpleAuth) {
    LOG.warn("********* WARNING! *********");
    LOG.warn("This server is configured to allow connections from INSECURE clients");
    LOG.warn("(" + FALLBACK_TO_INSECURE_CLIENT_AUTH + " = true).");
    LOG.warn("While this option is enabled, client identities cannot be secured, and user");
    LOG.warn("impersonation is possible!");
    LOG.warn("For secure operation, please disable SIMPLE authentication as soon as possible,");
    LOG.warn("by setting " + FALLBACK_TO_INSECURE_CLIENT_AUTH + " = false in hbase-site.xml");
    LOG.warn("****************************");
  }
}
 
Example 14
Project: hadoop   File: ContainerManagerImpl.java
protected LogHandler createLogHandler(Configuration conf, Context context,
    DeletionService deletionService) {
  if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
      YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
    return new LogAggregationService(this.dispatcher, context,
        deletionService, dirsHandler);
  } else {
    return new NonAggregatingLogHandler(this.dispatcher, deletionService,
                                        dirsHandler,
                                        context.getNMStateStore());
  }
}
 
Example 15
Project: hadoop   File: WebHdfsFileSystem.java
/** Is WebHDFS enabled in conf? */
public static boolean isEnabled(final Configuration conf, final Log log) {
  final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
      DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
  return b;
}
 
Example 16
Project: ditb   File: QuotaUtil.java
/** Returns true if the support for quota is enabled */
public static boolean isQuotaEnabled(final Configuration conf) {
  return conf.getBoolean(QUOTA_CONF_KEY, QUOTA_ENABLED_DEFAULT);
}
 
Example 17
Project: ditb   File: ChaosMonkeyRunner.java
private boolean isDistributedCluster(Configuration conf) {
  return conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false);
}
 
Example 18
Project: hadoop   File: Client.java
/**
 * The time after which an RPC will time out.
 * If ping is not enabled (via ipc.client.ping), then the timeout value is the 
 * same as the pingInterval.
 * If ping is enabled, then there is no timeout value.
 * 
 * @param conf Configuration
 * @return the timeout period in milliseconds. -1 if no timeout value is set
 */
final public static int getTimeout(Configuration conf) {
  if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
      CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
    return getPingInterval(conf);
  }
  return -1;
}
 
Example 19
Project: ditb   File: BloomFilterFactory.java
/**
 * @return true if general Bloom (Row or RowCol) filters are enabled in the
 * given configuration
 */
public static boolean isGeneralBloomEnabled(Configuration conf) {
  return conf.getBoolean(IO_STOREFILE_BLOOM_ENABLED, true);
}
 
Example 20
Project: hadoop   File: ZlibFactory.java
/**
 * Check if native-zlib code is loaded & initialized correctly and 
 * can be loaded for this job.
 * 
 * @param conf configuration
 * @return <code>true</code> if native-zlib is loaded & initialized 
 *         and can be loaded for this job, else <code>false</code>
 */
public static boolean isNativeZlibLoaded(Configuration conf) {
  return nativeZlibLoaded && conf.getBoolean(
                        CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, 
                        CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT);
}