Java Code Examples for org.apache.hadoop.conf.Configuration.setBoolean()

The following are Java code examples showing how to use the setBoolean() method of the org.apache.hadoop.conf.Configuration class. You can vote up the examples you find useful; your votes help us surface better examples.
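Before the project examples, here is a minimal self-contained sketch of the pattern they all share: setBoolean() stores a boolean value under a property name, and getBoolean() reads it back with a default. The property key "example.feature.enabled" is made up purely for illustration and is not used by any of the projects below.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Store a boolean value under a property name.
    // "example.feature.enabled" is a hypothetical key used only for this sketch.
    conf.setBoolean("example.feature.enabled", true);

    // Read it back; the second argument is the default returned
    // when the property is not set.
    boolean enabled = conf.getBoolean("example.feature.enabled", false);
    System.out.println("enabled = " + enabled);
  }
}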
Example 1
Project: hadoop   File: TestSleepJob.java   (6 votes)
@Test  (timeout=600000)
public void testMapTasksOnlySleepJobs() throws Exception {
  Configuration configuration = GridmixTestUtils.mrvl.getConfig();

  DebugJobProducer jobProducer = new DebugJobProducer(5, configuration);
  configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);

  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  JobStory story;
  int seq = 1;
  while ((story = jobProducer.getNextJob()) != null) {
    GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(configuration, 0,
            story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    Job job = gridmixJob.call();
    assertEquals(0, job.getNumReduceTasks());
  }
  jobProducer.close();
  assertEquals(6, seq);
}
 
Example 2
Project: hadoop   File: TestDFSAdminWithHA.java   (6 votes)
private void setUpHaCluster(boolean security) throws Exception {
  conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      security);
  cluster = new MiniQJMHACluster.Builder(conf).build();
  setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
      cluster.getDfsCluster().getNameNode(1).getHostAndPort());
  admin = new DFSAdmin();
  admin.setConf(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));

  originOut = System.out;
  originErr = System.err;
  System.setOut(new PrintStream(out));
  System.setErr(new PrintStream(err));
}
 
Example 3
Project: hadoop   File: TestFail.java   (6 votes)
@Test
// The first attempt fails and the second attempt succeeds,
// so the job succeeds overall.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  //one attempt must be failed 
  //and another must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      it.next().getReport().getTaskAttemptState());
}
 
Example 4
Project: ditb   File: TestGlobalEventLoopGroup.java   (6 votes)
@Test
public void test() {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, true);
  AsyncRpcClient client = new AsyncRpcClient(conf);
  assertNotNull(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP);
  AsyncRpcClient client1 = new AsyncRpcClient(conf);
  assertSame(client.bootstrap.group(), client1.bootstrap.group());
  client1.close();
  assertFalse(client.bootstrap.group().isShuttingDown());

  conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, false);
  AsyncRpcClient client2 = new AsyncRpcClient(conf);
  assertNotSame(client.bootstrap.group(), client2.bootstrap.group());
  client2.close();

  client.close();
}
 
Example 5
Project: ditb   File: TestConstraints.java   (6 votes)
@Test
public void testConfigurationPreserved() throws Throwable {
  Configuration conf = new Configuration();
  conf.setBoolean("_ENABLED", false);
  conf.setLong("_PRIORITY", 10);
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table"));
  Constraints.add(desc, AlsoWorks.class, conf);
  Constraints.add(desc, WorksConstraint.class);
  assertFalse(Constraints.enabled(desc, AlsoWorks.class));
  List<? extends Constraint> constraints = Constraints.getConstraints(desc,
      this.getClass().getClassLoader());
  for (Constraint c : constraints) {
    Configuration storedConf = c.getConf();
    if (c instanceof AlsoWorks)
      assertEquals(10, storedConf.getLong("_PRIORITY", -1));
    // it's just a WorksConstraint
    else
      assertEquals(2, storedConf.getLong("_PRIORITY", -1));

  }

}
 
Example 6
Project: hadoop   File: TestFileSystem.java   (5 votes)
public void testFsShutdownHook() throws Exception {
  final Set<FileSystem> closed = Collections.synchronizedSet(new HashSet<FileSystem>());
  Configuration conf = new Configuration();
  Configuration confNoAuto = new Configuration();

  conf.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
  confNoAuto.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
  confNoAuto.setBoolean("fs.automatic.close", false);

  TestShutdownFileSystem fsWithAuto =
    (TestShutdownFileSystem)(new Path("test://a/").getFileSystem(conf));
  TestShutdownFileSystem fsWithoutAuto =
    (TestShutdownFileSystem)(new Path("test://b/").getFileSystem(confNoAuto));

  fsWithAuto.setClosedSet(closed);
  fsWithoutAuto.setClosedSet(closed);

  // Different URIs should result in different FS instances
  assertNotSame(fsWithAuto, fsWithoutAuto);

  FileSystem.CACHE.closeAll(true);
  assertEquals(1, closed.size());
  assertTrue(closed.contains(fsWithAuto));

  closed.clear();

  FileSystem.closeAll();
  assertEquals(1, closed.size());
  assertTrue(closed.contains(fsWithoutAuto));
}
 
Example 7
Project: hadoop   File: TestFSMainOperationsWebHdfs.java   (5 votes)
@BeforeClass
public static void setupCluster() {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    //change root permission to 777
    cluster.getFileSystem().setPermission(
        new Path("/"), new FsPermission((short)0777));

    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

    //get file system as a non-superuser
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
    fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(new URI(uri), conf);
      }
    });

    defaultWorkingDirectory = fileSystem.getWorkingDirectory();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example 8
Project: hadoop   File: TestS3AFastOutputStream.java   (5 votes)
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(Constants.MIN_MULTIPART_THRESHOLD, 5 * 1024 * 1024);
  conf.setInt(Constants.MULTIPART_SIZE, 5 * 1024 * 1024);
  conf.setBoolean(Constants.FAST_UPLOAD, true);
  fs = S3ATestUtils.createTestFileSystem(conf);
}
 
Example 9
Project: ditb   File: TestRegionPlacement2.java   (5 votes)
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Enable the favored nodes based load balancer
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      FavoredNodeLoadBalancer.class, LoadBalancer.class);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  TEST_UTIL.startMiniCluster(SLAVES);
}
 
Example 10
Project: hadoop   File: TestBlockStoragePolicy.java   (5 votes)
@Test (timeout=300000)
public void testConfigKeyEnabled() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  try {
    cluster.waitActive();
    cluster.getFileSystem().setStoragePolicy(new Path("/"),
        HdfsConstants.COLD_STORAGE_POLICY_NAME);
  } finally {
    cluster.shutdown();
  }
}
 
Example 11
Project: hadoop   File: TestShortCircuitLocalRead.java   (5 votes)
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support the read(ByteBuffer buf) method
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 12
Project: ditb   File: TestWALReplay.java   (5 votes)
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("dfs.support.append", true);
  // The below config supported by 0.20-append and CDH3b2
  conf.setInt("dfs.client.block.recovery.retries", 2);
  TEST_UTIL.startMiniCluster(3);
  Path hbaseRootDir =
    TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Example 13
Project: hadoop   File: TestCheckpoint.java   (5 votes)
/**
 * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
 * checkpoint if security is enabled and the NN restarts without outstanding
 * delegation tokens"
 */
@Test
public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    
    assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once, so the 2NN loads the DT into its in-memory state.
    secondary.doCheckpoint();
    
    // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
    // therefore needs to download a new fsimage the next time it performs a
    // checkpoint.
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // Ensure that the 2NN can still perform a checkpoint.
    secondary.doCheckpoint();
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 14
Project: hadoop   File: TestAMRMClientOnRMRestart.java   (5 votes)
@BeforeClass
public static void setup() throws Exception {
  conf = new Configuration();
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
}
 
Example 15
Project: hadoop-oss   File: TestRPC.java   (5 votes)
/**
 * Switch off setting socketTimeout values on RPC sockets.
 * Verify that RPC calls still work ok.
 */
public void testNoPings() throws Exception {
  Configuration conf = new Configuration();

  conf.setBoolean("ipc.client.ping", false);
  new TestRPC().testCallsInternal(conf);

  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  new TestRPC().testCallsInternal(conf);
}
 
Example 16
Project: hadoop   File: TestFairSchedulerPreemption.java   (5 votes)
public Configuration createConfiguration() {
  Configuration conf = super.createConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, StubbedFairScheduler.class,
      ResourceScheduler.class);
  conf.setBoolean(FairSchedulerConfiguration.PREEMPTION, true);
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  return conf;
}
 
Example 17
Project: hadoop   File: BaileyBorweinPlouffe.java   (5 votes)
/** Create and setup a job */
private static Job createJob(String name, Configuration conf
    ) throws IOException {
  final Job job = Job.getInstance(conf, NAME + "_" + name);
  final Configuration jobconf = job.getConfiguration();
  job.setJarByClass(BaileyBorweinPlouffe.class);

  // setup mapper
  job.setMapperClass(BbpMapper.class);
  job.setMapOutputKeyClass(LongWritable.class);
  job.setMapOutputValueClass(BytesWritable.class);

  // setup reducer
  job.setReducerClass(BbpReducer.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(BytesWritable.class);
  job.setNumReduceTasks(1);

  // setup input
  job.setInputFormatClass(BbpInputFormat.class);

  // disable task timeout
  jobconf.setLong(MRJobConfig.TASK_TIMEOUT, 0);

  // do not use speculative execution
  jobconf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
  jobconf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
  return job;
}
 
Example 18
Project: hadoop   File: TestUnbuffer.java   (4 votes)
/**
 * Test that calling Unbuffer closes sockets.
 */
@Test
public void testUnbufferClosesSockets() throws Exception {
  Configuration conf = new Configuration();
  // Set a new ClientContext.  This way, we will have our own PeerCache,
  // rather than sharing one with other unit tests.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      "testUnbufferClosesSocketsContext");

  // Disable short-circuit reads.  With short-circuit, we wouldn't hold open a
  // TCP socket.
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);

  // Set a really long socket timeout to avoid test timing issues.
  conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
      100000000L);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
      100000000L);

  MiniDFSCluster cluster = null;
  FSDataInputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.newInstance(conf);
    final Path TEST_PATH = new Path("/test1");
    DFSTestUtil.createFile(dfs, TEST_PATH, 128, (short)1, 1);
    stream = dfs.open(TEST_PATH);
    // Read a byte.  This will trigger the creation of a block reader.
    stream.seek(2);
    int b = stream.read();
    Assert.assertTrue(-1 != b);

    // The Peer cache should start off empty.
    PeerCache cache = dfs.getClient().getClientContext().getPeerCache();
    Assert.assertEquals(0, cache.size());

    // Unbuffer should clear the block reader and return the socket to the
    // cache.
    stream.unbuffer();
    stream.seek(2);
    Assert.assertEquals(1, cache.size());
    int b2 = stream.read();
    Assert.assertEquals(b, b2);
  } finally {
    if (stream != null) {
      IOUtils.cleanup(null, stream);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 19
Project: hadoop   File: AzureBlobStorageTestAccount.java   (4 votes)
public static AzureBlobStorageTestAccount createOutOfBandStore(
    int uploadBlockSize, int downloadBlockSize) throws Exception {

  saveMetricsConfigFile();

  CloudBlobContainer container = null;
  Configuration conf = createTestConfiguration();
  CloudStorageAccount account = createTestAccount(conf);
  if (null == account) {
    return null;
  }

  String containerName = String.format("wasbtests-%s-%tQ",
      System.getProperty("user.name"), new Date());

  // Create the container.
  container = account.createCloudBlobClient().getContainerReference(
      containerName);
  container.create();

  String accountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);

  // Ensure that custom throttling is disabled and tolerate concurrent
  // out-of-band appends.
  conf.setBoolean(KEY_DISABLE_THROTTLING, true);
  conf.setBoolean(KEY_READ_TOLERATE_CONCURRENT_APPEND, true);

  // Set account URI and initialize Azure file system.
  URI accountUri = createAccountUri(accountName, containerName);

  // Set up instrumentation.
  //
  AzureFileSystemMetricsSystem.fileSystemStarted();
  String sourceName = NativeAzureFileSystem.newMetricsSourceName();
  String sourceDesc = "Azure Storage Volume File System metrics";

  AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(conf);

  AzureFileSystemMetricsSystem.registerSource(
      sourceName, sourceDesc, instrumentation);
  
  
  // Create a new AzureNativeFileSystemStore object.
  AzureNativeFileSystemStore testStorage = new AzureNativeFileSystemStore();

  // Initialize the store with the throttling feedback interfaces.
  testStorage.initialize(accountUri, conf, instrumentation);

  // Create test account initializing the appropriate member variables.
  //
  AzureBlobStorageTestAccount testAcct =
      new AzureBlobStorageTestAccount(testStorage, account, container);

  return testAcct;
}
 
Example 20
Project: hadoop   File: NullRMNodeLabelsManager.java   (4 votes)
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // always enable node labels while using MemoryRMNodeLabelsManager
  conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
  super.serviceInit(conf);
}