Java Code Examples for org.apache.hadoop.conf.Configuration.setLong()

The following are Java code examples showing how to use the setLong() method of the org.apache.hadoop.conf.Configuration class. You can vote up the examples you like; your votes help surface the most useful examples.
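Before the examples, here is a minimal, self-contained sketch of the method itself. The property key and values below are made up for illustration: setLong(name, value) stores a long-valued property under the given key, and getLong(name, defaultValue) reads it back.

import org.apache.hadoop.conf.Configuration;

public class SetLongExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store a long-valued property under an illustrative key.
    conf.setLong("example.block.size", 134217728L);
    // Read it back; the second argument is the default used when the key is absent.
    long blockSize = conf.getLong("example.block.size", 67108864L);
    System.out.println("example.block.size = " + blockSize);
  }
}
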
Example 1
Project: hadoop   File: TestDatanodeRestart.java   (6 votes)
@Test public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Project: hadoop   File: TestYarnClient.java   (6 votes)
private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout,
    boolean expectedTimeoutEnforcement) {
  YarnClientImpl client = new YarnClientImpl();
  try {
    Configuration conf = new Configuration();
    if (valueForTimeout != null) {
      conf.setLong(
          YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
          valueForTimeout);
    }

    client.init(conf);

    Assert.assertEquals(
        expectedTimeoutEnforcement, client.enforceAsyncAPITimeout());
  } finally {
    IOUtils.closeQuietly(client);
  }
}
 
Example 3
Project: hadoop   File: SleepJob.java   (6 votes)
public Job createJob(int numMapper, int numReducer, 
                     long mapSleepTime, int mapSleepCount, 
                     long reduceSleepTime, int reduceSleepCount) 
    throws IOException {
  Configuration conf = getConf();
  conf.setLong(MAP_SLEEP_TIME, mapSleepTime);
  conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
  conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
  conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
  Job job = Job.getInstance(conf, "sleep");
  job.setNumReduceTasks(numReducer);
  job.setJarByClass(SleepJob.class);
  job.setMapperClass(SleepMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(SleepReducer.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setInputFormatClass(SleepInputFormat.class);
  job.setPartitionerClass(SleepJobPartitioner.class);
  job.setSpeculativeExecution(false);
  job.setJobName("Sleep job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Example 4
Project: hadoop   File: TestFiHFlush.java   (5 votes)
/** Similar to {@link #hFlushFi01_b()} but the writes happen
 * across block and checksum boundaries
 */
@Test
public void hFlushFi01_c() throws Exception { 
  final String methodName = FiTestUtil.getMethodName();
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
  runDiskErrorTest(conf, methodName, 
      customBlockSize, new DerrAction(methodName, 0), 0, true);
}
 
Example 5
Project: ditb   File: TestRowProcessorEndpoint.java   (5 votes)
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowProcessorEndpoint.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  conf.setLong("hbase.hregion.row.processor.timeout", 1000L);
  util.startMiniCluster();
}
 
Example 6
Project: hadoop   File: TestShortCircuitCache.java   (5 votes)
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Example 7
Project: hadoop   File: TestRead.java   (5 votes)
@Test(timeout=60000)
public void testEOFWithRemoteBlockReader() throws Exception {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  testEOF(cluster, 1);
  testEOF(cluster, 14);
  testEOF(cluster, 10000);   
  cluster.shutdown();
}
 
Example 8
Project: hadoop   File: TestEditLogAutoroll.java   (5 votes)
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Stall the standby checkpointer in two ways
  conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
  conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
  // Make it autoroll after 10 edits
  conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
  conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(0)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);

      fs = cluster.getFileSystem(0);
      editLog = nn0.getNamesystem().getEditLog();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
 
Example 9
Project: hadoop   File: TestProcessCorruptBlocks.java   (5 votes)
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. Here that condition is
 * tested by reducing the replication factor.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with replication factor 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *    (the corrupt replica is not removed since the number of good
 *     replicas (2) is less than the replication factor (3))
 *   Set the replication factor to 2
 *   Verify that the corrupt replica is removed
 *     (the corrupt replica can now be removed since the number of good
 *      replicas (2) equals the replication factor (2))
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Example 10
Project: hadoop   File: TestSnapshot.java   (5 votes)
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();

  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  hdfs = cluster.getFileSystem();
  dirTree = new TestDirectoryTree(DIRECTORY_TREE_LEVEL, hdfs);
}
 
Example 11
Project: ditb   File: TestHeapMemoryManager.java   (5 votes)
@Test
public void testPluggingInHeapMemoryTuner() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.78f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.02f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class,
      HeapMemoryTuner.class);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub());
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // Now we want to be in write mode. Set a bigger memstore size from CustomHeapMemoryTuner
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  // Allow the tuner to run once and make the necessary memory adjustments
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.78f, memStoreFlusher.memstoreSize);// Memstore
  assertHeapSpace(0.02f, blockCache.maxSize);// BlockCache
  // Now we want to be in read mode. Set a bigger block cache size from CustomHeapMemoryTuner
  CustomHeapMemoryTuner.blockCacheSize = 0.75f;
  CustomHeapMemoryTuner.memstoreSize = 0.05f;
  // Allow the tuner to run once and make the necessary memory adjustments
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.75f, blockCache.maxSize);// BlockCache
  assertHeapSpace(0.05f, memStoreFlusher.memstoreSize);// Memstore
}
 
Example 12
Project: hadoop   File: TestFsDatasetCacheRevocation.java   (4 votes)
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
 
Example 13
Project: hadoop   File: TestShortCircuitCache.java   (4 votes)
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Example 14
Project: hadoop   File: TestCopyFiles.java   (4 votes)
public void testMapCount() throws Exception {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniDFSCluster mr = null;
  try {
    Configuration conf = new Configuration();
    
    dfs= new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
    
    FileSystem fs = dfs.getFileSystem();
    final FsShell shell = new FsShell(conf);
    namenode = fs.getUri().toString();
    MyFile[] files = createFiles(fs.getUri(), "/srcdat");
    long totsize = 0;
    for (MyFile f : files) {
      totsize += f.getSize();
    }
    
    Configuration job = new JobConf(conf);
    job.setLong("distcp.bytes.per.map", totsize / 3);
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "100",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});
    assertTrue("Source and destination directories do not match.",
               checkFiles(fs, "/destdat", files));

    String logdir = namenode + "/logs";
    System.out.println(execCmd(shell, "-lsr", logdir));
    FileStatus[] logs = fs.listStatus(new Path(logdir));
    // rare case where splits are exact, logs.length can be 4
    assertTrue( logs.length == 2);

    deldir(fs, "/destdat");
    deldir(fs, "/logs");
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "1",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});

    System.out.println(execCmd(shell, "-lsr", logdir));
    logs = fs.globStatus(new Path(namenode+"/logs/part*"));
    assertTrue("Unexpected map count, logs.length=" + logs.length,
        logs.length == 1);
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
 
Example 15
Project: hadoop   File: TestBlockScanner.java   (4 votes)
private static void disableBlockScanner(Configuration conf) {
  conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, 0L);
}
 
Example 16
Project: ditb   File: TestCorruptedRegionStoreFile.java   (4 votes)
private static void setupConf(Configuration conf) {
  // Disable compaction so the store file count stays constant
  conf.setLong("hbase.hstore.compactionThreshold", NUM_FILES + 1);
  conf.setLong("hbase.hstore.blockingStoreFiles", NUM_FILES * 2);
}
 
Example 17
Project: hadoop   File: TestFsDatasetCacheRevocation.java   (4 votes)
/**
 * Test that when a client has a replica mmapped, we will not un-mlock that
 * replica for a reasonable amount of time, even if an uncache request
 * occurs.
 */
@Test(timeout=120000)
public void testPinning() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  Configuration conf = getDefaultConf();
  // Set a really long revocation timeout, so that we won't reach it during
  // this test.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
      1800000L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
      setPool("pool").setPath(new Path(TEST_FILE)).
        setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should still be cached.
  dfs.removeCacheDirective(cacheDirectiveId);
  Thread.sleep(500);
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Un-mmap the file.  The file should be uncached after this.
  in.releaseBuffer(buf);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.close();
  cluster.shutdown();
}
 
Example 18
Project: ditb   File: TestCompactionWithThroughputController.java   (4 votes)
/**
 * Test the tuning task of {@link PressureAwareCompactionThroughputController}
 */
@Test
public void testThroughputTuning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    20L * 1024 * 1024);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    10L * 1024 * 1024);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 4);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 6);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  conf.setInt(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
    1000);
  TEST_UTIL.startMiniCluster(1);
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    htd.setCompactionEnabled(false);
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(tableName);
    HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
    PressureAwareCompactionThroughputController throughputController =
        (PressureAwareCompactionThroughputController) regionServer.compactSplitThread
            .getCompactionThroughputController();
    assertEquals(10L * 1024 * 1024, throughputController.maxThroughput, EPSILON);
    Table table = conn.getTable(tableName);
    for (int i = 0; i < 5; i++) {
      table.put(new Put(Bytes.toBytes(i)).add(family, qualifier, new byte[0]));
      TEST_UTIL.flush(tableName);
    }
    Thread.sleep(2000);
    assertEquals(15L * 1024 * 1024, throughputController.maxThroughput, EPSILON);

    table.put(new Put(Bytes.toBytes(5)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    Thread.sleep(2000);
    assertEquals(20L * 1024 * 1024, throughputController.maxThroughput, EPSILON);

    table.put(new Put(Bytes.toBytes(6)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    Thread.sleep(2000);
    assertEquals(Double.MAX_VALUE, throughputController.maxThroughput, EPSILON);

    conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
      NoLimitCompactionThroughputController.class.getName());
    regionServer.compactSplitThread.onConfigurationChange(conf);
    assertTrue(throughputController.isStopped());
    assertTrue(regionServer.compactSplitThread.getCompactionThroughputController() instanceof NoLimitCompactionThroughputController);
  } finally {
    conn.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Example 19
Project: hadoop   File: TestWhitelistBasedResolver.java   (3 votes)
/**
 * Add a bunch of subnets and IPs to the whitelist.
 * Check for inclusion in the whitelist with a null value.
 */
public void testNullIPAddress() throws IOException {

  String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};

  TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);

  String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};

  TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);

  Configuration conf = new Configuration();
  conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
      "fixedwhitelist.txt");

  conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
      true);

  conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
      100);

  conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
      "variablewhitelist.txt");

  WhitelistBasedResolver wqr = new WhitelistBasedResolver();
  wqr.setConf(conf);

  assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((InetAddress)null));
  assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((String)null));

  TestFileBasedIPList.removeFile("fixedwhitelist.txt");
  TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
 
Example 20
Project: hadoop   File: SkipBadRecords.java   (2 votes)
/**
 * Set the number of acceptable skip groups surrounding the bad group PER 
 * bad group in reducer. The number includes the bad group as well.
 * To turn the feature of detection/skipping of bad groups off, set the 
 * value to 0.
 * The framework tries to narrow down the skipped range by retrying  
 * until this threshold is met OR all attempts get exhausted for this task. 
 * Set the value to Long.MAX_VALUE to indicate that the framework need not try to
 * narrow down. Whatever groups (application-dependent) get skipped are
 * acceptable.
 * Default value is 0.
 * 
 * @param conf the configuration
 * @param maxSkipGrps acceptable skip groups.
 */
public static void setReducerMaxSkipGroups(Configuration conf, 
    long maxSkipGrps) {
  conf.setLong(REDUCER_MAX_SKIP_GROUPS, maxSkipGrps);
}
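 
As a quick usage sketch of the method shown in Example 20 (the driver class here is hypothetical and only illustrates the call), a job setup might disable narrowing entirely:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.SkipBadRecords;

public class SkipGroupsConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Accept whatever groups get skipped without narrowing the range.
    SkipBadRecords.setReducerMaxSkipGroups(conf, Long.MAX_VALUE);
    // Or turn detection/skipping of bad groups off entirely:
    // SkipBadRecords.setReducerMaxSkipGroups(conf, 0L);
    System.out.println("reducer max skip groups configured");
  }
}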