Java Code Examples for org.apache.hadoop.conf.Configuration#setClass()

The following examples show how to use org.apache.hadoop.conf.Configuration#setClass(). They are drawn from open-source projects; the project and source file for each example are noted in the header above its code.
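
Before the project examples, here is a minimal, self-contained sketch of the setClass()/getClass() round trip. The key example.value.class is made up for illustration; Text and Writable come from hadoop-common. setClass() verifies that the class implements the given interface (throwing a RuntimeException otherwise) and stores its fully qualified name under the key.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class SetClassSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store Text under the key, asserting that it implements Writable.
    conf.setClass("example.value.class", Text.class, Writable.class);
    // Read it back; the second argument is the default when the key is unset.
    Class<? extends Writable> cls =
        conf.getClass("example.value.class", Text.class, Writable.class);
    System.out.println(cls.getName()); // prints org.apache.hadoop.io.Text
  }
}
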
Example 1
Source File: TestRackResolver.java    From big-c with Apache License 2.0
@Test
public void testCaching() {
  Configuration conf = new Configuration();
  conf.setClass(
    CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
    MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    InetAddress iaddr = InetAddress.getByName("host1");
    MyResolver.resolvedHost1 = iaddr.getHostAddress();
  } catch (UnknownHostException e) {
    // Ignore if not found
  }
  Node node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve(invalidHost);
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK, node.getNetworkLocation());
}
 
Example 2
Source File: ValuesTest.java    From marklogic-contentpump with Apache License 2.0
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 2) {
        System.err.println("Usage: ValuesTest configFile outputDir");
        System.exit(2);
    }

    Job job = Job.getInstance(conf);
    job.setJarByClass(ValuesTest.class);
    job.setInputFormatClass(ValueInputFormat.class);
    job.setMapperClass(ValueMapper.class);
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

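    // Job.getInstance(conf) takes a copy of the configuration, so the
    // remaining settings must be applied to the job's own copy, obtained below.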
    conf = job.getConfiguration();
    conf.addResource(otherArgs[0]);
    conf.setClass(MarkLogicConstants.INPUT_VALUE_CLASS, Text.class, 
            Writable.class);
    conf.setClass(MarkLogicConstants.INPUT_LEXICON_FUNCTION_CLASS, 
        ValuesFunction.class, Values.class);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Example 3
Source File: DataTransferSaslUtil.java    From big-c with Apache License 2.0
/**
 * Creates a SaslPropertiesResolver from the given configuration.  This method
 * works by cloning the configuration, translating configuration properties
 * specific to DataTransferProtocol to what SaslPropertiesResolver expects,
 * and then delegating to SaslPropertiesResolver for initialization.  This
 * method returns null if SASL protection has not been configured for
 * DataTransferProtocol.
 *
 * @param conf configuration to read
 * @return SaslPropertiesResolver for DataTransferProtocol, or null if not
 *   configured
 */
public static SaslPropertiesResolver getSaslPropertiesResolver(
    Configuration conf) {
  String qops = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
  if (qops == null || qops.isEmpty()) {
    LOG.debug("DataTransferProtocol not using SaslPropertiesResolver, no " +
      "QOP found in configuration for {}", DFS_DATA_TRANSFER_PROTECTION_KEY);
    return null;
  }
  Configuration saslPropsResolverConf = new Configuration(conf);
  saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops);
  Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
    HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
    SaslPropertiesResolver.class, SaslPropertiesResolver.class);
  resolverClass = conf.getClass(DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
    resolverClass, SaslPropertiesResolver.class);
  saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
    resolverClass, SaslPropertiesResolver.class);
  SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(
    saslPropsResolverConf);
  LOG.debug("DataTransferProtocol using SaslPropertiesResolver, configured " +
    "QOP {} = {}, configured class {} = {}", DFS_DATA_TRANSFER_PROTECTION_KEY, qops, 
    DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass);
  return resolver;
}
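
For context, a hypothetical caller of this method would treat the null return as "SASL protection not configured" (getDefaultProperties() is part of the real SaslPropertiesResolver API; the surrounding wiring is assumed):

SaslPropertiesResolver resolver = getSaslPropertiesResolver(conf);
if (resolver != null) {
  // SASL is configured for DataTransferProtocol; fetch the default QOP properties.
  Map<String, String> saslProps = resolver.getDefaultProperties();
}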
 
Example 4
Source File: TestRegionPlacement.java    From hbase with Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Enable the favored nodes based load balancer
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      FavoredNodeLoadBalancer.class, LoadBalancer.class);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  TEST_UTIL.startMiniCluster(SLAVES);
  CONNECTION = TEST_UTIL.getConnection();
  admin = CONNECTION.getAdmin();
  rp = new RegionPlacementMaintainer(conf);
}
 
Example 5
Source File: MiniYARNCluster.java    From hadoop with Apache License 2.0
@Override
protected synchronized void serviceInit(Configuration conf)
    throws Exception {
  appHistoryServer = new ApplicationHistoryServer();
  conf.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
      MemoryTimelineStore.class, TimelineStore.class);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
      MemoryTimelineStateStore.class, TimelineStateStore.class);
  appHistoryServer.init(conf);
  super.serviceInit(conf);
}
 
Example 6
Source File: BasicParameterHelper.java    From geowave with Apache License 2.0
private static final void setParameter(
    final Configuration config,
    final Class<?> scope,
    final Object val,
    final ParameterEnum configItem) {
  if (val != null) {
    if (val instanceof Long) {
      config.setLong(
          GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
          ((Long) val));
    } else if (val instanceof Double) {
      config.setDouble(
          GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
          ((Double) val));
    } else if (val instanceof Boolean) {
      config.setBoolean(
          GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
          ((Boolean) val));
    } else if (val instanceof Integer) {
      config.setInt(
          GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
          ((Integer) val));
    } else if (val instanceof Class) {
      config.setClass(
          GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
          ((Class) val),
          ((Class) val));
    } else if (val instanceof byte[]) {
      config.set(
          GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()),
          ByteArrayUtils.byteArrayToString((byte[]) val));
    } else {
      config.set(GeoWaveConfiguratorBase.enumToConfKey(scope, configItem.self()), val.toString());
    }
  }
}
 
Example 7
Source File: TestAclCommands.java    From hadoop with Apache License 2.0
@Test
public void testLsAclsUnsupported() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  assertEquals("ls must succeed even if FileSystem does not implement ACLs.",
    0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
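
As an aside, binding fs.stubfs.impl as above is exactly what lets FileSystem resolve the stubfs scheme; a sketch of the direct lookup, assuming the test's StubFileSystem and java.net.URI are in scope:

FileSystem fs = FileSystem.get(URI.create("stubfs:///"), conf);
// fs is now an instance of StubFileSystem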
 
Example 8
Source File: TestApplicationHistoryManagerImpl.java    From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  Configuration config = new Configuration();
  config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
    MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
  applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl();
  applicationHistoryManagerImpl.init(config);
  applicationHistoryManagerImpl.start();
  store = applicationHistoryManagerImpl.getHistoryStore();
}
 
Example 9
Source File: MiniYARNCluster.java    From big-c with Apache License 2.0
@Override
protected synchronized void serviceInit(Configuration conf)
    throws Exception {
  appHistoryServer = new ApplicationHistoryServer();
  conf.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
      MemoryTimelineStore.class, TimelineStore.class);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
      MemoryTimelineStateStore.class, TimelineStateStore.class);
  appHistoryServer.init(conf);
  super.serviceInit(conf);
}
 
Example 10
Source File: TestDatanodeManager.java    From big-c with Apache License 2.0
@Test (timeout = 100000)
public void testRejectUnresolvedDatanodes() throws IOException {
  //Create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  
  Configuration conf = new Configuration();
  
  //Set configuration property for rejecting unresolved topology mapping
  conf.setBoolean(
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, true);
  
  //set TestDatanodeManager.MyResolver to be used for topology resolving
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      TestDatanodeManager.MyResolver.class, DNSToSwitchMapping.class);
  
  //create DatanodeManager
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, conf);
  
  //storageID to register.
  String storageID = "someStorageID-123";
  
  DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
  Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
  
  try {
    //Register this node
    dm.registerDatanode(dr);
    Assert.fail("Expected an UnresolvedTopologyException");
  } catch (UnresolvedTopologyException ute) {
    LOG.info("Expected - topology is not resolved and " +
        "registration is rejected.");
  } catch (Exception e) {
    Assert.fail("Expected an UnresolvedTopologyException");
  }
}
 
Example 11
Source File: TestPreemption.java    From RDFS with Apache License 2.0
@Override
protected void setUp() throws IOException {
  conf = new Configuration();
  conf.setBoolean(CoronaConf.CONFIGURED_POOLS_ONLY, false);
  conf.setClass("topology.node.switch.mapping.impl",
                org.apache.hadoop.net.IPv4AddressTruncationMapping.class,
                org.apache.hadoop.net.DNSToSwitchMapping.class);
  conf.set(CoronaConf.CPU_TO_RESOURCE_PARTITIONING, TstUtils.std_cpu_to_resource_partitioning);

  topologyCache = new TopologyCache(conf);
  cm = new ClusterManagerTestable(conf);

  numNodes = 10;
  nodes = new ClusterNodeInfo[numNodes];
  Map<ResourceType, String> resourceInfos =
      new EnumMap<ResourceType, String>(ResourceType.class);
  resourceInfos.put(ResourceType.MAP, "");
  resourceInfos.put(ResourceType.REDUCE, "");
  for (int i=0; i<numNodes; i++) {
    nodes[i] = new ClusterNodeInfo(TstUtils.getNodeHost(i),
                                   new InetAddress(TstUtils.getNodeHost(i),
                                                   TstUtils.getNodePort(i)),
                                   TstUtils.std_spec);
    nodes[i].setFree(TstUtils.std_spec);
    nodes[i].setResourceInfos(resourceInfos);
  }

  setupSessions(3);
}
 
Example 12
Source File: TestCapacityScheduler.java    From hadoop with Apache License 2.0
@Test
public void testResourceOverCommit() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduler: 2 GB given to AM1, 2 GB remaining on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(100);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = ((MockRM)rm).getAdminService();
  as.updateNodeResource(request);
  
  // Now the used resource is still 4 GB, and the available resource is negative.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As the container returns its 2 GB, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Verify no NPE is triggered in schedule() after the resource is updated.
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
  alloc1Response = am1.schedule();
  Assert.assertEquals("Shouldn't have enough resource to allocate containers",
      0, alloc1Response.getAllocatedContainers().size());
  int times = 0;
  // try 10 times, as scheduling is an async process.
  while (alloc1Response.getAllocatedContainers().size() < 1
      && times++ < 10) {
    LOG.info("Waiting for containers to be allocated for app 1... Tried "
        + times + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals("Shouldn't have enough resource to allocate containers",
      0, alloc1Response.getAllocatedContainers().size());
  rm.stop();
}
 
Example 13
Source File: TestLocalFileSystem.java    From hadoop with Apache License 2.0
/**
 * Regression test for HADOOP-9307: BufferedFSInputStream returning
 * wrong results after certain sequences of seeks and reads.
 */
@Test
public void testBufferedFSInputStream() throws IOException {
  Configuration conf = new Configuration();
  conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
  conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
  FileSystem fs = FileSystem.newInstance(conf);
  
  byte[] buf = new byte[10*1024];
  new Random().nextBytes(buf);
  
  // Write random bytes to file
  FSDataOutputStream stream = fs.create(TEST_PATH);
  try {
    stream.write(buf);
  } finally {
    stream.close();
  }
  
  Random r = new Random();

  FSDataInputStream stm = fs.open(TEST_PATH);
  // Record the sequence of seeks and reads which trigger a failure.
  int[] seeks = new int[10];
  int[] reads = new int[10];
  try {
    for (int i = 0; i < 1000; i++) {
      int seekOff = r.nextInt(buf.length); 
      int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
      
      seeks[i % seeks.length] = seekOff;
      reads[i % reads.length] = toRead;
      verifyRead(stm, buf, seekOff, toRead);
      
    }
  } catch (AssertionError afe) {
    StringBuilder sb = new StringBuilder();
    sb.append("Sequence of actions:\n");
    for (int j = 0; j < seeks.length; j++) {
      sb.append("seek @ ").append(seeks[j]).append("  ")
        .append("read ").append(reads[j]).append("\n");
    }
    System.err.println(sb.toString());
    throw afe;
  } finally {
    stm.close();
  }
}
 
Example 14
Source File: LzopMapReduce.java    From hiped2 with Apache License 2.0
/**
 * The MapReduce driver - setup and launch the job.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {

  Cli cli = Cli.builder().setArgs(args).addOptions(MrIoOpts.values()).build();
  int result = cli.runCmd();

  if (result != 0) {
    return result;
  }

  Path input = new Path(cli.getArgValueAsString(MrIoOpts.INPUT));
  Path output = new Path(cli.getArgValueAsString(MrIoOpts.OUTPUT));

  Configuration conf = super.getConf();

  Path compressedInputFile = compressAndIndex(input, conf);

  conf.setBoolean("mapred.compress.map.output", true);
  conf.setClass("mapred.map.output.compression.codec",
      LzopCodec.class,
      CompressionCodec.class);

  Job job = new Job(conf);
  job.setJarByClass(LzopMapReduce.class);

  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);

  job.setInputFormatClass(LzoTextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);

  job.getConfiguration().setBoolean("mapred.output.compress", true);
  job.getConfiguration().setClass("mapred.output.compression.codec",
        LzopCodec.class, CompressionCodec.class);

  FileInputFormat.addInputPath(job, compressedInputFile);
  FileOutputFormat.setOutputPath(job, output);

  if (job.waitForCompletion(true)) {
    return 0;
  }
  return 1;
}
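
Note that the mapred.* keys used above are the deprecated Hadoop 1 names. On Hadoop 2+, the equivalent settings would use the mapreduce.* keys (a sketch, with the same codec assumptions as the example):

conf.setBoolean("mapreduce.map.output.compress", true);
conf.setClass("mapreduce.map.output.compress.codec",
    LzopCodec.class, CompressionCodec.class);
conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
conf.setClass("mapreduce.output.fileoutputformat.compress.codec",
    LzopCodec.class, CompressionCodec.class);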
 
Example 15
Source File: SafeFileOutputCommitterTest.java    From datawave with Apache License 2.0
private void testConcurrentCommitTaskWithSubDir(int version) throws Exception {
    final Job job = Job.getInstance();
    FileOutputFormat.setOutputPath(job, outDir);
    final Configuration conf = job.getConfiguration();
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    conf.setInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
    
    conf.setClass("fs.file.impl", RLFS.class, FileSystem.class);
    FileSystem.closeAll();
    
    final JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    final FileOutputCommitter amCommitter = new SafeFileOutputCommitter(outDir, jContext);
    amCommitter.setupJob(jContext);
    
    final TaskAttemptContext[] taCtx = new TaskAttemptContextImpl[2];
    taCtx[0] = new TaskAttemptContextImpl(conf, taskID);
    taCtx[1] = new TaskAttemptContextImpl(conf, taskID1);
    
    final TextOutputFormat[] tof = new TextOutputFormat[2];
    for (int i = 0; i < tof.length; i++) {
        tof[i] = new TextOutputFormat() {
            @Override
            public Path getDefaultWorkFile(TaskAttemptContext context, String extension) throws IOException {
                final FileOutputCommitter foc = (FileOutputCommitter) getOutputCommitter(context);
                return new Path(new Path(foc.getWorkPath(), SUB_DIR), getUniqueFile(context, getOutputName(context), extension));
            }
        };
    }
    
    final ExecutorService executor = Executors.newFixedThreadPool(2);
    try {
        for (int i = 0; i < taCtx.length; i++) {
            final int taskIdx = i;
            executor.submit((Callable<Void>) () -> {
                final OutputCommitter outputCommitter = tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
                outputCommitter.setupTask(taCtx[taskIdx]);
                final RecordWriter rw = tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
                writeOutput(rw, taCtx[taskIdx]);
                outputCommitter.commitTask(taCtx[taskIdx]);
                return null;
            });
        }
    } finally {
        executor.shutdown();
        while (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
            LOG.info("Awaiting thread termination!");
        }
    }
    
    amCommitter.commitJob(jContext);
    final RawLocalFileSystem lfs = new RawLocalFileSystem();
    lfs.setConf(conf);
    assertFalse("Must not end up with sub_dir/sub_dir", lfs.exists(new Path(OUT_SUB_DIR, SUB_DIR)));
    
    // validate output
    validateContent(OUT_SUB_DIR);
    FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Example 16
Source File: TestTrash.java    From RDFS with Apache License 2.0
/**
 * @param fs
 * @param conf
 * @throws Exception
 */
protected void trashPatternEmptier(FileSystem fs, Configuration conf) throws Exception {
  // Trash with 12-second deletes and 6-second checkpoints
  conf.set("fs.trash.interval", "0.2"); // 12 seconds
  conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds
  conf.setClass("fs.trash.classname", TrashPolicyPattern.class, TrashPolicy.class);
  conf.set("fs.trash.base.paths", TEST_DIR + "/my_root/*/");
  conf.set("fs.trash.unmatched.paths", TEST_DIR + "/unmatched/");
  Trash trash = new Trash(conf);
  // clean up trash can
  fs.delete(new Path(TEST_DIR + "/my_root/*/"), true);
  fs.delete(new Path(TEST_DIR + "/my_root_not/*/"), true);


  FsShell shell = new FsShell();
  shell.setConf(conf);
  shell.init();
  // First create a new directory with mkdirs
  deleteAndCheckTrash(fs, shell, "my_root/sub_dir1/sub_dir1_1/myFile",
      "my_root/sub_dir1/.Trash/Current/" + TEST_DIR
          + "/my_root/sub_dir1/sub_dir1_1");
  deleteAndCheckTrash(fs, shell, "my_root/sub_dir2/sub_dir2_1/myFile",
      "my_root/sub_dir2/.Trash/Current/" + TEST_DIR
          + "/my_root/sub_dir2/sub_dir2_1");
  deleteAndCheckTrash(fs, shell, "my_root_not/", "unmatched/.Trash/Current"
      + TEST_DIR + "/my_root_not");
  deleteAndCheckTrash(fs, shell, "my_root/file", "unmatched/.Trash/Current"
      + TEST_DIR + "/my_root/file");

  Path currentTrash = new Path(TEST_DIR, "my_root/sub_dir1/.Trash/Current/");
  fs.mkdirs(currentTrash);
  cmdUsingShell("-rmr", shell, currentTrash);
  TestCase.assertTrue(!fs.exists(currentTrash));

  cmdUsingShell("-rmr", shell, new Path(TEST_DIR, "my_root"));
  TestCase.assertTrue(fs.exists(new Path(TEST_DIR,
      "unmatched/.Trash/Current/" + TEST_DIR + "/my_root")));
  
  // Test Emptier
  // Start Emptier in background
  Runnable emptier = trash.getEmptier();
  Thread emptierThread = new Thread(emptier);
  emptierThread.start();

  int fileIndex = 0;
  Set<String> checkpoints = new HashSet<String>();
  while (true)  {
    // Create a file with a new name
    Path myFile = new Path(TEST_DIR, "my_root/sub_dir1/sub_dir2/myFile" + fileIndex++);
    writeFile(fs, myFile);

    // Delete the file to trash
    String[] args = new String[2];
    args[0] = "-rm";
    args[1] = myFile.toString();
    int val = -1;
    try {
      val = shell.run(args);
    } catch (Exception e) {
      System.err.println("Exception raised from Trash.run " +
                         e.getLocalizedMessage());
    }
    assertTrue(val == 0);

    Path trashDir = new Path(TEST_DIR, "my_root/sub_dir1/.Trash/Current/");
    FileStatus[] files = fs.listStatus(trashDir.getParent());
    // Scan files in .Trash and add them to set of checkpoints
    for (FileStatus file : files) {
      String fileName = file.getPath().getName();
      checkpoints.add(fileName);
    }
    // If checkpoints contains 5 entries, they are Current + 4 checkpoint directories
    if (checkpoints.size() == 5) {
      // The actual contents should be smaller since the last checkpoint
      // should've been deleted and Current might not have been recreated yet
      assertTrue(5 > files.length);
      break;
    }
    Thread.sleep(5000);
  }
  emptierThread.interrupt();
  emptierThread.join();
}
 
Example 17
Source File: MiniTezCluster.java    From incubator-tez with Apache License 2.0
@Override
public void serviceInit(Configuration conf) throws Exception {
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
  // blacklisting disabled to prevent scheduling issues
  conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
  if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(),
        "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
  }
  
  if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
    // nothing defined; set a quick delete value
    conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0L);
  }
  
  File appJarLocalFile = new File(MiniTezCluster.APPJAR);

  if (!appJarLocalFile.exists()) {
    String message = "TezAppJar " + MiniTezCluster.APPJAR
        + " not found. Exiting.";
    LOG.info(message);
    throw new TezUncheckedException(message);
  }
  
  FileSystem fs = FileSystem.get(conf);
  Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
  Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
  // Copy AppJar and make it public.
  Path appMasterJar = new Path(MiniTezCluster.APPJAR);
  fs.copyFromLocalFile(appMasterJar, appRemoteJar);
  fs.setPermission(appRemoteJar, new FsPermission("777"));

  conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
  LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));

  // VMEM and PMEM monitoring are both disabled.
  conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
  conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);

  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,  "000");

  try {
    Path stagingPath = FileContext.getFileContext(conf).makeQualified(
        new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
    /*
     * Re-configure the staging path on Windows if the file system is localFs.
     * We need to use an absolute path that contains the drive letter. The unit
     * test could run on a different drive than the AM. We can run into the
     * issue that job files are localized to the drive where the test runs,
     * while the AM starts on a different drive and fails to find the job
     * metafiles. Using an absolute path avoids this ambiguity.
     */
    if (Path.WINDOWS) {
      if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
            new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR))
                .getAbsolutePath());
      }
    }
    FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
    if (fc.util().exists(stagingPath)) {
      LOG.info(stagingPath + " exists! deleting...");
      fc.delete(stagingPath, true);
    }
    LOG.info("mkdir: " + stagingPath);
    fc.mkdir(stagingPath, null, true);

    //mkdir done directory as well
    String doneDir =
        JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
    Path doneDirPath = fc.makeQualified(new Path(doneDir));
    fc.mkdir(doneDirPath, null, true);
  } catch (IOException e) {
    throw new TezUncheckedException("Could not create staging directory. ", e);
  }
  conf.set(MRConfig.MASTER_ADDRESS, "test");

  //configure the shuffle service in NM
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,
      ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class,
      Service.class);

  // Non-standard shuffle port
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

  conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR,
      DefaultContainerExecutor.class, ContainerExecutor.class);

  // TestMRJobs is for testing non-uberized operation only; see TestUberAM
  // for corresponding uberized tests.
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  super.serviceInit(conf);
}
 
Example 18
Source File: TestTaskAttempt.java    From tez with Apache License 2.0
@Test (timeout = 60000L)
public void testProgressAfterSubmit() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      appId, 0);
  TezDAGID dagID = TezDAGID.getInstance(appId, 1);
  TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
  TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);

  MockEventHandler eventHandler = spy(new MockEventHandler());
  TaskCommunicatorManagerInterface taListener = createMockTaskAttemptListener();

  Configuration taskConf = new Configuration();
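  // Route file:// URIs to the test's StubbedFS, and disable the FileSystem
  // cache so the override is actually picked up.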
  taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  taskConf.setBoolean("fs.file.impl.disable.cache", true);
  taskConf.setLong(TezConfiguration.TEZ_TASK_PROGRESS_STUCK_INTERVAL_MS, 50);

  locationHint = TaskLocationHint.createTaskLocationHint(
      new HashSet<String>(Arrays.asList(new String[]{"127.0.0.1"})), null);
  Resource resource = Resource.newInstance(1024, 1);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  @SuppressWarnings("deprecation")
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  AMContainerMap containers = new AMContainerMap(
      mock(ContainerHeartbeatHandler.class), mock(TaskCommunicatorManagerInterface.class),
      new ContainerContextMatcher(), appCtx);
  containers.addContainerIfNew(container, 0, 0, 0);

  doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
  doReturn(containers).when(appCtx).getAllContainers();

  TaskHeartbeatHandler mockHeartbeatHandler = mock(TaskHeartbeatHandler.class);
  MockClock mockClock = new MockClock();
  TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler,
      taListener, taskConf, mockClock,
      mockHeartbeatHandler, appCtx, false,
      resource, createFakeContainerContext(), false);
  TezTaskAttemptID taskAttemptID = taImpl.getID();
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  mockClock.incrementTime(20L);
  taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, 0, 0));
  mockClock.incrementTime(55L);
  taImpl.handle(new TaskAttemptEventSubmitted(taskAttemptID, contId));
  taImpl.handle(new TaskAttemptEventStatusUpdate(
      taskAttemptID, new TaskStatusUpdateEvent(null, 0.1f, null, false)));
  verify(eventHandler, atLeast(1)).handle(arg.capture());
  if (arg.getValue() instanceof TaskAttemptEvent) {
    taImpl.handle((TaskAttemptEvent) arg.getValue());
  }
  Assert.assertEquals("Task Attempt's internal state should be SUBMITTED!",
      taImpl.getInternalState(), TaskAttemptStateInternal.SUBMITTED);
}
 
Example 19
Source File: AggregatingContextWriter.java    From datawave with Apache License 2.0
@Override
public void configureChainedContextWriter(Configuration conf, Class<? extends ContextWriter<OK,OV>> contextWriterClass) {
    conf.setClass(CONTEXT_WRITER_CLASS, contextWriterClass, ContextWriter.class);
}
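
Presumably the reading side recovers the class with getClass() against the same interface; a hypothetical retrieval sketch (raw ContextWriter type kept for brevity):

Class<? extends ContextWriter> clazz =
        conf.getClass(CONTEXT_WRITER_CLASS, null, ContextWriter.class);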
 
Example 20
Source File: TestResourceUsageEmulators.java    From big-c with Apache License 2.0
/**
 * Test {@link LoadJob.ResourceUsageMatcherRunner}.
 */
@Test
@SuppressWarnings("unchecked")
public void testResourceUsageMatcherRunner() throws Exception {
  Configuration conf = new Configuration();
  FakeProgressive progress = new FakeProgressive();
  
  // set the resource calculator plugin
  conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
                DummyResourceCalculatorPlugin.class, 
                ResourceCalculatorPlugin.class);
  // set the resource usage emulation plugin
  conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS, 
                TestResourceUsageEmulatorPlugin.class, 
                ResourceUsageEmulatorPlugin.class);
  
  long currentTime = System.currentTimeMillis();
  
  // initialize the matcher class
  TaskAttemptID id = new TaskAttemptID("test", 1, TaskType.MAP, 1, 1);
  StatusReporter reporter = new DummyReporter(progress);
  TaskInputOutputContext context = 
    new MapContextImpl(conf, id, null, null, null, reporter, null);
  FakeResourceUsageMatcherRunner matcher = 
    new FakeResourceUsageMatcherRunner(context, null);
  
  // check if the matcher initialized the plugin
  String identifier = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
  long initTime = 
    TestResourceUsageEmulatorPlugin.testInitialization(identifier, conf);
  assertTrue("ResourceUsageMatcherRunner failed to initialize the"
             + " configured plugin", initTime > currentTime);
  
  // check the progress
  assertEquals("Progress mismatch in ResourceUsageMatcherRunner", 
               0, progress.getProgress(), 0D);
  
  // call match() and check progress
  progress.setProgress(0.01f);
  currentTime = System.currentTimeMillis();
  matcher.test();
  long emulateTime = 
    TestResourceUsageEmulatorPlugin.testEmulation(identifier, conf);
  assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate"
             + " the configured plugin", emulateTime > currentTime);
}