Java Code Examples for org.apache.hadoop.conf.Configuration#set()

The following examples show how to use org.apache.hadoop.conf.Configuration#set(). Each example is drawn from an open-source project; the source file and license are noted above each snippet.
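
Before turning to the project examples, here is a minimal, self-contained sketch of the API itself (the key names are made up for illustration): Configuration#set(String, String) stores a plain string value for a key, and the typed setters and getters (setBoolean, setInt, and so on) are convenience wrappers over the same string-valued store.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // set() always stores a string...
    conf.set("example.service.host", "localhost");
    conf.setInt("example.service.port", 8020);        // stored as "8020"
    conf.setBoolean("example.service.enabled", true); // stored as "true"

    // ...and get() reads it back, optionally with a default value.
    String host = conf.get("example.service.host");                  // "localhost"
    int port = conf.getInt("example.service.port", 9000);            // 8020
    boolean on = conf.getBoolean("example.service.enabled", false);  // true
    String missing = conf.get("example.no.such.key", "fallback");    // "fallback"

    System.out.println(host + ":" + port + " enabled=" + on + " " + missing);
  }
}
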
Example 1
Source File: TestAllocationFileLoaderService.java    From big-c with Apache License 2.0
/**
 * Verify that a dynamic user queue and a reservable queue can't be
 * combined on the same queue.
 */
@Test (expected = AllocationConfigurationException.class)
public void testReservableCannotBeCombinedWithDynamicUserQueue()
    throws Exception {
  Configuration conf = new Configuration();
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);

  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"notboth\" type=\"parent\" >");
  out.println("<reservation>");
  out.println("</reservation>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();

  AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
  allocLoader.init(conf);
  ReloadListener confHolder = new ReloadListener();
  allocLoader.setReloadListener(confHolder);
  allocLoader.reloadAllocations();
}
 
Example 2
Source File: TestRMHA.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  configuration = new Configuration();
  UserGroupInformation.setConfiguration(configuration);
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
      + RM2_NODE_ID);
  for (String confKey : YarnConfiguration
      .getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
  }

  // Enable webapp to test web-services also
  configuration.setBoolean(MockRM.ENABLE_WEBAPP, true);
  configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
 
Example 3
Source File: TestMDSSerde.java    From multiple-dimension-spread with Apache License 2.0
@Test
public void T_initialize_3() throws SerDeException{
  MDSSerde serde = new MDSSerde();
  Configuration conf = new Configuration();
  conf.set( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , "num" );

  Properties table = new Properties();
  Properties part = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , "str,num,arry,nest" );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , "string,int,array<string>,struct<a:string,b:int>" );

  serde.initialize( conf , table , part );
  StructObjectInspector inspector = (StructObjectInspector)( serde.getObjectInspector() );
  List<? extends StructField> fieldList = inspector.getAllStructFieldRefs();
  assertEquals( fieldList.get(0).getFieldName() , "num" );

  assertEquals( ( fieldList.get(0).getFieldObjectInspector() instanceof PrimitiveObjectInspector ) , true );
}
 
Example 4
Source File: TestFSNamesystem.java    From big-c with Apache License 2.0
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
 
Example 5
Source File: FlagMaker.java    From datawave with Apache License 2.0
FileSystem getHadoopFS() throws IOException {
    Configuration hadoopConfiguration = new Configuration();
    hadoopConfiguration.set("fs.defaultFS", fmc.getHdfs());
    try {
        return FileSystem.get(hadoopConfiguration);
    } catch (IOException ex) {
        log.error("Unable to connect to HDFS. Exiting");
        throw ex;
    }
}
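
A usage note on the snippet above: the value of fs.defaultFS is a filesystem URI, and it must be set before FileSystem.get(Configuration) is called, since that call resolves the default filesystem from the configuration. A minimal sketch with a hypothetical NameNode address:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

static FileSystem connect() throws IOException {
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "hdfs://namenode.example.com:8020"); // hypothetical address
  return FileSystem.get(conf); // resolves the scheme/authority from fs.defaultFS
}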
 
Example 6
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
public Configuration newConfiguration() {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        testName + "._PORT.sock").getAbsolutePath());
  return conf;
}
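
Worth noting for the snippet above: per hdfs-default.xml, if the literal string _PORT appears in dfs.domain.socket.path it is replaced at runtime by the DataNode's TCP port, so the pattern used here yields a unique socket path per DataNode.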
 
Example 7
Source File: TestWasbUriAndConfiguration.java    From hadoop with Apache License 2.0
@Test
public void testInvalidKeyProviderNonexistantClass() throws Exception {
  Configuration conf = new Configuration();
  String account = "testacct";

  conf.set("fs.azure.account.keyprovider." + account,
      "org.apache.Nonexistant.Class");
  try {
    AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
    Assert.fail("Nonexistant key provider class should have thrown a "
        + "KeyProviderException");
  } catch (KeyProviderException e) {
  }
}
 
Example 8
Source File: TestGroupingTableMap.java    From hbase with Apache License 2.0
@Test
@SuppressWarnings({ "deprecation", "unchecked" })
public void shouldCreateNewKeyAlthoughExtraKey() throws Exception {
  GroupingTableMap gTableMap = null;
  try {
    Result result = mock(Result.class);
    Reporter reporter = mock(Reporter.class);
    gTableMap = new GroupingTableMap();
    Configuration cfg = new Configuration();
    cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
    JobConf jobConf = new JobConf(cfg);
    gTableMap.configure(jobConf);

    byte[] row = {};
    List<Cell> keyValues = ImmutableList.<Cell>of(
        new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
            Bytes.toBytes("1111")),
        new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"),
            Bytes.toBytes("2222")),
        new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"),
            Bytes.toBytes("3333")));
    when(result.listCells()).thenReturn(keyValues);
    OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
        mock(OutputCollector.class);
    gTableMap.map(null, result, outputCollectorMock, reporter);
    verify(result).listCells();
    verify(outputCollectorMock, times(1))
      .collect(any(), any());
    verifyNoMoreInteractions(outputCollectorMock);
  } finally {
    if (gTableMap != null)
      gTableMap.close();
  }
}
 
Example 9
Source File: TestProcessCorruptBlocks.java    From hadoop with Apache License 2.0
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. Here that condition is
 * reached by reducing the replication factor.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with replication factor 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *     (the corrupt replica is not removed, since the number of good
 *      replicas (2) is less than the replication factor (3))
 *   Set the replication factor to 2
 *   Verify that the corrupt replica is removed
 *     (it should be removed, since the number of good replicas (2)
 *      now equals the replication factor (2))
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Example 10
Source File: ApplicationTest.java    From examples with Apache License 2.0
private Configuration getConfig()
{
  final Configuration result = new Configuration(false);
  result.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
  result.setInt("dt.application.fileOutput.dt.operator.generator.prop.divisor", 3);
  result.set("dt.application.fileOutput.operator.writer.prop.filePath", outputDirName);
  return result;
}
 
Example 11
Source File: TestGenericWritable.java    From RDFS with Apache License 2.0
@Override
protected void setUp() throws Exception {
  super.setUp();
  conf = new Configuration();
  //set the configuration parameter
  conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
}
 
Example 12
Source File: TestWebHDFSForHA.java    From big-c with Apache License 2.0
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 13
Source File: TestSpeculation.java    From tez with Apache License 2.0
/**
 * Sets default conf.
 */
@Before
public void setDefaultConf() {
  try {
    defaultConf = new Configuration(false);
    defaultConf.set("fs.defaultFS", "file:///");
    defaultConf.setBoolean(TezConfiguration.TEZ_LOCAL_MODE, true);
    defaultConf.setBoolean(TezConfiguration.TEZ_AM_SPECULATION_ENABLED, true);
    defaultConf.setFloat(
        ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MIN_SRC_FRACTION, 1);
    defaultConf.setFloat(
        ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MAX_SRC_FRACTION, 1);
    localFs = FileSystem.getLocal(defaultConf);
    String stagingDir =
        "target" + Path.SEPARATOR + TestSpeculation.class.getName()
            + "-tmpDir";
    defaultConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stagingDir);
    defaultConf.setClass(TezConfiguration.TEZ_AM_TASK_ESTIMATOR_CLASS,
        estimatorClass,
        TaskRuntimeEstimator.class);
    defaultConf.setInt(TezConfiguration.TEZ_AM_MINIMUM_ALLOWED_SPECULATIVE_TASKS, 20);
    defaultConf.setDouble(TezConfiguration.TEZ_AM_PROPORTION_TOTAL_TASKS_SPECULATABLE, 0.2);
    defaultConf.setDouble(TezConfiguration.TEZ_AM_PROPORTION_RUNNING_TASKS_SPECULATABLE, 0.25);
    defaultConf.setLong(TezConfiguration.TEZ_AM_SOONEST_RETRY_AFTER_NO_SPECULATE, 25);
    defaultConf.setLong(TezConfiguration.TEZ_AM_SOONEST_RETRY_AFTER_SPECULATE, 50);
    defaultConf.setInt(TezConfiguration.TEZ_AM_ESTIMATOR_EXPONENTIAL_SKIP_INITIALS, 2);
  } catch (IOException e) {
    throw new RuntimeException("init failure", e);
  }
}
 
Example 14
Source File: AbstractHdfsConnector.java    From pulsar with Apache License 2.0
protected HdfsResources resetHDFSResources(HdfsSinkConfig hdfsSinkConfig) throws IOException {
    Configuration config = new ExtendedConfiguration();
    config.setClassLoader(Thread.currentThread().getContextClassLoader());

    getConfig(config, connectorConfig.getHdfsConfigResources());

    // first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
    checkHdfsUriForTimeout(config);

    /* Disable caching of Configuration and FileSystem objects, else we cannot reconfigure
     * the processor without a complete restart
     */
    String disableCacheName = String.format("fs.%s.impl.disable.cache",
            FileSystem.getDefaultUri(config).getScheme());
    config.set(disableCacheName, "true");

    // If kerberos is enabled, create the file system as the kerberos principal
    // -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
    FileSystem fs;
    UserGroupInformation ugi;
    synchronized (RESOURCES_LOCK) {
        if (SecurityUtil.isSecurityEnabled(config)) {
            ugi = SecurityUtil.loginKerberos(config,
                    connectorConfig.getKerberosUserPrincipal(), connectorConfig.getKeytab());
            fs = getFileSystemAsUser(config, ugi);
        } else {
            config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
            config.set("hadoop.security.authentication", "simple");
            ugi = SecurityUtil.loginSimple(config);
            fs = getFileSystemAsUser(config, ugi);
        }
    }
    return new HdfsResources(config, fs, ugi);
}
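
The fs.<scheme>.impl.disable.cache key built above addresses FileSystem caching: FileSystem.get() normally returns a cached instance per scheme, authority, and user, and setting the per-scheme disable flag forces a fresh instance on each call. A minimal sketch of the same pattern, assuming a configured default filesystem:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

static void disableFsCache(Configuration conf) {
  // Derive the scheme of the configured default filesystem (e.g. "hdfs").
  URI defaultUri = FileSystem.getDefaultUri(conf);
  // Per-scheme cache-disable key, as built in the connector above.
  conf.set(String.format("fs.%s.impl.disable.cache", defaultUri.getScheme()), "true");
}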
 
Example 15
Source File: ConfigHelper.java    From stratio-cassandra with Apache License 2.0
public static void setInputPartitioner(Configuration conf, String classname)
{
    conf.set(INPUT_PARTITIONER_CONFIG, classname);
}
 
Example 16
Source File: GeneralFileActionDemo.java    From JavaBase with MIT License
private static Configuration getConfig(String url) {
  Configuration config = new Configuration();
  config.set("fs.defaultFS", getHost(url));

  return config;
}
 
Example 17
Source File: DataProviderFactoryTest.java    From mrgeo with Apache License 2.0
private void setupPreferred(Configuration conf, String confVal,
    String mrgeoVal, String defMrgeoVal)
{
  MrGeoProperties.resetProperties();
  if (conf != null)
  {
    if (confVal == null)
    {
      conf.unset(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME);
      conf.unset(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME);
      conf.unset(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME);
    }
    else
    {
      conf.set(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME, confVal);
      conf.set(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME, confVal);
      conf.set(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME, confVal);
    }
  }

  Properties mp = MrGeoProperties.getInstance();

  if (mrgeoVal == null)
  {
    mp.remove(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME);
    mp.remove(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME);
    mp.remove(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME);
  }
  else
  {
    mp.setProperty(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME, mrgeoVal);
    mp.setProperty(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME, mrgeoVal);
    mp.setProperty(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME, mrgeoVal);
  }

  if (defMrgeoVal == null)
  {
    mp.remove(DataProviderFactory.PREFERRED_PROPERTYNAME);
  }
  else
  {
    mp.setProperty(DataProviderFactory.PREFERRED_PROPERTYNAME, defMrgeoVal);
  }

}
 
Example 18
Source File: TestOnFileUnorderedKVOutput.java    From incubator-tez with Apache License 2.0
@Test
public void testGeneratedDataMovementEvent() throws Exception {

  OnFileUnorderedKVOutput kvOutput = new OnFileUnorderedKVOutputForTest();

  Configuration conf = new Configuration();
  conf.set(TezJobConfig.TEZ_RUNTIME_KEY_CLASS, Text.class.getName());
  conf.set(TezJobConfig.TEZ_RUNTIME_VALUE_CLASS, IntWritable.class.getName());

  int appAttemptNumber = 1;
  TezUmbilical tezUmbilical = null;
  String dagName = "currentDAG";
  String taskVertexName = "currentVertex";
  String destinationVertexName = "destinationVertex";
  TezDAGID dagID = TezDAGID.getInstance("2000", 1, 1);
  TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
  TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);
  TezTaskAttemptID taskAttemptID = TezTaskAttemptID.getInstance(taskID, 1);
  TezCounters counters = new TezCounters();
  byte[] userPayload = TezUtils.createUserPayloadFromConf(conf);
  RuntimeTask runtimeTask = mock(RuntimeTask.class);
  
  int shufflePort = 2112;
  Map<String, String> auxEnv = new HashMap<String, String>();
  ByteBuffer bb = ByteBuffer.allocate(4);
  bb.putInt(shufflePort);
  bb.position(0);
  AuxiliaryServiceHelper.setServiceDataIntoEnv(ShuffleUtils.SHUFFLE_HANDLER_SERVICE_ID, bb, auxEnv);


  OutputDescriptor outputDescriptor = mock(OutputDescriptor.class);
  when(outputDescriptor.getClassName()).thenReturn("OutputDescriptor");

  TezOutputContext outputContext = new TezOutputContextImpl(conf, new String[] {workDir.toString()},
      appAttemptNumber, tezUmbilical, dagName, taskVertexName, destinationVertexName,
      taskAttemptID, counters, 0, userPayload, runtimeTask,
      null, auxEnv, new MemoryDistributor(1, 1, conf) , outputDescriptor);

  List<Event> events = null;

  events = kvOutput.initialize(outputContext);
  assertTrue(events != null && events.size() == 0);

  KeyValueWriter kvWriter = kvOutput.getWriter();
  List<KVPair> data = KVDataGen.generateTestData(true);
  for (KVPair kvp : data) {
    kvWriter.write(kvp.getKey(), kvp.getvalue());
  }

  events = kvOutput.close();
  assertTrue(events != null && events.size() == 1);
  DataMovementEvent dmEvent = (DataMovementEvent)events.get(0);

  assertEquals("Invalid source index", 0, dmEvent.getSourceIndex());

  DataMovementEventPayloadProto shufflePayload = DataMovementEventPayloadProto
      .parseFrom(dmEvent.getUserPayload());

  assertFalse(shufflePayload.hasEmptyPartitions());
  assertEquals(outputContext.getUniqueIdentifier(), shufflePayload.getPathComponent());
  assertEquals(shufflePort, shufflePayload.getPort());
  assertEquals("host", shufflePayload.getHost());
}
 
Example 19
Source File: EventInputFormatTest.java    From gemfirexd-oss with Apache License 2.0
public void testNoSecureHdfsCheck() throws Exception {
  getConnection();
  Connection conn = startNetserverAndGetLocalNetConnection();
  
  
  Statement st = conn.createStatement();
  st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "'  batchtimeinterval 5000 milliseconds");
  st.execute("create table app.mytab1 (col1 int primary key, col2 varchar(100)) persistent hdfsstore (myhdfs) BUCKETS 1");
  
  PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)");
  int NUM_ENTRIES = 20;
  for(int i = 0; i < NUM_ENTRIES; i++) {
    ps.setInt(1, i);
    ps.setString(2, "Value-" + System.nanoTime());
    ps.execute();
  }
  //Wait for data to get to HDFS...
  String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1");
  st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)");
  
  stopNetServer();
  FabricServiceManager.currentFabricServiceInstance().stop(new Properties());
  
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/"));
  assertEquals(1, list.length);
  
  conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1");
  conf.set(RowInputFormat.HOME_DIR, HDFS_DIR);
  conf.set("hadoop.security.authentication", "kerberos");
  
  JobConf job = new JobConf(conf);
  job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false);
  RowInputFormat ipformat = new RowInputFormat();
  InputSplit[] splits = ipformat.getSplits(job, 2);
  assertEquals(1, splits.length);
  CombineFileSplit split = (CombineFileSplit) splits[0];
  assertEquals(1, split.getPaths().length);
  assertEquals(list[0].getPath().toString(), split.getPath(0).toString());
  assertEquals(0, split.getOffset(0));
  assertEquals(list[0].getLen(), split.getLength(0));
  
  RecordReader<Key, Row> rr = ipformat.getRecordReader(split, job, null);
  Key key = rr.createKey();
  Row value = rr.createValue();
  
  int count = 0;
  while (rr.next(key, value)) {
    assertEquals(count++, value.getRowAsResultSet().getInt("col1"));
  }
  
  assertEquals(20, count);
  
  TestUtil.shutDown();
}
 
Example 20
Source File: HalvadeConf.java    From halvade with GNU General Public License v3.0
public static void setScratchTempDir(Configuration conf, String val) {
    if(!val.endsWith("/"))
        conf.set(scratchTempDirName, val + "/");
    else
        conf.set(scratchTempDirName, val);
}