Java Code Examples for org.apache.hadoop.conf.Configuration.setStrings()

The following Java code examples show how to use the setStrings() method of the org.apache.hadoop.conf.Configuration class. The examples are taken from several open-source projects.
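
Before the project examples, here is a minimal standalone sketch of the method itself: setStrings(name, values...) stores a variable-length list of values under one key as a single comma-separated property, and getStrings(name) splits that property back into a String array. The property key my.demo.plugins and its values below are placeholders, not taken from any of the projects listed here.

import org.apache.hadoop.conf.Configuration;

public class SetStringsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Store several values under one key; they are joined with commas internally.
    conf.setStrings("my.demo.plugins", "PluginA", "PluginB", "PluginC");

    // The raw property is a single comma-separated string ...
    System.out.println(conf.get("my.demo.plugins"));   // PluginA,PluginB,PluginC

    // ... and getStrings() splits it back into a String[].
    for (String plugin : conf.getStrings("my.demo.plugins")) {
      System.out.println(plugin);
    }
  }
}

Note that calling setStrings() again for the same key replaces the stored list rather than appending to it, which is why Examples 13 and 16 below pass the current value of conf.get("io.serializations") back in as the first element when adding the HBase serialization classes.
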
Example 1
Project: ditb   File: TestWALObserver.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName());
  conf.setBoolean("dfs.support.append", true);
  conf.setInt("dfs.client.block.recovery.retries", 2);

  TEST_UTIL.startMiniCluster(1);
  Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem()
      .makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Example 2
Project: hadoop   File: TestDelegationTokenForProxyUser.java
private static void configureSuperUserIPAddresses(Configuration conf,
    String superUserShortName) throws IOException {
  ArrayList<String> ipList = new ArrayList<String>();
  Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
      .getNetworkInterfaces();
  while (netInterfaceList.hasMoreElements()) {
    NetworkInterface inf = netInterfaceList.nextElement();
    Enumeration<InetAddress> addrList = inf.getInetAddresses();
    while (addrList.hasMoreElements()) {
      InetAddress addr = addrList.nextElement();
      ipList.add(addr.getHostAddress());
    }
  }
  StringBuilder builder = new StringBuilder();
  for (String ip : ipList) {
    builder.append(ip);
    builder.append(',');
  }
  builder.append("127.0.1.1,");
  builder.append(InetAddress.getLocalHost().getCanonicalHostName());
  LOG.info("Local Ip addresses: " + builder.toString());
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(superUserShortName),
      builder.toString());
}
 
Example 3
Project: hadoop   File: TestContainerLaunch.java
@Test
public void testErrorLogOnContainerExitWithCustomPattern() throws Exception {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_CONTAINER_STDERR_PATTERN,
      "{*stderr*,*log*}");
  verifyTailErrorLogOnContainerExit(conf, "/error.log", false);
}
 
Example 4
Project: hadoop   File: TestWeightedRoundRobinMultiplexer.java
@Test
public void testCustomPattern() {
  // 1x0 1x1
  Configuration conf = new Configuration();
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "1");

  mux = new WeightedRoundRobinMultiplexer(2, "test.custom", conf);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 1);

  // 1x0 3x1 2x2
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "3", "2");

  mux = new WeightedRoundRobinMultiplexer(3, "test.custom", conf);

  for(int i = 0; i < 5; i++) {
    assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
  } // Ensure pattern repeats

}
 
Example 5
Project: hadoop-oss   File: TestDoAsEffectiveUser.java
@Test(timeout=4000)
public void testRealUserAuthorizationSuccess() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);
  try {
    server.start();

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    checkRemoteUgi(server, realUserUgi, conf);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    checkRemoteUgi(server, proxyUserUgi, conf);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 6
Project: ditb   File: TestRegionObserverBypass.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Example 7
Project: hadoop-oss   File: TestWeightedRoundRobinMultiplexer.java
@Test(expected=IllegalArgumentException.class)
public void testInstantiateIllegalMux() {
  Configuration conf = new Configuration();
  conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "2", "3");

  // ask for 3 weights with 2 queues
  mux = new WeightedRoundRobinMultiplexer(2, "namespace", conf);
}
 
Example 8
Project: hadoop-oss   File: TestWeightedRoundRobinMultiplexer.java
@Test
public void testLegalInstantiation() {
  Configuration conf = new Configuration();
  conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "2", "3");

  // ask for 3 weights with 3 queues
  mux = new WeightedRoundRobinMultiplexer(3, "namespace.", conf);
}
 
Example 9
Project: hadoop-oss   File: MiniRPCBenchmark.java
/**
 * Run MiniBenchmark using delegation token authentication.
 * 
 * @param conf - configuration
 * @param count - connect this many times
 * @param keytabKey - key for keytab file in the configuration
 * @param userNameKey - key for user name in the configuration
 * @return average time to connect
 * @throws IOException
 */
long runMiniBenchmarkWithDelegationToken(Configuration conf,
                                         int count,
                                         String keytabKey,
                                         String userNameKey)
throws IOException {
  // get login information
  String user = System.getProperty("user.name");
  if(userNameKey != null)
    user = conf.get(userNameKey, user);
  String keytabFile = null;
  if(keytabKey != null)
    keytabFile = conf.get(keytabKey, keytabFile);
  MiniServer miniServer = null;
  UserGroupInformation.setConfiguration(conf);
  String shortUserName =
    UserGroupInformation.createRemoteUser(user).getShortUserName();
  try {
    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
            getProxySuperuserGroupConfKey(shortUserName), GROUP_NAME_1);
    configureSuperUserIPAddresses(conf, shortUserName);
    // start the server
    miniServer = new MiniServer(conf, user, keytabFile);
    InetSocketAddress addr = miniServer.getAddress();

    connectToServerAndGetDelegationToken(conf, addr);
    // connect to the server count times
    setLoggingLevel(logLevel);
    long elapsed = 0L;
    for(int idx = 0; idx < count; idx ++) {
      elapsed += connectToServerUsingDelegationToken(conf, addr);
    }
    return elapsed;
  } finally {
    if(miniServer != null) miniServer.stop();
  }
}
 
Example 10
Project: hadoop   File: NameNode.java
/**
 * Clone the supplied configuration but remove the shared edits dirs.
 *
 * @param conf Supplies the original configuration.
 * @return Cloned configuration without the shared edit dirs.
 * @throws IOException on failure to generate the configuration.
 */
private static Configuration getConfigurationWithoutSharedEdits(
    Configuration conf)
    throws IOException {
  List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
  String editsDirsString = Joiner.on(",").join(editsDirs);

  Configuration confWithoutShared = new Configuration(conf);
  confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDirsString);
  return confWithoutShared;
}
 
Example 11
Project: hadoop   File: TestDoAsEffectiveUser.java
@Test(timeout=4000)
public void testRealUserAuthorizationSuccess() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);
  try {
    server.start();

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    checkRemoteUgi(server, realUserUgi, conf);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    checkRemoteUgi(server, proxyUserUgi, conf);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 12
Project: ditb   File: TestFSUtils.java
@Test
public void testPermMask() throws Exception {

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  // default fs permission
  FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // 'hbase.data.umask.enable' is false. We will get default fs permission.
  assertEquals(FsPermission.getFileDefault(), defaultFsPerm);

  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  // first check that we don't crash if we don't have perms set
  FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // default 'hbase.data.umask' is 000, and this umask will be used when
  // 'hbase.data.umask.enable' is true.
  // Therefore we will not get the real fs default in this case.
  // Instead we will get the starting point FULL_RWX_PERMISSIONS
  assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);

  conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
  // now check that we get the right perms
  FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(new FsPermission("700"), filePerm);

  // then that the correct file is created
  Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
    out.close();
    FileStatus stat = fs.getFileStatus(p);
    assertEquals(new FsPermission("700"), stat.getPermission());
    // and then cleanup
  } finally {
    fs.delete(p, true);
  }
}
 
Example 13
Project: ditb   File: TableMapReduceUtil.java
/**
 * Use this before submitting a TableMap job. It will appropriately set up
 * the job.
 *
 * @param table  The table name to read from.
 * @param scan  The scan instance with the columns, time range etc.
 * @param mapper  The mapper class to use.
 * @param outputKeyClass  The class of the output key.
 * @param outputValueClass  The class of the output value.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @param initCredentials whether to initialize hbase auth credentials for the job
 * @param inputFormatClass the input format
 * @throws IOException When setting up the details fails.
 */
public static void initTableMapperJob(String table, Scan scan,
    Class<? extends TableMapper> mapper,
    Class<?> outputKeyClass,
    Class<?> outputValueClass, Job job,
    boolean addDependencyJars, boolean initCredentials,
    Class<? extends InputFormat> inputFormatClass)
throws IOException {
  job.setInputFormatClass(inputFormatClass);
  if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
  if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
  job.setMapperClass(mapper);
  if (Put.class.equals(outputValueClass)) {
    job.setCombinerClass(PutCombiner.class);
  }
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  conf.set(TableInputFormat.INPUT_TABLE, table);
  conf.set(TableInputFormat.SCAN, convertScanToString(scan));
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  if (initCredentials) {
    initCredentials(job);
  }
}
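
As a usage illustration (not part of the original example), here is a hedged sketch of a driver that calls the nine-argument overload above. The class names TableMapperDriver and MyTableMapper, the table name my_table, the job name, and the output-format choices are placeholder assumptions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class TableMapperDriver {

  // Hypothetical identity mapper: forwards each row key and Result unchanged.
  public static class MyTableMapper
      extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-my-table");
    job.setJarByClass(TableMapperDriver.class);

    Scan scan = new Scan();
    scan.setCaching(500);        // larger scanner caching is typical for MapReduce scans
    scan.setCacheBlocks(false);  // avoid polluting the region server block cache

    TableMapReduceUtil.initTableMapperJob(
        "my_table",                      // placeholder table name
        scan,
        MyTableMapper.class,
        ImmutableBytesWritable.class,    // map output key class
        Result.class,                    // map output value class
        job,
        true,                            // addDependencyJars
        true,                            // initCredentials
        TableInputFormat.class);

    // Map-only job that discards its output; replace with a real output format as needed.
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

The helper configures the input format, the mapper, the map output classes, and the extra HBase serializations on io.serializations, so the driver only needs to supply the Scan and decide what to do with the map output.
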
 
Example 14
Project: ditb   File: SnapshotManager.java
/**
 * Called at startup, to verify if snapshot operation is supported, and to avoid
 * starting the master if there're snapshots present but the cleaners needed are missing.
 * Otherwise we can end up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 *         there're snapshot in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
  // Verify if snapshot is disabled by the user
  String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
  boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
  boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);

  // Extract cleaners from conf
  Set<String> hfileCleaners = new HashSet<String>();
  String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);

  Set<String> logCleaners = new HashSet<String>();
  cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(logCleaners, cleaners);

  // check if an older version of snapshot directory was present
  Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
  FileSystem fs = mfs.getFileSystem();
  List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
  if (ss != null && !ss.isEmpty()) {
    LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
    LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
  }

  // If the user has enabled the snapshot, we force the cleaners to be present
  // otherwise we still need to check if cleaners are enabled or not and verify
  // that there're no snapshot in the .snapshot folder.
  if (snapshotEnabled) {
    // Inject snapshot cleaners, if snapshot.enable is true
    hfileCleaners.add(SnapshotHFileCleaner.class.getName());
    hfileCleaners.add(HFileLinkCleaner.class.getName());
    logCleaners.add(SnapshotLogCleaner.class.getName());

    // Set cleaners conf
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      hfileCleaners.toArray(new String[hfileCleaners.size()]));
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
      logCleaners.toArray(new String[logCleaners.size()]));
  } else {
    // Verify if cleaners are present
    snapshotEnabled = logCleaners.contains(SnapshotLogCleaner.class.getName()) &&
      hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
      hfileCleaners.contains(HFileLinkCleaner.class.getName());

    // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
    if (snapshotEnabled) {
      LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
        "but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
        (userDisabled ? "is set to 'false'." : "is not set."));
    }
  }

  // Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
  this.isSnapshotSupported = snapshotEnabled && !userDisabled;

  // If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
  // otherwise we end up with snapshot data loss.
  if (!snapshotEnabled) {
    LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
    if (fs.exists(snapshotDir)) {
      FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      if (snapshots != null) {
        LOG.error("Snapshots are present, but cleaners are not enabled.");
        checkSnapshotSupport();
      }
    }
  }
}
 
Example 15
Project: hadoop-oss   File: TestDoAsEffectiveUser.java
@Test
public void testRealUserGroupAuthorizationFailure() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group3");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  
  refreshConf(conf);

  try {
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    String retVal = proxyUserUgi
        .doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            proxy = RPC.getProxy(TestProtocol.class,
                TestProtocol.versionID, addr, conf);
            String ret = proxy.aMethod();
            return ret;
          }
        });

    Assert.fail("The RPC must have failed " + retVal);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 16
Project: ditb   File: TableMapReduceUtil.java
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary HBase configuration.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param quorumAddress Distant cluster to write to; default is null for
 * output to the cluster that is designated in <code>hbase-site.xml</code>.
 * Set this String to the zookeeper ensemble of an alternate remote cluster
 * when you would have the reduce write a cluster that is other than the
 * default; e.g. copying tables between clusters, the source would be
 * designated by <code>hbase-site.xml</code> and this param would have the
 * ensemble address of the remote cluster.  The format to pass is particular.
 * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
 *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
 * </code> such as <code>server,server2,server3:2181:/hbase</code>.
 * @param serverClass redefined hbase.regionserver.class
 * @param serverImpl redefined hbase.regionserver.impl
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReducerJob(String table,
  Class<? extends TableReducer> reducer, Job job,
  Class partitioner, String quorumAddress, String serverClass,
  String serverImpl, boolean addDependencyJars) throws IOException {

  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  job.setOutputFormatClass(TableOutputFormat.class);
  if (reducer != null) job.setReducerClass(reducer);
  conf.set(TableOutputFormat.OUTPUT_TABLE, table);
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  // If passed a quorum/ensemble address, pass it on to TableOutputFormat.
  if (quorumAddress != null) {
    // Calling this will validate the format
    ZKConfig.validateClusterKey(quorumAddress);
    conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress);
  }
  if (serverClass != null && serverImpl != null) {
    conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
    conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
  }
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Writable.class);
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }

  if (addDependencyJars) {
    addDependencyJars(job);
  }

  initCredentials(job);
}
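
And a corresponding hedged sketch for the reducer side (again, not from the original page). TableReducerDriver, MyTableReducer, the table output_table, the column family/qualifier cf:q, and the ensemble string zk1,zk2,zk3:2181:/hbase are all placeholders; the ensemble string follows the <hbase.zookeeper.quorum>:<hbase.zookeeper.client.port>:<zookeeper.znode.parent> format described in the javadoc above.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class TableReducerDriver {

  // Hypothetical reducer: emits one Put per incoming key.
  public static class MyTableReducer
      extends TableReducer<ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable> {
    @Override
    protected void reduce(ImmutableBytesWritable key,
        Iterable<ImmutableBytesWritable> values, Context context)
        throws IOException, InterruptedException {
      Put put = new Put(key.get());
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(1L));
      context.write(key, put);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(), "write-to-remote-cluster");
    job.setJarByClass(TableReducerDriver.class);
    // ... mapper and input setup elided; see the initTableMapperJob sketch above ...

    TableMapReduceUtil.initTableReducerJob(
        "output_table",              // placeholder output table
        MyTableReducer.class,
        job,
        null,                        // null => use the default partitioner
        "zk1,zk2,zk3:2181:/hbase",   // placeholder remote ensemble in the documented format
        null, null,                  // keep default hbase.regionserver.class / .impl
        true);                       // addDependencyJars

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
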
 
Example 17
Project: aliyun-maxcompute-data-collectors   File: HdfsOdpsImportJob.java
@Override
protected void configureOutputFormat(Job job, String tableName,
                                     String tableClassName)
    throws ClassNotFoundException {
  job.setOutputFormatClass(getOutputFormatClass());
  Configuration conf = job.getConfiguration();
  conf.setClass("sqoop.output.delegate.field.map.processor.class",
      OdpsUploadProcessor.class,
      FieldMapProcessor.class);

  conf.setStrings(OdpsConstants.INPUT_COL_NAMES, options.getColumns());

  String odpsTableName = options.getOdpsTable();
  if (odpsTableName == null) {
    odpsTableName = tableName;
  }
  conf.set(OdpsConstants.TABLE_NAME, odpsTableName);
  conf.set(OdpsConstants.ACCESS_ID, options.getOdpsAccessID());
  conf.set(OdpsConstants.ACCESS_KEY, options.getOdpsAccessKey());
  conf.set(OdpsConstants.ENDPOINT, options.getOdpsEndPoint());

  String tunnelEndPoint = options.getOdpsTunnelEndPoint();
  if (tunnelEndPoint != null) {
    conf.set(OdpsConstants.TUNNEL_ENDPOINT,
        options.getOdpsTunnelEndPoint());
  }

  conf.set(OdpsConstants.PROJECT, options.getOdpsProject());

  String partitionKey = options.getOdpsPartitionKey();
  String partitionValue = options.getOdpsPartitionValue();
  if (partitionKey != null && partitionValue != null) {
    conf.set(OdpsConstants.PARTITION_KEY, partitionKey);
    conf.set(OdpsConstants.PARTITION_VALUE, partitionValue);
  }
  conf.setBoolean(OdpsConstants.CREATE_TABLE,
      options.isOdpsCreateTable());
  String dateFormat = options.getOdpsInputDateFormat();
  if (dateFormat != null) {
    conf.set(OdpsConstants.DATE_FORMAT, dateFormat);
  }
  conf.setInt(OdpsConstants.RETRY_COUNT, options.getOdpsRetryCount());
  conf.setInt(OdpsConstants.BATCH_SIZE, options.getOdpsBatchSize());
  conf.setBoolean(OdpsConstants.USE_COMPRESS_IN_UPLOAD, options.isOdpsUseCompressInUpload());

  job.getConfiguration().set(ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY, tableClassName);
}
 
Example 18
Project: aliyun-maxcompute-data-collectors   File: MySQLDumpImportJob.java
/**
 * Configure the inputformat to use for the job.
 */
protected void configureInputFormat(Job job, String tableName,
    String tableClassName, String splitByCol)
    throws ClassNotFoundException, IOException {

  if (null == tableName) {
      LOG.error(
          "mysqldump-based import cannot support free-form query imports.");
      LOG.error("Do not use --direct and --query together for MySQL.");
      throw new IOException("null tableName for MySQLDumpImportJob.");
  }

  ConnManager mgr = getContext().getConnManager();
  String username = options.getUsername();
  if (null == username || username.length() == 0) {
    DBConfiguration.configureDB(job.getConfiguration(),
        mgr.getDriverClass(), options.getConnectString(),
        options.getConnectionParams());
  } else {
    DBConfiguration.configureDB(job.getConfiguration(),
        mgr.getDriverClass(), options.getConnectString(), username,
        options.getPassword(), options.getConnectionParams());
  }

  String [] colNames = options.getColumns();
  if (null == colNames) {
    colNames = mgr.getColumnNames(tableName);
  }

  String [] sqlColNames = null;
  if (null != colNames) {
    sqlColNames = new String[colNames.length];
    for (int i = 0; i < colNames.length; i++) {
      sqlColNames[i] = mgr.escapeColName(colNames[i]);
    }
  }

  // It's ok if the where clause is null in DBInputFormat.setInput.
  String whereClause = options.getWhereClause();

  // We can't set the class properly in here, because we may not have the
  // jar loaded in this JVM. So we start by calling setInput() with
  // DBWritable and then overriding the string manually.

  // Note that mysqldump also does *not* want a quoted table name.
  DataDrivenDBInputFormat.setInput(job, DBWritable.class,
      tableName, whereClause,
      mgr.escapeColName(splitByCol), sqlColNames);

  Configuration conf = job.getConfiguration();
  conf.setInt(MySQLUtils.OUTPUT_FIELD_DELIM_KEY,
      options.getOutputFieldDelim());
  conf.setInt(MySQLUtils.OUTPUT_RECORD_DELIM_KEY,
      options.getOutputRecordDelim());
  conf.setInt(MySQLUtils.OUTPUT_ENCLOSED_BY_KEY,
      options.getOutputEnclosedBy());
  conf.setInt(MySQLUtils.OUTPUT_ESCAPED_BY_KEY,
      options.getOutputEscapedBy());
  conf.setBoolean(MySQLUtils.OUTPUT_ENCLOSE_REQUIRED_KEY,
      options.isOutputEncloseRequired());
  String [] extraArgs = options.getExtraArgs();
  if (null != extraArgs) {
    conf.setStrings(MySQLUtils.EXTRA_ARGS_KEY, extraArgs);
  }

  LOG.debug("Using InputFormat: " + inputFormatClass);
  job.setInputFormatClass(getInputFormatClass());
}
 
Example 19
Project: ditb   File: TestZooKeeperTableArchiveClient.java
private HFileCleaner setupAndCreateCleaner(Configuration conf, FileSystem fs, Path archiveDir,
    Stoppable stop) {
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    LongTermArchivingHFileCleaner.class.getCanonicalName());
  return new HFileCleaner(1000, stop, conf, fs, archiveDir);
}
 
Example 20
Project: hadoop   File: TestDockerContainerRuntime.java
@Before
public void setup() {
  String tmpPath = new StringBuffer(System.getProperty("test.build.data"))
      .append('/').append("hadoop.tmp.dir").toString();

  conf = new Configuration();
  conf.set("hadoop.tmp.dir", tmpPath);

  mockExecutor = Mockito
      .mock(PrivilegedOperationExecutor.class);
  mockCGroupsHandler = Mockito.mock(CGroupsHandler.class);
  containerId = "container_id";
  container = mock(Container.class);
  cId = mock(ContainerId.class);
  context = mock(ContainerLaunchContext.class);
  env = new HashMap<String, String>();
  image = "busybox:latest";

  env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image);
  when(container.getContainerId()).thenReturn(cId);
  when(cId.toString()).thenReturn(containerId);
  when(container.getLaunchContext()).thenReturn(context);
  when(context.getEnvironment()).thenReturn(env);
  when(container.getUser()).thenReturn(submittingUser);

  runAsUser = "run_as_user";
  user = "user";
  appId = "app_id";
  containerIdStr = containerId;
  containerWorkDir = new Path("/test_container_work_dir");
  nmPrivateContainerScriptPath = new Path("/test_script_path");
  nmPrivateTokensPath = new Path("/test_private_tokens_path");
  pidFilePath = new Path("/test_pid_file_path");
  localDirs = new ArrayList<>();
  logDirs = new ArrayList<>();
  filecacheDirs = new ArrayList<>();
  resourcesOptions = "cgroups=none";
  userLocalDirs = new ArrayList<>();
  containerLocalDirs = new ArrayList<>();
  containerLogDirs = new ArrayList<>();
  localizedResources = new HashMap<>();

  localDirs.add("/test_local_dir");
  logDirs.add("/test_log_dir");
  filecacheDirs.add("/test_filecache_dir");
  userLocalDirs.add("/test_user_local_dir");
  containerLocalDirs.add("/test_container_local_dir");
  containerLogDirs.add("/test_container_log_dir");
  localizedResources.put(new Path("/test_local_dir/test_resource_file"),
      Collections.singletonList("test_dir/test_resource_file"));

  testCapabilities = new String[] {"NET_BIND_SERVICE", "SYS_CHROOT"};
  conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
      testCapabilities);

  builder = new ContainerRuntimeContext
      .Builder(container);

  builder.setExecutionAttribute(RUN_AS_USER, runAsUser)
      .setExecutionAttribute(USER, user)
      .setExecutionAttribute(APPID, appId)
      .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr)
      .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir)
      .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH,
          nmPrivateContainerScriptPath)
      .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath)
      .setExecutionAttribute(PID_FILE_PATH, pidFilePath)
      .setExecutionAttribute(LOCAL_DIRS, localDirs)
      .setExecutionAttribute(LOG_DIRS, logDirs)
      .setExecutionAttribute(FILECACHE_DIRS, filecacheDirs)
      .setExecutionAttribute(USER_LOCAL_DIRS, userLocalDirs)
      .setExecutionAttribute(CONTAINER_LOCAL_DIRS, containerLocalDirs)
      .setExecutionAttribute(CONTAINER_LOG_DIRS, containerLogDirs)
      .setExecutionAttribute(LOCALIZED_RESOURCES, localizedResources)
      .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions);
}