org.apache.hadoop.conf.Configuration Java Examples

The following examples show how to use org.apache.hadoop.conf.Configuration. Each example is taken from an open-source project; the source file, project, and license are noted above it.
Example #1
Source File: DistributedCacheUtilImpl.java    From pentaho-hadoop-shims with Apache License 2.0
/**
 * Add a file path to the current set of classpath entries. It adds the file to the cache as well.
 * <p/>
 * This is copied from Hadoop 0.20.2 o.a.h.filecache.DistributedCache so we can inject the correct path separator for
 * the environment the cluster is executing in. See {@link #getClusterPathSeparator()}.
 *
 * @param file Path of the file to be added
 * @param conf Configuration that contains the classpath setting
 */
public void addFileToClassPath( Path file, Configuration conf )
  throws IOException {

  // Save off the classloader, to make sure the version info can be loaded successfully from the hadoop-common JAR
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  Thread.currentThread().setContextClassLoader( VersionInfo.class.getClassLoader() );

  try {
    String classpath = conf.get( "mapred.job.classpath.files" );
    conf.set( "mapred.job.classpath.files", classpath == null ? file.toString()
      : classpath + getClusterPathSeparator() + file.toString() );
    FileSystem fs = FileSystem.get( conf );
    URI uri = fs.makeQualified( file ).toUri();

    org.apache.hadoop.mapreduce.filecache.DistributedCache.addCacheFile( uri, conf );
  } finally {
    // Restore the original classloader once the Hadoop calls have completed
    Thread.currentThread().setContextClassLoader( cl );
  }
}
 
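The heart of this example is a read-modify-write on a delimited property. A minimal standalone sketch of the same pattern (the property name comes from the example; the jar paths and the comma separator are hypothetical stand-ins for getClusterPathSeparator()):

import org.apache.hadoop.conf.Configuration;

public class ClasspathAppendSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // First file: the property is unset, so the bare path is stored.
    conf.set("mapred.job.classpath.files", "/libs/a.jar");
    // Later files: read the current value and append with the separator.
    String classpath = conf.get("mapred.job.classpath.files");
    conf.set("mapred.job.classpath.files",
        classpath == null ? "/libs/b.jar" : classpath + "," + "/libs/b.jar");
    System.out.println(conf.get("mapred.job.classpath.files"));
    // -> /libs/a.jar,/libs/b.jar
  }
}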
Example #2
Source File: StramAppLauncher.java    From Bats with Apache License 2.0
private void setTokenRefreshCredentials(LogicalPlan dag, Configuration conf) throws IOException
{
  String principal = conf.get(StramClientUtils.TOKEN_REFRESH_PRINCIPAL, StramUserLogin.getPrincipal());
  String keytabPath = conf.get(StramClientUtils.TOKEN_REFRESH_KEYTAB, conf.get(StramClientUtils.KEY_TAB_FILE));
  if (keytabPath == null) {
    String keytab = StramUserLogin.getKeytab();
    if (keytab != null) {
      Path localKeyTabPath = new Path(keytab);
      try (FileSystem fs = StramClientUtils.newFileSystemInstance(conf)) {
        Path destPath = new Path(StramClientUtils.getApexDFSRootDir(fs, conf), localKeyTabPath.getName());
        if (!fs.exists(destPath)) {
          fs.copyFromLocalFile(false, false, localKeyTabPath, destPath);
        }
        keytabPath = destPath.toString();
      }
    }
  }
  LOG.debug("User principal is {}, keytab is {}", principal, keytabPath);
  if ((principal != null) && (keytabPath != null)) {
    dag.setAttribute(LogicalPlan.PRINCIPAL, principal);
    dag.setAttribute(LogicalPlan.KEY_TAB_FILE, keytabPath);
  } else {
    LOG.warn("Credentials for refreshing tokens not available, application may not be able to run indefinitely");
  }
}
 
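Both lookups at the top rely on Configuration.get(key, fallback), which returns the second argument when the key is absent. A minimal sketch of that behavior (the key names are hypothetical):

import org.apache.hadoop.conf.Configuration;

public class FallbackLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // start empty, no default resources
    conf.set("refresh.keytab", "/etc/security/app.keytab");
    // Returns the configured value when the key is present...
    System.out.println(conf.get("refresh.keytab", "/tmp/fallback.keytab"));
    // ...and the fallback when the key is unset.
    System.out.println(conf.get("refresh.principal", "user@EXAMPLE.COM"));
  }
}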
Example #3
Source File: ThriftBytesWriteSupport.java    From parquet-mr with Apache License 2.0
@Override
public WriteContext init(Configuration configuration) {
  if (this.protocolFactory == null) {
    try {
      this.protocolFactory = getTProtocolFactoryClass(configuration).newInstance();
    } catch (InstantiationException | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
  if (thriftClass != null) {
    TBaseWriteSupport.setThriftClass(configuration, thriftClass);
  } else {
    thriftClass = TBaseWriteSupport.getThriftClass(configuration);
  }
  this.thriftStruct = ThriftSchemaConverter.toStructType(thriftClass);
  this.schema = ThriftSchemaConverter.convertWithoutProjection(thriftStruct);
  if (buffered) {
    readToWrite = new BufferedProtocolReadToWrite(thriftStruct, errorHandler);
  } else {
    readToWrite = new ProtocolReadToWrite();
  }
  return thriftWriteSupport.init(configuration);
}
 
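getTProtocolFactoryClass(configuration) presumably resolves a class stored in the Configuration; the generic mechanism for that is the setClass/getClass pair. A hedged sketch of that round trip (the key name and classes are illustrative only):

import org.apache.hadoop.conf.Configuration;

public class ClassRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    // Store an implementation class under a key, constrained to an interface.
    conf.setClass("example.protocol.factory", StringBuilder.class, CharSequence.class);
    // Recover and instantiate it later, e.g. inside init(Configuration).
    Class<? extends CharSequence> clazz =
        conf.getClass("example.protocol.factory", null, CharSequence.class);
    System.out.println(clazz.getDeclaredConstructor().newInstance().length());
  }
}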
Example #4
Source File: ShardedTableMapFileTest.java    From datawave with Apache License 2.0
@Test(expected = IOException.class)
public void testGetAllShardedTableMapFilesWithoutPath() throws Exception {
    Configuration conf = new Configuration();
    File tempWorkDir = Files.createTempDir();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, tempWorkDir.toURI().toString());
    FileSystem fs = FileSystem.get(tempWorkDir.toURI(), conf);
    fs.setWorkingDirectory(new Path(tempWorkDir.toString()));
    Path workDir = fs.makeQualified(new Path("work"));
    conf.set(ShardedTableMapFile.SPLIT_WORK_DIR, workDir.toString());
    
    conf.set(ShardedDataTypeHandler.SHARDED_TNAMES, "shard_ingest_unit_test_table_1,shard_ingest_unit_test_table_2,shard_ingest_unit_test_table_3");
    
    String[] tableNames = new String[] {TABLE_NAME};
    conf.set(ShardedTableMapFile.TABLE_NAMES, StringUtils.join(",", tableNames));
    ShardedTableMapFile.setupFile(conf);
    ShardedTableMapFile.getShardIdToLocations(conf, TABLE_NAME);
}
 
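The comma-joined values set above (via StringUtils.join) are the standard way to store lists in a Configuration; they come back out through the string-list accessors. A minimal sketch (the key name is hypothetical):

import org.apache.hadoop.conf.Configuration;

public class StringListSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("sharded.table.names", "table_1,table_2,table_3");
    // getTrimmedStrings splits on commas and strips surrounding whitespace.
    for (String name : conf.getTrimmedStrings("sharded.table.names")) {
      System.out.println(name);
    }
  }
}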
Example #5
Source File: Groups.java    From hadoop with Apache License 2.0
/**
 * Refresh all user-to-groups mappings.
 * @param conf configuration supplying the static user-to-groups mapping
 */
public void refresh(Configuration conf) {

  LOG.info("reload staticUserToGroupsMap");
  staticUserToGroupsMap = parseStaticMapping(conf);

  LOG.info("clearing userToGroupsMap cache");
  try {
    impl.cacheGroupsRefresh();
  } catch (IOException e) {
    LOG.warn("Error refreshing groups cache", e);
  }
  cache.invalidateAll();
  if(isNegativeCacheEnabled()) {
    negativeCache.clear();
  }
}
 
Example #6
Source File: TestBlocksRead.java    From hbase with Apache License 2.0
/**
 * Callers must afterward call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
 */
private HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
    String family, BlockCache blockCache) throws IOException {
  TableDescriptorBuilder builder =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    builder.setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family + "_" + bloomType))
            .setBlocksize(1).setBloomFilterType(bloomType).build());
  }
  RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
  Path path = new Path(DIR + callingMethod);
  if (blockCache != null) {
    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, builder.build(), blockCache);
  } else {
    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, builder.build());
  }
}
 
Example #7
Source File: TestHsWebServicesJobs.java    From big-c with Apache License 2.0
@Override
protected void configureServlets() {

  appContext = new MockHistoryContext(0, 1, 2, 1, false);
  webApp = mock(HsWebApp.class);
  when(webApp.name()).thenReturn("hsmockwebapp");

  bind(JAXBContextResolver.class);
  bind(HsWebServices.class);
  bind(GenericExceptionHandler.class);
  bind(WebApp.class).toInstance(webApp);
  bind(AppContext.class).toInstance(appContext);
  bind(HistoryContext.class).toInstance(appContext);
  bind(Configuration.class).toInstance(conf);

  serve("/*").with(GuiceContainer.class);
}
 
Example #8
Source File: JobHistorySpoutTest.java    From eagle with Apache License 2.0
private void mockHdfs() throws Exception {
    PowerMockito.mockStatic(HDFSUtil.class);
    hdfs = mock(FileSystem.class);
    when(HDFSUtil.getFileSystem(any(Configuration.class))).thenReturn(hdfs);
    FileStatus fileDirStatus = new FileStatus(100L, true, 3, 1000L, new Date().getTime(), new Path("/user/history/done/2016/12/09/000508"));
    when(hdfs.listStatus(any(Path.class))).thenReturn(new FileStatus[] {fileDirStatus});
    FileStatus filePartitionStatus = new FileStatus(100L, false, 3, 1000L, new Date().getTime(), new Path("/user/history/done/2016/12/09/000508/job_1479206441898_508949-1481299030929-testhistory.jhist"));
    when(hdfs.listStatus(any(Path.class), any(PathFilter.class))).thenReturn(new FileStatus[] {filePartitionStatus});
    Path historyFilePath = mock(Path.class);
    Path historyConfPath = mock(Path.class);
    PowerMockito.whenNew(Path.class).withArguments("/mr-history/done/2016/12/12/000508/job_1479206441898_508949-1481299030929-testhistory.jhist").thenReturn(historyFilePath);
    PowerMockito.whenNew(Path.class).withArguments("/mr-history/done/2016/12/12/000508/job_1479206441898_508949_conf.xml").thenReturn(historyConfPath);

    when((InputStream) hdfs.open(historyFilePath)).thenReturn(this.getClass().getResourceAsStream("job_1479206441898_508949-1481299030929-testhistory.jhist"));
    when((InputStream) hdfs.open(historyConfPath)).thenReturn(this.getClass().getResourceAsStream("job_1479206441898_508949_conf.xml"));
}
 
Example #9
Source File: AbstractMRNewApiSaveTest.java    From elasticsearch-hadoop with Apache License 2.0
@Test
public void testSaveWithIngest() throws Exception {
    EsAssume.versionOnOrAfter(EsMajorVersion.V_5_X, "Ingest Supported in 5.x and above only");

    Configuration conf = createConf();

    RestUtils.ExtendedRestClient client = new RestUtils.ExtendedRestClient();
    String prefix = "mrnewapi";
    String pipeline = "{\"description\":\"Test Pipeline\",\"processors\":[{\"set\":{\"field\":\"pipeTEST\",\"value\":true,\"override\":true}}]}";
    client.put("/_ingest/pipeline/" + prefix + "-pipeline", StringUtils.toUTF(pipeline));
    client.close();

    conf.set(ConfigurationOptions.ES_RESOURCE, resource("mrnewapi-ingested", "data", clusterInfo.getMajorVersion()));
    conf.set(ConfigurationOptions.ES_INGEST_PIPELINE, "mrnewapi-pipeline");
    conf.set(ConfigurationOptions.ES_NODES_INGEST_ONLY, "true");

    runJob(conf);
}
 
Example #10
Source File: ReduceTask.java    From RDFS with Apache License 2.0
public SkippingReduceValuesIterator(RawKeyValueIterator in,
    RawComparator<KEY> comparator, Class<KEY> keyClass,
    Class<VALUE> valClass, Configuration conf, TaskReporter reporter,
    TaskUmbilicalProtocol umbilical) throws IOException {
  super(in, comparator, keyClass, valClass, conf, reporter);
  this.umbilical = umbilical;
  this.skipGroupCounter =
    reporter.getCounter(Counter.REDUCE_SKIPPED_GROUPS);
  this.skipRecCounter =
    reporter.getCounter(Counter.REDUCE_SKIPPED_RECORDS);
  this.toWriteSkipRecs = toWriteSkipRecs() &&
    SkipBadRecords.getSkipOutputPath(conf)!=null;
  this.keyClass = keyClass;
  this.valClass = valClass;
  this.reporter = reporter;
  skipIt = getSkipRanges().skipRangeIterator();
  mayBeSkip();
}
 
Example #11
Source File: AbstractHoplog.java    From gemfirexd-oss with Apache License 2.0
/**
 * Creates a SequenceFile writer for a hoplog.
 *
 * @param path    file to write
 * @param conf    Hadoop configuration used to create the writer
 * @param logger  logger for progress messages
 * @param version used only for testing; pass null for all other purposes
 * @return SequenceFile.Writer
 * @throws IOException
 */
public static SequenceFile.Writer getSequenceFileWriter(Path path, 
  Configuration conf, LogWriterI18n logger, Version version) throws IOException {
  Option optPath = SequenceFile.Writer.file(path);
  Option optKey = SequenceFile.Writer.keyClass(BytesWritable.class);
  Option optVal = SequenceFile.Writer.valueClass(BytesWritable.class);
  Option optCom = withCompression(logger);
  logger.fine("Started creating hoplog " + path);
  
  if (version == null)
    version = Version.CURRENT;
  //Create a metadata option with the gemfire version, for future versioning
  //of the key and value format
  SequenceFile.Metadata metadata = new SequenceFile.Metadata();
  metadata.set(new Text(Meta.GEMFIRE_VERSION.name()), new Text(String.valueOf(version.ordinal())));
  Option optMeta = SequenceFile.Writer.metadata(metadata);
  
  SequenceFile.Writer writer = SequenceFile.createWriter(conf, optPath, optKey, optVal, optCom, optMeta);
  
  return writer;
}
 
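A possible companion sketch for reading such a file back, using the mirror-image SequenceFile.Reader option API. The path is hypothetical; the key/value classes match the writer options above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;

public class HoplogReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.hop"); // hypothetical hoplog location
    try (SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
      BytesWritable key = new BytesWritable();
      BytesWritable value = new BytesWritable();
      while (reader.next(key, value)) {
        System.out.println(key.getLength() + " -> " + value.getLength());
      }
    }
  }
}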
Example #12
Source File: HttpServer.java    From hadoop with Apache License 2.0
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
  if (conf == null) {
    return null;
  }

  Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
  if (classes == null) {
    return null;
  }

  FilterInitializer[] initializers = new FilterInitializer[classes.length];
  for(int i = 0; i < classes.length; i++) {
    initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
        classes[i], conf);
  }
  return initializers;
}
 
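Configuration.getClasses resolves a comma-separated list of class names through the Configuration's classloader. A minimal sketch of the producing side, assuming FILTER_INITIALIZER_PROPERTY names the standard hadoop.http.filter.initializers key; the listed classes are placeholders so the sketch resolves on any classpath:

import org.apache.hadoop.conf.Configuration;

public class GetClassesSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Real usage would list FilterInitializer implementations here.
    conf.set("hadoop.http.filter.initializers",
        "java.util.ArrayList,java.util.HashMap");
    for (Class<?> c : conf.getClasses("hadoop.http.filter.initializers")) {
      System.out.println(c.getName());
    }
  }
}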
Example #13
Source File: LinkCountHDFS.java    From marklogic-contentpump with Apache License 2.0
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 2) {
        System.err.println("Usage: LinkCountHDFS inputDir outputDir");
        System.exit(2);
    }
    
    Job job = Job.getInstance(conf, "link count hdfs");
    job.setJarByClass(LinkCountHDFS.class);
    job.setInputFormatClass(HDFSInputFormat.class);
    job.setMapperClass(RefMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    
    job.setReducerClass(IntSumReducer.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    HDFSInputFormat.setInputPaths(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
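GenericOptionsParser is what lets generic -D key=value arguments flow into the Configuration before the remaining arguments are inspected, which is also why the usage check above belongs after parsing. A minimal, self-contained sketch (the property name and arguments are made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class GenericOptionsSketch {
  public static void main(String[] args) throws Exception {
    // Simulates: hadoop jar app.jar -D link.count.limit=10 /in /out
    String[] argv = {"-D", "link.count.limit=10", "/in", "/out"};
    Configuration conf = new Configuration();
    String[] remaining = new GenericOptionsParser(conf, argv).getRemainingArgs();
    System.out.println(conf.get("link.count.limit")); // 10
    System.out.println(remaining.length);             // 2
  }
}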
Example #14
Source File: IFile.java    From hadoop with Apache License 2.0
/**
 * Construct an IFile Reader.
 * 
 * @param conf Configuration File 
 * @param in   The input stream
 * @param length Length of the data in the stream, including the checksum
 *               bytes.
 * @param codec compression codec, or null if the stream is not compressed
 * @param readsCounter Counter for records read from disk
 * @throws IOException
 */
public Reader(Configuration conf, FSDataInputStream in, long length, 
              CompressionCodec codec,
              Counters.Counter readsCounter) throws IOException {
  readRecordsCounter = readsCounter;
  checksumIn = new IFileInputStream(in,length, conf);
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      this.in = codec.createInputStream(checksumIn, decompressor);
    } else {
      LOG.warn("Could not obtain decompressor from CodecPool");
      this.in = checksumIn;
    }
  } else {
    this.in = checksumIn;
  }
  this.dataIn = new DataInputStream(this.in);
  this.fileLength = length;
  
  if (conf != null) {
    bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
  }
}
 
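The buffer-size lookup at the end is the standard typed accessor with a default. A minimal sketch of the getInt/setInt round trip:

import org.apache.hadoop.conf.Configuration;

public class TypedGetSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    System.out.println(conf.getInt("io.file.buffer.size", 4096)); // unset: 4096
    conf.setInt("io.file.buffer.size", 65536);
    System.out.println(conf.getInt("io.file.buffer.size", 4096)); // configured: 65536
  }
}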
Example #15
Source File: TestTypedMap.java    From spork with Apache License 2.0
@Test
public void testSimpleMapCast() throws IOException, ParserException {
    PigServer pig = new PigServer(ExecType.LOCAL, new Properties());
    String[] input = {
            "[key#1,key2#2]",
            "[key#2]",
    };

    Util.createInputFile(FileSystem.getLocal(new Configuration()), tmpDirName + "/testSimpleMapCast", input);

    String query =
        "a = load '" + tmpDirName + "/testSimpleMapCast' as (m);" +
        "b = foreach a generate ([int])m;";
    Util.registerMultiLineQuery(pig, query);
    Schema sch = pig.dumpSchema("b");
    assertEquals("Checking expected schema",sch.toString(), "{m: map[int]}");
    Iterator<Tuple> it = pig.openIterator("b");

    Assert.assertTrue(it.hasNext());
    Tuple t = it.next();
    Assert.assertTrue(t.size()==1);
    Assert.assertTrue(t.get(0) instanceof Map);
    Assert.assertTrue(((Map)t.get(0)).containsKey("key"));
    Assert.assertTrue(((Map)t.get(0)).containsKey("key2"));
    Assert.assertTrue(((Map)t.get(0)).get("key") instanceof Integer);
    Assert.assertTrue(((Map)t.get(0)).get("key").toString().equals("1"));
    Assert.assertTrue(((Map)t.get(0)).get("key2") instanceof Integer);
    Assert.assertTrue(((Map)t.get(0)).get("key2").toString().equals("2"));

    Assert.assertTrue(it.hasNext());
    t = it.next();
    Assert.assertTrue(((Map)t.get(0)).containsKey("key"));
    Assert.assertTrue(((Map)t.get(0)).get("key") instanceof Integer);
    Assert.assertTrue(((Map)t.get(0)).get("key").toString().equals("2"));

    Assert.assertFalse(it.hasNext());
}
 
Example #16
Source File: CompressWriterFactoryTest.java    From flink with Apache License 2.0
private void testCompressByName(String codec, Configuration conf) throws Exception {
	CompressWriterFactory<String> writer = CompressWriters.forExtractor(new DefaultExtractor<String>())
		.withHadoopCompression(codec, conf);
	List<String> lines = Arrays.asList("line1", "line2", "line3");

	File directory = prepareCompressedFile(writer, lines);

	validateResults(directory, lines, new CompressionCodecFactory(conf).getCodecByName(codec));
}
 
Example #17
Source File: SegmentPreprocessingJob.java    From incubator-pinot with Apache License 2.0
/**
 * Helper method that returns an Avro reader for the given Avro file.
 * If the file name ends in 'gz', the stream is wrapped in a GZIPInputStream;
 * otherwise the file is read directly.
 *
 * @param avroFile file to read
 * @return Avro reader for the file
 * @throws IOException if the file cannot be opened or read
 */
protected DataFileStream<GenericRecord> getAvroReader(Path avroFile)
    throws IOException {
  FileSystem fs = FileSystem.get(new Configuration());
  if (avroFile.getName().endsWith("gz")) {
    return new DataFileStream<>(new GZIPInputStream(fs.open(avroFile)), new GenericDatumReader<>());
  } else {
    return new DataFileStream<>(fs.open(avroFile), new GenericDatumReader<>());
  }
}
 
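One subtlety worth flagging: FileSystem.get(new Configuration()) resolves against fs.defaultFS, so it breaks if avroFile lives on a different filesystem. Resolving the filesystem from the path itself side-steps that; a minimal sketch (Example #25 below uses the same idiom):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PerPathFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path local = new Path("file:///tmp/data.avro");
    // Resolve the filesystem from the path's own scheme rather than
    // from fs.defaultFS, so file:// and hdfs:// paths both work.
    FileSystem fs = local.getFileSystem(conf);
    System.out.println(fs.getUri());
  }
}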
Example #18
Source File: GoogleHadoopFileSystemIntegrationTest.java    From hadoop-connectors with Apache License 2.0
@Test
public void testInitializePath_failure_bucketNotSpecified() throws Exception {
  List<String> invalidPaths = Arrays.asList("gs:/", "gs:/foo", "gs:/foo/bar", "gs:///");
  for (String path : invalidPaths) {
    URI uri = new URI(path);
    try (GoogleHadoopFileSystem testGhfs = createInMemoryGoogleHadoopFileSystem()) {
      IllegalArgumentException e =
          assertThrows(
              "Path '" + path + "' should be invalid",
              IllegalArgumentException.class,
              () -> testGhfs.initialize(uri, new Configuration()));
      assertThat(e).hasMessageThat().startsWith("No bucket specified in GCS URI:");
    }
  }
}
 
Example #19
Source File: WindowedMergeOperatorTestApplication.java    From attic-apex-malhar with Apache License 2.0
public static void main(String[] args) throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  lma.prepareDAG(new WindowedMergeOperatorTestApplication(), conf);
  LocalMode.Controller lc = lma.getController();
  lc.run(20000);
}
 
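Note the false in new Configuration(false): it suppresses loading of the default resources (core-default.xml and, if present, core-site.xml), so the Configuration starts truly empty. A minimal sketch of the difference:

import org.apache.hadoop.conf.Configuration;

public class DefaultResourcesSketch {
  public static void main(String[] args) {
    // Loads the default resources from the classpath.
    Configuration withDefaults = new Configuration(true);
    System.out.println(withDefaults.get("fs.defaultFS")); // e.g. file:///

    // Starts empty: every key must be set explicitly.
    Configuration empty = new Configuration(false);
    System.out.println(empty.get("fs.defaultFS")); // null
  }
}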
Example #20
Source File: ParquetFileTest.java    From parquet-mr with Apache License 2.0
private void createTestParquetFile() throws IOException {
  File file = parquetFile();
  Path fsPath = new Path(file.getPath());
  Configuration conf = new Configuration();

  MessageType schema = createSchema();
  SimpleGroupFactory fact = new SimpleGroupFactory(schema);
  GroupWriteSupport.setSchema(schema, conf);

  try (
    ParquetWriter<Group> writer = new ParquetWriter<>(
      fsPath,
      new GroupWriteSupport(),
      CompressionCodecName.UNCOMPRESSED,
      1024,
      1024,
      512,
      true,
      false,
      ParquetProperties.WriterVersion.PARQUET_2_0,
      conf)) {
    for (int i = 0; i < 10; i++) {
      final byte[] bytes = new byte[12];
      ThreadLocalRandom.current().nextBytes(bytes);

      writer.write(fact.newGroup()
       .append(INT32_FIELD, 32 + i)
       .append(INT64_FIELD, 64L + i)
       .append(FLOAT_FIELD, 1.0f + i)
       .append(DOUBLE_FIELD, 2.0d + i)
       .append(BINARY_FIELD, Binary.fromString(COLORS[i % COLORS.length]))
       .append(FIXED_LEN_BYTE_ARRAY_FIELD,
         Binary.fromConstantByteArray(bytes)));
    }
  }
}
 
Example #21
Source File: NullScanner.java    From tajo with Apache License 2.0
public NullScanner(Configuration conf, Schema schema, TableMeta meta, Fragment fragment) {
  this.conf = conf;
  this.meta = meta;
  this.schema = schema;
  this.fragment = fragment;
  this.tableStats = new TableStats();
  this.columnNum = this.schema.size();
}
 
Example #22
Source File: RefreshHFilesClient.java    From hbase with Apache License 2.0
/**
 * Constructor with Conf object
 *
 * @param cfg the {@link Configuration} object to use
 */
public RefreshHFilesClient(Configuration cfg) {
  try {
    this.connection = ConnectionFactory.createConnection(cfg);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
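A hedged usage sketch: in HBase, the Configuration handed to such a client is normally built with HBaseConfiguration.create(), which layers hbase-default.xml and hbase-site.xml over the plain Hadoop resources:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RefreshClientSketch {
  public static void main(String[] args) {
    // HBaseConfiguration.create() adds the HBase resources on top of the
    // ordinary Hadoop configuration files.
    Configuration cfg = HBaseConfiguration.create();
    RefreshHFilesClient client = new RefreshHFilesClient(cfg); // from the example above
    System.out.println(cfg.get("hbase.zookeeper.quorum"));
  }
}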
Example #23
Source File: ExtractDictionaryFromGlobalMapper.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doSetup(Context context) throws IOException {
    Configuration conf = context.getConfiguration();
    bindCurrentConfiguration(conf);
    config = AbstractHadoopJob.loadKylinPropsAndMetadata();

    cubeName = conf.get(BatchConstants.CFG_CUBE_NAME);
    cube = CubeManager.getInstance(config).getCube(cubeName);
    cubeDesc = cube.getDescriptor();
    cubeSeg = cube.getSegmentById(conf.get(BatchConstants.CFG_CUBE_SEGMENT_ID));
    flatTableInputFormat = MRUtil.getBatchCubingInputSide(cubeSeg).getFlatTableInputFormat();

    intermediateTableDesc = new CubeJoinedFlatTableEnrich(EngineFactory.getJoinedFlatTableDesc(cubeSeg), cubeDesc);

    globalColumns = cubeDesc.getAllGlobalDictColumns();
    globalColumnIndex = new int[globalColumns.size()];
    globalColumnValues = Lists.newArrayListWithExpectedSize(globalColumns.size());

    for (int i = 0; i < globalColumns.size(); i++) {
        TblColRef colRef = globalColumns.get(i);
        int columnIndexOnFlatTbl = intermediateTableDesc.getColumnIndex(colRef);
        globalColumnIndex[i] = columnIndexOnFlatTbl;
        globalColumnValues.add(Sets.<String> newHashSet());
    }

    splitKey = DictionaryGetterUtil.getInputSplitSignature(cubeSeg, context.getInputSplit());
}
 
Example #24
Source File: HistoryLogUtils.java    From spydra with Apache License 2.0
/**
 * Dumps the full job logs for a particular application to stdout.
 *
 * @param applicationId application to dump logs for
 */
public static void dumpFullLogs(Configuration cfg, ApplicationId applicationId) {
  LogCLIHelpers logCliHelpers = new LogCLIHelpers();
  // TODO: Add the proper base dir settings etc...

  logCliHelpers.setConf(cfg);
  try {
    logCliHelpers.dumpAllContainersLogs(
        applicationId, cfg.get(SPYDRA_HISTORY_USERNAME_PROPERTY), System.out);
  } catch (IOException e) {
    logger.error("Failed dumping log files for application " + applicationId.toString(), e);
  }
}
 
Example #25
Source File: HadoopArchives.java    From hadoop with Apache License 2.0
private static void checkPaths(Configuration conf, List<Path> paths) throws
IOException {
  for (Path p : paths) {
    FileSystem fs = p.getFileSystem(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException("Source " + p + " does not exist.");
    }
  }
}
 
Example #26
Source File: TestSnapshotListing.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  hdfs.mkdirs(dir);
}
 
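The excerpt stops at setup; such tests conventionally pair it with a teardown that stops the mini cluster. A hedged sketch, assuming the same cluster field as above:

@After
public void tearDown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}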
Example #27
Source File: WALFile.java    From streamx with Apache License 2.0
public Reader(Configuration conf, Option... opts) throws IOException {
  // Look up the options, these are null if not set
  FileOption fileOpt = Options.getOption(FileOption.class, opts);
  InputStreamOption streamOpt =
      Options.getOption(InputStreamOption.class, opts);
  StartOption startOpt = Options.getOption(StartOption.class, opts);
  LengthOption lenOpt = Options.getOption(LengthOption.class, opts);
  BufferSizeOption bufOpt = Options.getOption(BufferSizeOption.class, opts);
  OnlyHeaderOption headerOnly =
      Options.getOption(OnlyHeaderOption.class, opts);
  // check for consistency
  if ((fileOpt == null) == (streamOpt == null)) {
    throw new
        IllegalArgumentException("File or stream option must be specified");
  }
  if (fileOpt == null && bufOpt != null) {
    throw new IllegalArgumentException("buffer size can only be set when" +
                                       " a file is specified.");
  }
  // figure out the real values
  Path filename = null;
  FSDataInputStream file;
  final long len;
  if (fileOpt != null) {
    filename = fileOpt.getValue();
    FileSystem fs = filename.getFileSystem(conf);
    int bufSize = bufOpt == null ? getBufferSize(conf) : bufOpt.getValue();
    len = null == lenOpt
          ? fs.getFileStatus(filename).getLen()
          : lenOpt.getValue();
    file = openFile(fs, filename, bufSize, len);
  } else {
    len = null == lenOpt ? Long.MAX_VALUE : lenOpt.getValue();
    file = streamOpt.getValue();
  }
  long start = startOpt == null ? 0 : startOpt.getValue();
  // really set up
  initialize(filename, file, start, len, conf, headerOnly != null);
}
 
Example #28
Source File: PhoenixMRJobSubmitter.java    From phoenix with Apache License 2.0
private void enableKeyTabSecurity() throws IOException {

    final String PRINCIPAL = "principal";
    final String KEYTAB = "keyTab";
    // Login with the credentials from the keytab to retrieve the TGT. The
    // renewal of the TGT happens in a Zookeeper thread
    String principal = null;
    String keyTabPath = null;
    // Note: this is the JAAS javax.security.auth.login.Configuration,
    // not Hadoop's, hence the fully qualified name
    AppConfigurationEntry[] entries =
            javax.security.auth.login.Configuration.getConfiguration()
                    .getAppConfigurationEntry("Client");
    LOGGER.info("Security - Fetched App Login Configuration Entries");
    if (entries != null) {
        for (AppConfigurationEntry entry : entries) {
            if (entry.getOptions().get(PRINCIPAL) != null) {
                principal = (String) entry.getOptions().get(PRINCIPAL);
            }
            if (entry.getOptions().get(KEYTAB) != null) {
                keyTabPath = (String) entry.getOptions().get(KEYTAB);
            }
        }
        LOGGER.info("Security - Got Principal = " + principal);
        if (principal != null && keyTabPath != null) {
            LOGGER.info("Security - Retrieving the TGT with principal:" + principal
                    + " and keytab:" + keyTabPath);
            UserGroupInformation.loginUserFromKeytab(principal, keyTabPath);
            LOGGER.info("Security - Retrieved TGT with principal:" + principal + " and keytab:"
                    + keyTabPath);
        }
    }
}
 
Example #29
Source File: LimitedKeyPartitionerTest.java    From datawave with Apache License 2.0
@Test
public void testHigherMaxThanReducers() throws IllegalAccessException, InstantiationException {
    Configuration conf = new Configuration();
    conf.setInt(PartitionLimiter.MAX_PARTITIONS_PROPERTY, NUM_REDUCERS + 1);
    
    LimitedKeyPartitioner partitioner = LimitedKeyPartitioner.class.newInstance();
    partitioner.setConf(conf);
    assertPartitionsUnderMax(partitioner, NUM_REDUCERS);
}
 
Example #30
Source File: ResourceLimitTest.java    From mr4c with Apache License 2.0
@Test public void testConfigurationUpdate() {
	Configuration conf = new Configuration(false);
	m_limit.applyTo(conf);
	ResourceLimit expected = new ResourceLimit(m_resource, m_value, LimitSource.CONFIG);
	ResourceLimit limit = ResourceLimit.extractFrom(m_resource, conf);
	assertEquals(expected, limit);
}