Java Code Examples for org.apache.hadoop.fs.FileSystem.getConf()

The following are Java code examples showing how to use the getConf() method of the org.apache.hadoop.fs.FileSystem class, drawn from a range of open-source projects.
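As context for the examples: getConf() returns the Configuration a FileSystem instance was initialized with, so code that already holds a FileSystem can look up buffer sizes, default URIs, and other settings without threading a Configuration through every call site. A minimal sketch of the call, assuming a default Configuration on the classpath (the property keys shown are standard Hadoop keys; the class is just scaffolding for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetConfExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FileSystem.get() initializes (and caches) the filesystem for fs.defaultFS
    FileSystem fs = FileSystem.get(conf);
    // getConf() hands back the Configuration the instance was initialized with
    System.out.println(fs.getConf().get("fs.defaultFS"));
    System.out.println(fs.getConf().getInt("io.file.buffer.size", 4096));
  }
}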
Example 1
Project: kafka-connect-fs   File: SequenceFileReader.java
public SequenceFileReader(FileSystem fs, Path filePath, Map<String, Object> config) throws IOException {
    super(fs, filePath, new SeqToStruct(), config);

    // The filesystem's Configuration drives the reader, including an
    // optional buffer-size override looked up from that same config.
    this.reader = new SequenceFile.Reader(fs.getConf(),
            SequenceFile.Reader.file(filePath),
            SequenceFile.Reader.bufferSize(fs.getConf().getInt(FILE_READER_BUFFER_SIZE, DEFAULT_BUFFER_SIZE)));
    // Key/value classes are recorded in the file header; instantiate them reflectively.
    this.key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
    this.value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
    this.schema = SchemaBuilder.struct()
            .field(keyFieldName, getSchema(this.key))
            .field(valueFieldName, getSchema(this.value))
            .build();
    this.offset = new SeqOffset(0);
    this.recordIndex = this.hasNextIndex = -1;
    this.hasNext = false;
}
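The constructor above wires fs.getConf() into both SequenceFile.Reader and ReflectionUtils. Stripped of the connector scaffolding, a minimal read loop follows the same pattern; this sketch assumes the file exists and that its key and value classes implement Writable:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public static void dumpSequenceFile(FileSystem fs, Path path) throws IOException {
  Configuration conf = fs.getConf();
  try (SequenceFile.Reader reader =
           new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
    // Key/value holders are instantiated from the classes named in the file header
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    while (reader.next(key, value)) {
      System.out.println(key + "\t" + value);
    }
  }
}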
 
Example 2
Project: hadoop   File: TestDFSClientFailover.java
/**
 * Ensure that a logical (HA) URI's hostname is never handed to DNS
 * resolution; this variant exercises FileContext rather than FileSystem.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();
  
  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
 
Example 3
Project: flume-release-1.7.0   File: TestDatasetSink.java
@Test
public void testMiniClusterStore() throws EventDeliveryException, IOException {
  // setup a minicluster
  MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new Configuration())
      .build();

  FileSystem dfs = cluster.getFileSystem();
  Configuration conf = dfs.getConf();

  URI hdfsUri = URI.create(
      "dataset:" + conf.get("fs.defaultFS") + "/tmp/repo" + DATASET_NAME);
  try {
    // create a repository and dataset in HDFS
    Datasets.create(hdfsUri, DESCRIPTOR);

    // update the config to use the HDFS repository
    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI, hdfsUri.toString());

    DatasetSink sink = sink(in, config);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals(
        Sets.newHashSet(expected),
        read(Datasets.load(hdfsUri)));
    Assert.assertEquals("Should have committed", 0, remaining(in));

  } finally {
    if (Datasets.exists(hdfsUri)) {
      Datasets.delete(hdfsUri);
    }
    cluster.shutdown();
  }
}
 
Example 4
Project: hadoop   File: DFSAdmin.java
/** Constructor */
public DFSAdminCommand(FileSystem fs) {
  super(fs.getConf());
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("FileSystem " + fs.getUri() + 
        " is not an HDFS file system");
  }
  this.dfs = (DistributedFileSystem)fs;
}
 
Example 5
Project: hadoop   File: TestHDFSTrash.java
@Test
public void testNonDefaultFS() throws IOException {
  FileSystem fs = cluster.getFileSystem();
  Configuration conf = fs.getConf();
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
  TestTrash.trashNonDefaultFS(conf);
}
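Note the side effect this test relies on: in the Hadoop releases these examples target, getConf() returns the FileSystem's own Configuration object rather than a copy, so the set() call above mutates state shared with the fs instance. That is convenient in a test; in production code, taking a defensive copy with new Configuration(fs.getConf()) before modifying anything is the safer pattern.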
 
Example 6
Project: cloudup   File: S3ADiag.java
public int run(String[] args, PrintStream stream) throws Exception {
  out = stream;
  List<String> paths = parseArgs(args);
  if (paths.size() != 1) {
    errorln(USAGE);
    return E_USAGE;
  }
  println("Hadoop %s", getVersion());
  println("Compiled by %s on %s", getUser(), getDate());
  println("Compiled with protoc %s", getProtocVersion());
  println("From source with checksum %s", getSrcChecksum());


  Configuration conf = getConf();
  Path path = new Path(paths.get(0));
  FileSystem fs = path.getFileSystem(conf);

  println("Filesystem for %s is %s", path, fs);

  // examine the FS
  Configuration fsConf = fs.getConf();
  for (int i = 0; i < props.length; i++) {
    showProp(fsConf, (String) props[i][0], (Boolean) props[i][1]);
  }

  Path root = fs.makeQualified(new Path("/"));
  try (DurationInfo d = new DurationInfo(LOG,
      "Listing  %s", root)) {
    println("%s has %d entries", root, fs.listStatus(root).length);
  }

  String dirName = "dir-" + UUID.randomUUID();
  Path dir = new Path(root, dirName);
  try (DurationInfo d = new DurationInfo(LOG,
      "Creating a directory %s", dir)) {
    fs.mkdirs(dir);
  }
  try {
    Path file = new Path(dir, "file");
    try (DurationInfo d = new DurationInfo(LOG,
        "Creating a file %s", file)) {
      try (FSDataOutputStream data = fs.create(file, true)) {
        data.writeUTF(HELLO);
      }
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Listing  %s", dir)) {
      fs.listFiles(dir, false);
    }

    try (DurationInfo d = new DurationInfo(LOG,
        "Reading a file %s", file)) {
      final String utf;
      try (FSDataInputStream in = fs.open(file)) {
        utf = in.readUTF();
      }
      if (!HELLO.equals(utf)) {
        throw new IOException("Expected " + file + " to contain the text "
            + HELLO + " -but it has the text \"" + utf + "\"");
      }
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting file %s", file)) {
      fs.delete(file, true);
    }
  } finally {
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting directory %s", dir)) {
      try {
        fs.delete(dir, true);
      } catch (Exception e) {
        LOG.warn("When deleting {}: ", dir, e);
      }
    }
  }

  // All operations completed successfully.
  return SUCCESS;
}
 
Example 7
Project: QDrill   File: BlockMapBuilder.java
public BlockMapBuilder(FileSystem fs, Collection<DrillbitEndpoint> endpoints) {
  this.fs = fs;
  this.codecFactory = new CompressionCodecFactory(fs.getConf());
  this.endPointMap = buildEndpointMap(endpoints);
}
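CompressionCodecFactory uses the Configuration it is given to discover the configured codec classes (the io.compression.codecs key), which is why fs.getConf() is the natural argument here. A sketch of how such a factory is typically applied (the method name is just illustrative):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public static InputStream openPossiblyCompressed(FileSystem fs, Path path) throws IOException {
  CompressionCodecFactory factory = new CompressionCodecFactory(fs.getConf());
  // getCodec() matches by file extension, e.g. ".gz" or ".bz2"; null means uncompressed
  CompressionCodec codec = factory.getCodec(path);
  InputStream raw = fs.open(path);
  return codec == null ? raw : codec.createInputStream(raw);
}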
 
Example 8
Project: hadoop   File: TestReplication.java
private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
    throws IOException, InterruptedException, TimeoutException {
  final Path fileName = new Path("/file1");
  final short REPLICATION_FACTOR = (short)1;
  final FileSystem fs = cluster.getFileSystem();
  final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
  DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);

  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);

  // Change the length of a replica
  for (int i=0; i<cluster.getDataNodes().size(); i++) {
    if (DFSTestUtil.changeReplicaLength(cluster, block, i, lenDelta)) {
      break;
    }
  }

  // increase the file's replication factor
  fs.setReplication(fileName, (short)(REPLICATION_FACTOR+1));

  // block replication triggers corrupt block detection
  DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
      cluster.getNameNodePort()), fs.getConf());
  LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
      fileName.toString(), 0, fileLen);
  if (lenDelta < 0) { // replica truncated
    while (!blocks.get(0).isCorrupt() ||
        REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  } else { // no corruption detected; block replicated
    while (REPLICATION_FACTOR + 1 != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  }
  fs.delete(fileName, true);
}
 
Example 9
Project: dremio-oss   File: BlockMapBuilder.java
public BlockMapBuilder(FileSystem fs, Collection<NodeEndpoint> endpoints) {
  this.fs = fs;
  this.codecFactory = new CompressionCodecFactory(fs.getConf());
  this.endPointMap = buildEndpointMap(endpoints);
}