Java Code Examples for org.apache.hadoop.fs.FileSystem#initialize()

The following examples show how to use org.apache.hadoop.fs.FileSystem#initialize(). Each example is drawn from an open source project; the source file, project, and license are noted above the code.
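Before the project-specific examples, here is a minimal, self-contained sketch of the basic pattern: construct a FileSystem implementation directly (bypassing the FileSystem.get() cache), then bind it to a URI and Configuration with initialize(). This sketch assumes hadoop-common on the classpath and an existing local /tmp directory; the class name InitializeSketch is made up for illustration and does not come from any project below.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class InitializeSketch {
  public static void main(String[] args) throws Exception {
    // Construct the implementation directly instead of going through
    // FileSystem.get(), then bind it to a URI and Configuration.
    FileSystem fs = new RawLocalFileSystem();
    fs.initialize(URI.create("file:///"), new Configuration());
    try {
      // Any FileSystem call is valid once initialize() has run.
      FileStatus status = fs.getFileStatus(new Path("/tmp"));
      System.out.println(status.getPath() + " length=" + status.getLen());
    } finally {
      fs.close(); // release resources acquired during initialize()
    }
  }
}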
Example 1
Source File: TestFileSystemApplicationHistoryStore.java    From hadoop with Apache License 2.0
private void initAndStartStore(final FileSystem fs) throws IOException,
    URISyntaxException {
  Configuration conf = new Configuration();
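  // bind the caller-supplied FileSystem to the root URI before the store uses it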
  fs.initialize(new URI("/"), conf);
  fsWorkingPath =
      new Path("target",
        TestFileSystemApplicationHistoryStore.class.getSimpleName());
  fs.delete(fsWorkingPath, true);
  conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
    fsWorkingPath.toString());
  store = new FileSystemApplicationHistoryStore() {
    @Override
    protected FileSystem getFileSystem(Path path, Configuration conf) {
      return fs;
    }
  };
  store.init(conf);
  store.start();
}
 
Example 2
Source File: SwiftPublicContainerTest.java    From stocator with Apache License 2.0
@Test
public void accessPublicSwiftContainerTest() throws Exception {
  FileSystem fs = new ObjectStoreFileSystem();
  Configuration conf = new Configuration();
  String uriString = conf.get(PUBLIC_URI_PROPERTY);
  Assume.assumeNotNull(uriString);
  URI publicContainerURI = new URI(uriString);
  System.out.println("publicContainerURI1: " + publicContainerURI);
  fs.initialize(publicContainerURI, conf);
  FileStatus objectFS = null;
  try {
    objectFS = fs.getFileStatus(new Path(publicContainerURI));
  } finally {
    Assert.assertNotNull("Unable to access public object.", objectFS);
  }
}
 
Example 3
Source File: StreamingSwiftTest.java    From stocator with Apache License 2.0
@Test
public void accessPublicSwiftContainerWithSpaceTest() throws Exception {
  FileSystem fs = new ObjectStoreFileSystem();
  Configuration conf = new Configuration();
  String uriString = conf.get("fs.swift2d.test.uri");
  Assume.assumeNotNull(uriString);
  // optionally add a suffix with a space to the container name (currently disabled below)
  String scheme = "swift2d";
  String host = getHost(URI.create(uriString));
  // String origContainerName = getContainerName(host);
  // String newContainerName = origContainerName + " t";
  // uriString = uriString.replace(origContainerName, newContainerName);
  // use URI ctor that encodes authority according to the rules specified
  // in RFC 2396, section 5.2, step 7
  URI publicContainerURI = new URI(scheme, getHost(URI.create(uriString)), "/", null, null);
  fs.initialize(publicContainerURI, conf);
  FileStatus objectFS = null;
  try {
    objectFS = fs.getFileStatus(new Path(publicContainerURI));
  } catch (Exception e) {
    e.printStackTrace();
    Assert.assertNotNull("Unable to access public object ", objectFS);
  }
}
 
Example 4
Source File: TestAzureFileSystemErrorConditions.java    From hadoop with Apache License 2.0
@Test
public void testAccessContainerWithWrongVersion() throws Exception {
  AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
  MockStorageInterface mockStorage = new MockStorageInterface();
  store.setAzureStorageInteractionLayer(mockStorage);
  FileSystem fs = new NativeAzureFileSystem(store);
  try {
    Configuration conf = new Configuration();
    AzureBlobStorageTestAccount.setMockAccountKey(conf);
    HashMap<String, String> metadata = new HashMap<String, String>();
    metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
        "2090-04-05"); // It's from the future!
    mockStorage.addPreExistingContainer(
        AzureBlobStorageTestAccount.getMockContainerUri(), metadata);

    boolean passed = false;
    try {
      fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
      fs.listStatus(new Path("/"));
      passed = true;
    } catch (AzureException ex) {
      assertTrue("Unexpected exception message: " + ex,
          ex.getMessage().contains("unsupported version: 2090-04-05."));
    }
    assertFalse("Should've thrown an exception because of the wrong version.",
        passed);
  } finally {
    fs.close();
  }
}
 
Example 5
Source File: PseudoDistributedFileSystem.java    From dremio-oss with Apache License 2.0
static FileSystem newLocalFileSystem(Configuration conf, boolean isLocalAccessAllowed) throws IOException {
  // we'll grab our own local file system so append is supported (rather than the checksum local file system).
  final FileSystem localFS = isLocalAccessAllowed ? new PDFSLocalFileSystem() : new NoopFileSystem();
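  // bind the chosen implementation to its own default URI before returning it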
  localFS.initialize(localFS.getUri(), conf);

  return localFS;
}
 
Example 6
Source File: TestRemoteReadRequestChain.java    From rubix with Apache License 2.0
@BeforeMethod
public void setup()
    throws IOException
{
  // Populate File
  DataGen.populateFile(backendFileName);

  FileSystem localFileSystem = new RawLocalFileSystem();
  Path backendFilePath = new Path(backendFileName);
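  // bind the raw local FS to the backend file's URI before opening it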
  localFileSystem.initialize(backendFilePath.toUri(), new Configuration());
  fsDataInputStream = localFileSystem.open(backendFilePath);

  remoteReadRequestChain = new RemoteReadRequestChain(fsDataInputStream, localFileName);
}
 
Example 7
Source File: TestCachingInputStream.java    From rubix with Apache License 2.0
private CachingInputStream createCachingStream(Configuration conf)
    throws IOException
{
  FileSystem localFileSystem = new RawLocalFileSystem();
  Path backendFilePath = new Path(backendFileName);
  localFileSystem.initialize(backendFilePath.toUri(), new Configuration());
  CacheConfig.setBlockSize(conf, blockSize);

  // This must run after the server is up, otherwise the client cannot be created
  return new CachingInputStream(backendPath, conf,
      new CachingFileSystemStats(), ClusterType.TEST_CLUSTER_MANAGER,
      new BookKeeperFactory(), localFileSystem,
      CacheConfig.getBlockSize(conf), null);
}
 
Example 8
Source File: TestFileSystems.java    From incubator-tajo with Apache License 2.0
public TestFileSystems(FileSystem fs) throws IOException {
  conf = new TajoConf();

  if(fs instanceof S3FileSystem){
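    // bind the S3 filesystem explicitly, with a very small block size for the tests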
    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "10");
    fs.initialize(URI.create(fs.getScheme() + ":///"), conf);
  }
  this.fs = fs;
  sm = StorageManagerFactory.getStorageManager(conf);
  testDir = getTestDir(this.fs, TEST_PATH);
}
 
Example 9
Source File: GobblinYarnAppLauncher.java    From incubator-gobblin with Apache License 2.0
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
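  // a raw local FS, registered with the closer for cleanup, serves as the copy destination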
  FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
  rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());

  LogCopier.Builder builder = LogCopier.newBuilder()
          .useSrcFileSystem(this.fs)
          .useDestFileSystem(rawLocalFs)
          .readFrom(getHdfsLogDir(appWorkDir))
          .writeTo(sinkLogDir)
          .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
  return builder.build();
}
 
Example 10
Source File: MetadataTableSplitsTest.java    From datawave with Apache License 2.0
protected void createMockFileSystem() throws Exception {
    
    FileSystem fs = new MetadataTableSplitsTest.WrappedLocalFileSystem();
    
    mockConfiguration.put(FileSystem.FS_DEFAULT_NAME_KEY, "file:///localhost");
    
    // Lifted from DfsMonitorTest
    mockConfiguration.put("fs.file.impl", MetadataTableSplitsTest.WrappedLocalFileSystem.class.getName());
    mockConfiguration.put("fs.automatic.close", "false");
    mockConfiguration.put(MRJobConfig.CACHE_FILES, ".");
    
    Configuration conf = createMockConfiguration();
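    // hand the configuration to the wrapped FS via both setConf() and initialize()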
    fs.setConf(conf);
    fs.initialize(URI.create("file:///localhost"), conf);
    
    Whitebox.invokeMethod(FileSystem.class, "addFileSystemForTesting", FileSystem.getDefaultUri(conf), conf, fs);
    
}
 
Example 11
Source File: TestFileAppend.java    From hadoop with Apache License 2.0
/** Tests appending after soft-limit expires. */
@Test
public void testAppendAfterSoftLimit() 
    throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
  //Set small soft-limit for lease
  final long softLimit = 1L;
  final long hardLimit = 9999999L;

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();

  FileSystem fs = cluster.getFileSystem();
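  // a second, separately initialized client acts as a distinct lease holder for the append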
  FileSystem fs2 = new DistributedFileSystem();
  fs2.initialize(fs.getUri(), conf);

  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] fileContents = AppendTestUtil.initBuffer(32);

  // create a new file without closing
  FSDataOutputStream out = fs.create(testPath);
  out.write(fileContents);

  //Wait for > soft-limit
  Thread.sleep(250);

  try {
    FSDataOutputStream appendStream2 = fs2.append(testPath);
    appendStream2.write(fileContents);
    appendStream2.close();
    assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
  } finally {
    fs.close();
    fs2.close();
    cluster.shutdown();
  }
}
 
Example 12
Source File: FileDownloader.java    From rubix with Apache License 2.0
protected List<FileDownloadRequestChain> getFileDownloadRequestChains(ConcurrentMap<String, DownloadRequestContext> contextMap)
    throws IOException
{
  List<FileDownloadRequestChain> readRequestChainList = new ArrayList<FileDownloadRequestChain>();
  for (Map.Entry<String, DownloadRequestContext> entry : contextMap.entrySet()) {
    Path path = new Path(entry.getKey());
    DownloadRequestContext context = entry.getValue();

    FileSystem fs = FileSystem.get(path.toUri(), conf);
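    // re-initialize the instance against this file's URI with the current conf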
    fs.initialize(path.toUri(), conf);

    String localPath = CacheUtil.getLocalPath(entry.getKey(), conf);
    log.debug("Processing Request for File : " + path.toString() + " LocalFile : " + localPath);
    ByteBuffer directWriteBuffer = bufferPool.getBuffer(diskReadBufferSize);

    FileDownloadRequestChain requestChain = new FileDownloadRequestChain(bookKeeper, fs, localPath,
        directWriteBuffer, conf, context.getRemoteFilePath(), context.getFileSize(),
        context.getLastModifiedTime());

    Range<Long> previousRange = null;
    for (Range<Long> range : context.getRanges().asRanges()) {
      // align range to block boundary
      long startBlock = toStartBlock(range.lowerEndpoint(), conf);
      long endBlock = toEndBlock(range.upperEndpoint(), conf);

      // multiple reads may fall within the same block
      Range<Long> currentRange = Range.closedOpen(startBlock, endBlock);
      if (previousRange != null && previousRange.encloses(currentRange)) {
        // already covered in previous request
        continue;
      }

      previousRange = currentRange;

      // Avoid duplicate warm-ups
      List<BlockLocation> blockLocations = null;

      try {
        blockLocations = bookKeeper.getCacheStatus(
                new CacheStatusRequest(
                        context.getRemoteFilePath(),
                        context.getFileSize(),
                        context.getLastModifiedTime(),
                        startBlock,
                        endBlock));
      }
      catch (Exception e) {
        log.warn("Error communicating with bookKeeper", e);
        // An exception is not expected here: RemoteFetchProcessor ensures processing does not start until BookKeeper has initialized.
        // To recover, re-queue the requests for this file and continue with the next file.
        remoteFetchProcessor.addToProcessQueueSafe(context.getRemoteFilePath(), context.getRanges().asRanges(), context.getFileSize(), context.getLastModifiedTime());
        requestChain = null;
        break;
      }

      for (int i = 0; i < blockLocations.size(); i++) {
        if (!blockLocations.get(i).getLocation().equals(Location.LOCAL)) {
          continue;
        }

        long block = startBlock + i;
        long startPosition = toBlockStartPosition(block, conf);
        long endPosition = Math.min(toBlockStartPosition(block + 1, conf), context.getFileSize());
        ReadRequest readRequest = new ReadRequest(startPosition, endPosition, startPosition, endPosition, null, 0, context.getFileSize());
        requestChain.addReadRequest(readRequest);
      }
    }

    if (requestChain != null) {
      log.debug("Request added for file: " + requestChain.getRemotePath() + " Number of Requests : " +
              requestChain.getReadRequests().size());

      readRequestChainList.add(requestChain);
    }
  }

  return readRequestChainList;
}
 