Java Code Examples for org.apache.hadoop.fs.LocalFileSystem

The following examples show how to use org.apache.hadoop.fs.LocalFileSystem. The examples are extracted from open source projects; each one lists its source project, source file, and license.
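A quick orientation before the examples: LocalFileSystem is Hadoop's checksummed local file system (the file:// scheme), and most snippets below either obtain it with FileSystem.getLocal(conf) or unwrap it with getRaw(). The following is a minimal sketch, not taken from any of the projects listed; the class name and the /tmp path are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFileSystemSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FileSystem.getLocal() always returns the checksummed LocalFileSystem.
    LocalFileSystem localFs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/local-fs-sketch.txt"); // illustrative path
    try (FSDataOutputStream out = localFs.create(path)) {
      // Writing through LocalFileSystem also creates the hidden CRC
      // sidecar /tmp/.local-fs-sketch.txt.crc next to the file.
      out.writeBytes("hello\n");
    }
    // getRaw() exposes the underlying RawLocalFileSystem, which bypasses
    // the CRC sidecars; several examples below rely on this.
    FileSystem rawFs = localFs.getRaw();
    System.out.println(rawFs.getFileStatus(path).getLen());
    localFs.delete(path, true);
  }
}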
Example 1
Source Project: celos   Source File: HdfsDeployerTest.java    License: Apache License 2.0
@Test
public void testUndeploy() throws Exception {
    CelosCiContext context = mock(CelosCiContext.class);

    File remoteHdfsFolder = tempDir.newFolder();

    doReturn(LocalFileSystem.get(new Configuration())).when(context).getFileSystem();
    doReturn(remoteHdfsFolder.getAbsolutePath()).when(context).getHdfsPrefix();
    doReturn("workflow").when(context).getWorkflowName();
    doReturn("/some/hdfs/root").when(context).getHdfsRoot();
    
    File remoteDir = new File(remoteHdfsFolder, "some/hdfs/root/workflow");
    remoteDir.mkdirs();
    new File(remoteDir, "file").createNewFile();

    HdfsDeployer deployer = new HdfsDeployer(context);
    deployer.undeploy();

    Assert.assertFalse(remoteDir.exists());
}
 
Example 2
Source Project: hadoop   Source File: TestIFile.java    License: Apache License 2.0
@Test
/** Same as above but create a reader. */
public void testIFileReaderWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec codec = new GzipCodec();
  codec.setConf(conf);
  FSDataOutputStream out = rfs.create(path);
  IFile.Writer<Text, Text> writer =
      new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class,
                                   codec, null);
  writer.close();
  FSDataInputStream in = rfs.open(path);
  IFile.Reader<Text, Text> reader =
    new IFile.Reader<Text, Text>(conf, in, rfs.getFileStatus(path).getLen(),
        codec, null);
  reader.close();
  
  // test checksum
  byte[] ab = new byte[100];
  int readed = reader.checksumIn.readWithChecksum(ab, 0, ab.length);
  assertEquals(readed, reader.checksumIn.getChecksum().length);
  
}
 
Example 3
Source Project: hadoop   Source File: UpgradeUtilities.java    License: Apache License 2.0
/**
 * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a 
 * populated DFS filesystem.
 * For each parent directory, this method populates <code>parent/dirName</code>
 * with the content of the block pool storage directory from a singleton
 * datanode master (which contains version and block files). If the destination
 * directory does not exist, it will be created. If the directory already
 * exists, it will first be deleted.
 * 
 * @param parents parent directory where {@code dirName} is created
 * @param dirName directory under which storage directory is created
 * @param bpid block pool id for which the storage directory is created.
 * @return the array of created directories
 */
public static File[] createBlockPoolStorageDirs(String[] parents,
    String dirName, String bpid) throws Exception {
  File[] retVal = new File[parents.length];
  Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
      bpid, Storage.STORAGE_DIR_CURRENT));
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i] + "/current/" + bpid, dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(bpCurDir,
                            new Path(newDir.toString()),
                            false);
    retVal[i] = newDir;
  }
  return retVal;
}
 
Example 4
Source Project: incubator-gobblin   Source File: FileSystemFactoryTest.java    License: Apache License 2.0
@Test
public void testCreationWithConfigurationFSImpl() throws Exception {
  SharedResourcesBrokerImpl<SimpleScopeType> broker = SharedResourcesBrokerFactory.<SimpleScopeType>createDefaultTopLevelBroker(
      ConfigFactory.empty(), SimpleScopeType.GLOBAL.defaultScopeInstance());

  Configuration conf = new Configuration();
  conf.set("fs.local.impl", InstrumentedLocalFileSystem.class.getName());

  // Use the Configuration that carries the fs.local.impl override above;
  // otherwise the instrumented implementation is never picked up.
  FileSystemKey key = new FileSystemKey(new URI("file:///"), conf);
  FileSystemFactory<SimpleScopeType> factory = new FileSystemFactory<>();

  FileSystem fs = broker.getSharedResource(factory, key);

  verifyInstrumentedOnce(fs);
  Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
}
 
Example 5
Source Project: hadoop   Source File: TestDiskChecker.java    License: Apache License 2.0
private void _mkdirs(boolean exists, FsPermission before, FsPermission after)
    throws Throwable {
  File localDir = make(stub(File.class).returning(exists).from.exists());
  when(localDir.mkdir()).thenReturn(true);
  Path dir = mock(Path.class); // use default stubs
  LocalFileSystem fs = make(stub(LocalFileSystem.class)
      .returning(localDir).from.pathToFile(dir));
  FileStatus stat = make(stub(FileStatus.class)
      .returning(after).from.getPermission());
  when(fs.getFileStatus(dir)).thenReturn(stat);

  try {
    DiskChecker.mkdirsWithExistsAndPermissionCheck(fs, dir, before);

    if (!exists)
      verify(fs).setPermission(dir, before);
    else {
      verify(fs).getFileStatus(dir);
      verify(stat).getPermission();
    }
  }
  catch (DiskErrorException e) {
    if (before != after)
      assertTrue(e.getMessage().startsWith("Incorrect permission"));
  }
}
 
Example 6
@Override
public HadoopFileSystem create( NamedCluster namedCluster, URI uri ) throws IOException {
  final Configuration configuration = hadoopShim.createConfiguration( namedCluster );
  FileSystem fileSystem = (FileSystem) hadoopShim.getFileSystem( configuration ).getDelegate();
  if ( fileSystem instanceof LocalFileSystem ) {
    LOGGER.error( "Got a local filesystem, was expecting an hdfs connection" );
    throw new IOException( "Got a local filesystem, was expecting an hdfs connection" );
  }

  final URI finalUri = fileSystem.getUri() != null ? fileSystem.getUri() : uri;
  HadoopFileSystem hadoopFileSystem = new HadoopFileSystemImpl( () -> {
    try {
      return finalUri != null
        ? (FileSystem) hadoopShim.getFileSystem( finalUri, configuration, (NamedCluster) namedCluster ).getDelegate()
        : (FileSystem) hadoopShim.getFileSystem( configuration ).getDelegate();
    } catch ( IOException | InterruptedException e ) {
      LOGGER.debug( "Error looking up/creating the file system ", e );
      return null;
    }
  } );
  ( (HadoopFileSystemImpl) hadoopFileSystem ).setNamedCluster( namedCluster );

  return hadoopFileSystem;
}
 
Example 7
Source Project: big-c   Source File: TestLogsCLI.java    License: Apache License 2.0
@Test(timeout = 5000L)
public void testFailResultCodes() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class);
  LogCLIHelpers cliHelper = new LogCLIHelpers();
  cliHelper.setConf(conf);
  YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI dumper = new LogsCLIForTest(mockYarnClient);
  dumper.setConf(conf);
  
  // verify dumping a non-existent application's logs returns a failure code
  int exitCode = dumper.run( new String[] {
      "-applicationId", "application_0_0" } );
  assertTrue("Should return an error code", exitCode != 0);
  
  // verify dumping a non-existent container log is a failure code 
  exitCode = cliHelper.dumpAContainersLogs("application_0_0", "container_0_0",
      "nonexistentnode:1234", "nobody");
  assertTrue("Should return an error code", exitCode != 0);
}
 
Example 8
Source Project: big-c   Source File: TestMerge.java    License: Apache License 2.0
private void copyPartitions(Path mapOutputPath, Path indexPath)
  throws IOException {
  FileSystem localFs = FileSystem.getLocal(jobConf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
  SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
  IndexRecord indexRecord = new IndexRecord();
  for (int i = 0; i < numberOfPartitions; i++) {
    indexRecord.startOffset = rawOutput.getPos();
    byte[] buffer = outStreams[i].toByteArray();
    IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
    checksumOutput.write(buffer);
    // Write checksum.
    checksumOutput.finish();
    // Write index record
    indexRecord.rawLength = (long)buffer.length;
    indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
    spillRecord.putIndex(indexRecord, i);
    reporter.progress();
  }
  rawOutput.close();
  spillRecord.writeToFile(indexPath, jobConf);
}
 
Example 9
Source Project: big-c   Source File: TestDataDirs.java    License: Apache License 2.0
@Test (timeout = 30000)
public void testDataDirValidation() throws Throwable {
  
  DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
  doThrow(new IOException()).doThrow(new IOException()).doNothing()
    .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
  LocalFileSystem fs = mock(LocalFileSystem.class);
  AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();

  locations.add(StorageLocation.parse("file:/p1/"));
  locations.add(StorageLocation.parse("file:/p2/"));
  locations.add(StorageLocation.parse("file:/p3/"));

  List<StorageLocation> checkedLocations =
      DataNode.checkStorageLocations(locations, fs, diskChecker);
  assertEquals("number of valid data dirs", 1, checkedLocations.size());
  String validDir = checkedLocations.iterator().next().getFile().getPath();
  assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
}
 
Example 10
Source Project: RDFS   Source File: TestFSInputChecker.java    License: Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in 
  // a checksum exception on LocalFS
  
  String dir = System.getProperty("test.build.data", ".");
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
  
  writeFile(fileSys, file);
  
  int fileLen = (int)fileSys.getFileStatus(file).getLen();
  
  byte [] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();
  
  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);
  
  writeFile(fileSys, file);
  
  // check data corruption
  checkFileCorruption(fileSys, file, file);
  
  fileSys.delete(file, true);
}
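
The corruption check above works because LocalFileSystem extends ChecksumFileSystem: each file gets a hidden sidecar (here .corruption-test.dat.crc) holding its CRCs, and a mismatch on read surfaces as a ChecksumException. A minimal sketch of reading past a known-bad sidecar, assuming an illustrative helper and path; setVerifyChecksum(false) is the stock FileSystem call for this:

private static void readIgnoringChecksums(Path file) throws IOException {
  LocalFileSystem fileSys = FileSystem.getLocal(new Configuration());
  // Disable CRC verification so a corrupt .crc sidecar no longer
  // triggers a ChecksumException on read.
  fileSys.setVerifyChecksum(false);
  try (FSDataInputStream in = fileSys.open(file)) {
    IOUtils.copyBytes(in, System.out, 4096, false); // org.apache.hadoop.io.IOUtils
  }
}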
 
Example 11
Source Project: systemds   Source File: LineageItemUtils.java    License: Apache License 2.0
public static void writeTraceToHDFS(String trace, String fname) {
	try {
		HDFSTool.writeStringToHDFS(trace, fname);
		FileSystem fs = IOUtilFunctions.getFileSystem(fname);
		if (fs instanceof LocalFileSystem) {
			Path path = new Path(fname);
			IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, path);
		}
		
	} catch (IOException e) {
		throw new DMLRuntimeException(e);
	}
}
 
Example 12
Source Project: RDFS   Source File: ClusterWithCapacityScheduler.java    License: Apache License 2.0
private void setUpSchedulerConfigFile(Properties schedulerConfProps)
    throws IOException {
  LocalFileSystem fs = FileSystem.getLocal(new Configuration());

  String myResourcePath = System.getProperty("test.build.data");
  Path schedulerConfigFilePath =
      new Path(myResourcePath, CapacitySchedulerConf.SCHEDULER_CONF_FILE);
  OutputStream out = fs.create(schedulerConfigFilePath);

  Configuration config = new Configuration(false);
  for (Enumeration<?> e = schedulerConfProps.propertyNames();
      e.hasMoreElements();) {
    String key = (String) e.nextElement();
    LOG.debug("Adding " + key + "=" + schedulerConfProps.getProperty(key));
    config.set(key, schedulerConfProps.getProperty(key));
  }

  config.writeXml(out);
  out.close();

  LOG.info("setting resource path where capacity-scheduler's config file "
      + "is placed to " + myResourcePath);
  System.setProperty(MY_SCHEDULER_CONF_PATH_PROPERTY, myResourcePath);
}
 
Example 13
Source Project: elasticsearch-hadoop   Source File: HdfsUtils.java    License: Apache License 2.0
public static void copyFromLocal(String localPath, String destination) {
    try {
        JobConf hadoopConfig = HdpBootstrap.hadoopConfig();
        FileSystem fs = FileSystem.get(hadoopConfig);
        if (!(fs instanceof LocalFileSystem)) {
            Path src = new Path(localPath);
            Path dst = new Path(destination);
            fs.copyFromLocalFile(false, true, src, dst);
            System.out.println(String.format("Copying [%s] to [%s]", src, dst));
        }
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
 
Example 14
Source Project: paraflow   Source File: FSFactory.java    License: Apache License 2.0
@Inject
public FSFactory(ParaflowPrestoConfig prestoConfig)
{
    config.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
    config.set("fs.file.impl", LocalFileSystem.class.getName());
    try {
        this.fileSystem = FileSystem.get(new URI(prestoConfig.getHDFSWarehouse()), config);
    }
    catch (IOException | URISyntaxException e) {
        this.fileSystem = null;
    }
}
 
Example 15
Source Project: hadoop   Source File: FileSystemNodeLabelsStore.java    License: Apache License 2.0
private void setFileSystem(Configuration conf) throws IOException {
  Configuration confCopy = new Configuration(conf);
  confCopy.setBoolean("dfs.client.retry.policy.enabled", true);
  String retryPolicy =
      confCopy.get(YarnConfiguration.FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC,
          YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC);
  confCopy.set("dfs.client.retry.policy.spec", retryPolicy);
  fs = fsWorkingPath.getFileSystem(confCopy);
  
  // if it's local file system, use RawLocalFileSystem instead of
  // LocalFileSystem, the latter one doesn't support append.
  if (fs.getScheme().equals("file")) {
    fs = ((LocalFileSystem)fs).getRaw();
  }
}
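
The comment in this example points at a real limitation: in the Hadoop versions these examples target, the checksummed LocalFileSystem rejects append() (it cannot keep the CRC sidecar consistent), while RawLocalFileSystem appends fine. A minimal sketch under that assumption, with an illustrative path:

private static void appendOnLocalDisk(Configuration conf) throws IOException {
  LocalFileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rawFs = localFs.getRaw(); // RawLocalFileSystem underneath
  Path log = new Path("/tmp/node-labels.log"); // illustrative path
  if (!rawFs.exists(log)) {
    rawFs.create(log).close();
  }
  // localFs.append(log) would throw; the raw FS keeps no CRC state,
  // so it can append in place.
  try (FSDataOutputStream out = rawFs.append(log)) {
    out.writeBytes("label-event\n");
  }
}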
 
Example 16
Source Project: vespa   Source File: MapReduceTest.java    License: Apache License 2.0
@AfterClass
public static void tearDown() throws IOException {
    Path testDir = new Path(hdfsBaseDir.getParent());
    hdfs.delete(testDir, true);
    cluster.shutdown();
    LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
    localFileSystem.delete(testDir, true);
}
 
Example 17
Source Project: celos   Source File: HdfsDeployerTest.java    License: Apache License 2.0
@Test(expected = IllegalStateException.class)
public void testDeployThrowsExceptionNoDir() throws Exception {
    CelosCiContext context = mock(CelosCiContext.class);
    HdfsDeployer deployer = new HdfsDeployer(context);

    doReturn(LocalFileSystem.get(new Configuration())).when(context).getFileSystem();
    doReturn(new File("nodir" + UUID.randomUUID())).when(context).getDeployDir();

    deployer.deploy();
}
 
Example 18
Source Project: RDFS   Source File: UpgradeUtilities.java    License: Apache License 2.0
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName,
    File srcFile) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newImgDir = new Path(newDir.getParent(), "image");
      if (!localFS.exists(newImgDir))
        localFS.copyToLocalFile(
            new Path(srcFile.toString(), "image"),
            newImgDir,
            false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newStorageFile = new Path(newDir.getParent(), "storage");
      if (!localFS.exists(newStorageFile))
        localFS.copyToLocalFile(
            new Path(srcFile.toString(), "storage"),
            newStorageFile,
            false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
 
Example 19
Source Project: incubator-gobblin   Source File: HadoopUtils.java    License: Apache License 2.0
/**
 * Renames a src {@link Path} on fs {@link FileSystem} to a dst {@link Path}. If fs is a {@link LocalFileSystem} and
 * src is a directory then {@link File#renameTo} is called directly to avoid a directory rename race condition where
 * {@link org.apache.hadoop.fs.RawLocalFileSystem#rename} copies the conflicting src directory into dst resulting in
 * an extra nested level, such as /root/a/b/c/e/e where e is repeated.
 *
 * @param fs the {@link FileSystem} where the src {@link Path} exists
 * @param src the source {@link Path} which will be renamed
 * @param dst the {@link Path} to rename to
 * @return true if rename succeeded, false if rename failed.
 * @throws IOException if rename failed for reasons other than target exists.
 */
public static boolean renamePathHandleLocalFSRace(FileSystem fs, Path src, Path dst) throws IOException {
  if (DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem && fs.isDirectory(src)) {
    LocalFileSystem localFs = (LocalFileSystem) DecoratorUtils.resolveUnderlyingObject(fs);
    File srcFile = localFs.pathToFile(src);
    File dstFile = localFs.pathToFile(dst);

    return srcFile.renameTo(dstFile);
  }
  else {
    return fs.rename(src, dst);
  }
}
 
Example 20
Source Project: tajo   Source File: LocalFetcher.java    License: Apache License 2.0
@VisibleForTesting
public LocalFetcher(TajoConf conf, URI uri, String tableName) throws IOException {
  super(conf, uri);
  this.maxUrlLength = conf.getIntVar(ConfVars.PULLSERVER_FETCH_URL_MAX_LENGTH);
  this.tableName = tableName;
  this.localFileSystem = new LocalFileSystem();
  this.localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  this.pullServerService = null;

  String scheme = uri.getScheme() == null ? "http" : uri.getScheme();
  this.host = uri.getHost() == null ? "localhost" : uri.getHost();
  this.port = uri.getPort();
  if (port == -1) {
    if (scheme.equalsIgnoreCase("http")) {
      this.port = 80;
    } else if (scheme.equalsIgnoreCase("https")) {
      this.port = 443;
    }
  }

  bootstrap = new Bootstrap()
      .group(
          NettyUtils.getSharedEventLoopGroup(NettyUtils.GROUP.FETCHER,
              conf.getIntVar(ConfVars.SHUFFLE_RPC_CLIENT_WORKER_THREAD_NUM)))
      .channel(NioSocketChannel.class)
      .option(ChannelOption.ALLOCATOR, NettyUtils.ALLOCATOR)
      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS,
          conf.getIntVar(ConfVars.SHUFFLE_FETCHER_CONNECT_TIMEOUT) * 1000)
      .option(ChannelOption.SO_RCVBUF, 1048576) // set 1M
      .option(ChannelOption.TCP_NODELAY, true);
}
 
Example 21
Source Project: incubator-gobblin   Source File: FileSystemFactoryTest.java    License: Apache License 2.0
@Test
public void testCreationWithInstrumentedScheme() throws Exception {
  SharedResourcesBrokerImpl<SimpleScopeType> broker = SharedResourcesBrokerFactory.<SimpleScopeType>createDefaultTopLevelBroker(
      ConfigFactory.empty(), SimpleScopeType.GLOBAL.defaultScopeInstance());

  FileSystemKey key = new FileSystemKey(new URI("instrumented-file:///"), new Configuration());
  FileSystemFactory<SimpleScopeType> factory = new FileSystemFactory<>();

  FileSystem fs = broker.getSharedResource(factory, key);

  verifyInstrumentedOnce(fs);
  Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
}
 
Example 22
Source Project: hadoop   Source File: PathData.java    License: Apache License 2.0
/**
 * Get the path to a local file
 * @return File representing the local path
 * @throws IllegalArgumentException if this.fs is not the LocalFileSystem
 */
public File toFile() {
  if (!(fs instanceof LocalFileSystem)) {
    throw new IllegalArgumentException("Not a local path: " + path);
  }
  return ((LocalFileSystem)fs).pathToFile(path);
}
 
Example 23
Source Project: Transwarp-Sample-Code   Source File: HDFSDataStream.java    License: MIT License
protected void doOpen(Configuration conf,
  Path dstPath, FileSystem hdfs) throws
  IOException {
  if(useRawLocalFileSystem) {
    if(hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Example 24
Source Project: RDFS   Source File: TestFSInputChecker.java    License: Apache License 2.0
public void testFSInputChecker() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
  conf.set("fs.hdfs.impl",
           "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
  rand.nextBytes(expected);

  // test DFS
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  ChecksumFileSystem fileSys = (ChecksumFileSystem)cluster.getFileSystem();
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
  
  
  // test Local FS
  fileSys = FileSystem.getLocal(conf);
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testFileCorruption((LocalFileSystem)fileSys);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
  }
}
 
Example 25
Source Project: incubator-gobblin   Source File: HadoopUtils.java    License: Apache License 2.0
private static void copyPath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, boolean deleteSource,
    boolean overwrite, Configuration conf) throws IOException {

  Preconditions.checkArgument(srcFs.exists(src),
      String.format("Cannot copy from %s to %s because src does not exist", src, dst));
  Preconditions.checkArgument(overwrite || !dstFs.exists(dst),
      String.format("Cannot copy from %s to %s because dst exists", src, dst));

  try {
    boolean isSourceFileSystemLocal = srcFs instanceof LocalFileSystem || srcFs instanceof RawLocalFileSystem;
    if (isSourceFileSystemLocal) {
      try {
        dstFs.copyFromLocalFile(deleteSource, overwrite, src, dst);
      } catch (IOException e) {
        throw new IOException(String.format("Failed to copy %s to %s", src, dst), e);
      }
    } else if (!FileUtil.copy(srcFs, src, dstFs, dst, deleteSource, overwrite, conf)) {
      throw new IOException(String.format("Failed to copy %s to %s", src, dst));
    }
  } catch (Throwable t1) {
    try {
      deleteIfExists(dstFs, dst, true);
    } catch (Throwable t2) {
      // Do nothing
    }
    throw t1;
  }
}
 
Example 26
private static String getFSIdentifier(URI uri) {
  if (new LocalFileSystem().getScheme().equals(uri.getScheme())) {
    return "localhost";
  } else {
    return ClustersNames.getInstance().getClusterName(uri.toString());
  }
}
 
Example 27
Source Project: hbase   Source File: TestExportSnapshotV2NoCluster.java    License: Apache License 2.0
@Before
public void before() throws Exception {
  // Make sure testDir is on LocalFileSystem
  this.fs = FileSystem.getLocal(this.testUtil.getConfiguration());
  this.testDir = TestExportSnapshotV1NoCluster.setup(this.fs, this.testUtil);
  LOG.info("fs={}, testDir={}", this.fs, this.testDir);
  assertTrue("FileSystem '" + fs + "' is not local", fs instanceof LocalFileSystem);
}