Java Code Examples for org.apache.hadoop.fs.FileSystem

The following examples show how to use org.apache.hadoop.fs.FileSystem. Each one is extracted from an open source project and listed with the project and source file it comes from.
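
Most of the examples below follow the same basic pattern: obtain a FileSystem from a Configuration (or from a Path), then create, open, inspect, or delete paths through it. The snippet below is a minimal, self-contained sketch of that pattern; the class name and the /tmp path are illustrative assumptions, not taken from any of the projects in this list.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemQuickStart {   // hypothetical class name, for illustration only
  public static void main(String[] args) throws Exception {
    // Picks up core-site.xml / hdfs-site.xml from the classpath; with no extra
    // configuration this resolves to the local file system (file:///).
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/filesystem-quickstart.txt");  // illustrative path
    FileSystem fs = file.getFileSystem(conf);                // or FileSystem.get(conf)

    // Write a small file, overwriting any previous copy.
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.write("hello, FileSystem".getBytes(StandardCharsets.UTF_8));
    }

    // Inspect its metadata.
    FileStatus status = fs.getFileStatus(file);
    System.out.println("length=" + status.getLen() + " isFile=" + status.isFile());

    // Read it back.
    try (FSDataInputStream in = fs.open(file);
         BufferedReader reader = new BufferedReader(
             new InputStreamReader(in, StandardCharsets.UTF_8))) {
      System.out.println(reader.readLine());
    }

    // Remove it again (non-recursive delete of a single file).
    fs.delete(file, false);
  }
}

The same calls work unchanged whether fs.defaultFS points at HDFS, the local file system, or another FileSystem implementation, which is the property most of the examples below rely on.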
Example 1
Project: hadoop   File: TestRead.java
/**
 * Regression test for HDFS-7045.
 * If a deadlock happens, the test will time out.
 * @throws Exception
 */
@Test(timeout=60000)
public void testReadReservedPath() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).format(true).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    fs.open(new Path("/.reserved/.inodes/file"));
    Assert.fail("Open a non existing file should fail.");
  } catch (FileNotFoundException e) {
    // Expected
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Project: aliyun-maxcompute-data-collectors   File: TestSplittableBufferedWriter.java
/** Create the directory where we'll write our test files, and
 * make sure it has no files in it.
 */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem fs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();

  fs.mkdirs(writeDir);

  FileStatus [] stats = fs.listStatus(writeDir);

  for (FileStatus stat : stats) {
    if (stat.isDir()) {
      fail("setUp(): Write directory " + writeDir
          + " contains subdirectories");
    }

    LOG.debug("setUp(): Removing " + stat.getPath());
    if (!fs.delete(stat.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + stat.getPath());
    }
  }

  if (!fs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
 
Example 3
Project: hadoop   File: FileOutputCommitter.java
/**
 * Create a file output committer
 * @param outputPath the job's output path, or null if you want the output
 * committer to act as a noop.
 * @param context the task's context
 * @throws IOException
 */
@Private
public FileOutputCommitter(Path outputPath, 
                           JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  algorithmVersion =
      conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
                  FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
  LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
  if (algorithmVersion != 1 && algorithmVersion != 2) {
    throw new IOException("Only 1 or 2 algorithm version is supported");
  }
  if (outputPath != null) {
    FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
    this.outputPath = fs.makeQualified(outputPath);
  }
}
 
Example 4
Project: ditb   File: HBaseTestCase.java
/**
 * Note that this method must be called after the mini hdfs cluster has
 * started or we end up with a local file system.
 */
@Override
protected void setUp() throws Exception {
  super.setUp();
  localfs =
    (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);

  if (fs == null) {
    this.fs = FileSystem.get(conf);
  }
  try {
    if (localfs) {
      this.testDir = getUnitTestdir(getName());
      if (fs.exists(testDir)) {
        fs.delete(testDir, true);
      }
    } else {
      this.testDir = FSUtils.getRootDir(conf);
    }
  } catch (Exception e) {
    LOG.fatal("error during setup", e);
    throw e;
  }
}
 
Example 5
Project: hadoop   File: TestTokenCache.java
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException,
    URISyntaxException {
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  // ick, but need fq path minus file:/
  String binaryTokenFile =
      FileSystem.getLocal(conf)
        .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
        .getPath();

  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  Credentials creds = new Credentials();
  Token<?> token1 = fs1.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  // wait to set, else the obtain tokens call above will fail with FNF
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  String fs_addr = fs1.getCanonicalServiceName();
  Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
  assertNotNull("Token for nn is null", nnt);
}
 
Example 6
Project: dremio-oss   File: BackupRestoreUtil.java
private static void validateFileWithChecksum(FileSystem fs, Path filePath, BackupFileInfo backupFileInfo) throws IOException {
  final CheckedInputStream cin = new CheckedInputStream(fs.open(filePath), new CRC32());
  final BufferedReader reader = new BufferedReader(new InputStreamReader(cin));
  final ObjectMapper objectMapper = new ObjectMapper();
  String line;
  long records = 0;
  // parse records just to make sure formatting is correct
  while ((line = reader.readLine()) != null) {
    objectMapper.readValue(line, BackupRecord.class);
    ++records;
  }
  cin.close();
  long found = cin.getChecksum().getValue();
  if (backupFileInfo.getChecksum() != found) {
    throw new IOException(format("Corrupt backup data file %s. Expected checksum %x, found %x", filePath, backupFileInfo.getChecksum(), found));
  }
  if (backupFileInfo.getRecords() != records) {
    throw new IOException(format("Corrupt backup data file %s. Expected records %x, found %x", filePath, backupFileInfo.getRecords(), records));
  }
}
 
Example 7
Project: hadoop   File: NameNode.java
/**
 * @return address of file system
 */
public static InetSocketAddress getAddress(URI filesystemURI) {
  String authority = filesystemURI.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s has no authority.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
  }
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
      filesystemURI.getScheme())) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
        HdfsConstants.HDFS_URI_SCHEME));
  }
  return getAddress(authority);
}
 
Example 8
Project: circus-train   File: PathToPathMetadata.java
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Example 9
Project: dremio-oss   File: BackupRestoreUtil.java
public static BackupStats createBackup(FileSystem fs, Path backupRootDir, LocalKVStoreProvider localKVStoreProvider, HomeFileConfig homeFileStore) throws IOException, NamespaceException {
  final Date now = new Date();
  final BackupStats backupStats = new BackupStats();

  final Path backupDir = new Path(backupRootDir, format("%s%s", BACKUP_DIR_PREFIX, DATE_FORMAT.format(now)));
  fs.mkdirs(backupDir, DEFAULT_PERMISSIONS);
  backupStats.backupPath = backupDir.toUri().getPath();

  for (Map.Entry<StoreBuilderConfig, CoreKVStore<?, ?>> entry : localKVStoreProvider.getStores().entrySet()) {
    final StoreBuilderConfig storeBuilderConfig = entry.getKey();
    if (TokenUtils.TOKENS_TABLE_NAME.equals(storeBuilderConfig.getName())) {
      // Skip creating a backup of tokens table
      // TODO: In the future, if there are other tables that should not be backed up, this could be part of
      // StoreBuilderConfig interface
      continue;
    }
    final BackupFileInfo backupFileInfo = new BackupFileInfo().setKvstoreInfo(DataStoreUtils.toInfo(storeBuilderConfig));
    dumpTable(fs, backupDir, backupFileInfo, entry.getValue());
    ++backupStats.tables;
  }
  backupUploadedFiles(fs, backupDir, homeFileStore, backupStats);
  return backupStats;
}
 
Example 10
Project: hadoop-oss   File: TestCredentialProviderFactory.java
public void checkPermissionRetention(Configuration conf, String ourUrl,
    Path path) throws Exception {
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  // let's add a new credential and flush and check that permissions are still set to 777
  char[] cred = new char[32];
  for(int i =0; i < cred.length; ++i) {
    cred[i] = (char) i;
  }
  // create a new key
  try {
    provider.createCredentialEntry("key5", cred);
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = CredentialProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(cred, provider.getCredentialEntry("key5").getCredential());

  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting " +
  		"keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
 
Example 11
Project: hadoop   File: TestFileCreation.java
/**
 * Test that server default values can be retrieved on the client side
 */
@Test
public void testServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
  conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                   .numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
                   .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    FsServerDefaults serverDefaults = fs.getServerDefaults();
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 12
Project: ditb   File: HFileOutputFormat2.java
/**
 * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
 * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
 */
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
    throws IOException {
  Configuration conf = job.getConfiguration();
  // create the partitions file
  FileSystem fs = FileSystem.get(conf);
  String hbaseTmpFsDir =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
  Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
  fs.makeQualified(partitionsPath);
  writePartitions(conf, partitionsPath, splitPoints);
  fs.deleteOnExit(partitionsPath);

  // configure job to use it
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
 
Example 13
Project: hadoop   File: TestFileInputFormat.java
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inFile1 = new Path(base1, "file1");
  Path inFile2 = new Path(base1, "file2");

  localFs.createNewFile(inFile1);
  localFs.createNewFile(inFile2);

  List<Path> expectedPaths = Lists.newArrayList();
  return expectedPaths;
}
 
Example 14
Project: hadoop   File: TestLocalJobSubmission.java
/**
 * test the local job submission options of
 * -jt local -libjars
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Example 15
Project: Transwarp-Sample-Code   File: Delete.java
public static void main(String[] args) {
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/file.txt");
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    try {
        // If Kerberos is not enabled, comment out the two lines below
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\hdfs.keytab");
        FileSystem fs = p.getFileSystem(conf);
        boolean b = fs.delete(p, true);
        System.out.println(b);
        fs.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Example 16
Project: ditb   File: TestFSTableDescriptors.java
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
      "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  assertTrue(fstd.createTableDescriptor(htd));
  assertFalse(fstd.createTableDescriptor(htd));
  htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
  assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
  Path tableDir = fstd.getTableDir(htd.getTableName());
  Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
  FileStatus[] statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);

  assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
 
Example 17
Project: ditb   File: FSUtils.java
/**
 * Runs through the HBase rootdir and creates a reverse lookup map for
 * table StoreFile names to the full Path.
 * <br>
 * Example...<br>
 * Key = 3944417774205889744  <br>
 * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
 *
 * @param fs  The file system to use.
 * @param hbaseRootDir  The root directory to scan.
 * @param errors ErrorReporter instance or null
 * @return Map keyed by StoreFile name with a value of the full Path.
 * @throws IOException When scanning the directory fails.
 */
public static Map<String, Path> getTableStoreFilePathMap(
  final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
throws IOException {
  Map<String, Path> map = new HashMap<String, Path>();

  // if this method looks similar to 'getTableFragmentation' that is because
  // it was borrowed from it.

  // only include the directory paths to tables
  for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
    getTableStoreFilePathMap(map, fs, hbaseRootDir,
        FSUtils.getTableName(tableDir), errors);
  }
  return map;
}
 
Example 18
Project: hadoop   File: TestCodec.java
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Example 19
Project: hadoop   File: TestKeyProviderFactory.java
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  // let's add a new key and flush and check that permissions are still set to 777
  byte[] key = new byte[16];
  for(int i =0; i < key.length; ++i) {
    key[i] = (byte) i;
  }
  // create a new key
  try {
    provider.createKey("key5", key, KeyProvider.options(conf));
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = KeyProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());

  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
 
Example 20
Project: hadoop   File: TestViewFileSystemWithAcls.java
@Before
public void setUp() throws Exception {
  fsTarget = fHdfs;
  fsTarget2 = fHdfs2;
  targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
  targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);

  fsTarget.delete(targetTestRoot, true);
  fsTarget2.delete(targetTestRoot2, true);
  fsTarget.mkdirs(targetTestRoot);
  fsTarget2.mkdirs(targetTestRoot2);

  fsViewConf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
 
Example 21
Project: hadoop   File: TestWebHDFS.java
static void verifyPread(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException {
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);

  final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d",
      offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  for(; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int)Math.min(remaining, buf.length);
    in.readFully(offset, buf, 0, n);
    checkData(offset, remaining, n, buf, expected);

    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
 
Example 22
Project: multiple-dimension-spread   File: MDSCombineSpreadReader.java
public MDSCombineSpreadReader( final CombineFileSplit split , final TaskAttemptContext context , final Integer index ) throws IOException{
  Configuration config = context.getConfiguration();
  Path path = split.getPath( index );
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  InputStream in = fs.open( path );

  innerReader = new MDSSpreadReader();
  innerReader.setStream( in , fileLength , 0 , fileLength );
}
 
Example 23
Project: hadoop   File: ViewFileSystem.java
@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException,
    IOException {
  InodeTree.ResolveResult<FileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  res.targetFileSystem.setPermission(res.remainingPath, permission); 
}
 
Example 24
Project: hadoop-oss   File: TestGenericOptionsParser.java
@Override
protected void setUp() throws Exception {
  super.setUp();
  conf = new Configuration();
  localFs = FileSystem.getLocal(conf);
  testDir = new File(System.getProperty("test.build.data", "/tmp"), "generic");
  if(testDir.exists())
    localFs.delete(new Path(testDir.toString()), true);
}
 
Example 25
Project: hadoop   File: TestWebHDFS.java
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 26
Project: hadoop   File: AbstractContractRootDirectoryTest.java
@Test
public void testMkDirDepth1() throws Throwable {
  FileSystem fs = getFileSystem();
  Path dir = new Path("/testmkdirdepth1");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
  assertPathExists("directory already exists", dir);
  assertDeleted(dir, true);
}
 
Example 27
Project: ditb   File: PerformanceEvaluation.java
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  // generate splits
  List<InputSplit> splitList = new ArrayList<InputSplit>();

  for (FileStatus file: listStatus(job)) {
    if (file.isDirectory()) {
      continue;
    }
    Path path = file.getPath();
    FileSystem fs = path.getFileSystem(job.getConfiguration());
    FSDataInputStream fileIn = fs.open(path);
    LineReader in = new LineReader(fileIn, job.getConfiguration());
    int lineLen = 0;
    while(true) {
      Text lineText = new Text();
      lineLen = in.readLine(lineText);
      if (lineLen <= 0) {
        break;
      }
      Matcher m = LINE_PATTERN.matcher(lineText.toString());
      if((m != null) && m.matches()) {
        TableName tableName = TableName.valueOf(m.group(1));
        int startRow = Integer.parseInt(m.group(2));
        int rows = Integer.parseInt(m.group(3));
        int totalRows = Integer.parseInt(m.group(4));
        int clients = Integer.parseInt(m.group(5));
        boolean flushCommits = Boolean.parseBoolean(m.group(6));
        boolean writeToWAL = Boolean.parseBoolean(m.group(7));
        boolean useTags = Boolean.parseBoolean(m.group(8));
        int noOfTags = Integer.parseInt(m.group(9));

        LOG.debug("tableName=" + tableName +
                  " split["+ splitList.size() + "] " +
                  " startRow=" + startRow +
                  " rows=" + rows +
                  " totalRows=" + totalRows +
                  " clients=" + clients +
                  " flushCommits=" + flushCommits +
                  " writeToWAL=" + writeToWAL +
                  " useTags=" + useTags +
                  " noOfTags=" + noOfTags);

        PeInputSplit newSplit =
          new PeInputSplit(tableName, startRow, rows, totalRows, clients,
              flushCommits, writeToWAL, useTags, noOfTags);
        splitList.add(newSplit);
      }
    }
    in.close();
  }

  LOG.info("Total # of splits: " + splitList.size());
  return splitList;
}
 
Example 28
Project: hadoop   File: ViewFileSystem.java
@Override
public long getDefaultBlockSize(Path f) {
  try {
    InodeTree.ResolveResult<FileSystem> res =
      fsState.resolve(getUriPath(f), true);
    return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getDefaultBlockSize"); 
  }
}
 
Example 29
Project: hadoop   File: TestTextInputFormat.java
private static void writeFile(FileSystem fs, Path name, 
                              CompressionCodec codec,
                              String contents) throws IOException {
  OutputStream stm;
  if (codec == null) {
    stm = fs.create(name);
  } else {
    stm = codec.createOutputStream(fs.create(name));
  }
  stm.write(contents.getBytes());
  stm.close();
}
 
Example 30
Project: hadoop-oss   File: RollingFileSystemSinkTestBase.java
/**
 * Read the target log file and append its contents to the StringBuilder.
 * @param fs the target FileSystem
 * @param logFile the target file path
 * @param metrics where to append the file contents
 * @throws IOException thrown if the file cannot be read
 */
protected void readLogData(FileSystem fs, Path logFile, StringBuilder metrics)
    throws IOException {
  FSDataInputStream fsin = fs.open(logFile);
  BufferedReader in = new BufferedReader(new InputStreamReader(fsin,
      StandardCharsets.UTF_8));
  String line = null;

  while ((line = in.readLine()) != null) {
    metrics.append(line).append("\n");
  }
}
 
Example 31
Project: angel   File: ModelLoader.java
/**
 * Get model meta
 *
 * @param modelDir model save directory path
 * @return model meta
 */
public static ModelFilesMeta getMeta(String modelDir, Configuration conf) throws IOException {
  Path modelPath = new Path(modelDir);
  Path meteFilePath = new Path(modelPath, ModelFilesConstent.modelMetaFileName);
  ModelFilesMeta meta = new ModelFilesMeta();
  FileSystem fs = meteFilePath.getFileSystem(conf);
  if (!fs.exists(meteFilePath)) {
    throw new IOException("matrix meta file does not exist ");
  }
  FSDataInputStream input = fs.open(meteFilePath);
  meta.read(input);
  input.close();
  return meta;
}
 
Example 32
Project: hadoop-oss   File: AbstractContractMkdirTest.java
@Test
public void testMkDirRmDir() throws Throwable {
  FileSystem fs = getFileSystem();

  Path dir = path("testMkDirRmDir");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  assertPathExists("mkdir failed", dir);
  assertDeleted(dir, false);
}
 
Example 33
Project: hadoop-oss   File: BloomMapFile.java
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    WritableComparator comparator, Class valClass,
    CompressionType compress, Progressable progress) throws IOException {
  this(conf, new Path(dirName), comparator(comparator), 
       valueClass(valClass), compression(compress),
       progressable(progress));
}
 
Example 34
Project: hadoop   File: MockFileSystem.java
/** Setup and return the underlying {@link FileSystem} mock */
static FileSystem setup() throws IOException {
  if (mockFs == null) {
    mockFs = mock(FileSystem.class);
  }
  reset(mockFs);
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "mockfs:///");
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  when(mockFs.getConf()).thenReturn(conf);
  return mockFs;
}
 
Example 35
Project: hadoop   File: SwiftTestUtils.java
/**
 * Assert that a file exists and that its {@link FileStatus} entry
 * declares it is a file and not a symlink or directory.
 * @param fileSystem filesystem to resolve path against
 * @param filename name of the file
 * @throws IOException IO problems during file operations
 */
public static void assertIsFile(FileSystem fileSystem, Path filename) throws
    IOException {
  assertPathExists(fileSystem, "Expected file", filename);
  FileStatus status = fileSystem.getFileStatus(filename);
  String fileInfo = filename + "  " + status;
  assertFalse("File claims to be a directory " + fileInfo,
              status.isDirectory());
/* disabled for Hadoop v1 compatibility
  assertFalse("File claims to be a symlink " + fileInfo,
              status.isSymlink());
*/
}
 
Example 36
Project: hadoop   File: TestMapRed.java
private static boolean isSequenceFile(FileSystem fs,
                                      Path f) throws IOException {
  DataInputStream in = fs.open(f);
  byte[] seq = "SEQ".getBytes();
  for(int i=0; i < seq.length; ++i) {
    if (seq[i] != in.read()) {
      return false;
    }
  }
  return true;
}
 
Example 37
Project: hadoop   File: TestHistograms.java
/**
 * @throws IOException
 * 
 *           There should be files in the directory named by
 *           ${test.build.data}/rumen/histogram-test .
 * 
 *           There will be pairs of files, inputXxx.json and goldXxx.json .
 * 
 *           We read the input file as a HistogramRawTestData in json. Then we
 *           create a Histogram using the data field, and then a
 *           LoggedDiscreteCDF using the percentiles and scale field. Finally,
 *           we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
 *           deepCompare them.
 */
@Test
public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir = new Path(
      System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");


  FileStatus[] tests = lfs.listStatus(rootInputFile);

  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold"+testName);
      assertTrue("Gold file dies not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(
          goldStream, LoggedDiscreteCDF.class); 
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, "<root>"));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      }
      finally {
          parser.close();
      }
    }
  }
}
 
Example 38
Project: ditb   File: TableNamespaceManager.java
private void create(Table table, NamespaceDescriptor ns) throws IOException {
  if (get(table, ns.getName()) != null) {
    throw new NamespaceExistException(ns.getName());
  }
  validateTableAndRegionCount(ns);
  FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
  fs.mkdirs(FSUtils.getNamespaceDir(
      masterServices.getMasterFileSystem().getRootDir(), ns.getName()));
  upsert(table, ns);
  if (this.masterServices.isInitialized()) {
    this.masterServices.getMasterQuotaManager().setNamespaceQuota(ns);
  }
}
 
Example 39
Project: QDrill   File: TestImpersonationMetadata.java
private static Map<String , WorkspaceConfig> createTestWorkspaces() throws Exception {
  // Create "/tmp" folder and set permissions to "777"
  final Path tmpPath = new Path("/tmp");
  fs.delete(tmpPath, true);
  FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));

  Map<String, WorkspaceConfig> workspaces = Maps.newHashMap();

  // Create /drillTestGrp0_700 directory with permissions 700 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_700", "/drillTestGrp0_700", (short)0700, processUser, group0, workspaces);

  // Create /drillTestGrp0_750 directory with permissions 750 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_750", "/drillTestGrp0_750", (short)0750, processUser, group0, workspaces);

  // Create /drillTestGrp0_755 directory with permissions 755 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_755", "/drillTestGrp0_755", (short)0755, processUser, group0, workspaces);

  // Create /drillTestGrp0_770 directory with permissions 770 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_770", "/drillTestGrp0_770", (short)0770, processUser, group0, workspaces);

  // Create /drillTestGrp0_777 directory with permissions 777 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_777", "/drillTestGrp0_777", (short)0777, processUser, group0, workspaces);

  // Create /drillTestGrp1_700 directory with permissions 700 (owned by user1)
  createAndAddWorkspace("drillTestGrp1_700", "/drillTestGrp1_700", (short)0700, user1, group1, workspaces);

  // create /user2_workspace1 with 775 permissions (owned by user1)
  createAndAddWorkspace("user2_workspace1", "/user2_workspace1", (short)0775, user2, group1, workspaces);

  // create /user2_workspace2 with 755 permissions (owned by user1)
  createAndAddWorkspace("user2_workspace2", "/user2_workspace2", (short)0755, user2, group1, workspaces);

  return workspaces;
}
 
Example 40
Project: hadoop   File: TestFileQueue.java
@AfterClass
public static void cleanup() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem fs = FileSystem.getLocal(conf).getRaw();
  final Path p = new Path(System.getProperty("test.build.data", "/tmp"),
      "testFileQueue").makeQualified(fs);
  fs.delete(p, true);
}