Java Code Examples for org.apache.hadoop.fs.FileSystem

The following are top voted examples showing how to use org.apache.hadoop.fs.FileSystem. They are extracted from open source projects; each entry notes the project, the source file, and the vote count it received.
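Before the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: obtain a FileSystem from a Configuration, write a file, read it back, and delete it. The class name and path are placeholders for illustration only, not taken from any of the listed projects.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemQuickStart {
  public static void main(String[] args) throws Exception {
    // fs.defaultFS in the Configuration decides which implementation is returned (HDFS, local, ...).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Placeholder path used only for this sketch.
    Path file = new Path("/tmp/filesystem-quickstart.txt");

    // Write a small text file, overwriting it if it already exists.
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.write("hello, FileSystem".getBytes(StandardCharsets.UTF_8));
    }

    // Read the file back.
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(fs.open(file), StandardCharsets.UTF_8))) {
      System.out.println(in.readLine());
    }

    // Remove the file; the boolean argument requests a recursive delete for directories.
    fs.delete(file, false);
  }
}
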
Example 1
Project: hadoop   File: TestRead.java   (7 votes)
/**
 * Regression test for HDFS-7045.
 * If a deadlock happens, the test will time out.
 * @throws Exception
 */
@Test(timeout=60000)
public void testReadReservedPath() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).format(true).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    fs.open(new Path("/.reserved/.inodes/file"));
    Assert.fail("Open a non existing file should fail.");
  } catch (FileNotFoundException e) {
    // Expected
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Project: hadoop   File: FileOutputCommitter.java   (7 votes)
/**
 * Create a file output committer
 * @param outputPath the job's output path, or null if you want the output
 * committer to act as a noop.
 * @param context the task's context
 * @throws IOException
 */
@Private
public FileOutputCommitter(Path outputPath, 
                           JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  algorithmVersion =
      conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
                  FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
  LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
  if (algorithmVersion != 1 && algorithmVersion != 2) {
    throw new IOException("Only 1 or 2 algorithm version is supported");
  }
  if (outputPath != null) {
    FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
    this.outputPath = fs.makeQualified(outputPath);
  }
}
 
Example 3
Project: ditb   File: HFileOutputFormat2.java   (7 votes)
/**
 * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
 * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
 */
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
    throws IOException {
  Configuration conf = job.getConfiguration();
  // create the partitions file
  FileSystem fs = FileSystem.get(conf);
  String hbaseTmpFsDir =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
  Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
  fs.makeQualified(partitionsPath);
  writePartitions(conf, partitionsPath, splitPoints);
  fs.deleteOnExit(partitionsPath);

  // configure job to use it
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
 
Example 4
Project: aliyun-maxcompute-data-collectors   File: TestSplittableBufferedWriter.java   (6 votes)
/** Create the directory where we'll write our test files, and
 * make sure it has no files in it.
 */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem fs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();

  fs.mkdirs(writeDir);

  FileStatus [] stats = fs.listStatus(writeDir);

  for (FileStatus stat : stats) {
    if (stat.isDir()) {
      fail("setUp(): Write directory " + writeDir
          + " contains subdirectories");
    }

    LOG.debug("setUp(): Removing " + stat.getPath());
    if (!fs.delete(stat.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + stat.getPath());
    }
  }

  if (!fs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
 
Example 5
Project: ditb   File: HBaseTestCase.java   (6 votes)
/**
 * Note that this method must be called after the mini hdfs cluster has
 * started or we end up with a local file system.
 */
@Override
protected void setUp() throws Exception {
  super.setUp();
  localfs =
    (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);

  if (fs == null) {
    this.fs = FileSystem.get(conf);
  }
  try {
    if (localfs) {
      this.testDir = getUnitTestdir(getName());
      if (fs.exists(testDir)) {
        fs.delete(testDir, true);
      }
    } else {
      this.testDir = FSUtils.getRootDir(conf);
    }
  } catch (Exception e) {
    LOG.fatal("error during setup", e);
    throw e;
  }
}
 
Example 6
Project: hadoop   File: TestTokenCache.java   (6 votes)
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException,
    URISyntaxException {
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  // ick, but need fq path minus file:/
  String binaryTokenFile =
      FileSystem.getLocal(conf)
        .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
        .getPath();

  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  Credentials creds = new Credentials();
  Token<?> token1 = fs1.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  // wait to set, else the obtain tokens call above will fail with FNF
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  String fs_addr = fs1.getCanonicalServiceName();
  Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
  assertNotNull("Token for nn is null", nnt);
}
 
Example 7
Project: dremio-oss   File: BackupRestoreUtil.java   (6 votes)
private static void validateFileWithChecksum(FileSystem fs, Path filePath, BackupFileInfo backupFileInfo) throws IOException {
  final CheckedInputStream cin = new CheckedInputStream(fs.open(filePath), new CRC32());
  final BufferedReader reader = new BufferedReader(new InputStreamReader(cin));
  final ObjectMapper objectMapper = new ObjectMapper();
  String line;
  long records = 0;
  // parse records just to make sure formatting is correct
  while ((line = reader.readLine()) != null) {
    objectMapper.readValue(line, BackupRecord.class);
    ++records;
  }
  cin.close();
  long found = cin.getChecksum().getValue();
  if (backupFileInfo.getChecksum() != found) {
    throw new IOException(format("Corrupt backup data file %s. Expected checksum %x, found %x", filePath, backupFileInfo.getChecksum(), found));
  }
  if (backupFileInfo.getRecords() != records) {
    throw new IOException(format("Corrupt backup data file %s. Expected records %x, found %x", filePath, backupFileInfo.getRecords(), records));
  }
}
 
Example 8
Project: hadoop   File: NameNode.java   (6 votes)
/**
 * @return address of file system
 */
public static InetSocketAddress getAddress(URI filesystemURI) {
  String authority = filesystemURI.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s has no authority.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
  }
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
      filesystemURI.getScheme())) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
        HdfsConstants.HDFS_URI_SCHEME));
  }
  return getAddress(authority);
}
 
Example 9
Project: circus-train   File: PathToPathMetadata.java   (6 votes)
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
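    // For directories, recurse into every child so the returned metadata describes the whole tree.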
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Example 10
Project: dremio-oss   File: BackupRestoreUtil.java   (6 votes)
public static BackupStats createBackup(FileSystem fs, Path backupRootDir, LocalKVStoreProvider localKVStoreProvider, HomeFileConfig homeFileStore) throws IOException, NamespaceException {
  final Date now = new Date();
  final BackupStats backupStats = new BackupStats();

  final Path backupDir = new Path(backupRootDir, format("%s%s", BACKUP_DIR_PREFIX, DATE_FORMAT.format(now)));
  fs.mkdirs(backupDir, DEFAULT_PERMISSIONS);
  backupStats.backupPath = backupDir.toUri().getPath();

  for (Map.Entry<StoreBuilderConfig, CoreKVStore<?, ?>> entry : localKVStoreProvider.getStores().entrySet()) {
    final StoreBuilderConfig storeBuilderConfig = entry.getKey();
    if (TokenUtils.TOKENS_TABLE_NAME.equals(storeBuilderConfig.getName())) {
      // Skip creating a backup of tokens table
      // TODO: In the future, if there are other tables that should not be backed up, this could be part of
      // StoreBuilderConfig interface
      continue;
    }
    final BackupFileInfo backupFileInfo = new BackupFileInfo().setKvstoreInfo(DataStoreUtils.toInfo(storeBuilderConfig));
    dumpTable(fs, backupDir, backupFileInfo, entry.getValue());
    ++backupStats.tables;
  }
  backupUploadedFiles(fs, backupDir, homeFileStore, backupStats);
  return backupStats;
}
 
Example 11
Project: hadoop-oss   File: TestCredentialProviderFactory.java   (6 votes)
public void checkPermissionRetention(Configuration conf, String ourUrl,
    Path path) throws Exception {
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  // let's add a new credential and flush and check that permissions are still set to 777
  char[] cred = new char[32];
  for(int i =0; i < cred.length; ++i) {
    cred[i] = (char) i;
  }
  // create a new key
  try {
    provider.createCredentialEntry("key5", cred);
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = CredentialProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(cred, provider.getCredentialEntry("key5").getCredential());

  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting " +
  		"keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
 
Example 12
Project: hadoop   File: TestFileCreation.java   (6 votes)
/**
 * Test that server default values can be retrieved on the client side
 */
@Test
public void testServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
  conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                   .numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
                   .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    FsServerDefaults serverDefaults = fs.getServerDefaults();
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 13
Project: hadoop   File: TestLocalJobSubmission.java   (6 votes)
/**
 * test the local job submission options of
 * -jt local -libjars
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Example 14
Project: Transwarp-Sample-Code   File: Delete.java   (6 votes)
public static void main(String[] args) {
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/file.txt");
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    try {
        // If Kerberos is not enabled, comment out the next two lines
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\hdfs.keytab");
        FileSystem fs = p.getFileSystem(conf);
        boolean b = fs.delete(p, true);
        System.out.println(b);
        fs.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Example 15
Project: ditb   File: TestFSTableDescriptors.java   (6 votes)
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
      "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  assertTrue(fstd.createTableDescriptor(htd));
  assertFalse(fstd.createTableDescriptor(htd));
  htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
  assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
  Path tableDir = fstd.getTableDir(htd.getTableName());
  Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
  FileStatus[] statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);

  assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
 
Example 16
Project: ditb   File: FSUtils.java   (6 votes)
/**
 * Runs through the HBase rootdir and creates a reverse lookup map for
 * table StoreFile names to the full Path.
 * <br>
 * Example...<br>
 * Key = 3944417774205889744  <br>
 * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
 *
 * @param fs  The file system to use.
 * @param hbaseRootDir  The root directory to scan.
 * @param errors ErrorReporter instance or null
 * @return Map keyed by StoreFile name with a value of the full Path.
 * @throws IOException When scanning the directory fails.
 */
public static Map<String, Path> getTableStoreFilePathMap(
  final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
throws IOException {
  Map<String, Path> map = new HashMap<String, Path>();

  // if this method looks similar to 'getTableFragmentation' that is because
  // it was borrowed from it.

  // only include the directory paths to tables
  for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
    getTableStoreFilePathMap(map, fs, hbaseRootDir,
        FSUtils.getTableName(tableDir), errors);
  }
  return map;
}
 
Example 17
Project: hadoop   File: TestCodec.java   (6 votes)
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Example 18
Project: hadoop   File: TestKeyProviderFactory.java   (6 votes)
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  // let's add a new key and flush and check that permissions are still set to 777
  byte[] key = new byte[16];
  for(int i =0; i < key.length; ++i) {
    key[i] = (byte) i;
  }
  // create a new key
  try {
    provider.createKey("key5", key, KeyProvider.options(conf));
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = KeyProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());

  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
 
Example 19
Project: hadoop   File: TestViewFileSystemWithAcls.java   (6 votes)
@Before
public void setUp() throws Exception {
  fsTarget = fHdfs;
  fsTarget2 = fHdfs2;
  targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
  targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);

  fsTarget.delete(targetTestRoot, true);
  fsTarget2.delete(targetTestRoot2, true);
  fsTarget.mkdirs(targetTestRoot);
  fsTarget2.mkdirs(targetTestRoot2);

  fsViewConf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
 
Example 20
Project: hadoop   File: TestWebHDFS.java   (5 votes)
static void verifyPread(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException {
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);

  final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d",
      offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  for(; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int)Math.min(remaining, buf.length);
    in.readFully(offset, buf, 0, n);
    checkData(offset, remaining, n, buf, expected);

    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
 
Example 21
Project: multiple-dimension-spread   File: MDSCombineSpreadReader.java   (5 votes)
public MDSCombineSpreadReader( final CombineFileSplit split , final TaskAttemptContext context , final Integer index ) throws IOException{
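  // Open the index-th file of the combined split and hand its full byte range to the inner MDSSpreadReader.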
  Configuration config = context.getConfiguration();
  Path path = split.getPath( index );
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  InputStream in = fs.open( path );

  innerReader = new MDSSpreadReader();
  innerReader.setStream( in , fileLength , 0 , fileLength );
}
 
Example 22
Project: hadoop   File: ViewFileSystem.java   (5 votes)
@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException,
    IOException {
  InodeTree.ResolveResult<FileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  res.targetFileSystem.setPermission(res.remainingPath, permission); 
}
 
Example 23
Project: hadoop-oss   File: TestGenericOptionsParser.java   (5 votes)
@Override
protected void setUp() throws Exception {
  super.setUp();
  conf = new Configuration();
  localFs = FileSystem.getLocal(conf);
  testDir = new File(System.getProperty("test.build.data", "/tmp"), "generic");
  if(testDir.exists())
    localFs.delete(new Path(testDir.toString()), true);
}
 
Example 24
Project: hadoop   File: TestWebHDFS.java   (5 votes)
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 25
Project: hadoop   File: AbstractContractRootDirectoryTest.java   (5 votes)
@Test
public void testMkDirDepth1() throws Throwable {
  FileSystem fs = getFileSystem();
  Path dir = new Path("/testmkdirdepth1");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
  assertPathExists("directory already exists", dir);
  assertDeleted(dir, true);
}
 
Example 26
Project: ditb   File: PerformanceEvaluation.java   (5 votes)
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  // generate splits
  List<InputSplit> splitList = new ArrayList<InputSplit>();

  for (FileStatus file: listStatus(job)) {
    if (file.isDirectory()) {
      continue;
    }
    Path path = file.getPath();
    FileSystem fs = path.getFileSystem(job.getConfiguration());
    FSDataInputStream fileIn = fs.open(path);
    LineReader in = new LineReader(fileIn, job.getConfiguration());
    int lineLen = 0;
    while(true) {
      Text lineText = new Text();
      lineLen = in.readLine(lineText);
      if (lineLen <= 0) {
        break;
      }
      Matcher m = LINE_PATTERN.matcher(lineText.toString());
      if((m != null) && m.matches()) {
        TableName tableName = TableName.valueOf(m.group(1));
        int startRow = Integer.parseInt(m.group(2));
        int rows = Integer.parseInt(m.group(3));
        int totalRows = Integer.parseInt(m.group(4));
        int clients = Integer.parseInt(m.group(5));
        boolean flushCommits = Boolean.parseBoolean(m.group(6));
        boolean writeToWAL = Boolean.parseBoolean(m.group(7));
        boolean useTags = Boolean.parseBoolean(m.group(8));
        int noOfTags = Integer.parseInt(m.group(9));

        LOG.debug("tableName=" + tableName +
                  " split["+ splitList.size() + "] " +
                  " startRow=" + startRow +
                  " rows=" + rows +
                  " totalRows=" + totalRows +
                  " clients=" + clients +
                  " flushCommits=" + flushCommits +
                  " writeToWAL=" + writeToWAL +
                  " useTags=" + useTags +
                  " noOfTags=" + noOfTags);

        PeInputSplit newSplit =
          new PeInputSplit(tableName, startRow, rows, totalRows, clients,
              flushCommits, writeToWAL, useTags, noOfTags);
        splitList.add(newSplit);
      }
    }
    in.close();
  }

  LOG.info("Total # of splits: " + splitList.size());
  return splitList;
}
 
Example 27
Project: hadoop   File: ViewFileSystem.java   (5 votes)
@Override
public long getDefaultBlockSize(Path f) {
  try {
    InodeTree.ResolveResult<FileSystem> res =
      fsState.resolve(getUriPath(f), true);
    return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getDefaultBlockSize"); 
  }
}
 
Example 28
Project: hadoop   File: TestTextInputFormat.java   (5 votes)
private static void writeFile(FileSystem fs, Path name, 
                              CompressionCodec codec,
                              String contents) throws IOException {
  OutputStream stm;
  if (codec == null) {
    stm = fs.create(name);
  } else {
    stm = codec.createOutputStream(fs.create(name));
  }
  stm.write(contents.getBytes());
  stm.close();
}
 
Example 29
Project: hadoop-oss   File: RollingFileSystemSinkTestBase.java   (5 votes)
/**
 * Read the target log file and append its contents to the StringBuilder.
 * @param fs the target FileSystem
 * @param logFile the target file path
 * @param metrics where to append the file contents
 * @throws IOException thrown if the file cannot be read
 */
protected void readLogData(FileSystem fs, Path logFile, StringBuilder metrics)
    throws IOException {
  FSDataInputStream fsin = fs.open(logFile);
  BufferedReader in = new BufferedReader(new InputStreamReader(fsin,
      StandardCharsets.UTF_8));
  String line = null;

  while ((line = in.readLine()) != null) {
    metrics.append(line).append("\n");
  }
}
 
Example 30
Project: angel   File: ModelLoader.java   (5 votes)
/**
 * Get model meta
 *
 * @param modelDir model save directory path
 * @return model meta
 */
public static ModelFilesMeta getMeta(String modelDir, Configuration conf) throws IOException {
  Path modelPath = new Path(modelDir);
  Path meteFilePath = new Path(modelPath, ModelFilesConstent.modelMetaFileName);
  ModelFilesMeta meta = new ModelFilesMeta();
  FileSystem fs = meteFilePath.getFileSystem(conf);
  if (!fs.exists(meteFilePath)) {
    throw new IOException("matrix meta file does not exist ");
  }
  FSDataInputStream input = fs.open(meteFilePath);
  meta.read(input);
  input.close();
  return meta;
}
 
Example 31
Project: hadoop-oss   File: AbstractContractMkdirTest.java   (5 votes)
@Test
public void testMkDirRmDir() throws Throwable {
  FileSystem fs = getFileSystem();

  Path dir = path("testMkDirRmDir");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  assertPathExists("mkdir failed", dir);
  assertDeleted(dir, false);
}
 
Example 32
Project: hadoop-oss   File: BloomMapFile.java   (5 votes)
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    WritableComparator comparator, Class valClass,
    CompressionType compress, Progressable progress) throws IOException {
  this(conf, new Path(dirName), comparator(comparator), 
       valueClass(valClass), compression(compress),
       progressable(progress));
}
 
Example 33
Project: hadoop   File: MockFileSystem.java   (5 votes)
/** Setup and return the underlying {@link FileSystem} mock */
static FileSystem setup() throws IOException {
  if (mockFs == null) {
    mockFs = mock(FileSystem.class);
  }
  reset(mockFs);
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "mockfs:///");
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  when(mockFs.getConf()).thenReturn(conf);
  return mockFs;
}
 
Example 34
Project: hadoop   File: SwiftTestUtils.java   (5 votes)
/**
 * Assert that a file exists and that its {@link FileStatus} entry
 * declares it is a file, not a symlink or directory.
 * @param fileSystem filesystem to resolve path against
 * @param filename name of the file
 * @throws IOException IO problems during file operations
 */
public static void assertIsFile(FileSystem fileSystem, Path filename)
    throws IOException {
  assertPathExists(fileSystem, "Expected file", filename);
  FileStatus status = fileSystem.getFileStatus(filename);
  String fileInfo = filename + "  " + status;
  assertFalse("File claims to be a directory " + fileInfo,
      status.isDirectory());
  /* disabled for Hadoop v1 compatibility
  assertFalse("File claims to be a symlink " + fileInfo,
      status.isSymlink());
  */
}
 
Example 35
Project: hadoop   File: TestMapRed.java   (5 votes)
private static boolean isSequenceFile(FileSystem fs,
                                      Path f) throws IOException {
  DataInputStream in = fs.open(f);
  byte[] seq = "SEQ".getBytes();
  for(int i=0; i < seq.length; ++i) {
    if (seq[i] != in.read()) {
      return false;
    }
  }
  return true;
}
 
Example 36
Project: hadoop   File: TestHistograms.java   (5 votes)
/**
 * @throws IOException
 * 
 *           There should be files in the directory named by
 *           ${test.build.data}/rumen/histogram-test .
 * 
 *           There will be pairs of files, inputXxx.json and goldXxx.json .
 * 
 *           We read the input file as a HistogramRawTestData in json. Then we
 *           create a Histogram using the data field, and then a
 *           LoggedDiscreteCDF using the percentiles and scale field. Finally,
 *           we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
 *           deepCompare them.
 */
@Test
public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir = new Path(
      System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");


  FileStatus[] tests = lfs.listStatus(rootInputFile);

  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold"+testName);
      assertTrue("Gold file dies not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(
          goldStream, LoggedDiscreteCDF.class); 
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, "<root>"));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      } finally {
        parser.close();
      }
    }
  }
}
 
Example 37
Project: ditb   File: TableNamespaceManager.java   (5 votes)
private void create(Table table, NamespaceDescriptor ns) throws IOException {
  if (get(table, ns.getName()) != null) {
    throw new NamespaceExistException(ns.getName());
  }
  validateTableAndRegionCount(ns);
  FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
  fs.mkdirs(FSUtils.getNamespaceDir(
      masterServices.getMasterFileSystem().getRootDir(), ns.getName()));
  upsert(table, ns);
  if (this.masterServices.isInitialized()) {
    this.masterServices.getMasterQuotaManager().setNamespaceQuota(ns);
  }
}
 
Example 38
Project: QDrill   File: TestImpersonationMetadata.java   (5 votes)
private static Map<String , WorkspaceConfig> createTestWorkspaces() throws Exception {
  // Create "/tmp" folder and set permissions to "777"
  final Path tmpPath = new Path("/tmp");
  fs.delete(tmpPath, true);
  FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));

  Map<String, WorkspaceConfig> workspaces = Maps.newHashMap();

  // Create /drillTestGrp0_700 directory with permissions 700 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_700", "/drillTestGrp0_700", (short)0700, processUser, group0, workspaces);

  // Create /drillTestGrp0_750 directory with permissions 750 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_750", "/drillTestGrp0_750", (short)0750, processUser, group0, workspaces);

  // Create /drillTestGrp0_755 directory with permissions 755 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_755", "/drillTestGrp0_755", (short)0755, processUser, group0, workspaces);

  // Create /drillTestGrp0_770 directory with permissions 770 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_770", "/drillTestGrp0_770", (short)0770, processUser, group0, workspaces);

  // Create /drillTestGrp0_777 directory with permissions 777 (owned by user running the tests)
  createAndAddWorkspace("drillTestGrp0_777", "/drillTestGrp0_777", (short)0777, processUser, group0, workspaces);

  // Create /drillTestGrp1_700 directory with permissions 700 (owned by user1)
  createAndAddWorkspace("drillTestGrp1_700", "/drillTestGrp1_700", (short)0700, user1, group1, workspaces);

  // Create /user2_workspace1 directory with permissions 775 (owned by user2)
  createAndAddWorkspace("user2_workspace1", "/user2_workspace1", (short)0775, user2, group1, workspaces);

  // Create /user2_workspace2 directory with permissions 755 (owned by user2)
  createAndAddWorkspace("user2_workspace2", "/user2_workspace2", (short)0755, user2, group1, workspaces);

  return workspaces;
}
 
Example 39
Project: hadoop   File: TestFileQueue.java   (5 votes)
@AfterClass
public static void cleanup() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem fs = FileSystem.getLocal(conf).getRaw();
  final Path p = new Path(System.getProperty("test.build.data", "/tmp"),
      "testFileQueue").makeQualified(fs);
  fs.delete(p, true);
}
 
Example 40
Project: hadoop   File: TestWebHdfsWithAuthenticationFilter.java   (5 votes)
@BeforeClass
public static void setUp() throws IOException {
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
      CustomizedFilter.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
  fs = FileSystem.get(
      URI.create("webhdfs://" + NetUtils.getHostPortString(addr)), conf);
  cluster.waitActive();
}
 
Example 41
Project: angel   File: AngelApps.java   (5 votes)
private static void parseDistributedCacheArtifacts(Configuration conf,
    Map<String, LocalResource> localResources, LocalResourceType type, URI[] uris,
    long[] timestamps, long[] sizes, boolean visibilities[]) throws IOException {

  if (uris != null) {
    // Sanity check
    if ((uris.length != timestamps.length) || (uris.length != sizes.length)
        || (uris.length != visibilities.length)) {
      throw new IllegalArgumentException("Invalid specification for "
          + "distributed-cache artifacts of type " + type + " :" + " #uris=" + uris.length
          + " #timestamps=" + timestamps.length + " #visibilities=" + visibilities.length);
    }

    for (int i = 0; i < uris.length; ++i) {
      URI u = uris[i];
      Path p = new Path(u);
      FileSystem remoteFS = p.getFileSystem(conf);
      p =
          remoteFS
              .resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
      // Add URI fragment or just the filename
      Path name = new Path((null == u.getFragment()) ? p.getName() : u.getFragment());
      if (name.isAbsolute()) {
        throw new IllegalArgumentException("Resource name must be relative");
      }
      String linkName = name.toUri().getPath();
      LocalResource orig = localResources.get(linkName);
      org.apache.hadoop.yarn.api.records.URL url = ConverterUtils.getYarnUrlFromURI(p.toUri());
      if (orig != null && !orig.getResource().equals(url)) {
        LOG.warn(getResourceDescription(orig.getType()) + toString(orig.getResource())
            + " conflicts with " + getResourceDescription(type) + toString(url)
            + " This will be an error in Hadoop 2.0");
        continue;
      }
      localResources.put(linkName, LocalResource.newInstance(ConverterUtils.getYarnUrlFromURI(p
          .toUri()), type, visibilities[i] ? LocalResourceVisibility.PUBLIC
          : LocalResourceVisibility.PRIVATE, sizes[i], timestamps[i]));
    }
  }
}
 
Example 42
Project: hadoop   File: MiniMRCluster.java   (5 votes)
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
    int numTaskTrackers, String namenode, int numDir, String[] racks,
    String[] hosts, UserGroupInformation ugi, JobConf conf,
    int numTrackerToExclude, Clock clock) throws IOException {
  if (conf == null) conf = new JobConf();
  FileSystem.setDefaultUri(conf, namenode);
  String identifier = this.getClass().getSimpleName() + "_"
      + Integer.toString(new Random().nextInt(Integer.MAX_VALUE));
  mrClientCluster = MiniMRClientClusterFactory.create(this.getClass(),
      identifier, numTaskTrackers, conf);
}
 
Example 43
Project: hadoop-oss   File: AbstractContractRootDirectoryTest.java   (5 votes)
@Test
public void testMkDirDepth1() throws Throwable {
  FileSystem fs = getFileSystem();
  Path dir = new Path("/testmkdirdepth1");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
  assertPathExists("directory already exists", dir);
  assertDeleted(dir, true);
}
 
Example 44
Project: aliyun-maxcompute-data-collectors   File: HiveImport.java   (5 votes)
/**
 * If we used a MapReduce-based upload of the data, remove the _logs dir
 * from where we put it, before running Hive LOAD DATA INPATH.
 */
private void removeTempLogs(Path tablePath) throws IOException {
  FileSystem fs = FileSystem.get(configuration);
  Path logsPath = new Path(tablePath, "_logs");
  if (fs.exists(logsPath)) {
    LOG.info("Removing temporary files from import process: " + logsPath);
    if (!fs.delete(logsPath, true)) {
      LOG.warn("Could not delete temporary files; "
          + "continuing with import, but it may fail.");
    }
  }
}
 
Example 45
Project: ditb   File: SnapshotReferenceUtil.java   (5 votes)
/**
 * Returns the log file names available in the snapshot.
 *
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory
 * @throws IOException if an error occurred while scanning the directory
 * @return the names of WALs in the specified snapshot
 */
public static Set<String> getWALNames(final FileSystem fs, final Path snapshotDir)
    throws IOException {
  final Set<String> names = new HashSet<String>();
  visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
    @Override
    public void logFile (final String server, final String logfile) throws IOException {
      names.add(logfile);
    }
  });
  return names;
}
 
Example 46
Project: hadoop-oss   File: TestMapFile.java   (5 votes)
/**
 * Test the {@code MapFile.fix} method.
 */
@Test
public void testFix() {
  final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
  int PAIR_SIZE = 20;
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
    writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
    for (int i = 0; i < PAIR_SIZE; i++)
      writer.append(new IntWritable(0), new Text("value"));
    writer.close();

    File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
    boolean isDeleted = false;
    if (indexFile.exists())
      isDeleted = indexFile.delete();

    if (isDeleted)
      assertTrue("testFix error !!!",
          MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
  } catch (Exception ex) {
    fail("testFix error !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Example 47
Project: PACE   File: VersioningIT.java   (5 votes)
/**
 * Test that rfiles created by a specific version of PACE still work correctly.
 *
 * @param versionConfig
 *          Configuration for the version with which the rfiles were generated.
 */
private void testVersion(File versionConfig) throws Exception {
  String parentDirectory = versionConfig.getParent();

  JsonParser parser = new JsonParser();
  JsonObject configJson = parser.parse(new FileReader(versionConfig)).getAsJsonObject();

  // Get a scanner for the data file.
  List<byte[]> auths = new ArrayList<>();
  for (JsonElement authElem : configJson.getAsJsonArray("authorizations")) {
    auths.add(VisibilityEvaluator.escape(authElem.getAsString().getBytes(Utils.VISIBILITY_CHARSET), false));
  }
  Authorizations authorizations = new Authorizations(auths);

  LocalFileSystem fs = FileSystem.getLocal(new Configuration());
  Scanner dataScanner = RFile.newScanner().from(Paths.get(parentDirectory, configJson.getAsJsonPrimitive("data-table").getAsString()).toString())
      .withFileSystem(fs).withAuthorizations(authorizations).build();

  // Validate each configuration.
  for (JsonElement testElem : configJson.getAsJsonArray("tests")) {
    JsonObject test = testElem.getAsJsonObject();
    SignatureConfig signatureConfig = new SignatureConfigBuilder().readFromFile(
        new FileReader(Paths.get(parentDirectory, test.getAsJsonPrimitive("config").getAsString()).toFile())).build();
    SignatureKeyContainer keyContainer = LocalSignatureKeyContainer.read(new FileReader(Paths.get(parentDirectory,
        test.getAsJsonPrimitive("keys").getAsString()).toFile()));

    EntrySigner verifier = new EntrySigner(signatureConfig, keyContainer);
    Scanner signedScanner = RFile.newScanner().from(Paths.get(parentDirectory, test.getAsJsonPrimitive("table").getAsString()).toString()).withFileSystem(fs)
        .withAuthorizations(authorizations).build();

    runTest(dataScanner, signedScanner, verifier, signatureConfig);
  }
}
 
Example 48
Project: hadoop-oss   File: TestBloomMapFile.java   (5 votes)
@Before
public void setUp() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
    fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_ROOT);
}
 
Example 49
Project: hadoop   File: TestFSMainOperationsWebHdfs.java   (5 votes)
@BeforeClass
public static void setupCluster() {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    //change root permission to 777
    cluster.getFileSystem().setPermission(
        new Path("/"), new FsPermission((short)0777));

    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

    //get file system as a non-superuser
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
    fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(new URI(uri), conf);
      }
    });

    defaultWorkingDirectory = fileSystem.getWorkingDirectory();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example 50
Project: ditb   File: SnapshotFileCache.java   (5 votes)
/**
 * Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
 * filesystem
 * @param fs {@link FileSystem} where the snapshots are stored
 * @param rootDir hbase root directory
 * @param cacheRefreshPeriod period (ms) with which the cache should be refreshed
 * @param cacheRefreshDelay amount of time to wait for the cache to be refreshed
 * @param refreshThreadName name of the cache refresh thread
 * @param inspectSnapshotFiles Filter to apply to each snapshot to extract the files.
 */
public SnapshotFileCache(FileSystem fs, Path rootDir, long cacheRefreshPeriod,
    long cacheRefreshDelay, String refreshThreadName, SnapshotFileInspector inspectSnapshotFiles) {
  this.fs = fs;
  this.fileInspector = inspectSnapshotFiles;
  this.snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  // periodically refresh the file cache to make sure we aren't superfluously saving files.
  this.refreshTimer = new Timer(refreshThreadName, true);
  this.refreshTimer.scheduleAtFixedRate(new RefreshCacheTask(), cacheRefreshDelay,
    cacheRefreshPeriod);
}
 
Example 51
Project: hadoop   File: TestFileInputFormat.java   (5 votes)
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inFile1 = new Path(base1, "file1");
  Path inFile2 = new Path(base1, "file2");

  localFs.createNewFile(inFile1);
  localFs.createNewFile(inFile2);

  List<Path> expectedPaths = Lists.newArrayList();
  return expectedPaths;
}
 
Example 52
Project: hadoop   File: FileSystemReleaseFilter.java   (5 votes)
/**
 * It delegates the incoming request to the <code>FilterChain</code>, and
 * at its completion (in a finally block) releases the filesystem instance
 * back to the {@link FileSystemAccess} service.
 *
 * @param servletRequest servlet request.
 * @param servletResponse servlet response.
 * @param filterChain filter chain.
 *
 * @throws IOException thrown if an IO error occurs.
 * @throws ServletException thrown if a servlet error occurs.
 */
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
  throws IOException, ServletException {
  try {
    filterChain.doFilter(servletRequest, servletResponse);
  } finally {
    FileSystem fs = FILE_SYSTEM_TL.get();
    if (fs != null) {
      FILE_SYSTEM_TL.remove();
      getFileSystemAccess().releaseFileSystem(fs);
    }
  }
}
 
Example 53
Project: hadoop-oss   File: PathData.java   (5 votes)
/**
 * Creates an object to wrap the given parameters as fields.  The string
 * used to create the path will be recorded since the Path object does not
 * return exactly the same string used to initialize it.
 * @param fs the FileSystem
 * @param pathString a String of the path
 * @param stat the FileStatus (may be null if the path doesn't exist)
 */
private PathData(FileSystem fs, String pathString, FileStatus stat)
throws IOException {
  this.fs = fs;
  this.uri = stringToUri(pathString);
  this.path = fs.makeQualified(new Path(uri));
  setStat(stat);

  if (Path.WINDOWS) {
    inferredSchemeFromPath = checkIfSchemeInferredFromPath(pathString);
  }
}
 
Example 54
Project: hadoop-oss   File: TestFSMainOperationsLocalFileSystem.java   (5 votes)
@Override
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  fcTarget = FileSystem.getLocal(conf);
  super.setUp();
}
 
Example 55
Project: hadoop   File: UtilsForTests.java   (5 votes)
/**
 * Signal the maps/reduces to start.
 */
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                        boolean isMap, String mapSignalFile, 
                        String reduceSignalFile)
    throws IOException, TimeoutException {
  try {
    //  signal the maps to complete
    writeFile(dfs.getNameNode(), fileSys.getConf(),
              isMap 
              ? new Path(mapSignalFile)
              : new Path(reduceSignalFile), (short)1);
  } catch (InterruptedException ie) {
    // Ignore
  }
}
 
Example 56
Project: kafka-connect-fs   File: AbstractPolicy.java   (5 votes)
private void configFs(Map<String, Object> customConfigs) throws IOException {
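    // Build one FileSystem instance per configured URI, applying any policy-prefixed overrides from customConfigs.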
    for (String uri : this.conf.getFsUris()) {
        Configuration fsConfig = new Configuration();
        customConfigs.entrySet().stream()
                .filter(entry -> entry.getKey().startsWith(FsSourceTaskConfig.POLICY_PREFIX_FS))
                .forEach(entry -> fsConfig.set(entry.getKey().replace(FsSourceTaskConfig.POLICY_PREFIX_FS, ""),
                        (String) entry.getValue()));

        Path workingDir = new Path(convert(uri));
        FileSystem fs = FileSystem.newInstance(workingDir.toUri(), fsConfig);
        fs.setWorkingDirectory(workingDir);
        this.fileSystems.add(fs);
    }
}
 
Example 57
Project: hadoop   File: TestTFileSeqFileComparison.java   (5 votes)
public SeqFileReadable(FileSystem fs, Path path, int osBufferSize)
    throws IOException {
  Configuration conf = new Configuration();
  conf.setInt("io.file.buffer.size", osBufferSize);
  reader = new SequenceFile.Reader(fs, path, conf);
  key = new BytesWritable();
  value = new BytesWritable();
}
 
Example 58
Project: ditb   File: FSTableDescriptorMigrationToSubdir.java   (5 votes)
/**
 * Determines if migration is required by checking to see whether the hbase:meta table has been
 * migrated.
 */
private static boolean needsMigration(FileSystem fs, Path rootDir) throws IOException {
  Path metaTableDir = FSUtils.getTableDir(rootDir,
    TableName.META_TABLE_NAME);
  FileStatus metaTableInfoStatus =
    FSTableDescriptors.getTableInfoPath(fs, metaTableDir);
  return metaTableInfoStatus == null;
}
 
Example 59
Project: hadoop   File: UtilsForTests.java   (5 votes)
static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds, String input) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }    

  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);
  RunningJob job = jobClient.submitJob(conf);

  return job;
}
 
Example 60
Project: hadoop   File: TestFsck.java   (5 votes)
/** Test if fsck can return -1 in case of failure
 * 
 * @throws Exception
 */
@Test
public void testFsckError() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short)1);
    
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
        (fileName, true);
    final BlockInfoContiguous[] blocks = node.getBlocks();
    assertEquals(blocks.length, 1);
    blocks[0].setNumBytes(-1L);  // set the block length to be negative
    
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}