Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem. These examples are extracted from open source projects; the project, source file, and license are noted above each example.
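
Most of the examples below first obtain a DistributedFileSystem, either by casting the FileSystem returned for an hdfs:// URI or by guarding the cast with an instanceof check. The following standalone sketch illustrates that common pattern; the class name and the NameNode host/port are placeholders, not taken from any of the projects listed below.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DistributedFileSystemExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; use your cluster's fs.defaultFS value.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);

    // Only HDFS returns a DistributedFileSystem, which exposes HDFS-specific
    // operations such as recoverLease() and getSnapshotDiffReport(), so the
    // cast is guarded with an instanceof check, as in many examples below.
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      System.out.println("Connected to HDFS at " + dfs.getUri());
    } else {
      System.out.println("Not an HDFS filesystem: " + fs.getClass().getName());
    }
  }
}
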
Example 1
Source Project: incubator-retired-blur   Source File: HdfsDirectory.java    License: Apache License 2.0
protected long length(String name) throws IOException {
  Path path = getPath(name);
  Tracer trace = Trace.trace("filesystem - length", Trace.param("path", path));
  try {
    if (_fileSystem instanceof DistributedFileSystem) {
      FSDataInputStream in = _fileSystem.open(path);
      try {
        return HdfsUtils.getFileLength(_fileSystem, path, in);
      } finally {
        in.close();
      }
    } else {
      return _fileSystem.getFileStatus(path).getLen();
    }
  } finally {
    trace.done();
  }
}
 
Example 2
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * This method clones the passed <code>c</code> configuration, setting a new
 * user into the clone.  Use it when getting new instances of FileSystem.  Only
 * works for DistributedFileSystem without Kerberos.
 * @param c Initial configuration
 * @param differentiatingSuffix Suffix to differentiate this user from others.
 * @return A new configuration instance with a different user set into it.
 * @throws IOException
 */
public static User getDifferentUser(final Configuration c,
  final String differentiatingSuffix)
throws IOException {
  FileSystem currentfs = FileSystem.get(c);
  if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
    return User.getCurrent();
  }
  // Else distributed filesystem.  Make a new instance per daemon.  Below
  // code is taken from the AppendTestUtil over in hdfs.
  String username = User.getCurrent().getName() +
    differentiatingSuffix;
  User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
  return user;
}
 
Example 3
@Test public void testSeek() throws IOException {
  String archiveLocation = location+"/testSeek";
  LogName logName = LogName.of("testSeek");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.delete(new Path(archiveLocation), true);
  ArchiveLogWriter writer = new ArchiveHdfsLogWriter(conf);
  writer.init(archiveLocation, logName);
  int k = 100;
  write(writer, 1, k);
  writer.close();
  ArchiveLogReader reader = new ArchiveHdfsLogReader(conf,
      LogServiceUtils.getArchiveLocationForLog(archiveLocation, logName));
  reader.seek(80);
  Assert.assertEquals(80, reader.getPosition());
  int count = 0;
  while (reader.next() != null) {
    count++;
  }
  Assert.assertEquals(20, count);
}
 
Example 4
Source Project: jstorm   Source File: FileLock.java    License: Apache License 2.0
/**
 * Takes ownership of the lock file if possible.
 * @param fs          file system on which the lock file resides
 * @param lockFile    path to the lock file
 * @param lastEntry   last entry in the lock file. This param is an optimization:
 *                    we don't scan the lock file again to find its last entry here,
 *                    since that was already done by the logic that checks whether the
 *                    lock file is stale, so this value comes from that earlier scan.
 * @param spoutId     spout id
 * @throws IOException if unable to acquire
 * @return null if the lock file is not recoverable
 */
public static FileLock takeOwnership(FileSystem fs, Path lockFile, LogEntry lastEntry, String spoutId)
        throws IOException {
  try {
    if (fs instanceof DistributedFileSystem) {
      if (!((DistributedFileSystem) fs).recoverLease(lockFile)) {
        LOG.warn("Unable to recover lease on lock file {} right now. Cannot transfer ownership. Will need to try later. Spout = {}", lockFile, spoutId);
        return null;
      }
    }
    return new FileLock(fs, lockFile, spoutId, lastEntry);
  } catch (IOException e) {
    if (e instanceof RemoteException &&
            ((RemoteException) e).unwrapRemoteException() instanceof AlreadyBeingCreatedException) {
      LOG.warn("Lock file " + lockFile + "is currently open. Cannot transfer ownership now. Will need to try later. Spout= " + spoutId, e);
      return null;
    } else { // unexpected error
      LOG.warn("Cannot transfer ownership now for lock file " + lockFile + ". Will need to try later. Spout =" + spoutId, e);
      throw e;
    }
  }
}
 
Example 5
Source Project: big-c   Source File: DistCpSync.java    License: Apache License 2.0
/**
 * Compute the snapshot diff on the given file system. Return true if the diff
 * is empty, i.e., no changes have happened in the FS.
 */
private static boolean checkNoChange(DistCpOptions inputOptions,
    DistributedFileSystem fs, Path path) {
  try {
    SnapshotDiffReport targetDiff =
        fs.getSnapshotDiffReport(path, inputOptions.getFromSnapshot(), "");
    if (!targetDiff.getDiffList().isEmpty()) {
      DistCp.LOG.warn("The target has been modified since snapshot "
          + inputOptions.getFromSnapshot());
      return false;
    } else {
      return true;
    }
  } catch (IOException e) {
    DistCp.LOG.warn("Failed to compute snapshot diff on " + path, e);
  }
  return false;
}
 
Example 6
Source Project: RDFS   Source File: DistributedRaidFileSystem.java    License: Apache License 2.0
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  // We want to use RAID logic only on instance of DFS.
  if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem underlyingDfs = (DistributedFileSystem) fs;
    LocatedBlocks lbs =
        underlyingDfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
    if (lbs != null) {
      // Use underlying filesystem if the file is under construction.
      if (!lbs.isUnderConstruction()) {
        // Use underlying filesystem if file length is 0.
        final long fileSize = getFileSize(lbs);
        if (fileSize > 0) {
          return new ExtFSDataInputStream(conf, this, f,
            fileSize, getBlockSize(lbs), bufferSize);
        }
      }
    }
  }
  return fs.open(f, bufferSize);
}
 
Example 7
Source Project: spork   Source File: HDataStorage.java    License: Apache License 2.0
public Map<String, Object> getStatistics() throws IOException {
    Map<String, Object> stats = new HashMap<String, Object>();

    long usedBytes = fs.getUsed();
    stats.put(USED_BYTES_KEY , Long.valueOf(usedBytes).toString());
    
    if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        
        long rawCapacityBytes = dfs.getRawCapacity();
        stats.put(RAW_CAPACITY_KEY, Long.valueOf(rawCapacityBytes).toString());
        
        long rawUsedBytes = dfs.getRawUsed();
        stats.put(RAW_USED_KEY, Long.valueOf(rawUsedBytes).toString());
    }
    
    return stats;
}
 
Example 8
Source Project: hudi   Source File: FSUtils.java    License: Apache License 2.0
/**
 * When a file was opened and the task died without closing the stream, another task executor cannot open the file
 * because the existing lease is still active. We try to recover the lease from HDFS. If a data node went down, it
 * takes about 10 minutes for the lease to be recovered; but if the client died, recovery should be instant.
 */
public static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p)
    throws IOException, InterruptedException {
  LOG.info("Recover lease on dfs file " + p);
  // initiate the recovery
  boolean recovered = false;
  for (int nbAttempt = 0; nbAttempt < MAX_ATTEMPTS_RECOVER_LEASE; nbAttempt++) {
    LOG.info("Attempt " + nbAttempt + " to recover lease on dfs file " + p);
    recovered = dfs.recoverLease(p);
    if (recovered) {
      break;
    }
    // Sleep for 1 second before trying again. Typically it takes about 2-3 seconds to recover
    // under default settings
    Thread.sleep(1000);
  }
  return recovered;
}
 
Example 9
Source Project: big-c   Source File: TestFSImage.java    License: Apache License 2.0
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 10
Source Project: RDFS   Source File: BlockReconstructor.java    License: Apache License 2.0
/**
 * Choose a datanode (hostname:portnumber). The datanode is chosen at random
 * from the live datanodes.
 * 
 * @param locationsToAvoid
 *            locations to avoid.
 * @return A string in the format name:port.
 * @throws IOException
 */
private String chooseDatanode(DatanodeInfo[] locationsToAvoid)
		throws IOException {
	DistributedFileSystem dfs = getDFS(new Path("/"));
	DatanodeInfo[] live = dfs.getClient().datanodeReport(
			DatanodeReportType.LIVE);

	Random rand = new Random();
	String chosen = null;
	int maxAttempts = 1000;
	for (int i = 0; i < maxAttempts && chosen == null; i++) {
		int idx = rand.nextInt(live.length);
		chosen = live[idx].name;
		for (DatanodeInfo avoid : locationsToAvoid) {
			if (chosen.equals(avoid.name)) {
				//LOG.info("Avoiding " + avoid.name);
				chosen = null;
				break;
			}
		}
	}
	if (chosen == null) {
		throw new IOException("Could not choose datanode");
	}
	return chosen;
}
 
Example 11
Source Project: hadoop   Source File: DFSAdmin.java    License: Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
 
Example 12
Source Project: big-c   Source File: DFSAdmin.java    License: Apache License 2.0
static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
  final RollingUpgradeAction action = RollingUpgradeAction.fromString(
      argv.length >= 2? argv[1]: "");
  if (action == null) {
    throw new IllegalArgumentException("Failed to convert \"" + argv[1]
        + "\" to " + RollingUpgradeAction.class.getSimpleName());
  }

  System.out.println(action + " rolling upgrade ...");

  final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
  switch(action){
  case QUERY:
    break;
  case PREPARE:
    Preconditions.checkState(info.isStarted());
    break;
  case FINALIZE:
    Preconditions.checkState(info == null || info.isFinalized());
    break;
  }
  printMessage(info, System.out);
  return 0;
}
 
Example 13
@Before
public void setup() throws IOException {
  namedCluster = mock( NamedCluster.class );
  isActiveConfiguration = true;
  hadoopShim = mock( HadoopShim.class );
  configuration = mock( Configuration.class );
  when( hadoopShim.createConfiguration( namedCluster ) ).thenReturn( configuration );
  fileSystem = mock( FileSystem.class );
  when( fileSystem.getDelegate() ).thenReturn( new DistributedFileSystem() );
  when( hadoopShim.getFileSystem( configuration ) ).thenReturn( fileSystem );
  identifier = "testId";
  shimIdentifierInterface = mock( ShimIdentifierInterface.class );
  when( shimIdentifierInterface.getId() ).thenReturn( identifier );
  hadoopFileSystemFactory =
    new HadoopFileSystemFactoryImpl( isActiveConfiguration, hadoopShim, "hdfs", shimIdentifierInterface );
}
 
Example 14
Source Project: hadoop   Source File: DistCpSync.java    License: Apache License 2.0
/**
 * Compute the snapshot diff on the given file system. Return true if the diff
 * is empty, i.e., no changes have happened in the FS.
 */
private static boolean checkNoChange(DistCpOptions inputOptions,
    DistributedFileSystem fs, Path path) {
  try {
    SnapshotDiffReport targetDiff =
        fs.getSnapshotDiffReport(path, inputOptions.getFromSnapshot(), "");
    if (!targetDiff.getDiffList().isEmpty()) {
      DistCp.LOG.warn("The target has been modified since snapshot "
          + inputOptions.getFromSnapshot());
      return false;
    } else {
      return true;
    }
  } catch (IOException e) {
    DistCp.LOG.warn("Failed to compute snapshot diff on " + path, e);
  }
  return false;
}
 
Example 15
Source Project: hbase   Source File: FanOutOneBlockAsyncDFSOutput.java    License: Apache License 2.0
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs,
    DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
    LocatedBlock locatedBlock, Encryptor encryptor, List<Channel> datanodeList,
    DataChecksum summer, ByteBufAllocator alloc) {
  this.conf = conf;
  this.dfs = dfs;
  this.client = client;
  this.namenode = namenode;
  this.fileId = fileId;
  this.clientName = clientName;
  this.src = src;
  this.block = locatedBlock.getBlock();
  this.locations = locatedBlock.getLocations();
  this.encryptor = encryptor;
  this.datanodeList = datanodeList;
  this.summer = summer;
  this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
  this.alloc = alloc;
  this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
  this.state = State.STREAMING;
  setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
 
Example 16
Source Project: RDFS   Source File: TestCombineFileInputFormat.java    License: Apache License 2.0
private void splitRealFiles(String[] args) throws IOException {
  JobConf conf = new JobConf();
  FileSystem fs = FileSystem.get(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IOException("Wrong file system: " + fs.getClass().getName());
  }
  int blockSize = conf.getInt("dfs.block.size", 128 * 1024 * 1024);

  DummyInputFormat inFormat = new DummyInputFormat();
  for (int i = 0; i < args.length; i++) {
    inFormat.addInputPaths(conf, args[i]);
  }
  inFormat.setMinSplitSizeRack(blockSize);
  inFormat.setMaxSplitSize(10 * blockSize);

  InputSplit[] splits = inFormat.getSplits(conf, 1);
  System.out.println("Total number of splits " + splits.length);
  for (int i = 0; i < splits.length; ++i) {
    CombineFileSplit fileSplit = (CombineFileSplit) splits[i];
    System.out.println("Split[" + i + "] " + fileSplit);
  }
}
 
Example 17
Source Project: big-c   Source File: HDFSConcat.java    License: Apache License 2.0
public static void main(String... args) throws IOException {
  if (args.length < 2) {
    System.err.println("Usage HDFSConcat target srcs..");
    System.exit(0);
  }

  Configuration conf = new Configuration();
  String uri = conf.get("fs.default.name", def_uri);
  Path path = new Path(uri);
  DistributedFileSystem dfs =
      (DistributedFileSystem) FileSystem.get(path.toUri(), conf);

  Path[] srcs = new Path[args.length - 1];
  for (int i = 1; i < args.length; i++) {
    srcs[i - 1] = new Path(args[i]);
  }
  dfs.concat(new Path(args[0]), srcs);
}
 
Example 18
Source Project: bigdata-tutorial   Source File: HdfsClient.java    License: Apache License 2.0
/**
 * List the hostnames of all DataNodes in the HDFS cluster.
 */
public void listDataNodeInfo() {
	try {
		DistributedFileSystem hdfs = (DistributedFileSystem) fs;
		DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
		String[] names = new String[dataNodeStats.length];
		System.out.println(">>>> List of all the datanode in the HDFS cluster:");

		for (int i = 0; i < names.length; i++) {
			names[i] = dataNodeStats[i].getHostName();
			System.out.println(">>>> datanode : " + names[i]);
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 19
Source Project: kite   Source File: TestFileSystemRepositoryURIs.java    License: Apache License 2.0
@Test
public void testHdfsAbsolute() throws URISyntaxException {
  URI hdfsUri = getDFS().getUri();
  URI repositoryUri = new URI("repo:hdfs://" + hdfsUri.getAuthority() + "/tmp/dsr-repo-test");
  DatasetRepository repository = DatasetRepositories.repositoryFor(repositoryUri);

  // We only do the deeper implementation checks one per combination.
  Assert.assertNotNull("Received a repository", repository);
  Assert.assertTrue("Repo is a FileSystem repo",
      repository instanceof FileSystemDatasetRepository);
  MetadataProvider provider = ((FileSystemDatasetRepository) repository)
      .getMetadataProvider();
  Assert.assertTrue("Repo is using a FileSystemMetadataProvider",
      provider instanceof FileSystemMetadataProvider);
  FileSystemMetadataProvider fsProvider = (FileSystemMetadataProvider) provider;
  Assert.assertTrue("FileSystem is a DistributedFileSystem",
    fsProvider.getFileSytem() instanceof DistributedFileSystem);
  Path expected = fsProvider.getFileSytem().makeQualified(
      new Path("/tmp/dsr-repo-test"));
  Assert.assertEquals("Root directory should be the correct qualified path",
      expected, fsProvider.getRootDirectory());
  Assert.assertEquals("Repository URI", repositoryUri, repository.getUri());
}
 
Example 20
Source Project: hadoop   Source File: TestAddBlock.java    License: Apache License 2.0
/**
 * Test adding new blocks without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example 21
Source Project: RDFS   Source File: FastCopy.java    License: Apache License 2.0
public FastFileCopyRequest(String src, String dst,
    DistributedFileSystem srcFs, DistributedFileSystem dstFs) {
  this.src = src;
  this.dst = dst;
  this.srcFs = srcFs;
  this.dstFs = dstFs;
}
 
Example 22
Source Project: hudi   Source File: TestInputPathHandler.java    License: Apache License 2.0
static List<Path> generatePartitions(DistributedFileSystem dfs, String basePath)
    throws IOException {
  List<Path> paths = new ArrayList<>();
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/21"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/22"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/23"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/24"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/25"));
  for (Path path: paths) {
    dfs.mkdirs(path);
  }
  return paths;
}
 
Example 23
Source Project: DataLink   Source File: FileStreamToken.java    License: Apache License 2.0
public FileStreamToken(String pathString, Path path, DistributedFileSystem fileSystem, FSDataOutputStream fileStream) {
    this.pathString = pathString;
    this.path = path;
    this.fileSystem = fileSystem;
    this.fileStream = fileStream;
    this.lastUpdateTime = System.currentTimeMillis();
    this.lastHSyncTime = 0;
}
 
Example 24
Source Project: big-c   Source File: NameNodeConnector.java    License: Apache License 2.0
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
                         List<Path> targetPaths, Configuration conf,
                         int maxNotChangedIterations)
    throws IOException {
  this.nameNodeUri = nameNodeUri;
  this.idPath = idPath;
  this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
      .asList(new Path("/")) : targetPaths;
  this.maxNotChangedIterations = maxNotChangedIterations;

  this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
      NamenodeProtocol.class).getProxy();
  this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
      ClientProtocol.class, fallbackToSimpleAuth).getProxy();
  this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);

  final NamespaceInfo namespaceinfo = namenode.versionRequest();
  this.blockpoolID = namespaceinfo.getBlockPoolID();

  final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
  this.keyManager = new KeyManager(blockpoolID, namenode,
      defaults.getEncryptDataTransfer(), conf);
  // if it is for test, we do not create the id file
  out = checkAndMarkRunning();
  if (out == null) {
    // Exit if there is another one running.
    throw new IOException("Another " + name + " is running.");
  }
}
 
Example 25
Source Project: big-c   Source File: TestWebHDFS.java    License: Apache License 2.0
/**
 * Test snapshot rename through WebHdfs
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    Assert.assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));

    webHdfs.deleteSnapshot(foo, "s2");
    Assert.assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 26
Source Project: hadoop-gpu   Source File: DFSAdmin.java    License: Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: java DFSAdmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  dfs.metaSave(pathname);
  System.out.println("Created file " + pathname + " on server " +
                     dfs.getUri());
  return 0;
}
 
Example 27
Source Project: RDFS   Source File: DatanodeBenThread.java    License: Apache License 2.0
public DatanodeBenThread(Configuration conf, Path input, Path output, int id,
    RUNNING_TYPE init_type, DatanodeBenRunTimeConstants rtc) throws IOException{
  super(conf, input, output, rtc);
  this.rtc = rtc;
  this.replication = (short)conf.getInt(REPLICATION_KEY, DEFAULT_REPLICATION_NUM);
  this.max_size = conf.getLong(FILE_SIZE_KEY, DEFAULT_FILE_SIZE) * 1024 * 1024;
  this.pread = conf.getFloat(READ_PERCENT_KEY, DEFAULT_READ_PERCENT);
  this.tb = new TokenBucket(rtc.data_rate);
  this.id = id;
  this.thread_name = rtc.task_name + "_" + id;
  this.running_type = init_type;
  if (running_type.equals(RUNNING_TYPE.PREPARE)) {
    this.file_prefix = rtc.cur_datanode + thread_name +  "_part";
  } else {
    this.file_prefix = thread_name + "_part";
    this.nsPickLists = rtc.pickLists.get(conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
    this.dfs = (DistributedFileSystem)fs;
    float f = rb.nextFloat();
    if (f < pread + 1e-9) {
      this.running_type = RUNNING_TYPE.READ;
    } else {
      this.outputPath = new Path(outputPath, thread_name);
      this.running_type = RUNNING_TYPE.WRITE;
    }
  }
  fs.mkdirs(this.outputPath); 
}
 
Example 28
Source Project: big-c   Source File: DFSAdmin.java    License: Apache License 2.0
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;
 
  return exitCode;
}
 
Example 29
Source Project: hadoop   Source File: TestFsck.java    License: Apache License 2.0
/**
 * Test storage policy display
 */
@Test
public void testStoragePoliciesCK() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    writeFile(dfs, "/testhot", "file", "HOT");
    writeFile(dfs, "/testwarm", "file", "WARM");
    writeFile(dfs, "/testcold", "file", "COLD");
    String outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertTrue(outStr.contains("All blocks satisfy specified storage policy."));
    dfs.setStoragePolicy(new Path("/testhot"), "COLD");
    dfs.setStoragePolicy(new Path("/testwarm"), "COLD");
    outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertFalse(outStr.contains("All blocks satisfy specified storage policy."));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 30
Source Project: big-c   Source File: DFSck.java    License: Apache License 2.0
/**
 * Derive the namenode http address from the current file system,
 * either default or as set by "-fs" in the generic options.
 * @return Returns http address or null if failure.
 * @throws IOException if we can't determine the active NN address
 */
private URI getCurrentNamenodeAddress(Path target) throws IOException {
  //String nnAddress = null;
  Configuration conf = getConf();

  //get the filesystem object to verify it is an HDFS system
  final FileSystem fs = target.getFileSystem(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    System.err.println("FileSystem is " + fs.getUri());
    return null;
  }

  return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
      DFSUtil.getHttpClientScheme(conf));
}