org.apache.hadoop.hdfs.DistributedFileSystem Java Examples

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem. Each example comes from an open-source project; the source file, project, and license are noted above the code.
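Before the examples, here is a minimal sketch of how a DistributedFileSystem handle is typically obtained. This snippet is not from any of the projects below; it assumes a Hadoop client configuration on the classpath whose fs.defaultFS points at an HDFS cluster.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DfsHandleSketch {
  public static void main(String[] args) throws IOException {
    // Loads core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Most of the examples below guard the downcast the same way, because the
    // default filesystem may be a LocalFileSystem or another implementation.
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      System.out.println("HDFS at " + dfs.getUri()
          + ", capacity = " + dfs.getStatus().getCapacity() + " bytes");
    } else {
      System.out.println("Default filesystem is not HDFS: " + fs.getClass().getName());
    }
  }
}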
Example #1
Source File: HDataStorage.java    From spork with Apache License 2.0
public Map<String, Object> getStatistics() throws IOException {
    Map<String, Object> stats = new HashMap<String, Object>();

    long usedBytes = fs.getUsed();
    stats.put(USED_BYTES_KEY, Long.valueOf(usedBytes).toString());
    
    if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        
        long rawCapacityBytes = dfs.getRawCapacity();
        stats.put(RAW_CAPACITY_KEY, Long.valueOf(rawCapacityBytes).toString());
        
        long rawUsedBytes = dfs.getRawUsed();
        stats.put(RAW_USED_KEY, Long.valueOf(rawUsedBytes).toString());
    }
    
    return stats;
}
 
Example #2
Source File: HdfsClient.java    From bigdata-tutorial with Apache License 2.0
/**
 * List the host names of all DataNodes in the HDFS cluster.
 */
public void listDataNodeInfo() {
	try {
		DistributedFileSystem hdfs = (DistributedFileSystem) fs;
		DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
		String[] names = new String[dataNodeStats.length];
		System.out.println(">>>> List of all the datanode in the HDFS cluster:");

		for (int i = 0; i < names.length; i++) {
			names[i] = dataNodeStats[i].getHostName();
			System.out.println(">>>> datanode : " + names[i]);
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example #3
Source File: HDFSConcat.java    From big-c with Apache License 2.0
public static void main(String... args) throws IOException {

    if(args.length < 2) {
      System.err.println("Usage HDFSConcat target srcs..");
      System.exit(0);
    }
    
    Configuration conf = new Configuration();
    String uri = conf.get("fs.default.name", def_uri);
    Path path = new Path(uri);
    DistributedFileSystem dfs = 
      (DistributedFileSystem)FileSystem.get(path.toUri(), conf);
    
    Path [] srcs = new Path[args.length-1];
    for(int i=1; i<args.length; i++) {
      srcs[i-1] = new Path(args[i]);
    }
    dfs.concat(new Path(args[0]), srcs);
  }
 
Example #4
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * This method clones the passed <code>c</code> configuration setting a new
 * user into the clone.  Use it when getting new instances of FileSystem.  Only
 * works for DistributedFileSystem w/o Kerberos.
 * @param c Initial configuration
 * @param differentiatingSuffix Suffix to differentiate this user from others.
 * @return A new configuration instance with a different user set into it.
 * @throws IOException
 */
public static User getDifferentUser(final Configuration c,
  final String differentiatingSuffix)
throws IOException {
  FileSystem currentfs = FileSystem.get(c);
  if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
    return User.getCurrent();
  }
  // Else distributed filesystem.  Make a new instance per daemon.  Below
  // code is taken from the AppendTestUtil over in hdfs.
  String username = User.getCurrent().getName() +
    differentiatingSuffix;
  User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
  return user;
}
 
Example #5
Source File: TestFileSystemRepositoryURIs.java    From kite with Apache License 2.0
@Test
public void testHdfsAbsolute() throws URISyntaxException {
  URI hdfsUri = getDFS().getUri();
  URI repositoryUri = new URI("repo:hdfs://" + hdfsUri.getAuthority() + "/tmp/dsr-repo-test");
  DatasetRepository repository = DatasetRepositories.repositoryFor(repositoryUri);

  // We only do the deeper implementation checks one per combination.
  Assert.assertNotNull("Received a repository", repository);
  Assert.assertTrue("Repo is a FileSystem repo",
      repository instanceof FileSystemDatasetRepository);
  MetadataProvider provider = ((FileSystemDatasetRepository) repository)
      .getMetadataProvider();
  Assert.assertTrue("Repo is using a FileSystemMetadataProvider",
      provider instanceof FileSystemMetadataProvider);
  FileSystemMetadataProvider fsProvider = (FileSystemMetadataProvider) provider;
  Assert.assertTrue("FileSystem is a DistributedFileSystem",
    fsProvider.getFileSytem() instanceof DistributedFileSystem);
  Path expected = fsProvider.getFileSytem().makeQualified(
      new Path("/tmp/dsr-repo-test"));
  Assert.assertEquals("Root directory should be the correct qualified path",
      expected, fsProvider.getRootDirectory());
  Assert.assertEquals("Repository URI", repositoryUri, repository.getUri());
}
 
Example #6
Source File: TestCombineFileInputFormat.java    From RDFS with Apache License 2.0
private void splitRealFiles(String[] args) throws IOException {
  JobConf conf = new JobConf();
  FileSystem fs = FileSystem.get(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IOException("Wrong file system: " + fs.getClass().getName());
  }
  int blockSize = conf.getInt("dfs.block.size", 128 * 1024 * 1024);

  DummyInputFormat inFormat = new DummyInputFormat();
  for (int i = 0; i < args.length; i++) {
    inFormat.addInputPaths(conf, args[i]);
  }
  inFormat.setMinSplitSizeRack(blockSize);
  inFormat.setMaxSplitSize(10 * blockSize);

  InputSplit[] splits = inFormat.getSplits(conf, 1);
  System.out.println("Total number of splits " + splits.length);
  for (int i = 0; i < splits.length; ++i) {
    CombineFileSplit fileSplit = (CombineFileSplit) splits[i];
    System.out.println("Split[" + i + "] " + fileSplit);
  }
}
 
Example #7
Source File: HdfsDirectory.java    From incubator-retired-blur with Apache License 2.0
protected long length(String name) throws IOException {
  Path path = getPath(name);
  Tracer trace = Trace.trace("filesystem - length", Trace.param("path", path));
  try {
    if (_fileSystem instanceof DistributedFileSystem) {
      FSDataInputStream in = _fileSystem.open(path);
      try {
        return HdfsUtils.getFileLength(_fileSystem, path, in);
      } finally {
        in.close();
      }
    } else {
      return _fileSystem.getFileStatus(path).getLen();
    }
  } finally {
    trace.done();
  }
}
 
Example #8
Source File: TestArchiveHdfsLogReaderAndWriter.java    From incubator-ratis with Apache License 2.0
@Test public void testSeek() throws IOException {
  String archiveLocation = location+"/testSeek";
  LogName logName = LogName.of("testSeek");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.delete(new Path(archiveLocation), true);
  ArchiveLogWriter writer = new ArchiveHdfsLogWriter(conf);
  writer.init(archiveLocation, logName);
  int k = 100;
  write(writer, 1, k);
  writer.close();
  ArchiveLogReader reader = new ArchiveHdfsLogReader(conf,
      LogServiceUtils.getArchiveLocationForLog(archiveLocation, logName));
  reader.seek(80);
  Assert.assertEquals(80, reader.getPosition());
  int count = 0;
  while (reader.next() != null) {
    count++;
  }
  Assert.assertEquals(20, count);
}
 
Example #9
Source File: FanOutOneBlockAsyncDFSOutput.java    From hbase with Apache License 2.0
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs,
    DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
    LocatedBlock locatedBlock, Encryptor encryptor, List<Channel> datanodeList,
    DataChecksum summer, ByteBufAllocator alloc) {
  this.conf = conf;
  this.dfs = dfs;
  this.client = client;
  this.namenode = namenode;
  this.fileId = fileId;
  this.clientName = clientName;
  this.src = src;
  this.block = locatedBlock.getBlock();
  this.locations = locatedBlock.getLocations();
  this.encryptor = encryptor;
  this.datanodeList = datanodeList;
  this.summer = summer;
  this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
  this.alloc = alloc;
  this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
  this.state = State.STREAMING;
  setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
 
Example #10
Source File: DistCpSync.java    From hadoop with Apache License 2.0
/**
 * Compute the snapshot diff on the given file system. Return true if the diff
 * is empty, i.e., no changes have happened in the FS.
 */
private static boolean checkNoChange(DistCpOptions inputOptions,
    DistributedFileSystem fs, Path path) {
  try {
    SnapshotDiffReport targetDiff =
        fs.getSnapshotDiffReport(path, inputOptions.getFromSnapshot(), "");
    if (!targetDiff.getDiffList().isEmpty()) {
      DistCp.LOG.warn("The target has been modified since snapshot "
          + inputOptions.getFromSnapshot());
      return false;
    } else {
      return true;
    }
  } catch (IOException e) {
    DistCp.LOG.warn("Failed to compute snapshot diff on " + path, e);
  }
  return false;
}
 
Example #11
Source File: DFSAdmin.java    From big-c with Apache License 2.0
static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
  final RollingUpgradeAction action = RollingUpgradeAction.fromString(
      argv.length >= 2? argv[1]: "");
  if (action == null) {
    throw new IllegalArgumentException("Failed to covert \"" + argv[1]
        +"\" to " + RollingUpgradeAction.class.getSimpleName());
  }

  System.out.println(action + " rolling upgrade ...");

  final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
  switch(action){
  case QUERY:
    break;
  case PREPARE:
    Preconditions.checkState(info.isStarted());
    break;
  case FINALIZE:
    Preconditions.checkState(info == null || info.isFinalized());
    break;
  }
  printMessage(info, System.out);
  return 0;
}
 
Example #12
Source File: FileLock.java    From jstorm with Apache License 2.0
/**
 * Takes ownership of the lock file if possible.
 * @param lockFile
 * @param lastEntry   last entry in the lock file. This param is an optimization:
 *                    we don't scan the lock file again to find its last entry here, since
 *                    that has already been done once by the logic used to check if the lock
 *                    file is stale. So this value comes from that earlier scan.
 * @param spoutId     spout id
 * @throws IOException if unable to acquire
 * @return null if lock File is not recoverable
 */
public static FileLock takeOwnership(FileSystem fs, Path lockFile, LogEntry lastEntry, String spoutId)
        throws IOException {
  try {
    if(fs instanceof DistributedFileSystem ) {
      if( !((DistributedFileSystem) fs).recoverLease(lockFile) ) {
        LOG.warn("Unable to recover lease on lock file {} right now. Cannot transfer ownership. Will need to try later. Spout = {}", lockFile, spoutId);
        return null;
      }
    }
    return new FileLock(fs, lockFile, spoutId, lastEntry);
  } catch (IOException e) {
    if (e instanceof RemoteException &&
            ((RemoteException) e).unwrapRemoteException() instanceof AlreadyBeingCreatedException) {
      LOG.warn("Lock file " + lockFile + "is currently open. Cannot transfer ownership now. Will need to try later. Spout= " + spoutId, e);
      return null;
    } else { // unexpected error
      LOG.warn("Cannot transfer ownership now for lock file " + lockFile + ". Will need to try later. Spout =" + spoutId, e);
      throw e;
    }
  }
}
 
Example #13
Source File: FSUtils.java    From hudi with Apache License 2.0
/**
 * When a file was opened and the task died without closing the stream, another task executor cannot open it because
 * the existing lease will still be active. We will try to recover the lease from HDFS. If a data node went down, it
 * takes about 10 minutes for the lease to be recovered; but if the client dies, this should be instant.
 */
public static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p)
    throws IOException, InterruptedException {
  LOG.info("Recover lease on dfs file " + p);
  // initiate the recovery
  boolean recovered = false;
  for (int nbAttempt = 0; nbAttempt < MAX_ATTEMPTS_RECOVER_LEASE; nbAttempt++) {
    LOG.info("Attempt " + nbAttempt + " to recover lease on dfs file " + p);
    recovered = dfs.recoverLease(p);
    if (recovered) {
      break;
    }
    // Sleep for 1 second before trying again. Typically it takes about 2-3 seconds to recover
    // under default settings
    Thread.sleep(1000);
  }
  return recovered;
}
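A quick usage sketch for the helper above (editorial, not part of the Hudi source; the surrounding method and path are hypothetical): the call is typically attempted only after checking that the FileSystem really is HDFS, since lease recovery is HDFS-specific.

static void recoverLeaseIfOnHdfs(org.apache.hadoop.fs.Path p, org.apache.hadoop.conf.Configuration conf)
    throws java.io.IOException, InterruptedException {
  org.apache.hadoop.fs.FileSystem fs = p.getFileSystem(conf);
  // Lease recovery only applies to HDFS; local or object-store filesystems have no lease concept.
  if (fs instanceof DistributedFileSystem) {
    if (!recoverDFSFileLease((DistributedFileSystem) fs, p)) {
      throw new java.io.IOException("Could not recover lease on " + p + " after retries");
    }
  }
}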
 
Example #14
Source File: DistributedRaidFileSystem.java    From RDFS with Apache License 2.0
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  // We want to use RAID logic only on instance of DFS.
  if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem underlyingDfs = (DistributedFileSystem) fs;
    LocatedBlocks lbs =
        underlyingDfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
    if (lbs != null) {
      // Use underlying filesystem if the file is under construction.
      if (!lbs.isUnderConstruction()) {
        // Use underlying filesystem if file length is 0.
        final long fileSize = getFileSize(lbs);
        if (fileSize > 0) {
          return new ExtFSDataInputStream(conf, this, f,
            fileSize, getBlockSize(lbs), bufferSize);
        }
      }
    }
  }
  return fs.open(f, bufferSize);
}
 
Example #15
Source File: TestFSImage.java    From big-c with Apache License 2.0
/**
 * Ensure that the digest written by the saver equals to the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #16
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * Choose a datanode (hostname:portnumber). The datanode is chosen at random
 * from the live datanodes.
 * 
 * @param locationsToAvoid
 *            locations to avoid.
 * @return A string in the format name:port.
 * @throws IOException
 */
private String chooseDatanode(DatanodeInfo[] locationsToAvoid)
		throws IOException {
	DistributedFileSystem dfs = getDFS(new Path("/"));
	DatanodeInfo[] live = dfs.getClient().datanodeReport(
			DatanodeReportType.LIVE);

	Random rand = new Random();
	String chosen = null;
	int maxAttempts = 1000;
	for (int i = 0; i < maxAttempts && chosen == null; i++) {
		int idx = rand.nextInt(live.length);
		chosen = live[idx].name;
		for (DatanodeInfo avoid : locationsToAvoid) {
			if (chosen.equals(avoid.name)) {
				//LOG.info("Avoiding " + avoid.name);
				chosen = null;
				break;
			}
		}
	}
	if (chosen == null) {
		throw new IOException("Could not choose datanode");
	}
	return chosen;
}
 
Example #17
Source File: DFSAdmin.java    From hadoop with Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
 
Example #18
Source File: DistCpSync.java    From big-c with Apache License 2.0
/**
 * Compute the snapshot diff on the given file system. Return true if the diff
 * is empty, i.e., no changes have happened in the FS.
 */
private static boolean checkNoChange(DistCpOptions inputOptions,
    DistributedFileSystem fs, Path path) {
  try {
    SnapshotDiffReport targetDiff =
        fs.getSnapshotDiffReport(path, inputOptions.getFromSnapshot(), "");
    if (!targetDiff.getDiffList().isEmpty()) {
      DistCp.LOG.warn("The target has been modified since snapshot "
          + inputOptions.getFromSnapshot());
      return false;
    } else {
      return true;
    }
  } catch (IOException e) {
    DistCp.LOG.warn("Failed to compute snapshot diff on " + path, e);
  }
  return false;
}
 
Example #19
Source File: HadoopFileSystemFactoryImplTest.java    From pentaho-hadoop-shims with Apache License 2.0
@Before
public void setup() throws IOException {
  namedCluster = mock( NamedCluster.class );
  isActiveConfiguration = true;
  hadoopShim = mock( HadoopShim.class );
  configuration = mock( Configuration.class );
  when( hadoopShim.createConfiguration( namedCluster ) ).thenReturn( configuration );
  fileSystem = mock( FileSystem.class );
  when( fileSystem.getDelegate() ).thenReturn( new DistributedFileSystem() );
  when( hadoopShim.getFileSystem( configuration ) ).thenReturn( fileSystem );
  identifier = "testId";
  shimIdentifierInterface = mock( ShimIdentifierInterface.class );
  when( shimIdentifierInterface.getId() ).thenReturn( identifier );
  hadoopFileSystemFactory =
    new HadoopFileSystemFactoryImpl( isActiveConfiguration, hadoopShim, "hdfs", shimIdentifierInterface );
}
 
Example #20
Source File: TestStickyBit.java    From big-c with Apache License 2.0
private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
    .build();
  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
 
Example #21
Source File: FSUtils.java    From hbase with Apache License 2.0
/**
 * @param conf the Configuration of HBase
 * @return Whether srcFs and desFs are on the same HDFS or not
 */
public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) {
  // By getCanonicalServiceName, we could make sure both srcFs and desFs
  // show a unified format which contains scheme, host and port.
  String srcServiceName = srcFs.getCanonicalServiceName();
  String desServiceName = desFs.getCanonicalServiceName();

  if (srcServiceName == null || desServiceName == null) {
    return false;
  }
  if (srcServiceName.equals(desServiceName)) {
    return true;
  }
  if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) {
    Collection<String> internalNameServices =
      conf.getTrimmedStringCollection("dfs.internal.nameservices");
    if (!internalNameServices.isEmpty()) {
      if (internalNameServices.contains(srcServiceName.split(":")[1])) {
        return true;
      } else {
        return false;
      }
    }
  }
  if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) {
    // If one serviceName is an HA format while the other is a non-HA format,
    // maybe they refer to the same FileSystem.
    // For example, srcFs is "ha-hdfs://nameservices" and desFs is "hdfs://activeNamenode:port"
    Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf);
    Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf);
    if (Sets.intersection(srcAddrs, desAddrs).size() > 0) {
      return true;
    }
  }

  return false;
}
 
Example #22
Source File: DFSAdmin.java    From RDFS with Apache License 2.0
/**
 * Command to request current distributed upgrade status, 
 * a detailed status, or to force the upgrade to proceed.
 * 
 * Usage: java DFSAdmin -upgradeProgress [status | details | force]
 * @exception IOException 
 */
public int upgradeProgress(String[] argv, int idx) throws IOException {
  DistributedFileSystem dfs = getDFS();
  if (dfs == null) {
    System.out.println("FileSystem is " + getFS().getUri());
    return -1;
  }
  if (idx != argv.length - 1) {
    printUsage("-upgradeProgress");
    return -1;
  }

  UpgradeAction action;
  if ("status".equalsIgnoreCase(argv[idx])) {
    action = UpgradeAction.GET_STATUS;
  } else if ("details".equalsIgnoreCase(argv[idx])) {
    action = UpgradeAction.DETAILED_STATUS;
  } else if ("force".equalsIgnoreCase(argv[idx])) {
    action = UpgradeAction.FORCE_PROCEED;
  } else {
    printUsage("-upgradeProgress");
    return -1;
  }

  UpgradeStatusReport status = dfs.distributedUpgradeProgress(action);
  String statusText = (status == null ? 
      "There are no upgrades in progress." :
        status.getStatusText(action == UpgradeAction.DETAILED_STATUS));
  System.out.println(statusText);
  return 0;
}
 
Example #23
Source File: DFSAdmin.java    From big-c with Apache License 2.0
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // For security authorization, the server principal for this call
  // should be the NN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
 
Example #24
Source File: TestAddBlock.java    From hadoop with Apache License 2.0
/**
 * Test adding new blocks without closing the corresponding file
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Example #25
Source File: DFSAdmin.java    From RDFS with Apache License 2.0
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: java DFSAdmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  if (dfs == null) {
    System.out.println("FileSystem is " + getFS().getUri());
    return exitCode;
  }

  dfs.finalizeUpgrade();
  exitCode = 0;
 
  return exitCode;
}
 
Example #26
Source File: DistCp.java    From RDFS with Apache License 2.0
/**
 * @param srcs   source paths
 * @param dst    destination path
 * @param conf
 * @return True if all source paths and the destination path are DFS
 *         locations and they are from the same DFS cluster. If
 *         it can't find the DFS cluster name (e.g., because of an older
 *         server build), it will assume the cluster names match.
 * @throws IOException
 */
static public boolean canUseFastCopy(List<Path> srcs, Path dst,
    Configuration conf) throws IOException {
  DistributedFileSystem dstdfs = DFSUtil.convertToDFS(dst
      .getFileSystem(conf));
  if (dstdfs == null) {
    return false;
  }

  String dstClusterName = dstdfs.getClusterName();
  for (Path src : srcs) {
    DistributedFileSystem srcdfs = DFSUtil.convertToDFS(src
        .getFileSystem(conf));
    if (srcdfs == null) {
      return false;
    } else if (dstClusterName != null) {
      // We assume those clusterName == null case was older
      // version of DFS. We always enable fastcopy for those
      // cases.
      String srcClusterName = srcdfs.getClusterName();
      if (srcClusterName != null && !srcClusterName.equals(dstClusterName)) {
        return false;
      }
    }
  }
  return true;
}
 
Example #27
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
/**
 * Move hot files to warm and cold, warm files to hot and cold,
 * and cold files to hot and warm.
 */
void moveAround(DistributedFileSystem dfs) throws Exception {
  for(Path srcDir : map.keySet()) {
    int i = 0;
    for(Path dstDir : map.keySet()) {
      if (!srcDir.equals(dstDir)) {
        final Path src = new Path(srcDir, "file" + i++);
        final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
        LOG.info("rename " + src + " to " + dst);
        dfs.rename(src, dst);
      }
    }
  }
}
 
Example #28
Source File: TestShortCircuitCache.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Example #29
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
private void oldRename(Path src, Path dst, boolean renameSucceeds,
    boolean exception) throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
  } catch (Exception ex) {
    Assert.assertTrue(exception);
  }
  Assert.assertEquals(renameSucceeds, !exists(fc, src));
  Assert.assertEquals(renameSucceeds, exists(fc, dst));
}
 
Example #30
Source File: TestMover.java    From big-c with Apache License 2.0
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();

    // Delete block file so, block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}