org.apache.hadoop.hdfs.DFSUtil Java Examples

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
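Many of the examples rely on a few small DFSUtil helpers for converting path components between String and byte[] form. As a quick orientation, here is a minimal, hypothetical sketch (not taken from any of the projects below; the name "file1" is illustrative only):

// DFSUtil stores and compares INode names as raw bytes, so the examples
// frequently convert between String and byte[]:
byte[] rawName = DFSUtil.string2Bytes("file1");   // String -> byte[]
String name = DFSUtil.bytes2String(rawName);      // byte[] -> String, round-trips to "file1"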
Example #1
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static SnapshottableDirectoryStatusProto convert(
    SnapshottableDirectoryStatus status) {
  if (status == null) {
    return null;
  }
  int snapshotNumber = status.getSnapshotNumber();
  int snapshotQuota = status.getSnapshotQuota();
  byte[] parentFullPath = status.getParentFullPath();
  ByteString parentFullPathBytes = ByteString.copyFrom(
      parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
  HdfsFileStatusProto fs = convert(status.getDirStatus());
  SnapshottableDirectoryStatusProto.Builder builder = 
      SnapshottableDirectoryStatusProto
      .newBuilder().setSnapshotNumber(snapshotNumber)
      .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
      .setDirStatus(fs);
  return builder.build();
}
 
Example #2
Source File: HftpFileSystem.java    From hadoop with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
  // update the kerberos credentials, if they are coming from a keytab
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
      @Override
      public Long run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
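The DFSUtil.createUri call above simply combines a URI scheme with an InetSocketAddress. A minimal, hypothetical illustration (the host name and port are made up):

// Hypothetical values; createUri builds the token-renewal endpoint from a
// scheme plus the address resolved for the token's service.
InetSocketAddress serviceAddr = NetUtils.createSocketAddr("nn.example.com:50470");
URI renewUri = DFSUtil.createUri("https", serviceAddr);   // https://nn.example.com:50470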
 
Example #3
Source File: FSDirectory.java    From big-c with Apache License 2.0
/**
 * Verify child's name for fs limit.
 *
 * @param childName byte[] containing new child name
 * @param parentPath String containing parent path
 * @throws PathComponentTooLongException child's name is too long.
 */
void verifyMaxComponentLength(byte[] childName, String parentPath)
    throws PathComponentTooLongException {
  if (maxComponentLength == 0) {
    return;
  }

  final int length = childName.length;
  if (length > maxComponentLength) {
    final PathComponentTooLongException e = new PathComponentTooLongException(
        maxComponentLength, length, parentPath,
        DFSUtil.bytes2String(childName));
    if (namesystem.isImageLoaded()) {
      throw e;
    } else {
      // Do not throw if edits log is still being processed
      NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
    }
  }
}
 
Example #4
Source File: NameNode.java    From big-c with Apache License 2.0
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
 
Example #5
Source File: Nfs3HttpServer.java    From big-c with Apache License 2.0
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);

  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);

  this.httpServer = builder.build();
  this.httpServer.start();
  
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }

  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
 
Example #6
Source File: HATestUtil.java    From big-c with Apache License 2.0
/**
 * Sets the required configurations for performing failover
 */
public static void setFailoverConfigurations(Configuration conf,
    String logicalName, InetSocketAddress nnAddr1,
    InetSocketAddress nnAddr2) {
  String nameNodeId1 = "nn1";
  String nameNodeId2 = "nn2";
  String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();
  String address2 = "hdfs://" + nnAddr2.getHostName() + ":" + nnAddr2.getPort();
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      logicalName, nameNodeId1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      logicalName, nameNodeId2), address2);
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, logicalName),
      nameNodeId1 + "," + nameNodeId2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + logicalName);
}
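The per-NameNode keys set above are built with DFSUtil.addKeySuffixes, which appends dot-separated suffixes to a base configuration key. A small, hypothetical illustration (the logical name "mycluster" is made up):

// addKeySuffixes joins the base key with the nameservice and NameNode IDs:
String key = DFSUtil.addKeySuffixes(
    DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "mycluster", "nn1");
// key is now "dfs.namenode.rpc-address.mycluster.nn1"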
 
Example #7
Source File: DFSAdmin.java    From big-c with Apache License 2.0
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Example #8
Source File: DFSZKFailoverController.java    From big-c with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Example #9
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
  if (entry == null) {
    return null;
  }
  ByteString sourcePath = ByteString
      .copyFrom(entry.getSourcePath() == null ? DFSUtil.EMPTY_BYTES : entry
          .getSourcePath());
  String modification = entry.getType().getLabel();
  SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
      .newBuilder().setFullpath(sourcePath)
      .setModificationLabel(modification);
  if (entry.getType() == DiffType.RENAME) {
    ByteString targetPath = ByteString
        .copyFrom(entry.getTargetPath() == null ? DFSUtil.EMPTY_BYTES : entry
            .getTargetPath());
    builder.setTargetPath(targetPath);
  }
  return builder.build();
}
 
Example #10
Source File: TestINodeFile.java    From big-c with Apache License 2.0
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @param fileNamePrefix Prefix for the name of each created file
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte)0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }
  
  return iNodes;
}
 
Example #11
Source File: BackupNode.java    From hadoop with Apache License 2.0
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while(!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch(SocketTimeoutException e) {  // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        LOG.warn("Encountered exception ", e);
      }
    }
  }
  return nsInfo;
}
 
Example #12
Source File: DFSZKFailoverController.java    From hadoop with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Example #13
Source File: NameNode.java    From hadoop with Apache License 2.0
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
 
Example #14
Source File: INodesInPath.java    From big-c with Apache License 2.0
private String toString(boolean validateObject) {
  if (validateObject) {
    validate();
  }

  final StringBuilder b = new StringBuilder(getClass().getSimpleName())
      .append(": path = ").append(DFSUtil.byteArray2PathString(path))
      .append("\n  inodes = ");
  if (inodes == null) {
    b.append("null");
  } else if (inodes.length == 0) {
    b.append("[]");
  } else {
    b.append("[").append(toString(inodes[0]));
    for(int i = 1; i < inodes.length; i++) {
      b.append(", ").append(toString(inodes[i]));
    }
    b.append("], length=").append(inodes.length);
  }
  b.append("\n  isSnapshot        = ").append(isSnapshot)
   .append("\n  snapshotId        = ").append(snapshotId);
  return b.toString();
}
 
Example #15
Source File: TestMover.java    From hadoop with Apache License 2.0
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example #16
Source File: NameNode.java    From hadoop with Apache License 2.0
public static void main(String argv[]) throws Exception {
  if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
    NameNode namenode = createNameNode(argv, null);
    if (namenode != null) {
      namenode.join();
    }
  } catch (Throwable e) {
    LOG.error("Failed to start namenode.", e);
    terminate(1, e);
  }
}
 
Example #17
Source File: OzoneGetConf.java    From hadoop-ozone with Apache License 2.0
public static void main(String[] args) throws Exception {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.addResource(new OzoneConfiguration());
  int res = ToolRunner.run(new OzoneGetConf(conf), args);
  System.exit(res);
}
 
Example #18
Source File: SecondaryNameNode.java    From hadoop with Apache License 2.0
/**
 * Returns the URL of the Jetty server that the NameNode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
 
Example #19
Source File: TestAllowFormat.java    From big-c with Apache License 2.0
/**
 * Test to skip format for non file scheme directory configured
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // The DFS_NAMENODE_RPC_ADDRESS_KEY settings are required to identify whether
  // the NameNode is configured in HA; only then is
  // DFS_NAMENODE_SHARED_EDITS_DIR_KEY considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert is added to verify the working of the test
  NameNode.format(conf);
}
 
Example #20
Source File: FSImageSerialization.java    From RDFS with Apache License 2.0
static INodeFileUnderConstruction readINodeUnderConstruction(
                          DataInputStream in) throws IOException {
  byte[] name = readBytes(in);
  String path = DFSUtil.bytes2String(name);
  short blockReplication = in.readShort();
  long modificationTime = in.readLong();
  long preferredBlockSize = in.readLong();
  int numBlocks = in.readInt();
  BlockInfo[] blocks = new BlockInfo[numBlocks];
  Block blk = new Block();
  for (int i = 0; i < numBlocks; i++) {
    blk.readFields(in);
    blocks[i] = new BlockInfo(blk, blockReplication);
  }
  PermissionStatus perm = PermissionStatus.read(in);
  String clientName = readString(in);
  String clientMachine = readString(in);

  // These locations are not used at all
  int numLocs = in.readInt();
  DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
  for (int i = 0; i < numLocs; i++) {
    locations[i] = new DatanodeDescriptor();
    locations[i].readFields(in);
  }

  return new INodeFileUnderConstruction(name, 
                                        blockReplication, 
                                        modificationTime,
                                        preferredBlockSize,
                                        blocks,
                                        perm,
                                        clientName,
                                        clientMachine,
                                        null);
}
 
Example #21
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static BlockKeyProto convert(BlockKey key) {
  byte[] encodedKey = key.getEncodedKey();
  ByteString keyBytes = ByteString.copyFrom(encodedKey == null ? 
      DFSUtil.EMPTY_BYTES : encodedKey);
  return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
      .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
}
 
Example #22
Source File: TestByteArrayManager.java    From hadoop with Apache License 2.0
@Test
public void testCounter() throws Exception {
  final long countResetTimePeriodMs = 200L;
  final Counter c = new Counter(countResetTimePeriodMs);

  final int n = DFSUtil.getRandom().nextInt(512) + 512;
  final List<Future<Integer>> futures = new ArrayList<Future<Integer>>(n);
  
  final ExecutorService pool = Executors.newFixedThreadPool(32);
  try {
    // increment
    for(int i = 0; i < n; i++) {
      futures.add(pool.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
          return (int)c.increment();
        }
      }));
    }

    // sort and wait for the futures
    Collections.sort(futures, CMP);
  } finally {
    pool.shutdown();
  }

  // check futures
  Assert.assertEquals(n, futures.size());
  for(int i = 0; i < n; i++) {
    Assert.assertEquals(i + 1, futures.get(i).get().intValue());
  }
  Assert.assertEquals(n, c.getCount());

  // test auto-reset
  Thread.sleep(countResetTimePeriodMs + 100);
  Assert.assertEquals(1, c.increment());
}
 
Example #23
Source File: NNStorage.java    From big-c with Apache License 2.0
/**
 * Generate new blockpoolID.
 * 
 * @return new blockpoolID
 */ 
static String newBlockPoolID() throws UnknownHostException{
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException e) {
    LOG.warn("Could not find ip address of \"default\" inteface.");
    throw e;
  }
  
  int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
  String bpid = "BP-" + rand + "-"+ ip + "-" + Time.now();
  return bpid;
}
 
Example #24
Source File: TestSnapshotDiffReport.java    From big-c with Apache License 2.0
/**
 * Make changes under a sub-directory, then delete the sub-directory. Make
 * sure the diff report computation correctly retrieves the diff from the
 * deleted sub-directory.
 */
@Test (timeout=60000)
public void testDiffReport2() throws Exception {
  Path subsub1 = new Path(sub1, "subsub1");
  Path subsubsub1 = new Path(subsub1, "subsubsub1");
  hdfs.mkdirs(subsubsub1);
  modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1});
  
  // delete subsub1
  hdfs.delete(subsub1, true);
  // check diff report between s0 and s2
  verifyDiffReport(sub1, "s0", "s2", 
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")), 
      new DiffReportEntry(DiffType.CREATE, 
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
  // check diff report between s0 and the current status
  verifyDiffReport(sub1, "s0", "", 
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1")));
}
 
Example #25
Source File: DatanodeBenThread.java    From RDFS with Apache License 2.0
public static List<JobConf> getNameNodeConfs(JobConf conf) 
    throws IOException {
  List<InetSocketAddress> nameNodeAddrs = 
      DFSUtil.getClientRpcAddresses(conf, null);
  List<JobConf> nameNodeConfs = 
      new ArrayList<JobConf>(nameNodeAddrs.size());
  for (InetSocketAddress nnAddr : nameNodeAddrs) {
    JobConf newConf = new JobConf(conf);
    newConf.set(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY,
        nnAddr.getHostName() + ":" + nnAddr.getPort());
    NameNode.setupDefaultURI(newConf);
    nameNodeConfs.add(newConf);
  }
  return nameNodeConfs;
}
 
Example #26
Source File: TestDFSAdminWithHA.java    From big-c with Apache License 2.0
private void setHAConf(Configuration conf, String nn1Addr, String nn2Addr) {
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      "hdfs://" + NSID);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"), nn1Addr);
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"), nn2Addr);
}
 
Example #27
Source File: Checkpointer.java    From hadoop with Apache License 2.0
private URL getImageListenAddress() {
  InetSocketAddress httpSocAddr = backupNode.getHttpAddress();
  int httpPort = httpSocAddr.getPort();
  try {
    return new URL(DFSUtil.getHttpClientScheme(conf) + "://" + infoBindAddress + ":" + httpPort);
  } catch (MalformedURLException e) {
    // Unreachable
    throw new RuntimeException(e);
  }
}
 
Example #28
Source File: InvalidateBlocks.java    From big-c with Apache License 2.0
private void printBlockDeletionTime(final Logger log) {
  log.info(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY
      + " is set to " + DFSUtil.durationToString(pendingPeriodInMs));
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy MMM dd HH:mm:ss");
  Calendar calendar = new GregorianCalendar();
  calendar.add(Calendar.SECOND, (int) (this.pendingPeriodInMs / 1000));
  log.info("The block deletion will start around "
      + sdf.format(calendar.getTime()));
}
 
Example #29
Source File: TestRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * sleeps for up to 20s until the number of corrupt files 
 * in the file system is equal to the number specified
 */
static public void waitUntilCorruptFileCount(DistributedFileSystem dfs,
                                       int corruptFiles)
  throws IOException, InterruptedException {
  long waitStart = System.currentTimeMillis();
  while (DFSUtil.getCorruptFiles(dfs).length != corruptFiles &&
      System.currentTimeMillis() < waitStart + 20000L) {
    Thread.sleep(1000);
  }
  assertEquals("expected " + corruptFiles + " corrupt files", 
      corruptFiles, DFSUtil.getCorruptFiles(dfs).length);
}
 
Example #30
Source File: FSDirectory.java    From hadoop with Apache License 2.0
INodeAttributes getAttributes(String fullPath, byte[] path,
    INode node, int snapshot) {
  INodeAttributes nodeAttrs = node;
  if (attributeProvider != null) {
    nodeAttrs = node.getSnapshotINode(snapshot);
    fullPath = fullPath + (fullPath.endsWith(Path.SEPARATOR) ? ""
                                                             : Path.SEPARATOR)
        + DFSUtil.bytes2String(path);
    nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs);
  } else {
    nodeAttrs = node.getSnapshotINode(snapshot);
  }
  return nodeAttrs;
}