Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsConstants

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsConstants. They are extracted from open source projects; the project, source file, and license are listed above each example.
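
Before diving into the examples, the short sketch below pulls together two HdfsConstants idioms that recur throughout this page: toggling safe mode around a namespace checkpoint and clearing a directory's space quota. It is a minimal, illustrative sketch only; the fs.defaultFS address and the /user/example path are placeholder values, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class HdfsConstantsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder address; point this at a real cluster (or a MiniDFSCluster in tests).
    conf.set("fs.defaultFS", HdfsConstants.HDFS_URI_SCHEME + "://localhost:8020");
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Checkpoint pattern used by several examples below:
    // enter safe mode, save the namespace, then leave safe mode.
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);

    // Clear the space quota on a directory while leaving the namespace
    // quota untouched (compare the DFSAdmin example at the end of this page).
    dfs.setQuota(new Path("/user/example"),
        HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);

    dfs.close();
  }
}
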
Example 1
Source Project: hadoop   Source File: TestDatanodeRegister.java    License: Apache License 2.0
@Test
public void testDifferentLayoutVersions() throws Exception {
  // We expect no exceptions to be thrown when the layout versions match.
  assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,
      actor.retrieveNamespaceInfo().getLayoutVersion());
  
  // The DN should tolerate a NN that reports a different layout version,
  // so retrieving the namespace info should still succeed (no exception).
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
      .getLayoutVersion();
  try {
    actor.retrieveNamespaceInfo();
  } catch (IOException e) {
    fail("Should not fail to retrieve NS info from DN with different layout version");
  }
}
 
Example 2
Source Project: hadoop   Source File: DFSOutputStream.java    License: Apache License 2.0
/**
 * Create a socket for a write pipeline
 * @param first the first datanode 
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
 
Example 3
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Example 4
Source Project: hadoop   Source File: TestLazyPersistFiles.java    License: Apache License 2.0
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned back.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
 
Example 5
Source Project: hadoop   Source File: TestQuotaByStorageType.java    License: Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Example 6
Source Project: big-c   Source File: TestHistoryFileManager.java    License: Apache License 2.0
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
    throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  new Thread() {
    @Override
    public void run() {
      try {
        Thread.sleep(500);
        dfsCluster.getFileSystem().setSafeMode(
            HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  }.start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
}
 
Example 7
Source Project: hadoop   Source File: TestTruncateQuotaUpdate.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();

  fsdir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();

  dfs.mkdirs(dir);
  dfs.setQuota(dir, Long.MAX_VALUE - 1, DISKQUOTA);
  dfs.setQuotaByStorageType(dir, StorageType.DISK, DISKQUOTA);
  dfs.setStoragePolicy(dir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
}
 
Example 8
Source Project: big-c   Source File: NamespaceInfo.java    License: Apache License 2.0
public NamespaceInfo(int nsID, String clusterID, String bpID,
    long cT, String buildVersion, String softwareVersion,
    long capabilities) {
  super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
      NodeType.NAME_NODE);
  blockPoolID = bpID;
  this.buildVersion = buildVersion;
  this.softwareVersion = softwareVersion;
  this.capabilities = capabilities;
}
 
Example 9
Source Project: hadoop   Source File: TestWebHDFSForHA.java    License: Apache License 2.0
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 10
Source Project: hadoop   Source File: TestDFSUpgradeWithHA.java    License: Apache License 2.0
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
 
Example 11
Source Project: hadoop   Source File: TestFileStatus.java    License: Apache License 2.0
private static void writeFile(FileSystem fileSys, Path name, int repl,
    int fileSize, int blockSize) throws IOException {
  // Create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Example 12
Source Project: big-c   Source File: TestReplicationPolicyWithNodeGroup.java    License: Apache License 2.0
private static void setupDataNodeCapacity() {
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
 
Example 13
Source Project: big-c   Source File: INode.java    License: Apache License 2.0
/**
 * Get the quota set for this inode
 * @return the quota counts.  The count is -1 if it is not set.
 */
public QuotaCounts getQuotaCounts() {
  return new QuotaCounts.Builder().
      nameSpace(HdfsConstants.QUOTA_RESET).
      storageSpace(HdfsConstants.QUOTA_RESET).
      typeSpaces(HdfsConstants.QUOTA_RESET).
      build();
}
 
Example 14
Source Project: hadoop   Source File: TestReplicationPolicy.java    License: Apache License 2.0
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a
 * different rack, and the 3rd should be placed on a different node
 * in the rack chosen for the 2nd replica.
 * The only exception is when <i>numOfReplicas</i> is 2:
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test
public void testChooseTarget1() throws Exception {
  updateHeartbeatWithUsage(dataNodes[0],
      2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 
      HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      0L, 0L, 4, 0); // overloaded

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(targets.length, 0);
  
  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertEquals(storages[0], targets[0]);
  
  targets = chooseTarget(2);
  assertEquals(targets.length, 2);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  
  targets = chooseTarget(3);
  assertEquals(targets.length, 3);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));

  targets = chooseTarget(4);
  assertEquals(targets.length, 4);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]) ||
          isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  
  resetHeartbeatForStorages();
}
 
Example 15
Source Project: big-c   Source File: DataNode.java    License: Apache License 2.0
static DomainPeerServer getDomainPeerServer(Configuration conf,
    int port) throws IOException {
  String domainSocketPath =
      conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
  if (domainSocketPath.isEmpty()) {
    if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT) &&
       (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
      LOG.warn("Although short-circuit local reads are configured, " +
          "they are disabled because you didn't configure " +
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
    }
    return null;
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    throw new RuntimeException("Although a UNIX domain socket " +
        "path is configured as " + domainSocketPath + ", we cannot " +
        "start a localDataXceiverServer because " +
        DomainSocket.getLoadingFailureReason());
  }
  DomainPeerServer domainPeerServer =
    new DomainPeerServer(domainSocketPath, port);
  domainPeerServer.setReceiveBufferSize(
      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  return domainPeerServer;
}
 
Example 16
Source Project: big-c   Source File: TestQuotaByStorageType.java    License: Apache License 2.0
private void testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
    long storageSpaceQuotaInBlocks, long ssdQuotaInBlocks,
    long testFileLenInBlocks, short replication) throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);

  dfs.mkdirs(testDir);
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  final long ssdQuota = BLOCKSIZE * ssdQuotaInBlocks;
  final long storageSpaceQuota = BLOCKSIZE * storageSpaceQuotaInBlocks;

  dfs.setQuota(testDir, Long.MAX_VALUE - 1, storageSpaceQuota);
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, ssdQuota);

  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());

  Path createdFile = new Path(testDir, "created_file.data");
  long fileLen = testFileLenInBlocks * BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
        fileLen, BLOCKSIZE, replication, seed);
    fail("Should have failed with DSQuotaExceededException or " +
        "QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    long currentSSDConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(Math.min(ssdQuota, storageSpaceQuota/replication),
        currentSSDConsumed);
  }
}
 
Example 17
Source Project: big-c   Source File: Hdfs.java    License: Apache License 2.0
/**
 * This constructor has the signature needed by
 * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
 * 
 * @param theUri which must be that of Hdfs
 * @param conf configuration
 * @throws IOException
 */
Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
  super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

  if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
    throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
  }
  String host = theUri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + theUri);
  }

  this.dfs = new DFSClient(theUri, conf, getStatistics());
}
 
Example 18
Source Project: hadoop   Source File: TestQuotaByStorageType.java    License: Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2.5 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify space consumed and remaining quota
  long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, storageTypeConsumed);

  // Delete file and verify the consumed space of the storage type is updated
  dfs.delete(createdFile1, false);
  storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(0, storageTypeConsumed);

  QuotaCounts counts = new QuotaCounts.Builder().build();
  fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
  assertEquals(fnode.dumpTreeRecursively().toString(), 0,
      counts.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), 0);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
}
 
Example 19
Source Project: hadoop   Source File: DistCpSync.java    License: Apache License 2.0
private static Path getSourceSnapshotPath(Path sourceDir, String snapshotName) {
  if (Path.CUR_DIR.equals(snapshotName)) {
    return sourceDir;
  } else {
    return new Path(sourceDir,
        HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
  }
}
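
As a side note, the DOT_SNAPSHOT_DIR constant used here can be exercised on its own. The sketch below is illustrative only (the /data directory and snapshot name s1 are made up) and simply prints the path that this helper would produce.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SnapshotPathSketch {
  public static void main(String[] args) {
    Path sourceDir = new Path("/data");   // illustrative source directory
    String snapshotName = "s1";           // illustrative snapshot name
    // Same construction as getSourceSnapshotPath above.
    Path snapshotPath = new Path(sourceDir,
        HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
    System.out.println(snapshotPath);     // prints /data/.snapshot/s1
  }
}
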
 
Example 20
Source Project: hadoop   Source File: TestReplicationPolicyWithNodeGroup.java    License: Apache License 2.0
/**
 * Test the replica placement policy in case of a boundary topology.
 * Rack 2 has only one node group, so it cannot hold two replicas.
 * The 1st replica will be placed on the writer.
 * The 2nd replica should be placed on a different rack.
 * The 3rd replica should be placed on the same rack as the writer, but in a
 * different node group.
 */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }

  for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
    updateHeartbeatWithUsage(dataNodes[0],
              2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
              (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
              0L, 0L, 0L, 0, 0);

    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 0);
  
  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 1);

  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 2);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  
  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 3);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
 
Example 21
Source Project: hadoop   Source File: TestMetadataVersionOutput.java    License: Apache License 2.0
@Test(timeout = 30000)
public void testMetadataVersionOutput() throws IOException {

  initConfig();
  dfsCluster = new MiniDFSCluster.Builder(conf).
      manageNameDfsDirs(false).
      numDataNodes(1).
      checkExitOnShutdown(false).
      build();
  dfsCluster.waitClusterUp();
  dfsCluster.shutdown(false);
  initConfig();
  final PrintStream origOut = System.out;
  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  final PrintStream stdOut = new PrintStream(baos);
  System.setOut(stdOut);
  try {
    NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
  } catch (Exception e) {
    assertExceptionContains("ExitException", e);
  }
  /* Check if meta data version is printed correctly. */
  final String verNumStr = HdfsConstants.NAMENODE_LAYOUT_VERSION + "";
  assertTrue(baos.toString("UTF-8").
    contains("HDFS Image Version: " + verNumStr));
  assertTrue(baos.toString("UTF-8").
    contains("Software format version: " + verNumStr));
  System.setOut(origOut);
}
 
Example 22
Source Project: hadoop   Source File: EditLogLedgerMetadata.java    License: Apache License 2.0
EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
                      long ledgerId, long firstTxId) {
  this.zkPath = zkPath;
  this.dataLayoutVersion = dataLayoutVersion;
  this.ledgerId = ledgerId;
  this.firstTxId = firstTxId;
  this.lastTxId = HdfsConstants.INVALID_TXID;
  this.inprogress = true;
}
 
Example 23
Source Project: big-c   Source File: SecondaryNameNode.java    License: Apache License 2.0
/**
 * Returns the URL of the Jetty server that the NameNode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
 
Example 24
Source Project: big-c   Source File: TestQuotaByStorageType.java    License: Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
  short replication = 1;
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      replication, seed);

  INode fnode = fsdir.getINode4Write(parent.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, currentSSDConsumed);

  // Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
  Path createdFile2 = new Path(child, "created_file2.data");
  long file2Len = BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, replication, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, currentSSDConsumed);
  }
}
 
Example 25
Source Project: hadoop   Source File: FSDirectory.java    License: Apache License 2.0
/** Verify if the inode name is legal. */
void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException {
  if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
    String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
    if (!namesystem.isImageLoaded()) {
      s += "  Please rename it before upgrade.";
    }
    throw new HadoopIllegalArgumentException(s);
  }
}
 
Example 26
Source Project: big-c   Source File: HAUtil.java    License: Apache License 2.0
/**
 * Locate a delegation token associated with the given HA cluster URI, and if
 * one is found, clone it to also represent the underlying namenode address.
 * @param ugi the UGI to modify
 * @param haUri the logical URI for the cluster
 * @param nnAddrs collection of NNs in the cluster to which the token
 * applies
 */
public static void cloneDelegationTokenForLogicalUri(
    UserGroupInformation ugi, URI haUri,
    Collection<InetSocketAddress> nnAddrs) {
  // this cloning logic is only used by hdfs
  Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME);
  Token<DelegationTokenIdentifier> haToken =
      tokenSelector.selectToken(haService, ugi.getTokens());
  if (haToken != null) {
    for (InetSocketAddress singleNNAddr : nnAddrs) {
      // this is a minor hack to prevent physical HA tokens from being
      // exposed to the user via UGI.getCredentials(), otherwise these
      // cloned tokens may be inadvertently propagated to jobs
      Token<DelegationTokenIdentifier> specificToken =
          new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
      SecurityUtil.setTokenService(specificToken, singleNNAddr);
      Text alias = new Text(
          buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
              + "//" + specificToken.getService());
      ugi.addToken(alias, specificToken);
      LOG.debug("Mapped HA service delegation token for logical URI " +
          haUri + " to namenode " + singleNNAddr);
    }
  } else {
    LOG.debug("No HA service delegation token found for logical URI " +
        haUri);
  }
}
 
Example 27
Source Project: hadoop   Source File: TestDFSUpgradeFromImage.java    License: Apache License 2.0
void upgradeAndVerify(MiniDFSCluster.Builder bld, ClusterVerifier verifier)
    throws IOException {
  MiniDFSCluster cluster = null;
  try {
    bld.format(false).startupOption(StartupOption.UPGRADE)
      .clusterId("testClusterId");
    cluster = bld.build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    recoverAllLeases(dfsClient, new Path("/"));
    verifyFileSystem(dfs);

    if (verifier != null) {
      verifier.verifyClusterPostUpgrade(cluster);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  } 
}
 
Example 28
Source Project: big-c   Source File: RedundantEditLogInputStream.java    License: Apache License 2.0
RedundantEditLogInputStream(Collection<EditLogInputStream> streams,
    long startTxId) {
  this.curIdx = 0;
  this.prevTxId = (startTxId == HdfsConstants.INVALID_TXID) ?
    HdfsConstants.INVALID_TXID : (startTxId - 1);
  this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL;
  this.prevException = null;
  // EditLogInputStreams in a RedundantEditLogInputStream must be finalized,
  // and can't be pre-transactional.
  EditLogInputStream first = null;
  for (EditLogInputStream s : streams) {
    Preconditions.checkArgument(s.getFirstTxId() !=
        HdfsConstants.INVALID_TXID, "invalid first txid in stream: %s", s);
    Preconditions.checkArgument(s.getLastTxId() !=
        HdfsConstants.INVALID_TXID, "invalid last txid in stream: %s", s);
    if (first == null) {
      first = s;
    } else {
      Preconditions.checkArgument(s.getFirstTxId() == first.getFirstTxId(),
        "All streams in the RedundantEditLogInputStream must have the same " +
        "start transaction ID!  " + first + " had start txId " +
        first.getFirstTxId() + ", but " + s + " had start txId " +
        s.getFirstTxId());
    }
  }

  this.streams = streams.toArray(new EditLogInputStream[0]);

  // We sort the streams here so that the streams that end later come first.
  Arrays.sort(this.streams, new Comparator<EditLogInputStream>() {
    @Override
    public int compare(EditLogInputStream a, EditLogInputStream b) {
      return Longs.compare(b.getLastTxId(), a.getLastTxId());
    }
  });
}
 
Example 29
Source Project: terrapin   Source File: ClusterStatusServlet.java    License: Apache License 2.0
/**
 * Get all live data nodes.
 * @param hdfsClient client instance for HDFS
 * @return names of the live data nodes
 * @throws IOException if the client fails while communicating with the server
 */
public static List<String> getAllNodeNames(DFSClient hdfsClient) throws IOException {
  DatanodeInfo[] allNodes = hdfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
  List<String> allNodeNames = new ArrayList<String>(allNodes.length);
  for (DatanodeInfo nodeInfo : allNodes) {
    allNodeNames.add(TerrapinUtil.getHelixInstanceFromHDFSHost(nodeInfo.getHostName()));
  }
  return allNodeNames;
}
 
Example 30
Source Project: big-c   Source File: DFSAdmin.java    License: Apache License 2.0
@Override
public void run(Path path) throws IOException {
  if (type != null) {
    dfs.setQuotaByStorageType(path, type, HdfsConstants.QUOTA_RESET);
  } else {
    dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }
}