org.apache.hadoop.hdfs.DFSClient Java Examples

The following examples show how to use org.apache.hadoop.hdfs.DFSClient. Each example is drawn from an open-source project; the source file, project, and license are noted above the code. A minimal construction/usage sketch is shown first, followed by the examples.
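Before the individual examples, here is a minimal, self-contained sketch of constructing a DFSClient directly and closing it afterwards. The NameNode URI (hdfs://localhost:8020) and the probed path are hypothetical placeholders, not values taken from the examples below.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class DfsClientQuickStart {
  public static void main(String[] args) throws Exception {
    // Hypothetical NameNode address; replace with your cluster's fs.defaultFS.
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020");

    DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    try {
      // getFileInfo() returns null when the path does not exist.
      HdfsFileStatus status = client.getFileInfo("/");
      System.out.println("Root is a directory: " + (status != null && status.isDir()));
    } finally {
      // DFSClient holds RPC connections and a lease renewer; always close it.
      client.close();
    }
  }
}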
Example #1
Source File: WebHdfsHandler.java    From hadoop with Apache License 2.0
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  MD5MD5CRC32FileChecksum checksum = null;
  final String nnId = params.namenodeId();
  DFSClient dfsclient = newDfsClient(nnId, conf);
  try {
    checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
    dfsclient.close();
    dfsclient = null;
  } finally {
    IOUtils.cleanup(LOG, dfsclient);
  }
  final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
    new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  resp.headers().set(CONNECTION, CLOSE);
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
 
Example #2
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
Example #3
Source File: FileChecksumServlets.java    From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        filename, nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    new RemoteException(ioe.getClass().getName(), ioe.getMessage()
        ).writeXml(filename, xml);
  }
  xml.endDocument();
}
 
Example #4
Source File: NamenodeFsck.java    From hadoop with Apache License 2.0
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";
    
    final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
    if (lfStatus == null) { // does not exist
      lfInitedOk = dfs.mkdirs(lfName, null, true);
      lostFound = lfName;
    } else if (!lfStatus.isDir()) { // exists but not a directory
      LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
      lfInitedOk = false;
    } else { // exists and is a directory
      lostFound = lfName;
      lfInitedOk = true;
    }
  } catch (Exception e) {
    e.printStackTrace();
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found .");
    lfInitedOk = false;
    internalError = true;
  }
}
 
Example #5
Source File: TerrapinControllerServiceImpl.java    From terrapin with Apache License 2.0
public TerrapinControllerServiceImpl(PropertiesConfiguration configuration,
                                     ZooKeeperManager zkManager,
                                     DFSClient hdfsClient,
                                     HelixAdmin helixAdmin,
                                     String clusterName) {
  this.configuration = configuration;
  this.zkManager = zkManager;
  this.hdfsClient = hdfsClient;
  this.helixAdmin = helixAdmin;
  this.clusterName = clusterName;

  ExecutorService threadPool = new ThreadPoolExecutor(100,
      100,
      0,
      TimeUnit.SECONDS,
      new LinkedBlockingDeque<Runnable>(1000),
      new ThreadFactoryBuilder().setDaemon(false)
                    .setNameFormat("controller-pool-%d")
                    .build());
  this.futurePool = new ExecutorServiceFuturePool(threadPool);
}
 
Example #6
Source File: DomainSocketFactory.java    From hadoop with Apache License 2.0
/**
 * Get information about a domain socket path.
 *
 * @param addr         The inet address to use.
 * @param conf         The client configuration.
 *
 * @return             Information about the socket path.
 */
public PathInfo getPathInfo(InetSocketAddress addr, DFSClient.Conf conf) {
  // If there is no domain socket path configured, we can't use domain
  // sockets.
  if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
  // If we can't do anything with the domain socket, don't create it.
  if (!conf.isDomainSocketDataTraffic() &&
      (!conf.isShortCircuitLocalReads() || conf.isUseLegacyBlockReaderLocal())) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If the DomainSocket code is not loaded, we can't create
  // DomainSocket objects.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return PathInfo.NOT_CONFIGURED;
  }
  // UNIX domain sockets can only be used to talk to local peers
  if (!DFSClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
  String escapedPath = DomainSocket.getEffectivePath(
      conf.getDomainSocketPath(), addr.getPort());
  PathState status = pathMap.getIfPresent(escapedPath);
  if (status == null) {
    return new PathInfo(escapedPath, PathState.VALID);
  } else {
    return new PathInfo(escapedPath, status);
  }
}
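As a companion to the checks above, the following sketch shows the kind of client-side configuration under which getPathInfo() can return a usable path. The property values are assumed typical settings for short-circuit local reads, not values taken from the example.

import org.apache.hadoop.conf.Configuration;

public class ShortCircuitConfSketch {
  public static Configuration shortCircuitConf() {
    Configuration conf = new Configuration();
    // Enable short-circuit local reads over a UNIX domain socket.
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    // Hypothetical socket path; it must match the path configured on the DataNode.
    conf.set("dfs.domain.socket.path", "/var/lib/hadoop-hdfs/dn_socket");
    return conf;
  }
}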
 
Example #7
Source File: DFSClientCache.java    From hadoop with Apache License 2.0
private CacheLoader<String, DFSClient> clientLoader() {
  return new CacheLoader<String, DFSClient>() {
    @Override
    public DFSClient load(String userName) throws Exception {
      UserGroupInformation ugi = getUserGroupInformation(
              userName,
              UserGroupInformation.getCurrentUser());

      // Guava requires CacheLoader never returns null.
      return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
        @Override
        public DFSClient run() throws IOException {
          return new DFSClient(NameNode.getAddress(config), config);
        }
      });
    }
  };
}
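For context, here is a hedged sketch of how a CacheLoader like the one above is commonly wired into a Guava LoadingCache so that evicted clients are closed. The cache size and removal handling are illustrative assumptions, not the actual DFSClientCache implementation.

import java.io.IOException;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import org.apache.hadoop.hdfs.DFSClient;

class DfsClientCacheSketch {
  LoadingCache<String, DFSClient> buildCache(CacheLoader<String, DFSClient> clientLoader) {
    return CacheBuilder.newBuilder()
        .maximumSize(256) // hypothetical maximum number of cached per-user clients
        .removalListener(new RemovalListener<String, DFSClient>() {
          @Override
          public void onRemoval(RemovalNotification<String, DFSClient> notification) {
            DFSClient client = notification.getValue();
            if (client != null) {
              try {
                client.close(); // release RPC resources held by the evicted client
              } catch (IOException e) {
                // best effort; the client may already be closed
              }
            }
          }
        })
        .build(clientLoader);
  }
}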
 
Example #8
Source File: RpcProgramMountd.java    From big-c with Apache License 2.0
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
 
Example #9
Source File: WriteManager.java    From hadoop with Apache License 2.0
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle,
    String fileName) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);

  if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
    OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr
        .getFileId()));

    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
 
Example #10
Source File: FileChecksumServlets.java    From big-c with Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = 
    new HdfsConfiguration(datanode.getConf());
  
  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
 
Example #11
Source File: TestResolveHdfsSymlink.java    From hadoop with Apache License 2.0
/**
 * Verifies that attempting to resolve a non-symlink results in a client
 * exception.
 */
@Test
public void testLinkTargetNonSymlink() throws UnsupportedFileSystemException,
    IOException {
  FileContext fc = null;
  Path notSymlink = new Path("/notasymlink");
  try {
    fc = FileContext.getFileContext(cluster.getFileSystem().getUri());
    fc.create(notSymlink, EnumSet.of(CreateFlag.CREATE));
    DFSClient client = new DFSClient(cluster.getFileSystem().getUri(),
        cluster.getConfiguration(0));
    try {
      client.getLinkTarget(notSymlink.toString());
      fail("Expected exception for resolving non-symlink");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("is not a symbolic link", e);
    }
  } finally {
    if (fc != null) {
      fc.delete(notSymlink, false);
    }
  }
}
 
Example #12
Source File: TestDFSClientCache.java    From hadoop with Apache License 2.0
@Test
public void testEviction() throws IOException {
  NfsConfiguration conf = new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");

  // Only one entry will be in the cache
  final int MAX_CACHE_SIZE = 1;

  DFSClientCache cache = new DFSClientCache(conf, MAX_CACHE_SIZE);

  DFSClient c1 = cache.getDfsClient("test1");
  assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1"));
  assertEquals(c1, cache.getDfsClient("test1"));
  assertFalse(isDfsClientClose(c1));

  cache.getDfsClient("test2");
  assertTrue(isDfsClientClose(c1));
  assertTrue("cache size should be the max size or less",
      cache.clientCache.size() <= MAX_CACHE_SIZE);
}
 
Example #13
Source File: NamenodeFsck.java    From hadoop-gpu with Apache License 2.0
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";
    // check that /lost+found exists
    if (!dfs.exists(lfName)) {
      lfInitedOk = dfs.mkdirs(lfName);
      lostFound = lfName;
    } else if (!dfs.isDirectory(lfName)) {
      LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
      lfInitedOk = false;
    } else { // exists and isDirectory
      lostFound = lfName;
      lfInitedOk = true;
    }
  } catch (Exception e) {
    e.printStackTrace();
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found .");
    lfInitedOk = false;
  }
}
 
Example #14
Source File: DataNodeLocatorUtils.java    From twister2 with Apache License 2.0
/**
 * This method retrieves all the datanodes of an HDFS cluster.
 */
private List<String> getDataNodes() throws IOException {

  Configuration conf = new Configuration(false);
  conf.addResource(new org.apache.hadoop.fs.Path(HdfsDataContext.getHdfsConfigDirectory(config)));

  List<String> datanodesList = new ArrayList<>();
  InetSocketAddress namenodeAddress = new InetSocketAddress(
      HdfsDataContext.getHdfsNamenodeDefault(config),
      HdfsDataContext.getHdfsNamenodePortDefault(config));
  DFSClient dfsClient = new DFSClient(namenodeAddress, conf);
  ClientProtocol nameNode = dfsClient.getNamenode();
  DatanodeInfo[] datanodeReport =
      nameNode.getDatanodeReport(HdfsConstants.DatanodeReportType.ALL);
  for (DatanodeInfo di : datanodeReport) {
    datanodesList.add(di.getHostName());
  }
  return datanodesList;
}
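Note that the method above never closes the DFSClient it creates. A rough sketch of the same datanode lookup with explicit cleanup might look like the following; the host and port parameters are hypothetical.

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class DataNodeLister {
  public static List<String> listDataNodes(String nnHost, int nnPort) throws Exception {
    Configuration conf = new Configuration();
    DFSClient dfsClient = new DFSClient(new InetSocketAddress(nnHost, nnPort), conf);
    try {
      List<String> hosts = new ArrayList<>();
      for (DatanodeInfo dn : dfsClient.getNamenode()
          .getDatanodeReport(HdfsConstants.DatanodeReportType.ALL)) {
        hosts.add(dn.getHostName());
      }
      return hosts;
    } finally {
      dfsClient.close(); // release the RPC connection to the NameNode
    }
  }
}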
 
Example #15
Source File: DFSClientCache.java    From big-c with Apache License 2.0
private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
  return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {

    @Override
    public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
      DFSClient client = getDfsClient(key.userId);
      DFSInputStream dis = client.open(key.inodePath);
      return client.createWrappedInputStream(dis);
    }
  };
}
 
Example #16
Source File: OpenFileCtx.java    From big-c with Apache License 2.0
/**
 * Check the commit status with the given offset
 * @param commitOffset the offset to commit
 * @param channel the channel to return response
 * @param xid the xid of the commit request
 * @param preOpAttr the preOp attribute
 * @param fromRead whether the commit is triggered from read request
 * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
 * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
 */
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
    Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
  if (!fromRead) {
    Preconditions.checkState(channel != null && preOpAttr != null);
    // Keep stream active
    updateLastAccessTime();
  }
  Preconditions.checkState(commitOffset >= 0);

  COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
      preOpAttr, fromRead);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got commit status: " + ret.name());
  }
  // Do the sync outside the lock
  if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
      || ret == COMMIT_STATUS.COMMIT_FINISHED) {
    try {
      // Sync file data and length
      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status 
      // Nothing to do for metadata since attr related change is pass-through
    } catch (ClosedChannelException cce) {
      if (pendingWrites.isEmpty()) {
        ret = COMMIT_STATUS.COMMIT_FINISHED;
      } else {
        ret = COMMIT_STATUS.COMMIT_ERROR;
      }
    } catch (IOException e) {
      LOG.error("Got stream error during data sync: " + e);
      // Do nothing. Stream will be closed eventually by StreamMonitor.
      // status = Nfs3Status.NFS3ERR_IO;
      ret = COMMIT_STATUS.COMMIT_ERROR;
    }
  }
  return ret;
}
 
Example #17
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java    From hbase with Apache License 2.0
static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client)
    throws IOException {
  FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
  if (feInfo == null) {
    return null;
  }
  return TRANSPARENT_CRYPTO_HELPER.createEncryptor(conf, feInfo, client);
}
 
Example #18
Source File: TestFailoverWithBlockTokensEnabled.java    From big-c with Apache License 2.0
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
  
  DFSClient dfsClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks)arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(), token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
          Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem)fs, spyDfsClient);
  
  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
 
Example #19
Source File: TestHBaseWalOnEC.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  try {
    MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3); // Need 3 DNs for RS-3-2 policy
    DistributedFileSystem fs = cluster.getFileSystem();

    Method enableAllECPolicies =
      DFSTestUtil.class.getMethod("enableAllECPolicies", DistributedFileSystem.class);
    enableAllECPolicies.invoke(null, fs);

    DFSClient client = fs.getClient();
    Method setErasureCodingPolicy =
      DFSClient.class.getMethod("setErasureCodingPolicy", String.class, String.class);
    setErasureCodingPolicy.invoke(client, "/", "RS-3-2-1024k"); // try a built-in policy

    try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
      // If this comes back as having hflush then some test setup assumption is wrong.
      // Fail the test so that a developer has to look and triage
      assertFalse("Did not enable EC!", out.hasCapability(StreamCapabilities.HFLUSH));
    }
  } catch (NoSuchMethodException e) {
    // We're not testing anything interesting if EC is not available, so skip the rest of the test
    Assume.assumeNoException("Using an older version of hadoop; EC not available.", e);
  }

  UTIL.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);
}
 
Example #20
Source File: TestWrites.java    From big-c with Apache License 2.0
@Test
public void testCheckCommitAixCompatMode() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);

  NfsConfiguration conf = new NfsConfiguration();
  conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  // Enable AIX compatibility mode.
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(new NfsConfiguration()), true, conf);
  
  // Test fall-through to pendingWrites check in the event that commitOffset
  // is greater than the number of bytes we've so far flushed.
  Mockito.when(fos.getPos()).thenReturn((long) 2);
  COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED);
  
  // Test the case when we actually have received more bytes than we're trying
  // to commit.
  ctx.getPendingWritesForTest().put(new OffsetRange(0, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  Mockito.when(fos.getPos()).thenReturn((long) 10);
  ctx.setNextOffsetForTest((long)10);
  status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
}
 
Example #21
Source File: TestBalancer.java    From hadoop-gpu with Apache License 2.0
private void test(long[] capacities, String[] racks, 
    long newCapacity, String newRack) throws Exception {
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  cluster = new MiniDFSCluster(0, CONF, capacities.length, true, true, null, 
      racks, capacities);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    long totalCapacity=0L;
    for(long capacity:capacities) {
      totalCapacity += capacity;
    }
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity*3/10;
    createFile(totalUsedSpace/numOfDatanodes, (short)numOfDatanodes);
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(CONF, 1, true, null,
        new String[]{newRack}, new long[]{newCapacity});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
 
Example #22
Source File: WebHdfsHandler.java    From hadoop with Apache License 2.0
private void onOpen(ChannelHandlerContext ctx) throws IOException {
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final long offset = params.offset();
  final long length = params.length();

  DefaultHttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
  HttpHeaders headers = response.headers();
  // Allow the UI to access the file
  headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
  headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
  headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
  headers.set(CONNECTION, CLOSE);

  final DFSClient dfsclient = newDfsClient(nnId, conf);
  HdfsDataInputStream in = dfsclient.createWrappedInputStream(
    dfsclient.open(path, bufferSize, true));
  in.seek(offset);

  long contentLength = in.getVisibleLength() - offset;
  if (length >= 0) {
    contentLength = Math.min(contentLength, length);
  }
  final InputStream data;
  if (contentLength >= 0) {
    headers.set(CONTENT_LENGTH, contentLength);
    data = new LimitInputStream(in, contentLength);
  } else {
    data = in;
  }

  ctx.write(response);
  ctx.writeAndFlush(new ChunkedStream(data) {
    @Override
    public void close() throws Exception {
      super.close();
      dfsclient.close();
    }
  }).addListener(ChannelFutureListener.CLOSE);
}
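The same open/wrap pattern can be used from a standalone client. The sketch below is illustrative only, with a hypothetical NameNode URI and file path; createWrappedInputStream() wraps the raw DFSInputStream so that files inside encryption zones are transparently decrypted.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.io.IOUtils;

public class DfsReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    HdfsDataInputStream in = null;
    try {
      // Open the raw stream, then wrap it for transparent decryption.
      in = client.createWrappedInputStream(client.open("/tmp/example.txt"));
      IOUtils.copyBytes(in, System.out, 4096, false); // copy the file contents to stdout
    } finally {
      IOUtils.closeStream(in);
      client.close();
    }
  }
}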
 
Example #23
Source File: DatanodeJspHelper.java    From hadoop with Apache License 2.0
/** Get DFSClient for a namenode corresponding to the BPID from a datanode */
public static DFSClient getDFSClient(final HttpServletRequest request,
    final DataNode datanode, final Configuration conf,
    final UserGroupInformation ugi) throws IOException, InterruptedException {
  final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
  return getDFSClient(ugi, nnAddr, conf);
}
 
Example #24
Source File: TestBlockReplacement.java    From hadoop-gpu with Apache License 2.0
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, 
    long fileLen, short replFactor, DFSClient client) throws IOException {
  Boolean notDone;
  do {
    try {
      Thread.sleep(100);
    } catch(InterruptedException e) {
    }
    List<LocatedBlock> blocks = client.namenode
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
    assertEquals(1, blocks.size());
    DatanodeInfo[] nodes = blocks.get(0).getLocations();
    notDone = (nodes.length != replFactor);
    if (notDone) {
      LOG.info("Expected replication factor is " + replFactor +
          " but the real replication factor is " + nodes.length );
    } else {
      List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
      for (DatanodeInfo node : includeNodes) {
        if (!nodeLocations.contains(node)) {
          notDone = true;
          LOG.info("Block is not located at " + node.getName());
          break;
        }
      }
    }
  } while(notDone);
}
 
Example #25
Source File: WriteManager.java    From big-c with Apache License 2.0
/**
 * If the file is in the cache, update the size based on the cached data size.
 */
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle,
    IdMappingServiceProvider iug) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
  if (attr != null) {
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
 
Example #26
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
RemoveCacheDirectiveInfoOp(DFSClient client, String pool,
    String path) {
  super("removeCacheDirective", client);
  this.directive = new CacheDirectiveInfo.Builder().
      setPool(pool).
      setPath(new Path(path)).
      build();
}
 
Example #27
Source File: SnapshotTestHelper.java    From hadoop with Apache License 2.0
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }
  
  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));
  
  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
 
Example #28
Source File: NamenodeFsck.java    From big-c with Apache License 2.0
private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
                              TreeSet<DatanodeInfo> deadNodes) throws IOException {
  if ((nodes == null) ||
      (nodes.length - deadNodes.size() < 1)) {
    throw new IOException("No live nodes contain current block");
  }
  DatanodeInfo chosenNode;
  do {
    chosenNode = nodes[DFSUtil.getRandom().nextInt(nodes.length)];
  } while (deadNodes.contains(chosenNode));
  return chosenNode;
}
 
Example #29
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0
RenameSnapshotOp(DFSClient client, String dir, String oldName,
    String newName) {
  super("renameSnapshot", client);
  this.dir = dir;
  this.oldName = oldName;
  this.newName = newName;
}