org.apache.hadoop.hdfs.protocol.DSQuotaExceededException Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.DSQuotaExceededException. Each example is drawn from an open-source project; the source file and its license are noted above the code.
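
Before the project examples, here is a minimal, self-contained sketch of how DSQuotaExceededException typically reaches application code: a write into a directory whose space quota is exhausted fails during the write or when the stream is closed. The class name QuotaWriteExample, the /quota-limited path, and the 1 MB quota are hypothetical; they only serve to make the sketch runnable against a suitably configured cluster.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

public class QuotaWriteExample {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS points at an HDFS cluster and that an administrator
    // has already limited the (hypothetical) directory, for example with:
    //   hdfs dfsadmin -setSpaceQuota 1m /quota-limited
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/quota-limited/data.bin");
    byte[] chunk = new byte[1024 * 1024];
    try (FSDataOutputStream out = fs.create(file)) {
      for (int i = 0; i < 16; i++) {
        out.write(chunk); // eventually exceeds the directory's space quota
      }
    } catch (DSQuotaExceededException e) {
      // DSQuotaExceededException is an IOException, so it can be caught directly;
      // its message reports the configured quota and the space consumed.
      System.err.println("Space quota exceeded: " + e.getMessage());
    }
  }
}

As the DFSClient examples below show, the NameNode raises this exception inside a RemoteException, and the client unwraps it back into DSQuotaExceededException before it propagates to callers.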
Example #1
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public void rename2(String src, String dst, Rename... options)
    throws AccessControlException, DSQuotaExceededException,
    FileAlreadyExistsException, FileNotFoundException,
    NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
    UnresolvedLinkException, IOException {
  boolean overwrite = false;
  if (options != null) {
    for (Rename option : options) {
      if (option == Rename.OVERWRITE) {
        overwrite = true;
      }
    }
  }
  Rename2RequestProto req = Rename2RequestProto.newBuilder().
      setSrc(src).
      setDst(dst).setOverwriteDest(overwrite).
      build();
  try {
    rpcProxy.rename2(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

}
 
Example #2
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #3
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public void rename2(String src, String dst, Rename... options)
    throws AccessControlException, DSQuotaExceededException,
    FileAlreadyExistsException, FileNotFoundException,
    NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
    UnresolvedLinkException, IOException {
  boolean overwrite = false;
  if (options != null) {
    for (Rename option : options) {
      if (option == Rename.OVERWRITE) {
        overwrite = true;
      }
    }
  }
  Rename2RequestProto req = Rename2RequestProto.newBuilder().
      setSrc(src).
      setDst(dst).setOverwriteDest(overwrite).
      build();
  try {
    rpcProxy.rename2(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

}
 
Example #4
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Rename file or directory.
 * @see ClientProtocol#rename2(String, String, Options.Rename...)
 */
public void rename(String src, String dst, Options.Rename... options)
    throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
  try {
    namenode.rename2(src, dst, options);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   DSQuotaExceededException.class,
                                   FileAlreadyExistsException.class,
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   SafeModeException.class,
                                   NSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #5
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Rename file or directory.
 * @see ClientProtocol#rename(String, String)
 * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
 */
@Deprecated
public boolean rename(String src, String dst) throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename", src, dst);
  try {
    return namenode.rename(src, dst);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #6
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Set replication for an existing file.
 * @param src file name
 * @param replication replication to set the file to
 * 
 * @see ClientProtocol#setReplication(String, short)
 */
public boolean setReplication(String src, short replication)
    throws IOException {
  TraceScope scope = getPathTraceScope("setReplication", src);
  try {
    return namenode.setReplication(src, replication);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #7
Source File: DFSClient.java    From big-c with Apache License 2.0
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
 
Example #8
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Creates a symbolic link.
 * 
 * @see ClientProtocol#createSymlink(String, String, FsPermission, boolean)
 */
public void createSymlink(String target, String link, boolean createParent)
    throws IOException {
  TraceScope scope = getPathTraceScope("createSymlink", target);
  try {
    FsPermission dirPerm = 
        FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
    namenode.createSymlink(target, link, dirPerm, createParent);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileAlreadyExistsException.class, 
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   NSQuotaExceededException.class, 
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #9
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Move blocks from src to trg and delete src
 * See {@link ClientProtocol#concat(String, String [])}.
 */
public void concat(String trg, String[] srcs, boolean restricted)
    throws IOException {
  checkOpen();
  try {
    if (namenodeProtocolProxy != null
        && namenodeProtocolProxy.isMethodSupported("concat", String.class,
            String[].class, boolean.class)) {
      namenode.concat(trg, srcs, restricted);
    } else if (!restricted){
      throw new UnsupportedOperationException(
          "Namenode does not support variable length blocks");
    } else {
      namenode.concat(trg, srcs);
    }
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #10
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #11
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Creates a symbolic link.
 * 
 * @see ClientProtocol#createSymlink(String, String, FsPermission, boolean)
 */
public void createSymlink(String target, String link, boolean createParent)
    throws IOException {
  TraceScope scope = getPathTraceScope("createSymlink", target);
  try {
    FsPermission dirPerm = 
        FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
    namenode.createSymlink(target, link, dirPerm, createParent);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileAlreadyExistsException.class, 
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   NSQuotaExceededException.class, 
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #12
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src The path of the directory being created
 * @param permission The permission of the directory being created.
 * If permission == null, use {@link FsPermission#getDefault()}.
 * @return True if the operation succeeds.
 * @see ClientProtocol#mkdirs(String, FsPermission)
 */
public boolean mkdirs(String src, FsPermission permission)throws IOException{
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
  LOG.debug(src + ": masked=" + masked);
  try {
    metrics.incNumCreateDirOps();
    return namenode.mkdirs(src, masked);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #13
Source File: DFSClient.java    From hadoop with Apache License 2.0
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
 
Example #14
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Set replication for an existing file.
 * @param src file name
 * @param replication replication to set the file to
 * 
 * @see ClientProtocol#setReplication(String, short)
 */
public boolean setReplication(String src, short replication)
    throws IOException {
  TraceScope scope = getPathTraceScope("setReplication", src);
  try {
    return namenode.setReplication(src, replication);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #15
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Sets or resets quotas for a directory.
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)
 */
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
                                               throws IOException {
  // sanity check
  if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET &&
       namespaceQuota != FSConstants.QUOTA_RESET) ||
      (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET &&
       diskspaceQuota != FSConstants.QUOTA_RESET)) {
    throw new IllegalArgumentException("Invalid values for quota : " +
                                       namespaceQuota + " and " +
                                       diskspaceQuota);

  }

  try {
    namenode.setQuota(src, namespaceQuota, diskspaceQuota);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #16
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Rename file or directory.
 * @see ClientProtocol#rename2(String, String, Options.Rename...)
 */
public void rename(String src, String dst, Options.Rename... options)
    throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
  try {
    namenode.rename2(src, dst, options);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   DSQuotaExceededException.class,
                                   FileAlreadyExistsException.class,
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   SafeModeException.class,
                                   NSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #17
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Rename file or directory.
 * @see ClientProtocol#rename(String, String)
 * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
 */
@Deprecated
public boolean rename(String src, String dst) throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename", src, dst);
  try {
    return namenode.rename(src, dst);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #18
Source File: INodeDirectoryWithQuota.java    From RDFS with Apache License 2.0
/** Verify if the namespace count and disk space satisfy the quota restriction
 * @throws QuotaExceededException if the given quota is less than the count
 */
void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
  long newCount = nsCount + nsDelta;
  long newDiskspace = diskspace + dsDelta;
  if (nsDelta>0 || dsDelta>0) {
    if (nsQuota >= 0 && nsQuota < newCount) {
      throw new NSQuotaExceededException(nsQuota, newCount);
    }
    if (dsQuota >= 0 && dsQuota < newDiskspace) {
      throw new DSQuotaExceededException(dsQuota, newDiskspace);
    }
  }
}
 
Example #19
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Rename file or directory.
 * See {@link ClientProtocol#rename(String, String)}.
 */
public boolean rename(String src, String dst) throws IOException {
  checkOpen();
  try {
    return namenode.rename(src, dst);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #20
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public boolean setReplication(String src, short replication)
    throws AccessControlException, DSQuotaExceededException,
    FileNotFoundException, SafeModeException, UnresolvedLinkException,
    IOException {
  SetReplicationRequestProto req = SetReplicationRequestProto.newBuilder()
      .setSrc(src)
      .setReplication(replication)
      .build();
  try {
    return rpcProxy.setReplication(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #21
Source File: TestDiskspaceQuotaUpdate.java    From big-c with Apache License 2.0
/**
 * Test append over storage quota does not mark file as UC or create lease
 */
@Test (timeout=60000)
public void testAppendOverStorageQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // lower quota to cause exception when appending to partial block
  dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (DSQuotaExceededException e) {
    // ignore
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem().getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Example #22
Source File: TestQuota.java    From big-c with Apache License 2.0
/**
 * Tests to make sure we're getting human readable Quota exception messages
 * Test for {@link NSQuotaExceededException} and {@link DSQuotaExceededException}
 * @throws Exception
 */
@Test
public void testDSQuotaExceededExceptionIsHumanReadable() throws Exception {
  Integer bytes = 1024;
  try {
    throw new DSQuotaExceededException(bytes, bytes);
  } catch(DSQuotaExceededException e) {
    
    assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
        + " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
  }
}
 
Example #23
Source File: DFSOutputStream.java    From RDFS with Apache License 2.0
/**
 * Create a new output stream to the given DataNode.
 * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
 */
DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    Progressable progress, int buffersize, int bytesPerChecksum,
    boolean forceSync, boolean doParallelWrites,
    DatanodeInfo[] favoredNodes) throws IOException {
  this(dfsClient, src, blockSize, progress, bytesPerChecksum, replication,
      forceSync, doParallelWrites, favoredNodes);

  computePacketChunkSize(dfsClient.writePacketSize, bytesPerChecksum);

  try {
    if (dfsClient.namenodeProtocolProxy != null && 
          dfsClient.namenodeProtocolProxy.isMethodSupported("create", String.class, 
             FsPermission.class, String.class, boolean.class, boolean.class,
             short.class, long.class)) {
      dfsClient.namenode.create(src, masked, dfsClient.clientName, overwrite,
                      createParent, replication, blockSize);
    } else {
      dfsClient.namenode.create(src, masked, dfsClient.clientName, overwrite,
                      replication, blockSize);
    }
  } catch(RemoteException re) {
    dfsClient.incWriteExpCntToStats();

    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileAlreadyExistsException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
  streamer.start();
}
 
Example #24
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Set replication for an existing file.
 *
 * @see ClientProtocol#setReplication(String, short)
 * @param replication
 * @throws IOException
 * @return true if successful or false if the file does not exist
 */
public boolean setReplication(String src,
                              short replication
                              ) throws IOException {
  try {
    return namenode.setReplication(src, replication);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #25
Source File: INodeDirectoryWithQuota.java    From hadoop-gpu with Apache License 2.0
/** Verify if the namespace count and disk space satisfy the quota restriction
 * @throws QuotaExceededException if the given quota is less than the count
 */
private static void verifyQuota(long nsQuota, long nsCount, 
                                long dsQuota, long diskspace)
                                throws QuotaExceededException {
  if (nsQuota >= 0 && nsQuota < nsCount) {
    throw new NSQuotaExceededException(nsQuota, nsCount);
  }
  if (dsQuota >= 0 && dsQuota < diskspace) {
    throw new DSQuotaExceededException(dsQuota, diskspace);
  }
}
 
Example #26
Source File: DFSClient.java    From RDFS with Apache License 2.0
/** 
 * See {@link ClientProtocol#hardLink(String, String)}. 
 */ 
public boolean hardLink(String src, String dst) throws IOException {  
  checkOpen();  
  try { 
    return namenode.hardLink(src, dst); 
  } catch(RemoteException re) { 
    throw re.unwrapRemoteException(AccessControlException.class,  
                                   NSQuotaExceededException.class,  
                                   DSQuotaExceededException.class); 
  } 
}
 
Example #27
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Fetch the list of files that have been open longer than a
 * specified amount of time.
 * @param prefix path prefix specifying subset of files to examine
 * @param millis select files that have been open longer than this
 * @param start where to start searching when there are large numbers of
 * files returned. pass null the first time, then pass the last
 * value returned by the previous call for subsequent calls.
 * @return array of OpenFileInfo objects
 * @throws IOException
 */
public OpenFileInfo[] iterativeGetOpenFiles(
  Path prefix, int millis, String start) throws IOException {
  checkOpen();
  try {
    return namenode.iterativeGetOpenFiles(prefix.toString(), millis, start);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #28
Source File: DirectoryWithQuotaFeature.java    From big-c with Apache License 2.0
/** Verify if the storagespace quota is violated after applying delta. */
private void verifyStoragespaceQuota(long delta) throws DSQuotaExceededException {
  if (Quota.isViolated(quota.getStorageSpace(), usage.getStorageSpace(), delta)) {
    throw new DSQuotaExceededException(quota.getStorageSpace(),
        usage.getStorageSpace() + delta);
  }
}
 
Example #29
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except
 * that the permission has already been masked against the umask.
 */
public boolean primitiveMkdir(String src, FsPermission absPermission, 
  boolean createParent)
  throws IOException {
  checkOpen();
  if (absPermission == null) {
    absPermission = 
      FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
  } 

  if(LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + absPermission);
  }
  TraceScope scope = Trace.startSpan("mkdir", traceSampler);
  try {
    return namenode.mkdirs(src, absPermission, createParent);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   InvalidPathException.class,
                                   FileAlreadyExistsException.class,
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   SafeModeException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #30
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Sets or resets quotas for a directory.
 * @see ClientProtocol#setQuota(String, long, long, StorageType)
 */
void setQuota(String src, long namespaceQuota, long storagespaceQuota)
    throws IOException {
  // sanity check
  if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
       namespaceQuota != HdfsConstants.QUOTA_RESET) ||
      (storagespaceQuota <= 0 && storagespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
       storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
    throw new IllegalArgumentException("Invalid values for quota : " +
                                       namespaceQuota + " and " +
                                       storagespaceQuota);
                                       
  }
  TraceScope scope = getPathTraceScope("setQuota", src);
  try {
    // Pass null as storage type for traditional namespace/storagespace quota.
    namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}