org.apache.hadoop.ipc.RemoteException Java Examples

The following examples show how to use org.apache.hadoop.ipc.RemoteException. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
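Most of the examples below follow the same basic pattern: catch the RemoteException thrown by a Hadoop RPC call and use unwrapRemoteException() to recover the server-side exception type. As a quick orientation, here is a minimal sketch of that pattern; the RemoteExceptionSketch class and its callNameNode() method are hypothetical placeholders, not taken from any of the projects below.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

public class RemoteExceptionSketch {
  /** Hypothetical RPC call standing in for a real NameNode operation. */
  private void callNameNode() throws IOException {
    throw new RemoteException(AccessControlException.class.getName(),
        "Permission denied");
  }

  public void run() throws IOException {
    try {
      callNameNode();
    } catch (RemoteException re) {
      // unwrapRemoteException returns the wrapped server-side exception if its
      // class matches one of the given lookup classes; otherwise it returns
      // the RemoteException itself.
      throw re.unwrapRemoteException(AccessControlException.class,
                                     FileNotFoundException.class);
    }
  }
}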
Example #1
Source File: SpliceFailFastInterceptor.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void handleFailure(RetryingCallerInterceptorContext context, Throwable t) throws IOException {

    if (t instanceof UndeclaredThrowableException) {
        t = t.getCause();
    }
    if (t instanceof RemoteException) {
        RemoteException re = (RemoteException)t;
        t = re.unwrapRemoteException();
    }
    if (t instanceof DoNotRetryIOException) {
        throw (DoNotRetryIOException)t;
    }
    if (t instanceof IOException) {
        throw (IOException) t;
    }
    throw new IOException(t);
}
 
Example #2
Source File: TestSnapshotDeletion.java    From hadoop with Apache License 2.0
/**
 * Deleting a directory whose snapshottable descendant has snapshots must fail.
 */
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot2() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  
  Path subfile1 = new Path(subsub, "file0");
  Path subfile2 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);

  // Allow snapshot for subsub1, and create snapshot for it
  hdfs.allowSnapshot(subsub);
  hdfs.createSnapshot(subsub, "s1");

  // Deleting dir while its descendant subsub1 has snapshots should fail
  exception.expect(RemoteException.class);
  String error = subsub.toString()
      + " is snapshottable and already has snapshots";
  exception.expectMessage(error);
  hdfs.delete(dir, true);
}
 
Example #3
Source File: TestEditLogJournalFailures.java    From big-c with Apache License 2.0
@Test
public void testAllEditsDirFailOnWrite() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, true);
  invalidateEditsDirAtIndex(1, true, true);
  // The NN has not terminated (no ExitException thrown)
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, "
        + " should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "No journals available to flush. " +
        "Unsynced transactions: 1", re);
  }
}
 
Example #4
Source File: TestReadWhileWriting.java    From hadoop with Apache License 2.0
/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
  for(int i = 0; i < 10; i++) {
    try {
      return fs.append(p);
    } catch(RemoteException re) {
      if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
        AppendTestUtil.LOG.info("Will sleep and retry, i=" + i +", p="+p, re);
        Thread.sleep(1000);
      }
      else
        throw re;
    }
  }
  throw new IOException("Cannot append to " + p);
}
 
Example #5
Source File: ReportBadBlockAction.java    From hadoop with Apache License 2.0
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, 
  DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };

  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block:  " + block , re);
  } catch (IOException e) {
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode: ");
  }
}
 
Example #6
Source File: TestEditLogJournalFailures.java    From hadoop with Apache License 2.0
@Test
public void testAllEditsDirFailOnWrite() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, true);
  invalidateEditsDirAtIndex(1, true, true);
  // The NN has not terminated (no ExitException thrown)
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, "
        + " should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "No journals available to flush. " +
        "Unsynced transactions: 1", re);
  }
}
 
Example #7
Source File: DFSClient.java    From hadoop with Apache License 2.0
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("modifyAclEntries", src);
  try {
    namenode.modifyAclEntries(src, aclSpec);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   AclException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example #8
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Get the data transfer protocol version supported in the cluster
 * assuming all the datanodes have the same version.
 *
 * @return the data transfer protocol version supported in the cluster
 */
int getDataTransferProtocolVersion() throws IOException {
  synchronized (dataTransferVersion) {
    if (dataTransferVersion == -1) {
      // Get the version number from NN
      try {
        int remoteDataTransferVersion = namenode.getDataTransferProtocolVersion();
        updateDataTransferProtocolVersionIfNeeded(remoteDataTransferVersion);
      } catch (RemoteException re) {
        IOException ioe = re.unwrapRemoteException(IOException.class);
        if (ioe.getMessage().startsWith(IOException.class.getName() + ": " +
            NoSuchMethodException.class.getName())) {
          dataTransferVersion = 14; // last version not supporting this RPC
        } else {
          throw ioe;
        }
      }
      if (LOG.isDebugEnabled()) {
LOG.debug("Data Transfer Protocal Version is "+ dataTransferVersion);
      }
    }
    return dataTransferVersion;
  }
}
 
Example #9
Source File: TestSubmitJob.java    From hadoop-gpu with Apache License 2.0
private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks,
    long memForReduceTasks, String expectedMsg)
    throws Exception,
    IOException {
  String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
  boolean throwsException = false;
  String msg = null;
  try {
    ToolRunner.run(jobConf, new SleepJob(), args);
  } catch (RemoteException re) {
    throwsException = true;
    msg = re.unwrapRemoteException().getMessage();
  }
  assertTrue(throwsException);
  assertNotNull(msg);

  String overallExpectedMsg =
      "(" + memForMapTasks + " memForMapTasks " + memForReduceTasks
          + " memForReduceTasks): " + expectedMsg;
  assertTrue("Observed message - " + msg
      + " - doesn't contain expected message - " + overallExpectedMsg, msg
      .contains(overallExpectedMsg));
}
 
Example #10
Source File: DFSClient.java    From big-c with Apache License 2.0
public void removeAcl(String src) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeAcl", traceSampler);
  try {
    namenode.removeAcl(src);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   AclException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example #11
Source File: NameNodeConnector.java    From big-c with Apache License 2.0
/**
 * The idea for making sure that there is no more than one instance
 * running in an HDFS cluster is to create a file in HDFS, write the
 * hostname of the machine on which the instance is running to that file,
 * and not close the file until the instance exits.
 * 
 * This prevents a second instance from running, because it cannot
 * create the file while the first one is running.
 * 
 * This method checks whether there is a running instance and, if not,
 * marks this instance as running.
 * Note that this is an atomic operation.
 * 
 * @return null if there is a running instance;
 *         otherwise, the output stream to the newly created file.
 */
private OutputStream checkAndMarkRunning() throws IOException {
  try {
    if (fs.exists(idPath)) {
      // try appending to it so that it will fail fast if another balancer is
      // running.
      IOUtils.closeStream(fs.append(idPath));
      fs.delete(idPath, true);
    }
    final FSDataOutputStream fsout = fs.create(idPath, false);
    // mark balancer idPath to be deleted during filesystem closure
    fs.deleteOnExit(idPath);
    if (write2IdFile) {
      fsout.writeBytes(InetAddress.getLocalHost().getHostName());
      fsout.hflush();
    }
    return fsout;
  } catch(RemoteException e) {
    if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
      return null;
    } else {
      throw e;
    }
  }
}
 
Example #12
Source File: DFSClient.java    From big-c with Apache License 2.0
public void removeXAttr(String src, String name) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("removeXAttr", src);
  try {
    namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example #13
Source File: DFSClient.java    From big-c with Apache License 2.0
public void setXAttr(String src, String name, byte[] value, 
    EnumSet<XAttrSetFlag> flag) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("setXAttr", src);
  try {
    namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example #14
Source File: HAUtil.java    From hadoop with Apache License 2.0
/**
 * Used to ensure that at least one of the given HA NNs is currently in the
 * active state.
 * 
 * @param namenodes list of RPC proxies for each NN to check.
 * @return true if at least one NN is active, false if all are in the standby state.
 * @throws IOException in the event of error.
 */
public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
    throws IOException {
  for (ClientProtocol namenode : namenodes) {
    try {
      namenode.getFileInfo("/");
      return true;
    } catch (RemoteException re) {
      IOException cause = re.unwrapRemoteException();
      if (cause instanceof StandbyException) {
        // This is expected to happen for a standby NN.
      } else {
        throw re;
      }
    }
  }
  return false;
}
 
Example #15
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Creates a symbolic link.
 * 
 * @see ClientProtocol#createSymlink(String, String,FsPermission, boolean) 
 */
public void createSymlink(String target, String link, boolean createParent)
    throws IOException {
  TraceScope scope = getPathTraceScope("createSymlink", target);
  try {
    FsPermission dirPerm = 
        FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
    namenode.createSymlink(target, link, dirPerm, createParent);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileAlreadyExistsException.class, 
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   NSQuotaExceededException.class, 
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #16
Source File: DFSClient.java    From hadoop with Apache License 2.0
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
 
Example #17
Source File: TestReadWhileWriting.java    From big-c with Apache License 2.0
/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
  for(int i = 0; i < 10; i++) {
    try {
      return fs.append(p);
    } catch(RemoteException re) {
      if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
        AppendTestUtil.LOG.info("Will sleep and retry, i=" + i +", p="+p, re);
        Thread.sleep(1000);
      }
      else
        throw re;
    }
  }
  throw new IOException("Cannot append to " + p);
}
 
Example #18
Source File: FileChecksumServlets.java    From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        filename, nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    new RemoteException(ioe.getClass().getName(), ioe.getMessage()
        ).writeXml(filename, xml);
  }
  xml.endDocument();
}
 
Example #19
Source File: HBaseInterClusterReplicationEndpoint.java    From hbase with Apache License 2.0
/**
 * Check if there's a {@link NoSuchColumnFamilyException} anywhere in the cause chain.
 */
@VisibleForTesting
public static boolean isNoSuchColumnFamilyException(Throwable io) {
  if (io instanceof RemoteException) {
    io = ((RemoteException) io).unwrapRemoteException();
  }
  if (io != null && io.getMessage().contains("NoSuchColumnFamilyException")) {
    return true;
  }
  for (; io != null; io = io.getCause()) {
    if (io instanceof NoSuchColumnFamilyException) {
      return true;
    }
  }
  return false;
}
 
Example #20
Source File: AvatarZKShell.java    From RDFS with Apache License 2.0
private static AvatarProtocol createAvatarnode(AvatarProtocol rpcAvatarnode)
    throws IOException {
  RetryPolicy createPolicy = RetryPolicies
      .retryUpToMaximumCountWithFixedSleep(5, 5000, TimeUnit.MILLISECONDS);

  Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();

  Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
  exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
      .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          remoteExceptionToPolicyMap));
  RetryPolicy methodPolicy = RetryPolicies.retryByException(
      RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
  Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

  methodNameToPolicyMap.put("create", methodPolicy);

  return (AvatarProtocol) RetryProxy.create(AvatarProtocol.class,
      rpcAvatarnode, methodNameToPolicyMap);
}
 
Example #21
Source File: HBaseSaslRpcClient.java    From hbase with Apache License 2.0
private static void readStatus(DataInputStream inStream) throws IOException {
  int status = inStream.readInt(); // read status
  if (status != SaslStatus.SUCCESS.state) {
    throw new RemoteException(WritableUtils.readString(inStream),
        WritableUtils.readString(inStream));
  }
}
 
Example #22
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Fetch the list of files that have been open longer than a
 * specified amount of time.
 * @param prefix path prefix specifying subset of files to examine
 * @param millis select files that have been open longer than this
 * @param start where to start searching when there are large numbers of
 * files returned. Pass null the first time, then pass the last
 * value returned by the previous call for subsequent calls.
 * @return array of OpenFileInfo objects
 * @throws IOException
 */
public OpenFileInfo[] iterativeGetOpenFiles(
  Path prefix, int millis, String start) throws IOException {
  checkOpen();
  try {
    return namenode.iterativeGetOpenFiles(prefix.toString(), millis, start);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Example #23
Source File: ClientExceptionsUtil.java    From hbase with Apache License 2.0
/**
 * Look for an exception we know in the remote exception:
 * - hadoop.ipc wrapped exceptions
 * - nested exceptions
 *
 * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException /
 *            RpcThrottlingException
 * @return null if we didn't find the exception, the exception otherwise.
 */
public static Throwable findException(Object exception) {
  if (exception == null || !(exception instanceof Throwable)) {
    return null;
  }
  Throwable cur = (Throwable) exception;
  while (cur != null) {
    if (isSpecialException(cur)) {
      return cur;
    }
    if (cur instanceof RemoteException) {
      RemoteException re = (RemoteException) cur;
      cur = re.unwrapRemoteException();

      // unwrapRemoteException can return the exception given as a parameter when it cannot
      //  unwrap it. In this case, there is no need to look further
      // noinspection ObjectEquality
      if (cur == re) {
        return cur;
      }

      // When we receive RemoteException which wraps IOException which has a cause as
      // RemoteException we can get into infinite loop here; so if the cause of the exception
      // is RemoteException, we shouldn't look further.
    } else if (cur.getCause() != null && !(cur.getCause() instanceof RemoteException)) {
      cur = cur.getCause();
    } else {
      return cur;
    }
  }

  return null;
}
 
Example #24
Source File: RetryPolicies.java    From hadoop with Apache License 2.0
@Override
public RetryAction shouldRetry(Exception e, int retries,
    int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
  if (failovers >= maxFailovers) {
    return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
        "failovers (" + failovers + ") exceeded maximum allowed ("
        + maxFailovers + ")");
  }
  if (retries - failovers > maxRetries) {
    return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
        + retries + ") exceeded maximum allowed (" + maxRetries + ")");
  }
  
  if (e instanceof ConnectException ||
      e instanceof NoRouteToHostException ||
      e instanceof UnknownHostException ||
      e instanceof StandbyException ||
      e instanceof ConnectTimeoutException ||
      isWrappedStandbyException(e)) {
    return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
        getFailoverOrRetrySleepTime(failovers));
  } else if (e instanceof RetriableException
      || getWrappedRetriableException(e) != null) {
    // RetriableException or RetriableException wrapped 
    return new RetryAction(RetryAction.RetryDecision.RETRY,
          getFailoverOrRetrySleepTime(retries));
  } else if (e instanceof SocketException
      || (e instanceof IOException && !(e instanceof RemoteException))) {
    if (isIdempotentOrAtMostOnce) {
      return RetryAction.FAILOVER_AND_RETRY;
    } else {
      return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
          "the invoked method is not idempotent, and unable to determine "
              + "whether it was invoked");
    }
  } else {
      return fallbackPolicy.shouldRetry(e, retries, failovers,
          isIdempotentOrAtMostOnce);
  }
}
 
Example #25
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * set the modification and access time of a file
 * 
 * @see ClientProtocol#setTimes(String, long, long)
 */
public void setTimes(String src, long mtime, long atime) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("setTimes", src);
  try {
    namenode.setTimes(src, mtime, atime);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
 
Example #26
Source File: DFSOutputStream.java    From RDFS with Apache License 2.0
/**
 * Create a new output stream to the given DataNode.
 * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
 */
DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    Progressable progress,int buffersize, int bytesPerChecksum,
    boolean forceSync, boolean doParallelWrites,
    DatanodeInfo[] favoredNodes) throws IOException {
  this(dfsClient, src, blockSize, progress, bytesPerChecksum, replication,
      forceSync, doParallelWrites, favoredNodes);

  computePacketChunkSize(dfsClient.writePacketSize, bytesPerChecksum);

  try {
    if (dfsClient.namenodeProtocolProxy != null && 
          dfsClient.namenodeProtocolProxy.isMethodSupported("create", String.class, 
             FsPermission.class, String.class, boolean.class, boolean.class,
             short.class, long.class)) {
      dfsClient.namenode.create(src, masked, dfsClient.clientName, overwrite,
                      createParent, replication, blockSize);
    } else {
      dfsClient.namenode.create(src, masked, dfsClient.clientName, overwrite,
                      replication, blockSize);
    }
  } catch(RemoteException re) {
    dfsClient.incWriteExpCntToStats();

    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileAlreadyExistsException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
  streamer.start();
}
 
Example #27
Source File: HealthMonitor.java    From big-c with Apache License 2.0
private boolean isHealthCheckFailedException(Throwable t) {
  return ((t instanceof HealthCheckFailedException) ||
      (t instanceof RemoteException &&
      ((RemoteException)t).unwrapRemoteException(
          HealthCheckFailedException.class) instanceof
          HealthCheckFailedException));
}
 
Example #28
Source File: DFSClient.java    From big-c with Apache License 2.0
public void removeCacheDirective(long id)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeCacheDirective", traceSampler);
  try {
    namenode.removeCacheDirective(id);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
 
Example #29
Source File: HftpFileSystem.java    From big-c with Apache License 2.0
@Override
public void startElement(String ns, String localname, String qname,
            Attributes attrs) throws SAXException {
  if (!ContentSummary.class.getName().equals(qname)) {
    if (RemoteException.class.getSimpleName().equals(qname)) {
      throw new SAXException(RemoteException.valueOf(attrs));
    }
    throw new SAXException("Unrecognized entry: " + qname);
  }

  contentsummary = toContentSummary(attrs);
}
 
Example #30
Source File: AvatarDataNode.java    From RDFS with Apache License 2.0
void handleRegistrationError(RemoteException re) {
  // If either the primary or standby NN throws these exceptions, this
  // datanode will exit. I think this is the right behaviour because
  // the excludes list on both namenodes had better be the same.
  String reClass = re.getClassName(); 
  if (UnregisteredDatanodeException.class.getName().equals(reClass) ||
      DisallowedDatanodeException.class.getName().equals(reClass) ||
      IncorrectVersionException.class.getName().equals(reClass)) {
    LOG.warn("DataNode is shutting down: ", re);
    shutdownDN();
  } else {
    LOG.warn(re);
  }
}