org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException. Each example is taken from an open-source project; the source file, project, and license are listed in the header above the code.
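Every client-side example on this page follows the same pattern: the NameNode signals NotReplicatedYetException across the RPC boundary wrapped in a RemoteException, and the caller matches on the wrapped class name before sleeping and retrying with exponential backoff. The snippet below is a minimal, self-contained sketch of that pattern rather than actual HDFS code; the NotReplicatedYetRetrySketch class, the BlockAllocator interface, and allocateWithRetry are hypothetical names standing in for a real ClientProtocol.addBlock() call.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.ipc.RemoteException;

class NotReplicatedYetRetrySketch {

  /** Hypothetical stand-in for a call to ClientProtocol.addBlock(). */
  interface BlockAllocator {
    LocatedBlock allocate() throws IOException;
  }

  static LocatedBlock allocateWithRetry(BlockAllocator allocator, int retries)
      throws IOException {
    long sleepMs = 400; // same initial backoff as the DFSOutputStream examples below
    while (true) {
      try {
        return allocator.allocate();
      } catch (RemoteException e) {
        // The NameNode reports the failure by class name inside a RemoteException.
        boolean notReplicatedYet = NotReplicatedYetException.class.getName()
            .equals(e.getClassName());
        if (!notReplicatedYet || retries-- == 0) {
          throw e; // not retriable, or out of retries
        }
        try {
          Thread.sleep(sleepMs); // wait for the previous block to replicate
          sleepMs *= 2;          // exponential backoff
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while waiting for replication", ie);
        }
      }
    }
  }
}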
Example #1
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
    String[] favoredNodes)
    throws AccessControlException, FileNotFoundException,
    NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
    IOException {
  AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
      .setSrc(src).setClientName(clientName).setFileId(fileId);
  if (previous != null) 
    req.setPrevious(PBHelper.convert(previous)); 
  if (excludeNodes != null) 
    req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
  if (favoredNodes != null) {
    req.addAllFavoredNodes(Arrays.asList(favoredNodes));
  }
  try {
    return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #2
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
    String[] favoredNodes)
    throws AccessControlException, FileNotFoundException,
    NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
    IOException {
  AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
      .setSrc(src).setClientName(clientName).setFileId(fileId);
  if (previous != null) 
    req.setPrevious(PBHelper.convert(previous)); 
  if (excludeNodes != null) 
    req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
  if (favoredNodes != null) {
    req.addAllFavoredNodes(Arrays.asList(favoredNodes));
  }
  try {
    return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #3
Source File: TestDFSClientRetries.java    From RDFS with Apache License 2.0
public LocatedBlock addBlock(String src, String clientName)
throws IOException
{
  num_calls++;
  if (num_calls > num_calls_allowed) { 
    throw new IOException("addBlock called more times than "
                          + RETRY_CONFIG
                          + " allows.");
  } else {
      throw new RemoteException(NotReplicatedYetException.class.getName(),
                                ADD_BLOCK_EXCEPTION);
  }
}
 
Example #4
Source File: TestDFSClientRetries.java    From hadoop-gpu with Apache License 2.0
public LocatedBlock addBlock(String src, String clientName)
throws IOException
{
  num_calls++;
  if (num_calls > num_calls_allowed) { 
    throw new IOException("addBlock called more times than "
                          + RETRY_CONFIG
                          + " allows.");
  } else {
      throw new RemoteException(NotReplicatedYetException.class.getName(),
                                ADD_BLOCK_EXCEPTION);
  }
}
 
Example #5
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)  throws IOException {
  int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
  long sleeptime = 400;
  while (true) {
    long localstart = Time.monotonicNow();
    while (true) {
      try {
        return dfsClient.namenode.addBlock(src, dfsClient.clientName,
            block, excludedNodes, fileId, favoredNodes);
      } catch (RemoteException e) {
        IOException ue = 
          e.unwrapRemoteException(FileNotFoundException.class,
                                  AccessControlException.class,
                                  NSQuotaExceededException.class,
                                  DSQuotaExceededException.class,
                                  UnresolvedPathException.class);
        if (ue != e) { 
          throw ue; // no need to retry these exceptions
        }

        // Retry only when the NameNode reported NotReplicatedYetException;
        // any other remote exception is rethrown to the caller.
        if (NotReplicatedYetException.class.getName().
            equals(e.getClassName())) {
          if (retries == 0) { 
            throw e;
          } else {
            --retries;
            DFSClient.LOG.info("Exception while adding a block", e);
            long elapsed = Time.monotonicNow() - localstart;
            if (elapsed > 5000) {
              DFSClient.LOG.info("Waiting for replication for "
                  + (elapsed / 1000) + " seconds");
            }
            try {
              DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
                  + " retries left " + retries);
              Thread.sleep(sleeptime);
              sleeptime *= 2;
            } catch (InterruptedException ie) {
              DFSClient.LOG.warn("Caught exception ", ie);
            }
          }
        } else {
          throw e;
        }

      }
    }
  } 
}
 
Example #6
Source File: TestDFSClientRetries.java    From hadoop with Apache License 2.0
/**
 * Verify that client will correctly give up after the specified number
 * of times trying to add a block
 */
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{ 
  final String exceptionMsg = "Nope, not replicated yet...";
  final int maxRetries = 1; // Allow one retry (total of two calls)
  conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
  
  NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
  Answer<Object> answer = new ThrowsException(new IOException()) {
    int retryCount = 0;
    
    @Override
    public Object answer(InvocationOnMock invocation) 
                     throws Throwable {
      retryCount++;
      System.out.println("addBlock has been called "  + retryCount + " times");
      if(retryCount > maxRetries + 1) // First call was not a retry
        throw new IOException("Retried too many times: " + retryCount);
      else
        throw new RemoteException(NotReplicatedYetException.class.getName(),
                                  exceptionMsg);
    }
  };
  when(mockNN.addBlock(anyString(), 
                       anyString(),
                       any(ExtendedBlock.class),
                       any(DatanodeInfo[].class),
                       anyLong(), any(String[].class))).thenAnswer(answer);
  
  Mockito.doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
  
  Mockito.doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mockNN)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final DFSClient client = new DFSClient(null, mockNN, conf, null);
  OutputStream os = client.create("testfile", true);
  os.write(20); // write one random byte
  
  try {
    os.close();
  } catch (Exception e) {
    assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
         e.getMessage().equals(exceptionMsg));
  }
}
 
Example #7
Source File: ProxyClientProtocolHandler.java    From nnproxy with Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName, ExtendedBlock previous,
        DatanodeInfo[] excludeNodes, long fileId, String[] favoredNodes)
        throws AccessControlException, FileNotFoundException, NotReplicatedYetException,
        SafeModeException, UnresolvedLinkException, IOException {
    RouteInfo routeInfo = router.route(src);
    return routeInfo.upstream.addBlock(routeInfo.realPath, clientName, previous,
            excludeNodes, fileId, favoredNodes);
}
 
Example #8
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)  throws IOException {
  int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
  long sleeptime = 400;
  while (true) {
    long localstart = Time.monotonicNow();
    while (true) {
      try {
        return dfsClient.namenode.addBlock(src, dfsClient.clientName,
            block, excludedNodes, fileId, favoredNodes);
      } catch (RemoteException e) {
        IOException ue = 
          e.unwrapRemoteException(FileNotFoundException.class,
                                  AccessControlException.class,
                                  NSQuotaExceededException.class,
                                  DSQuotaExceededException.class,
                                  UnresolvedPathException.class);
        if (ue != e) { 
          throw ue; // no need to retry these exceptions
        }

        // Retry only when the NameNode reported NotReplicatedYetException;
        // any other remote exception is rethrown to the caller.
        if (NotReplicatedYetException.class.getName().
            equals(e.getClassName())) {
          if (retries == 0) { 
            throw e;
          } else {
            --retries;
            DFSClient.LOG.info("Exception while adding a block", e);
            long elapsed = Time.monotonicNow() - localstart;
            if (elapsed > 5000) {
              DFSClient.LOG.info("Waiting for replication for "
                  + (elapsed / 1000) + " seconds");
            }
            try {
              DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
                  + " retries left " + retries);
              Thread.sleep(sleeptime);
              sleeptime *= 2;
            } catch (InterruptedException ie) {
              DFSClient.LOG.warn("Caught exception ", ie);
            }
          }
        } else {
          throw e;
        }

      }
    }
  } 
}
 
Example #9
Source File: TestDFSClientRetries.java    From big-c with Apache License 2.0
/**
 * Verify that client will correctly give up after the specified number
 * of times trying to add a block
 */
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{ 
  final String exceptionMsg = "Nope, not replicated yet...";
  final int maxRetries = 1; // Allow one retry (total of two calls)
  conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
  
  NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
  Answer<Object> answer = new ThrowsException(new IOException()) {
    int retryCount = 0;
    
    @Override
    public Object answer(InvocationOnMock invocation) 
                     throws Throwable {
      retryCount++;
      System.out.println("addBlock has been called "  + retryCount + " times");
      if(retryCount > maxRetries + 1) // First call was not a retry
        throw new IOException("Retried too many times: " + retryCount);
      else
        throw new RemoteException(NotReplicatedYetException.class.getName(),
                                  exceptionMsg);
    }
  };
  when(mockNN.addBlock(anyString(), 
                       anyString(),
                       any(ExtendedBlock.class),
                       any(DatanodeInfo[].class),
                       anyLong(), any(String[].class))).thenAnswer(answer);
  
  Mockito.doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
  
  Mockito.doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mockNN)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final DFSClient client = new DFSClient(null, mockNN, conf, null);
  OutputStream os = client.create("testfile", true);
  os.write(20); // write one random byte
  
  try {
    os.close();
  } catch (Exception e) {
    assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
         e.getMessage().equals(exceptionMsg));
  }
}
 
Example #10
Source File: DFSOutputStream.java    From RDFS with Apache License 2.0
private LocatedBlock locateFollowingBlock(long start,
                                          DatanodeInfo[] excludedNodes
                                          ) throws IOException {
  int retries = dfsClient.conf.getInt(
      "dfs.client.block.write.locateFollowingBlock.retries", 5);
  
  long sleeptime = 400;
  while (true) {
    long localstart = System.currentTimeMillis();
    while (true) {
      try {
        VersionedLocatedBlock loc = null;
        if (dfsClient.namenodeProtocolProxy != null
            && dfsClient.namenodeProtocolProxy.isMethodSupported(
                "addBlockAndFetchMetaInfo", String.class, String.class,
                DatanodeInfo[].class, DatanodeInfo[].class, long.class,
                Block.class)) {
         loc = dfsClient.namenode.addBlockAndFetchMetaInfo(src, 
             dfsClient.clientName, excludedNodes, favoredNodes,
             this.lastBlkOffset, getLastBlock());
        } else if (dfsClient.namenodeProtocolProxy != null
            && dfsClient.namenodeProtocolProxy.isMethodSupported(
                "addBlockAndFetchMetaInfo", String.class, String.class,
                DatanodeInfo[].class, DatanodeInfo[].class, long.class)) {
          loc = dfsClient.namenode.addBlockAndFetchMetaInfo(src,
              dfsClient.clientName, excludedNodes, favoredNodes, this.lastBlkOffset);
        } else if (dfsClient.namenodeProtocolProxy != null
            && dfsClient.namenodeProtocolProxy.isMethodSupported(
                "addBlockAndFetchMetaInfo", String.class, String.class,
                DatanodeInfo[].class, long.class)) {
          loc = dfsClient.namenode.addBlockAndFetchMetaInfo(src,
              dfsClient.clientName, excludedNodes, this.lastBlkOffset);
        } else if (dfsClient.namenodeProtocolProxy != null
            && dfsClient.namenodeProtocolProxy.isMethodSupported(
                "addBlockAndFetchMetaInfo", String.class, String.class,
                DatanodeInfo[].class)) {
          loc = dfsClient.namenode.addBlockAndFetchMetaInfo(src,
              dfsClient.clientName, excludedNodes);
        } else if (dfsClient.namenodeProtocolProxy != null
            && dfsClient.namenodeProtocolProxy.isMethodSupported(
                "addBlockAndFetchVersion", String.class, String.class,
                DatanodeInfo[].class)) {
          loc = dfsClient.namenode.addBlockAndFetchVersion(src,
              dfsClient.clientName, excludedNodes);
        } else if (dfsClient.namenodeProtocolProxy != null
            && dfsClient.namenodeProtocolProxy.isMethodSupported("addBlock",
                String.class, String.class, DatanodeInfo[].class)) {
          return dfsClient.namenode.addBlock(src, dfsClient.clientName,
              excludedNodes);
        } else {
          return dfsClient.namenode.addBlock(src, dfsClient.clientName);
        }
        dfsClient.updateDataTransferProtocolVersionIfNeeded(
            loc.getDataProtocolVersion());
        if (loc instanceof LocatedBlockWithMetaInfo) {
          LocatedBlockWithMetaInfo metaLoc = (LocatedBlockWithMetaInfo)loc;
          this.namespaceId = metaLoc.getNamespaceID();
          dfsClient.getNewNameNodeIfNeeded(metaLoc.getMethodFingerPrint());
        }
        return loc;
      } catch (RemoteException e) {
        IOException ue =
          e.unwrapRemoteException(FileNotFoundException.class,
                                  AccessControlException.class,
                                  NSQuotaExceededException.class,
                                  DSQuotaExceededException.class);
        if (ue != e) {
          throw ue; // no need to retry these exceptions
        }

        if (NotReplicatedYetException.class.getName().
            equals(e.getClassName())) {

            if (retries == 0) {
              throw e;
            } else {
              --retries;
              DFSClient.LOG.info(StringUtils.stringifyException(e));
              if (System.currentTimeMillis() - localstart > 5000) {
                DFSClient.LOG.info("Waiting for replication for "
                    + (System.currentTimeMillis() - localstart) / 1000
                    + " seconds");
              }
              try {
                DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
                    + " retries left " + retries);
                Thread.sleep(sleeptime);
                sleeptime *= 2;
              } catch (InterruptedException ie) {
                // Sleep interrupted; fall through and retry immediately.
              }
            }
        } else {
          throw e;
        }
      }
    }
  }
}
 
Example #11
Source File: DFSClient.java    From hadoop-gpu with Apache License 2.0
private LocatedBlock locateFollowingBlock(long start
                                          ) throws IOException {     
  int retries = conf.getInt("dfs.client.block.write.locateFollowingBlock.retries", 5);
  long sleeptime = 400;
  while (true) {
    long localstart = System.currentTimeMillis();
    while (true) {
      try {
        return namenode.addBlock(src, clientName);
      } catch (RemoteException e) {
        IOException ue = 
          e.unwrapRemoteException(FileNotFoundException.class,
                                  AccessControlException.class,
                                  NSQuotaExceededException.class,
                                  DSQuotaExceededException.class);
        if (ue != e) { 
          throw ue; // no need to retry these exceptions
        }
        
        if (NotReplicatedYetException.class.getName().
            equals(e.getClassName())) {

            if (retries == 0) { 
              throw e;
            } else {
              --retries;
              LOG.info(StringUtils.stringifyException(e));
              if (System.currentTimeMillis() - localstart > 5000) {
                LOG.info("Waiting for replication for "
                    + (System.currentTimeMillis() - localstart) / 1000
                    + " seconds");
              }
              try {
                LOG.warn("NotReplicatedYetException sleeping " + src
                    + " retries left " + retries);
                Thread.sleep(sleeptime);
                sleeptime *= 2;
              } catch (InterruptedException ie) {
                // Sleep interrupted; fall through and retry immediately.
              }
            }
        } else {
          throw e;
        }
      }
    }
  } 
}
 
Example #12
Source File: ClientProtocol.java    From hadoop with Apache License 2.0
/**
 * A client that wants to write an additional block to the 
 * indicated filename (which must currently be open for writing)
 * should call addBlock().  
 *
 * addBlock() allocates a new block, together with the datanodes to
 * which the block data should be replicated.
 * 
 * addBlock() also commits the previous block by reporting
 * to the name-node the actual generation stamp and the length
 * of the block that the client has transmitted to data-nodes.
 *
 * @param src the file being created
 * @param clientName the name of the client that adds the block
 * @param previous  previous block
 * @param excludeNodes a list of nodes that should not be
 * allocated for the current block
 * @param fileId the id uniquely identifying a file
 * @param favoredNodes the list of nodes where the client wants the blocks.
 *          Nodes are identified by either host name or address.
 *
 * @return LocatedBlock allocated block information.
 *
 * @throws AccessControlException If access is denied
 * @throws FileNotFoundException If file <code>src</code> is not found
 * @throws NotReplicatedYetException previous blocks of the file are not
 *           replicated yet. Blocks cannot be added until replication
 *           completes.
 * @throws SafeModeException create not allowed in safemode
 * @throws UnresolvedLinkException If <code>src</code> contains a symlink
 * @throws IOException If an I/O error occurred
 */
@Idempotent
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId, 
    String[] favoredNodes)
    throws AccessControlException, FileNotFoundException,
    NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
    IOException;
 
Example #13
Source File: ClientProtocol.java    From big-c with Apache License 2.0
/**
 * A client that wants to write an additional block to the 
 * indicated filename (which must currently be open for writing)
 * should call addBlock().  
 *
 * addBlock() allocates a new block, together with the datanodes to
 * which the block data should be replicated.
 * 
 * addBlock() also commits the previous block by reporting
 * to the name-node the actual generation stamp and the length
 * of the block that the client has transmitted to data-nodes.
 *
 * @param src the file being created
 * @param clientName the name of the client that adds the block
 * @param previous  previous block
 * @param excludeNodes a list of nodes that should not be
 * allocated for the current block
 * @param fileId the id uniquely identifying a file
 * @param favoredNodes the list of nodes where the client wants the blocks.
 *          Nodes are identified by either host name or address.
 *
 * @return LocatedBlock allocated block information.
 *
 * @throws AccessControlException If access is denied
 * @throws FileNotFoundException If file <code>src</code> is not found
 * @throws NotReplicatedYetException previous blocks of the file are not
 *           replicated yet. Blocks cannot be added until replication
 *           completes.
 * @throws SafeModeException create not allowed in safemode
 * @throws UnresolvedLinkException If <code>src</code> contains a symlink
 * @throws IOException If an I/O error occurred
 */
@Idempotent
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId, 
    String[] favoredNodes)
    throws AccessControlException, FileNotFoundException,
    NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
    IOException;
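For completeness, NotReplicatedYetException itself is a plain IOException subclass with a message-only constructor. As the ClientProtocol Javadoc above states, the NameNode raises it when the previous blocks of the file have not finished replicating, so no new block can be added yet. The snippet below is a minimal sketch of throwing it from such a check; the PenultimateBlockCheckSketch class, the ReplicationView interface, and isMinimallyReplicated are hypothetical names introduced here for illustration, not the NameNode's actual internals.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;

class PenultimateBlockCheckSketch {

  /** Hypothetical replication check; the real logic lives inside the NameNode. */
  interface ReplicationView {
    boolean isMinimallyReplicated(String src);
  }

  static void checkPreviousBlockReplicated(ReplicationView view, String src)
      throws IOException {
    if (!view.isMinimallyReplicated(src)) {
      // The exception only carries a message; clients match on the class name
      // after it crosses the wire inside a RemoteException (see the examples above).
      throw new NotReplicatedYetException("Not replicated yet: " + src);
    }
  }
}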