Java Code Examples for org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState#FINALIZED

The following examples show how to use org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState#FINALIZED. Each example lists the project and source file it was taken from; consult the original source for the full context.
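For orientation, here is a minimal sketch of the pattern most of the examples below share: comparing a replica's state against ReplicaState.FINALIZED, the state a replica reaches once all of its bytes have been written and finalized. The FinalizedCounter class is illustrative only and not part of Hadoop; it assumes just the Replica interface and the ReplicaState enum from the HDFS datanode/server packages shown in the imports.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.Replica;

// Illustrative class, not part of Hadoop.
public class FinalizedCounter {
  /** Counts the replicas that have already been finalized. */
  public static int countFinalized(Iterable<? extends Replica> replicas) {
    int count = 0;
    for (Replica replica : replicas) {
      // A FINALIZED replica is complete; anything else (RBW, RWR,
      // TEMPORARY, RUR) is still being written or recovered.
      if (replica.getState() == ReplicaState.FINALIZED) {
        count++;
      }
    }
    return count;
  }
}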
Example 1
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica;
  try {
    // change the replica's state/gs etc.
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      replica = append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo,
                       newGS, b.getNumBytes());
    } else { //RBW
      bumpReplicaGS(replicaInfo, newGS);
      replica = (ReplicaBeingWritten) replicaInfo;
    }
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example 2
Source File: BlockListAsLongs.java    From hadoop with Apache License 2.0
public void add(Replica replica) {
  try {
    // zig-zag to reduce size of legacy blocks
    cos.writeSInt64NoTag(replica.getBlockId());
    cos.writeRawVarint64(replica.getBytesOnDisk());
    cos.writeRawVarint64(replica.getGenerationStamp());
    ReplicaState state = replica.getState();
    // although state is not a 64-bit value, using a long varint to
    // allow for future use of the upper bits
    cos.writeRawVarint64(state.getValue());
    if (state == ReplicaState.FINALIZED) {
      numFinalized++;
    }
    numBlocks++;
  } catch (IOException ioe) {
    // shouldn't happen, ByteString.Output doesn't throw IOE
    throw new IllegalStateException(ioe);
  }
}
 
Example 3
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
private synchronized FinalizedReplica finalizeReplica(String bpid,
    ReplicaInfo replicaInfo) throws IOException {
  FinalizedReplica newReplicaInfo = null;
  if (replicaInfo.getState() == ReplicaState.RUR &&
     ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == 
       ReplicaState.FINALIZED) {
    newReplicaInfo = (FinalizedReplica)
           ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
  } else {
    FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
    File f = replicaInfo.getBlockFile();
    if (v == null) {
      throw new IOException("No volume for temporary file " + f + 
          " for block " + replicaInfo);
    }

    File dest = v.addFinalizedBlock(
        bpid, replicaInfo, f, replicaInfo.getBytesReserved());
    newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());

    if (v.isTransientStorage()) {
      ramDiskReplicaTracker.addReplica(bpid, replicaInfo.getBlockId(), v);
      datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
    }
  }
  volumeMap.add(bpid, newReplicaInfo);

  return newReplicaInfo;
}
 
Example 4
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
private synchronized FinalizedReplica finalizeReplica(String bpid,
    ReplicaInfo replicaInfo) throws IOException {
  FinalizedReplica newReplicaInfo = null;
  if (replicaInfo.getState() == ReplicaState.RUR &&
     ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == 
       ReplicaState.FINALIZED) {
    newReplicaInfo = (FinalizedReplica)
           ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
  } else {
    FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
    File f = replicaInfo.getBlockFile();
    if (v == null) {
      throw new IOException("No volume for temporary file " + f + 
          " for block " + replicaInfo);
    }

    File dest = v.addFinalizedBlock(
        bpid, replicaInfo, f, replicaInfo.getBytesReserved());
    newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());

    if (v.isTransientStorage()) {
      ramDiskReplicaTracker.addReplica(bpid, replicaInfo.getBlockId(), v);
      datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
    }
  }
  volumeMap.add(bpid, newReplicaInfo);

  return newReplicaInfo;
}
 
Example 5
Source File: SimulatedFSDataset.java    From hadoop with Apache License 2.0
/**
 * Check if a block is valid.
 *
 * @param b           The block to check.
 * @param minLength   The minimum length that the block must have.  May be 0.
 * @param state       If this is null, it is ignored.  If it is non-null, we
 *                        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException          If the replica is not found
 *
 * @throws UnexpectedReplicaStateException   If the replica is not in the 
 *                                             expected state.
 */
@Override // {@link FsDatasetSpi}
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException {
  final BInfo binfo = getBInfo(b);
  
  if (binfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if ((state == ReplicaState.FINALIZED && !binfo.isFinalized()) ||
      (state != ReplicaState.FINALIZED && binfo.isFinalized())) {
    throw new UnexpectedReplicaStateException(b, state);
  }
}
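As a hedged usage sketch (not taken from Hadoop itself), a caller could wrap checkBlock to test whether a replica exists and has reached FINALIZED before reading it. The FinalizedCheck class and its method name are hypothetical; the imports reflect the Hadoop 2.x package layout used by the examples on this page.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Illustrative helper, not part of Hadoop.
public class FinalizedCheck {
  /** Returns true only if the replica exists and is already FINALIZED. */
  public static boolean isFinalized(FsDatasetSpi<?> dataset, ExtendedBlock block)
      throws IOException {
    try {
      // minLength of 0: we only care about the state here, not the length.
      dataset.checkBlock(block, 0, ReplicaState.FINALIZED);
      return true;
    } catch (ReplicaNotFoundException | UnexpectedReplicaStateException e) {
      // no replica, or the replica is still RBW/RWR/TEMPORARY/RUR
      return false;
    }
  }
}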
 
Example 6
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // This can happen when the block was successfully finalized (all packets
  // were processed at the DataNode) but the acks for some of the packets
  // never reached the client; the client then re-opens the connection and
  // retries sending those packets. The other reason is that an "append"
  // is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example 7
Source File: PBHelper.java    From big-c with Apache License 2.0
public static ReplicaState convert(ReplicaStateProto state) {
  switch (state) {
  case RBW:
    return ReplicaState.RBW;
  case RUR:
    return ReplicaState.RUR;
  case RWR:
    return ReplicaState.RWR;
  case TEMPORARY:
    return ReplicaState.TEMPORARY;
  case FINALIZED:
  default:
    return ReplicaState.FINALIZED;
  }
}
 
Example 8
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}
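A hedged follow-up sketch, not from the Hadoop source: a caller holding the dataset shown above could log every finalized replica of a block pool like this. The logFinalizedBlocks method is illustrative, and the dataset and LOG fields are assumed to be available to it; FinalizedReplica extends Block, so the usual block accessors apply.

// Illustrative fragment, not part of Hadoop. 'dataset' is the FsDatasetImpl
// shown above and 'bpid' the id of a block pool it manages.
private void logFinalizedBlocks(String bpid) {
  for (FinalizedReplica replica : dataset.getFinalizedBlocks(bpid)) {
    LOG.info("finalized block " + replica.getBlockId()
        + " len=" + replica.getNumBytes()
        + " gs=" + replica.getGenerationStamp());
  }
}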
 
Example 9
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Complete the block write!
 */
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
  if (Thread.interrupted()) {
    // Don't allow data modifications from interrupted threads
    throw new IOException("Cannot finalize block from Interrupted Thread");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    // this is legal, when recovery happens on a file that has
    // been opened for append but never modified
    return;
  }
  finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
 
Example 10
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Complete the block write!
 */
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
  if (Thread.interrupted()) {
    // Don't allow data modifications from interrupted threads
    throw new IOException("Cannot finalize block from Interrupted Thread");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    // this is legal, when recovery happens on a file that has
    // been opened for append but never modified
    return;
  }
  finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
 
Example 11
Source File: ReplicaUnderRecovery.java    From big-c with Apache License 2.0
public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
  super(replica, replica.getVolume(), replica.getDir());
  if ( replica.getState() != ReplicaState.FINALIZED &&
       replica.getState() != ReplicaState.RBW &&
       replica.getState() != ReplicaState.RWR ) {
    throw new IllegalArgumentException("Cannot recover replica: " + replica);
  }
  this.original = replica;
  this.recoveryId = recoveryId;
}
 
Example 12
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // This can happen when the block was successfully finalized (all packets
  // were processed at the DataNode) but the acks for some of the packets
  // never reached the client; the client then re-opens the connection and
  // retries sending those packets. The other reason is that an "append"
  // is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example 13
Source File: BlockListAsLongs.java    From big-c with Apache License 2.0
public BlockReportReplica(Block block) {
  super(block);
  if (block instanceof BlockReportReplica) {
    this.state = ((BlockReportReplica)block).getState();
  } else {
    this.state = ReplicaState.FINALIZED;
  }
}
 
Example 14
Source File: ExternalDatasetImpl.java    From big-c with Apache License 2.0
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  return new ReplicaRecoveryInfo(0, 0, 0, ReplicaState.FINALIZED);
}
 
Example 15
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
 */
private void cacheBlock(String bpid, long blockId) {
  FsVolumeImpl volume;
  String blockFileName;
  long length, genstamp;
  Executor volumeExecutor;

  synchronized (this) {
    ReplicaInfo info = volumeMap.get(bpid, blockId);
    boolean success = false;
    try {
      if (info == null) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": ReplicaInfo not found.");
        return;
      }
      if (info.getState() != ReplicaState.FINALIZED) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": replica is not finalized; it is in state " +
            info.getState());
        return;
      }
      try {
        volume = (FsVolumeImpl)info.getVolume();
        if (volume == null) {
          LOG.warn("Failed to cache block with id " + blockId + ", pool " +
              bpid + ": volume not found.");
          return;
        }
      } catch (ClassCastException e) {
        LOG.warn("Failed to cache block with id " + blockId +
            ": volume was not an instance of FsVolumeImpl.");
        return;
      }
      if (volume.isTransientStorage()) {
        LOG.warn("Caching not supported on block with id " + blockId +
            " since the volume is backed by RAM.");
        return;
      }
      success = true;
    } finally {
      if (!success) {
        cacheManager.numBlocksFailedToCache.incrementAndGet();
      }
    }
    blockFileName = info.getBlockFile().getAbsolutePath();
    length = info.getVisibleLength();
    genstamp = info.getGenerationStamp();
    volumeExecutor = volume.getCacheExecutor();
  }
  cacheManager.cacheBlock(blockId, bpid, 
      blockFileName, length, genstamp, volumeExecutor);
}
 
Example 16
Source File: FinalizedReplica.java    From hadoop with Apache License 2.0
@Override  // ReplicaInfo
public ReplicaState getState() {
  return ReplicaState.FINALIZED;
}
 
Example 17
Source File: BlockListAsLongs.java    From big-c with Apache License 2.0
@Override
public long[] getBlockListAsLongs() {
  // terribly inefficient but only occurs if server tries to transcode
  // an undecoded buffer into longs - ie. it will never happen but let's
  // handle it anyway
  if (numFinalized == -1) {
    int n = 0;
    for (Replica replica : this) {
      if (replica.getState() == ReplicaState.FINALIZED) {
        n++;
      }
    }
    numFinalized = n;
  }
  int numUc = numBlocks - numFinalized;
  int size = 2 + 3*(numFinalized+1) + 4*(numUc);
  long[] longs = new long[size];
  longs[0] = numFinalized;
  longs[1] = numUc;

  int idx = 2;
  int ucIdx = idx + 3*numFinalized;
  // delimiter block
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;

  for (BlockReportReplica block : this) {
    switch (block.getState()) {
      case FINALIZED: {
        longs[idx++] = block.getBlockId();
        longs[idx++] = block.getNumBytes();
        longs[idx++] = block.getGenerationStamp();
        break;
      }
      default: {
        longs[ucIdx++] = block.getBlockId();
        longs[ucIdx++] = block.getNumBytes();
        longs[ucIdx++] = block.getGenerationStamp();
        longs[ucIdx++] = block.getState().getValue();
        break;
      }
    }
  }
  return longs;
}
 
Example 18
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
 */
private void cacheBlock(String bpid, long blockId) {
  FsVolumeImpl volume;
  String blockFileName;
  long length, genstamp;
  Executor volumeExecutor;

  synchronized (this) {
    ReplicaInfo info = volumeMap.get(bpid, blockId);
    boolean success = false;
    try {
      if (info == null) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": ReplicaInfo not found.");
        return;
      }
      if (info.getState() != ReplicaState.FINALIZED) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": replica is not finalized; it is in state " +
            info.getState());
        return;
      }
      try {
        volume = (FsVolumeImpl)info.getVolume();
        if (volume == null) {
          LOG.warn("Failed to cache block with id " + blockId + ", pool " +
              bpid + ": volume not found.");
          return;
        }
      } catch (ClassCastException e) {
        LOG.warn("Failed to cache block with id " + blockId +
            ": volume was not an instance of FsVolumeImpl.");
        return;
      }
      if (volume.isTransientStorage()) {
        LOG.warn("Caching not supported on block with id " + blockId +
            " since the volume is backed by RAM.");
        return;
      }
      success = true;
    } finally {
      if (!success) {
        cacheManager.numBlocksFailedToCache.incrementAndGet();
      }
    }
    blockFileName = info.getBlockFile().getAbsolutePath();
    length = info.getVisibleLength();
    genstamp = info.getGenerationStamp();
    volumeExecutor = volume.getCacheExecutor();
  }
  cacheManager.cacheBlock(blockId, bpid, 
      blockFileName, length, genstamp, volumeExecutor);
}
 
Example 19
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, 
    long expectedBlockLen) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED &&
      replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
  }

  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp
        + ". Expected GS range is [" + b.getGenerationStamp() + ", " + 
        newGS + "].");
  }
  
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
    // kill the previous writer
    rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
    rbw.setWriter(Thread.currentThread());
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk() 
        || replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + 
          "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + 
          rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() +
          ") are not the same.");
    }
  }
  
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaLen + 
        " expected length is " + expectedBlockLen);
  }
  
  return replicaInfo;
}
 
Example 20
Source File: BlockListAsLongs.java    From hadoop with Apache License 2.0
@Override
public long[] getBlockListAsLongs() {
  // terribly inefficient but only occurs if server tries to transcode
  // an undecoded buffer into longs - ie. it will never happen but let's
  // handle it anyway
  if (numFinalized == -1) {
    int n = 0;
    for (Replica replica : this) {
      if (replica.getState() == ReplicaState.FINALIZED) {
        n++;
      }
    }
    numFinalized = n;
  }
  int numUc = numBlocks - numFinalized;
  int size = 2 + 3*(numFinalized+1) + 4*(numUc);
  long[] longs = new long[size];
  longs[0] = numFinalized;
  longs[1] = numUc;

  int idx = 2;
  int ucIdx = idx + 3*numFinalized;
  // delimiter block
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;

  for (BlockReportReplica block : this) {
    switch (block.getState()) {
      case FINALIZED: {
        longs[idx++] = block.getBlockId();
        longs[idx++] = block.getNumBytes();
        longs[idx++] = block.getGenerationStamp();
        break;
      }
      default: {
        longs[ucIdx++] = block.getBlockId();
        longs[ucIdx++] = block.getNumBytes();
        longs[ucIdx++] = block.getGenerationStamp();
        longs[ucIdx++] = block.getState().getValue();
        break;
      }
    }
  }
  return longs;
}