Java Code Examples for org.apache.hadoop.hdfs.protocolPB.PBHelper#convert()

The following examples show how to use org.apache.hadoop.hdfs.protocolPB.PBHelper#convert(). All examples are taken from the Apache Hadoop source tree; each one lists its source file and license.
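
PBHelper pairs each HDFS domain type with overloaded convert() methods that translate to and from its protocol-buffer counterpart, so most call sites amount to a round trip. A minimal sketch of the pattern, assuming the DatanodeID overloads present in Hadoop 2.x (illustrative, not taken from any one example below):

// Illustrative round trip: domain object -> protobuf -> domain object.
static DatanodeID roundTrip(DatanodeID id) {
  HdfsProtos.DatanodeIDProto proto = PBHelper.convert(id); // domain -> proto
  return PBHelper.convert(proto);                          // proto -> domain
}
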
Example 1
Source File: Receiver.java    From hadoop with Apache License 2.0
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
    OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
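
For context, opTransferBlock is one arm of the Receiver's opcode dispatch: processOp reads the operation code from the stream and routes to the matching op* handler, each of which parses its proto and runs the fields through PBHelper.convert() before invoking the protocol method. A hedged sketch of that dispatch, abridged to the usual shape of Receiver#processOp (illustrative):

/** Sketch: route an opcode to its handler (abridged, illustrative). */
protected final void processOp(Op op) throws IOException {
  switch (op) {
  case TRANSFER_BLOCK:
    opTransferBlock(in);
    break;
  case REQUEST_SHORT_CIRCUIT_FDS:
    opRequestShortCircuitFds(in);
    break;
  // ... READ_BLOCK, WRITE_BLOCK, COPY_BLOCK, etc. elided ...
  default:
    throw new IOException("Unknown op " + op + " in data stream");
  }
}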
 
Example 2
Source File: Receiver.java    From hadoop with Apache License 2.0
/** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
private void opRequestShortCircuitFds(DataInputStream in) throws IOException {
  final OpRequestShortCircuitAccessProto proto =
    OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
  SlotId slotId = proto.hasSlotId() ?
      PBHelper.convert(proto.getSlotId()) : null;
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getToken()),
        slotId, proto.getMaxVersion(),
        proto.getSupportsReceiptVerification());
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Example 3
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
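
In DFSClient#getFileChecksum this helper backs a compatibility fallback: if the block-checksum response from an older DataNode carries no checksum type, the client infers it by reading the replica's first byte. A simplified sketch of the call site (assumed shape and variable names, not copied from this page):

// Simplified call-site shape (illustrative).
final Type ct;
if (checksumData.hasCrcType()) {
  ct = PBHelper.convert(checksumData.getCrcType());
} else {
  // Older DataNodes omit the type; fall back to reading one byte.
  ct = inferChecksumTypeByReading(lb, datanode);
}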
 
Example 4
Source File: DataTransferProtoUtil.java    From hadoop with Apache License 2.0
public static ChecksumProto toProto(DataChecksum checksum) {
  ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType());
  // ChecksumType#valueOf never returns null
  return ChecksumProto.newBuilder()
    .setBytesPerChecksum(checksum.getBytesPerChecksum())
    .setType(type)
    .build();
}
 
Example 5
Source File: QJournalProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public CanRollBackResponseProto canRollBack(RpcController controller,
    CanRollBackRequestProto request) throws ServiceException {
  try {
    StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE);
    Boolean result = impl.canRollBack(convert(request.getJid()), si,
        PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE),
        request.getTargetLayoutVersion());
    return CanRollBackResponseProto.newBuilder()
        .setCanRollBack(result)
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example 6
Source File: QJournalProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public DoUpgradeResponseProto doUpgrade(RpcController controller,
    DoUpgradeRequestProto request) throws ServiceException {
  StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.JOURNAL_NODE);
  try {
    impl.doUpgrade(convert(request.getJid()), si);
    return DoUpgradeResponseProto.getDefaultInstance();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
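
Both translator methods above also call a local convert(...) overload on the journal id; in this server-side translator that helper simply unwraps the identifier string from the proto. A hedged sketch of its likely shape (assumption, not shown on this page):

// Likely shape of the local journal-id helper (illustrative).
private String convert(JournalIdProto jid) {
  return jid.getIdentifier();
}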
 
Example 7
Source File: Receiver.java    From hadoop with Apache License 2.0
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
          CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning() : false),
        (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Example 8
Source File: DataTransferProtoUtil.java    From hadoop with Apache License 2.0
public static DataChecksum fromProto(ChecksumProto proto) {
  if (proto == null) return null;

  int bytesPerChecksum = proto.getBytesPerChecksum();
  DataChecksum.Type type = PBHelper.convert(proto.getType());
  return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
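
Together with toProto from Example 4, fromProto forms an inverse pair around PBHelper.convert() on the checksum type. A hedged round-trip check, assuming the standard DataChecksum.newDataChecksum(type, bytesPerChecksum) factory already used above:

// Illustrative round trip through the two helpers above.
static void checksumRoundTrip() {
  DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
  ChecksumProto proto = DataTransferProtoUtil.toProto(sum);
  DataChecksum back = DataTransferProtoUtil.fromProto(proto);
  assert sum.equals(back); // same type and bytesPerChecksum survive the trip
}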
 
Example 9
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * This function combines the per-file encryption info (obtained
 * from the inode's XAttrs), and the encryption info from its zone, and
 * returns a consolidated FileEncryptionInfo instance. Null is returned
 * for non-encrypted files.
 *
 * @param inode inode of the file
 * @param snapshotId ID of the snapshot that
 *                   we want to get encryption info from
 * @param iip inodes in the path containing the file, passed in to
 *            avoid obtaining the list of inodes again; if iip is
 *            null then the list of inodes will be obtained again
 * @return consolidated file encryption info; null for non-encrypted files
 */
FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId,
    INodesInPath iip) throws IOException {
  if (!inode.isFile()) {
    return null;
  }
  readLock();
  try {
    EncryptionZone encryptionZone = getEZForPath(iip);
    if (encryptionZone == null) {
      // not an encrypted file
      return null;
    } else if (encryptionZone.getPath() == null
        || encryptionZone.getPath().isEmpty()) {
      if (NameNode.LOG.isDebugEnabled()) {
        NameNode.LOG.debug("Encryption zone " +
            encryptionZone.getPath() + " does not have a valid path.");
      }
    }

    final CryptoProtocolVersion version = encryptionZone.getVersion();
    final CipherSuite suite = encryptionZone.getSuite();
    final String keyName = encryptionZone.getKeyName();

    XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByName(inode,
                                                             snapshotId,
                                                             CRYPTO_XATTR_FILE_ENCRYPTION_INFO);

    if (fileXAttr == null) {
      NameNode.LOG.warn("Could not find encryption XAttr for file " +
          iip.getPath() + " in encryption zone " + encryptionZone.getPath());
      return null;
    }

    try {
      HdfsProtos.PerFileEncryptionInfoProto fileProto =
          HdfsProtos.PerFileEncryptionInfoProto.parseFrom(
              fileXAttr.getValue());
      return PBHelper.convert(fileProto, suite, version, keyName);
    } catch (InvalidProtocolBufferException e) {
      throw new IOException("Could not parse file encryption info for " +
          "inode " + inode, e);
    }
  } finally {
    readUnlock();
  }
}
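
The write side of this path is the mirror image: when a file is created inside an encryption zone, its per-file info is serialized through PBHelper and stored under the same CRYPTO_XATTR_FILE_ENCRYPTION_INFO XAttr that this method reads back. A hedged sketch of that counterpart (names follow the usual FSDirectory#setFileEncryptionInfo shape; treat as illustrative):

// Illustrative write-side counterpart: serialize and store the per-file info.
void setFileEncryptionInfo(String src, FileEncryptionInfo info)
    throws IOException {
  final HdfsProtos.PerFileEncryptionInfoProto proto =
      PBHelper.convertPerFileEncInfo(info);          // domain -> protobuf
  final XAttr fileEncryptionAttr = XAttrHelper.buildXAttr(
      CRYPTO_XATTR_FILE_ENCRYPTION_INFO, proto.toByteArray());
  // ...set the XAttr on the inode under the write lock (elided)...
}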
 
Example 10
Source File: DfsClientShmManager.java    From hadoop with Apache License 2.0
/**
 * Ask the DataNode for a new shared memory segment.  This function must be
 * called with the manager lock held.  We will release the lock while
 * communicating with the DataNode.
 *
 * @param clientName    The current client name.
 * @param peer          The peer to use to talk to the DataNode.
 *
 * @return              Null if the DataNode does not support shared memory
 *                        segments, or experienced an error creating the
 *                        shm.  The shared memory segment itself on success.
 * @throws IOException  If there was an error communicating over the socket.
 *                        We will not throw an IOException unless the socket
 *                        itself (or the network) is the problem.
 */
private DfsClientShm requestNewShm(String clientName, DomainPeer peer)
    throws IOException {
  final DataOutputStream out = 
      new DataOutputStream(
          new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).requestShortCircuitShm(clientName);
  ShortCircuitShmResponseProto resp = 
      ShortCircuitShmResponseProto.parseFrom(
          PBHelper.vintPrefixed(peer.getInputStream()));
  String error = resp.hasError() ? resp.getError() : "(unknown)";
  switch (resp.getStatus()) {
  case SUCCESS:
    DomainSocket sock = peer.getDomainSocket();
    byte[] buf = new byte[1];
    FileInputStream[] fis = new FileInputStream[1];
    if (sock.recvFileInputStreams(fis, buf, 0, buf.length) < 0) {
      throw new EOFException("got EOF while trying to transfer the " +
          "file descriptor for the shared memory segment.");
    }
    if (fis[0] == null) {
      throw new IOException("the datanode " + datanode + " failed to " +
          "pass a file descriptor for the shared memory segment.");
    }
    try {
      DfsClientShm shm = 
          new DfsClientShm(PBHelper.convert(resp.getId()),
              fis[0], this, peer);
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": createNewShm: created " + shm);
      }
      return shm;
    } finally {
      IOUtils.cleanup(LOG, fis[0]);
    }
  case ERROR_UNSUPPORTED:
    // The DataNode just does not support short-circuit shared memory
    // access, and we should stop asking.
    LOG.info(this + ": datanode does not support short-circuit " +
        "shared memory access: " + error);
    disabled = true;
    return null;
  default:
    // The datanode experienced some kind of unexpected error when trying to
    // create the short-circuit shared memory segment.
    LOG.warn(this + ": error requesting short-circuit shared memory " +
        "access: " + error);
    return null;
  }
}
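
Note the error-handling contract: both the ERROR_UNSUPPORTED and default branches return null rather than throwing, reserving IOException for genuine socket failures as the Javadoc promises. A caller can therefore treat null uniformly as "no shared memory" and fall back, roughly like this (illustrative sketch, not this page's code):

// Illustrative caller: null uniformly means "no shm"; fall back gracefully.
DfsClientShm shm = requestNewShm(clientName, peer);
if (shm == null) {
  // Unsupported or failed on the DataNode side; continue without
  // shared-memory slots (plain short-circuit or remote reads).
  return null;
}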
 