Java Code Examples for org.apache.hadoop.net.NetUtils#getOutputStream()

The following examples show how to use org.apache.hadoop.net.NetUtils#getOutputStream(). Both overloads of the method appear below: getOutputStream(Socket), which applies no write timeout, and getOutputStream(Socket, long), whose writes fail with a SocketTimeoutException if they block longer than the given timeout. The source file, project, and license are noted above each example.
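For orientation, here is a minimal, hypothetical sketch of both overloads before the project examples. The class name, address, port, and payload are invented placeholders, and the sketch assumes a reachable peer; it is not taken from any of the projects below.

import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.channels.SocketChannel;

import org.apache.hadoop.net.NetUtils;

public class GetOutputStreamSketch {
  public static void main(String[] args) throws IOException {
    // Placeholder address; substitute a real host:port.
    InetSocketAddress addr = new InetSocketAddress("host.example", 50010);

    // A channel-backed socket is what lets NetUtils enforce write timeouts;
    // for a plain socket, getOutputStream() falls back to
    // socket.getOutputStream().
    Socket sock = SocketChannel.open().socket();
    try {
      NetUtils.connect(sock, addr, 15000); // 15s connect timeout
      sock.setSoTimeout(15000);            // 15s read timeout

      // The no-argument overload, NetUtils.getOutputStream(sock), returns a
      // stream whose writes may block indefinitely. The timed overload below
      // makes a write that blocks longer than 15s throw SocketTimeoutException.
      DataOutputStream out = new DataOutputStream(
          NetUtils.getOutputStream(sock, 15000L));
      out.writeShort(0); // placeholder payload
      out.flush();
    } finally {
      sock.close();
    }
  }
}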
Example 1
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example 2
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
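A note on the wrapping pattern that recurs throughout these examples: the unbuffered stream from NetUtils.getOutputStream(s, writeTimeout) is wrapped in a BufferedOutputStream, so the many small header writes go out together, and then in a DataOutputStream for the protocol's writeShort/writeLong primitives, with an explicit out.flush() once the request is assembled. Factored out, the pattern looks like the illustrative helper below; the helper class and its 4096-byte buffer are assumptions standing in for project constants such as HdfsConstants.SMALL_BUFFER_SIZE.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.net.NetUtils;

public final class StreamWrapping {
  private StreamWrapping() {}

  /**
   * Buffers the timed socket stream so small protocol writes are batched,
   * and exposes DataOutputStream primitives. Callers flush() after
   * assembling a complete request.
   */
  public static DataOutputStream wrap(Socket s, long writeTimeout)
      throws IOException {
    return new DataOutputStream(new BufferedOutputStream(
        NetUtils.getOutputStream(s, writeTimeout), 4096));
  }
}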
 
Example 3
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example 4
Source File: DFSTestUtil.java    From big-c with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example 5
Source File: BlockReader.java    From RDFS with Apache License 2.0
private void checksumOk(Socket sock) {
  try {
    OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT);
    byte buf[] = { (DataTransferProtocol.OP_STATUS_CHECKSUM_OK >>> 8) & 0xff,
                   (DataTransferProtocol.OP_STATUS_CHECKSUM_OK) & 0xff };
    out.write(buf);
    out.flush();
  } catch (IOException e) {
    // it's OK not to be able to send this.
    LOG.debug("Could not write to datanode " + sock.getInetAddress() +
              ": " + e.getMessage());
  }
}
 
Example 6
Source File: DataXceiver.java    From RDFS with Apache License 2.0
/**
 * Utility function for sending a response.
 * 
 * @param s
 *            socket to write to
 * @param opStatus
 *            status code to write
 * @param timeout
 *            send timeout
 **/
private void sendResponse(Socket s, short opStatus, long timeout)
		throws IOException {
	DataOutputStream reply = new DataOutputStream(NetUtils.getOutputStream(
			s, timeout));
	try {
		reply.writeShort(opStatus);
		reply.flush();
	} finally {
		IOUtils.closeStream(reply);
	}
}
 
Example 7
Source File: DFSClient.java    From hadoop-gpu with Apache License 2.0
private void checksumOk(Socket sock) {
  try {
    OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT);
    byte buf[] = { (DataTransferProtocol.OP_STATUS_CHECKSUM_OK >>> 8) & 0xff,
                   (DataTransferProtocol.OP_STATUS_CHECKSUM_OK) & 0xff };
    out.write(buf);
    out.flush();
  } catch (IOException e) {
    // it's OK not to be able to send this.
    LOG.debug("Could not write to datanode " + sock.getInetAddress() +
              ": " + e.getMessage());
  }
}
 
Example 8
Source File: DataXceiver.java    From hadoop-gpu with Apache License 2.0
/**
 * Utility function for sending a response.
 * @param s socket to write to
 * @param opStatus status code to write
 * @param timeout send timeout
 **/
private void sendResponse(Socket s, short opStatus, long timeout) 
                                                     throws IOException {
  DataOutputStream reply = 
    new DataOutputStream(NetUtils.getOutputStream(s, timeout));
  try {
    reply.writeShort(opStatus);
    reply.flush();
  } finally {
    IOUtils.closeStream(reply);
  }
}
 
Example 9
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * Send a generated block to a datanode.
 * 
 * @param datanode
 *            Chosen datanode name in host:port form.
 * @param blockContents
 *            Stream with the block contents.
 * @param block
 *            Block object identifying the block to be sent.
 * @param blockSize
 *            size of the block.
 * @param dataTransferVersion
 *            the data transfer version
 * @param namespaceId
 *            namespace id the block belongs to
 * @throws IOException
 */
private void sendReconstructedBlock(String datanode,
		final InputStream blockContents, DataInputStream metadataIn,
		Block block, long blockSize, int dataTransferVersion,
		int namespaceId, Progressable progress) throws IOException {
	InetSocketAddress target = NetUtils.createSocketAddr(datanode);
	Socket sock = SocketChannel.open().socket();

	int readTimeout = getConf().getInt(
			BlockIntegrityMonitor.BLOCKFIX_READ_TIMEOUT,
			HdfsConstants.READ_TIMEOUT);
	NetUtils.connect(sock, target, readTimeout);
	sock.setSoTimeout(readTimeout);

	int writeTimeout = getConf().getInt(
			BlockIntegrityMonitor.BLOCKFIX_WRITE_TIMEOUT,
			HdfsConstants.WRITE_TIMEOUT);

	OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
	DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
			baseStream, FSConstants.SMALL_BUFFER_SIZE));

	boolean corruptChecksumOk = false;
	boolean chunkOffsetOK = false;
	boolean verifyChecksum = true;
	boolean transferToAllowed = false;

	try {
		LOG.info("Sending block " + block + " from "
				+ sock.getLocalSocketAddress().toString() + " to "
				+ sock.getRemoteSocketAddress().toString());
		BlockSender blockSender = new BlockSender(namespaceId, block,
				blockSize, 0, blockSize, corruptChecksumOk, chunkOffsetOK,
				verifyChecksum, transferToAllowed, metadataIn,
				new BlockSender.InputStreamFactory() {
					@Override
					public InputStream createStream(long offset)
							throws IOException {
						// we are passing 0 as the offset above,
						// so we can safely ignore
						// the offset passed
						return blockContents;
					}
				});

		// Header info
		out.writeShort(dataTransferVersion);
		out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
		if (dataTransferVersion >= DataTransferProtocol.FEDERATION_VERSION) {
			out.writeInt(namespaceId);
		}
		out.writeLong(block.getBlockId());
		out.writeLong(block.getGenerationStamp());
		out.writeInt(0); // no pipelining
		out.writeBoolean(false); // not part of recovery
		Text.writeString(out, ""); // client
		out.writeBoolean(true); // sending src node information
		DatanodeInfo srcNode = new DatanodeInfo();
		srcNode.write(out); // Write src node DatanodeInfo
		// write targets
		out.writeInt(0); // num targets
		// send data & checksum
		blockSender.sendBlock(out, baseStream, null, progress);

		LOG.info("Sent block " + block + " to " + datanode);
	} finally {
		// Close the stream first (swallowing any exception) so the socket
		// still gets closed even if the final flush fails.
		IOUtils.closeStream(out);
		sock.close();
	}
}
 
Example 10
Source File: Client.java    From RDFS with Apache License 2.0
/** Connects to the server and sets up the I/O streams. It then sends
 * a header to the server and starts the connection thread that waits
 * for responses.
 */
private synchronized void setupIOstreamsWithInternal() {
  if (socket != null || shouldCloseConnection.get()) {
    return;
  }
  short ioFailures = 0;
  short timeoutFailures = 0;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to "+server);
    }
    while (true) {
      try {
        this.socket = socketFactory.createSocket();
        this.socket.setTcpNoDelay(tcpNoDelay);
        // connection timeout is 20s by default
        NetUtils.connect(this.socket, remoteId.getAddress(), connectTimeout);
        if (rpcTimeout > 0) {
          pingInterval = rpcTimeout;  // rpcTimeout overrides pingInterval
        }
        this.socket.setSoTimeout(pingInterval);
        break;
      } catch (SocketTimeoutException toe) {
        /* The max number of retries is 45,
         * which amounts to 45 * 20s = 15 minutes of retries.
         */
        handleConnectionFailure(timeoutFailures++, maxRetries, toe);
      } catch (IOException ie) {
        handleConnectionFailure(ioFailures++, maxRetries, ie);
      }
    }
    this.in = new DataInputStream(new BufferedInputStream
        (new PingInputStream(NetUtils.getInputStream(socket))));
    this.out = new DataOutputStream
        (new BufferedOutputStream(NetUtils.getOutputStream(socket)));
    writeHeader();

    // update last activity time
    touch();

    // start the receiver thread after the socket connection has been set up
    start();
  } catch (IOException e) {
    markClosed(e);
    close();
  }
}
 
Example 11
Source File: BlockReader.java    From RDFS with Apache License 2.0
public static BlockReader newBlockReader( int dataTransferVersion,
                                   int namespaceId,
                                   Socket sock, String file,
                                   long blockId,
                                   long genStamp,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName, long minSpeedBps)
                                   throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  DataOutputStream out = new DataOutputStream(
    new BufferedOutputStream(NetUtils.getOutputStream(sock,HdfsConstants.WRITE_TIMEOUT)));

  //write the header.
  ReadBlockHeader readBlockHeader = new ReadBlockHeader(
      dataTransferVersion, namespaceId, blockId, genStamp, startOffset, len,
      clientName);
  readBlockHeader.writeVersionAndOpCode(out);
  readBlockHeader.write(out);
  out.flush();

  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(NetUtils.getInputStream(sock),
                              bufferSize));

  if ( in.readShort() != DataTransferProtocol.OP_STATUS_SUCCESS ) {
    throw new IOException("Got error in response to OP_READ_BLOCK " +
                          "self=" + sock.getLocalSocketAddress() +
                          ", remote=" + sock.getRemoteSocketAddress() +
                          " for file " + file +
                          " for block " + blockId);
  }
  DataChecksum checksum = DataChecksum.newDataChecksum( in , new PureJavaCrc32());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = in.readLong();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " +
                          startOffset + " for file " + file);
  }

  return new BlockReader(file, blockId, in, checksum, verifyChecksum,
      startOffset, firstChunkOffset, sock, minSpeedBps, dataTransferVersion);
}
 
Example 12
Source File: DataXceiver.java    From RDFS with Apache License 2.0
private void xcodeBlock(DataInputStream in,
		VersionAndOpcode versionAndOpcode, MergeBlockHeader header,
		RecoverTreeNode treeNode) throws IOException {
	long startTime = System.currentTimeMillis();
	RecoverTreeNodeElement element = treeNode.getElement();
	int namespaceId = element.getNamespaceId();
	long blockId = element.getBlockId();
	Block block = new Block(blockId, 0, element.getGenStamp());
	long startOffset = header.getOffsetInBlock();
	long length = header.getLength();
	
	// xcode the block
	OutputStream baseStream = NetUtils.getOutputStream(s,
			datanode.socketWriteTimeout * header.getLevel());
	DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
			baseStream, SMALL_BUFFER_SIZE));

	BlockXCodingSender xcoder = null;
			
	updateCurrentThreadName("xcoding block " + block);
	try {
		try {
			/*
			xcoder = new BlockXCodingSender(namespaceId, block, startOffset,
					length, datanode.ignoreChecksumWhenRead, false, 
					!datanode.ignoreChecksumWhenRead, datanode, 
					element.getCoefficient());
			*/
			xcoder = new BlockXCodingSender(namespaceId, block, startOffset,
					length, datanode.ignoreChecksumWhenRead, true, 
					false, datanode, element.getCoefficient());
		} catch (IOException e) {
			LOG.error("NTar: Constructing xcoder failed. Telling parent site the error....");
			DataTransferPacket packet = new DataTransferPacket(4096);
			packet.dataLength = -1;
			packet.errMessage = s.getLocalAddress() + ":constructing xcoder failed:" 
					+ StringUtils.stringifyException(e); 
			packet.write(out);
			out.flush();
			throw e;
		}
		
		long read = xcoder.sendBlock(out, baseStream, null); 	// xcode and send data
		
		long readDuration = System.currentTimeMillis() - startTime;
		datanode.myMetrics.bytesReadLatency.inc(readDuration);
		datanode.myMetrics.bytesRead.inc((int) read);
		if (read > KB_RIGHT_SHIFT_MIN) {
			datanode.myMetrics.bytesReadRate.inc(
					(int) (read >> KB_RIGHT_SHIFT_BITS), readDuration);
		}
		datanode.myMetrics.blocksRead.inc();
		
	} catch (SocketException ignored) {
		LOG.error("NTar: Ignore exception while xcoding block. namespaceId: "
				+ namespaceId + " block: " + block
				+ " to " + remoteAddress + ": " + ignored.getMessage());
	} catch (IOException ioe) {
		/*
		 * What exactly should we do here? Earlier version shutdown()
		 * datanode if there is disk error.
		 */
		LOG.error("NTar: " + datanode.getDatanodeInfo()
				+ " :Got exception while xcoding " + " namespaceId: "
				+ namespaceId + " block: " + block + " to "
				+ s.getInetAddress() + ":\n"
				+ StringUtils.stringifyException(ioe));
	} finally {
		IOUtils.closeStream(out);
		IOUtils.closeStream(xcoder);
	}
}
 
Example 13
Source File: BlockReaderAccelerator.java    From RDFS with Apache License 2.0
/**
 * Return all the data in [startOffset, startOffset + length) in one shot.
 */
public ByteBuffer readAll() throws IOException {

  // in and out will be closed when sock is closed (by the caller)
  DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(NetUtils.getOutputStream(sock,HdfsConstants.WRITE_TIMEOUT)));

  //write the header.
  ReadBlockAccelaratorHeader readBlockAccelaratorHeader =
      new ReadBlockAccelaratorHeader(dataTransferVersion, namespaceId,
          blk.getBlock().getBlockId(), blk.getBlock().getGenerationStamp(),
          startOffset, length, clientName);
  readBlockAccelaratorHeader.writeVersionAndOpCode(out);
  readBlockAccelaratorHeader.write(out);
  out.flush();
  if (LOG.isDebugEnabled()) {
    LOG.debug("BlockReaderAccelerator client blkid " + blk.getBlock().getBlockId() +
              " offset " + startOffset + " length " + length);
  }

  in = new DataInputStream(NetUtils.getInputStream(sock));

  // read the checksum header. 
  // 1 byte of checksum type and 4 bytes of bytes-per-checksum
  byte[] cksumHeader = new byte[DataChecksum.HEADER_LEN];
  in.readFully(cksumHeader);
  DataChecksum dsum = DataChecksum.newDataChecksum(cksumHeader, 0);
  this.bytesPerChecksum = dsum.getBytesPerChecksum();

  // align the startOffset with the previous crc chunk
  long delta = startOffset % bytesPerChecksum;
  long newOffset = startOffset - delta;
  long newlength = length + delta;
 
  // align the length to encompass the entire last checksum chunk
  long del = newlength % bytesPerChecksum;
  if (del != 0) {
    del = bytesPerChecksum - del;
    newlength += del;
  }

  // find the number of checksum chunks
  long numChunks = newlength / bytesPerChecksum;
  long sizeChecksumData = numChunks * dsum.getChecksumSize();

  // read in all checksums and data in one shot.
  this.dataBuffer = new byte[(int)newlength + (int)sizeChecksumData];
  in.readFully(dataBuffer);
  if (LOG.isDebugEnabled()) {
    LOG.debug("BlockReaderAccelerator client read in " + dataBuffer.length + 
              " bytes.");
  }

  // verify checksums of all chunks
  if (this.verifyChecksum) {
    for (int i = 0; i < numChunks; i++) {
      long dataOffset = sizeChecksumData + i * bytesPerChecksum;
      checker.reset();
      checker.update(dataBuffer, (int)dataOffset, bytesPerChecksum);

      int ckOffset = i * dsum.getChecksumSize();
      long expected = FSInputChecker.checksum2long(dataBuffer, ckOffset, 
                       dsum.getChecksumSize());

      if (expected != checker.getValue()) {
        String msg = "Checksum failure for file " + hdfsfile +
                     " block " + blk.getBlock() +
                     " at blockoffet " + (startOffset + i * bytesPerChecksum) +
                     " chunk " + i +
                     " expected " + expected +
                     " got " + checker.getValue();
        LOG.warn(msg);
        throw new ChecksumException(msg, startOffset + i * bytesPerChecksum);
      }
    }
  }
  // The offset in the ByteBuffer skips over the
  // portion that stores the checksums. It also skips over the additional
  // data portion that was read while aligning with the previous chunk boundary
  return ByteBuffer.wrap(dataBuffer, 
                         (int)(sizeChecksumData + delta),
                         (int)length);
}
 
Example 14
Source File: Client.java    From hadoop-gpu with Apache License 2.0
/** Connects to the server and sets up the I/O streams. It then sends
 * a header to the server and starts the connection thread that waits
 * for responses.
 */
private synchronized void setupIOstreams() {
  if (socket != null || shouldCloseConnection.get()) {
    return;
  }
  
  short ioFailures = 0;
  short timeoutFailures = 0;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to "+server);
    }
    while (true) {
      try {
        this.socket = socketFactory.createSocket();
        this.socket.setTcpNoDelay(tcpNoDelay);
        // connection timeout is 20s
        NetUtils.connect(this.socket, remoteId.getAddress(), 20000);
        this.socket.setSoTimeout(pingInterval);
        break;
      } catch (SocketTimeoutException toe) {
        /* The max number of retries is 45,
         * which amounts to 45 * 20s = 15 minutes of retries.
         */
        handleConnectionFailure(timeoutFailures++, 45, toe);
      } catch (IOException ie) {
        handleConnectionFailure(ioFailures++, maxRetries, ie);
      }
    }
    this.in = new DataInputStream(new BufferedInputStream
        (new PingInputStream(NetUtils.getInputStream(socket))));
    this.out = new DataOutputStream
        (new BufferedOutputStream(NetUtils.getOutputStream(socket)));
    writeHeader();

    // update last activity time
    touch();

    // start the receiver thread after the socket connection has been set up
    start();
  } catch (IOException e) {
    markClosed(e);
    close();
  }
}
 
Example 15
Source File: DFSClient.java    From hadoop-gpu with Apache License 2.0 4 votes vote down vote up
public static BlockReader newBlockReader( Socket sock, String file,
                                   long blockId, 
                                   long genStamp,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName)
                                   throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  DataOutputStream out = new DataOutputStream(
    new BufferedOutputStream(NetUtils.getOutputStream(sock,HdfsConstants.WRITE_TIMEOUT)));

  //write the header.
  out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
  out.write( DataTransferProtocol.OP_READ_BLOCK );
  out.writeLong( blockId );
  out.writeLong( genStamp );
  out.writeLong( startOffset );
  out.writeLong( len );
  Text.writeString(out, clientName);
  out.flush();
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(NetUtils.getInputStream(sock), 
                              bufferSize));
  
  if ( in.readShort() != DataTransferProtocol.OP_STATUS_SUCCESS ) {
    throw new IOException("Got error in response to OP_READ_BLOCK " +
                          "for file " + file + 
                          " for block " + blockId);
  }
  DataChecksum checksum = DataChecksum.newDataChecksum( in );
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = in.readLong();
  
  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " + 
                          startOffset + " for file " + file);
  }

  return new BlockReader( file, blockId, in, checksum, verifyChecksum,
                          startOffset, firstChunkOffset, sock );
}
 
Example 16
Source File: DFSClient.java    From hadoop-gpu with Apache License 2.0
private boolean createBlockOutputStream(DatanodeInfo[] nodes, String client,
                boolean recoveryFlag) {
  String firstBadLink = "";
  if (LOG.isDebugEnabled()) {
    for (int i = 0; i < nodes.length; i++) {
      LOG.debug("pipeline = " + nodes[i].getName());
    }
  }

  // persist blocks on namenode on next flush
  persistBlocks = true;

  try {
    LOG.debug("Connecting to " + nodes[0].getName());
    InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
    s = socketFactory.createSocket();
    int timeoutValue = 3000 * nodes.length + socketTimeout;
    NetUtils.connect(s, target, timeoutValue);
    s.setSoTimeout(timeoutValue);
    s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    LOG.debug("Send buf size " + s.getSendBufferSize());
    long writeTimeout = HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length +
                        datanodeWriteTimeout;

    //
    // Xmit header info to datanode
    //
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout), 
                                 DataNode.SMALL_BUFFER_SIZE));
    blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));

    out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
    out.write( DataTransferProtocol.OP_WRITE_BLOCK );
    out.writeLong( block.getBlockId() );
    out.writeLong( block.getGenerationStamp() );
    out.writeInt( nodes.length );
    out.writeBoolean( recoveryFlag );       // recovery flag
    Text.writeString( out, client );
    out.writeBoolean(false); // Not sending src node information
    out.writeInt( nodes.length - 1 );
    for (int i = 1; i < nodes.length; i++) {
      nodes[i].write(out);
    }
    checksum.writeHeader( out );
    out.flush();

    // receive ack for connect
    firstBadLink = Text.readString(blockReplyStream);
    if (firstBadLink.length() != 0) {
      throw new IOException("Bad connect ack with firstBadLink " + firstBadLink);
    }

    blockStream = out;
    return true;     // success

  } catch (IOException ie) {

    LOG.info("Exception in createBlockOutputStream " + ie);

    // find the datanode that matches
    if (firstBadLink.length() != 0) {
      for (int i = 0; i < nodes.length; i++) {
        if (nodes[i].getName().equals(firstBadLink)) {
          errorIndex = i;
          break;
        }
      }
    }
    hasError = true;
    setLastException(ie);
    blockReplyStream = null;
    return false;  // error
  }
}