Java Code Examples for io.netty.buffer.ByteBuf#setShort()

The following examples show how to use io.netty.buffer.ByteBuf#setShort(). They are drawn from open source projects; the originating source file and license are noted above each example.
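
Unlike writeShort(int value), which appends at the current writerIndex, setShort(int index, int value) writes the 16 low-order bits of the value at an absolute index and never moves readerIndex or writerIndex (it only requires index + 2 <= capacity); setShortLE() is the little-endian counterpart. Below is a minimal sketch of these semantics, not tied to any of the projects that follow.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class SetShortBasics {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(8);
        buf.writeShort(0xCAFE);
        buf.writeShort(0xBABE);

        int before = buf.writerIndex();                  // 4
        buf.setShort(2, 0x1234);                         // overwrite the second short in place
        System.out.println(buf.writerIndex() == before); // true: setShort() leaves the indices untouched
        System.out.println(Integer.toHexString(buf.getShort(2) & 0xFFFF)); // 1234
        buf.release();
    }
}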
Example 1
Source File: SQLBatchCommandCodec.java    From vertx-sql-client with Apache License 2.0
private void sendBatchClientRequest() {
  ChannelHandlerContext chctx = encoder.chctx;

  ByteBuf packet = chctx.alloc().ioBuffer();

  // packet header
  packet.writeByte(MessageType.SQL_BATCH.value());
  packet.writeByte(MessageStatus.NORMAL.value() | MessageStatus.END_OF_MESSAGE.value());
  int packetLenIdx = packet.writerIndex();
  packet.writeShort(0); // set length later
  packet.writeShort(0x00);
  packet.writeByte(0x00); // FIXME packet ID
  packet.writeByte(0x00);

  int start = packet.writerIndex();
  packet.writeIntLE(0x00); // TotalLength for ALL_HEADERS
  encodeTransactionDescriptor(packet, 0, 1);
  // set TotalLength for ALL_HEADERS
  packet.setIntLE(start, packet.writerIndex() - start);

  // SQLText
  packet.writeCharSequence(cmd.sql(), StandardCharsets.UTF_16LE);

  int packetLen = packet.writerIndex() - packetLenIdx + 2;
  packet.setShort(packetLenIdx, packetLen);

  chctx.writeAndFlush(packet);
}
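
This example and Example 3 below share the same idiom: writeShort(0) reserves a 2-byte slot for the packet length, the rest of the packet is written, and setShort() back-fills the real size once it is known. The "+ 2" in packetLen accounts for the Type and Status bytes written before packetLenIdx, so the stored value covers the whole packet including its header. A minimal sketch of this reserve-and-backfill idiom for a hypothetical [type][length][payload] frame follows; the frame layout and names are illustrative, not taken from vertx-sql-client.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class LengthBackfillSketch {
    // Hypothetical frame: [type: 1 byte][length: 2 bytes big-endian, counts the whole frame][payload].
    static ByteBuf encodeFrame(byte type, byte[] payload) {
        ByteBuf frame = Unpooled.buffer();
        frame.writeByte(type);
        int lengthIdx = frame.writerIndex();
        frame.writeShort(0);                 // reserve the length slot, fill it in later
        frame.writeBytes(payload);
        // + 1 accounts for the type byte written before the length slot,
        // so the stored value is the size of the entire frame.
        frame.setShort(lengthIdx, frame.writerIndex() - lengthIdx + 1);
        return frame;
    }

    public static void main(String[] args) {
        ByteBuf f = encodeFrame((byte) 0x01, new byte[]{10, 20, 30});
        System.out.println(f.getShort(1));   // 6 = 1 type + 2 length + 3 payload
        f.release();
    }
}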
 
Example 2
Source File: FecEncode.java    From java-Kcp with Apache License 2.0
/**
 * Usage:
 * 1. Pass a ByteBuf in, send that ByteBuf out, then release it.
 * 2. Check whether the return value is null; if it is not null, send the returned buffers out and release them.
 *
 * Layout: headerOffset + 6-byte FEC header + 2-byte body length (length - headerOffset - 6)
 *
 * Steps:
 * 1. Mark the header of the data as the data type (markData).
 * 2. Write the message length.
 * 3. Find the maximum length among the cached buffers and grow the others to the same length.
 * 4. Skip the header and run the FEC encoding.
 * 5. Mark the redundant (parity) byte arrays as FEC (markParity).
 * 6. Return the full-length parity buffers.
 *
 * Note: if the ByteBuf passed in needs to be released, release it manually after the call.
 * The returned ByteBufs must also be released by the caller.
 *
 * @param byteBuf the data shard to encode
 * @return the parity shards once dataShards buffers have been collected, otherwise null
 */
public ByteBuf[] encode(ByteBuf byteBuf){
    markData(byteBuf,headerOffset);
    int sz = byteBuf.writerIndex();
    byteBuf.setShort(payloadOffset,sz-headerOffset- Fec.fecHeaderSizePlus2);
    this.shardCache[shardCount] = byteBuf.retainedDuplicate();
    this.shardCount ++;
    if (sz > this.maxSize) {
        this.maxSize = sz;
    }
    if(shardCount!=dataShards) {
        return null;
    }
    //fill in the parityShards
    for (int i = 0; i < parityShards; i++) {
        ByteBuf parityByte = ByteBufAllocator.DEFAULT.buffer(this.maxSize);
        shardCache[i+dataShards]  = parityByte;
        encodeCache[i] = parityByte;
        markParity(parityByte,headerOffset);
        parityByte.writerIndex(this.maxSize);
    }

    //pad each data shard with zeros up to the maximum length
    for (int i = 0; i < this.dataShards; i++) {
        ByteBuf shard = shardCache[i];
        int left = this.maxSize-shard.writerIndex();
        if(left<=0) {
            continue;
        }
        //does the shard ever need to grow? can that actually happen??
        //if(shard.capacity()<this.maxSize){
        //    ByteBuf newByteBuf = ByteBufAllocator.DEFAULT.buffer(this.maxSize);
        //    newByteBuf.writeBytes(shard);
        //    shard.release();
        //    shard = newByteBuf;
        //    shardCache[i] = shard;
        //}
        shard.writeBytes(zeros,left);
        zeros.readerIndex(0);
    }
    codec.encodeParity(shardCache,payloadOffset,this.maxSize-payloadOffset);
    //release the dataShards
    for (int i = 0; i < dataShards; i++) {
        this.shardCache[i].release();
        this.shardCache[i]=null;
    }
    this.shardCount = 0;
    this.maxSize = 0;
    return this.encodeCache;
}
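
Following the Javadoc above, a typical call site looks roughly like the sketch below. The send() helper is a placeholder for whatever transport the application uses, and FecEncode construction is omitted; none of this is code from the java-Kcp project.

import io.netty.buffer.ByteBuf;

public class FecEncodeUsageSketch {
    // Placeholder for the actual transport (e.g. writing a datagram); not project code.
    static void send(ByteBuf buf) { /* no-op in this sketch */ }

    static void sendWithFec(FecEncode fecEncode, ByteBuf data) {
        ByteBuf[] parity = fecEncode.encode(data); // null until dataShards buffers have been queued
        send(data);       // 1. send the data shard out ...
        data.release();   // ... and release it; encode() keeps its own retained duplicate
        if (parity != null) {
            for (ByteBuf p : parity) {
                send(p);      // 2. send each parity shard ...
                p.release();  // ... and release it; the returned buffers belong to the caller
            }
        }
    }
}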
 
Example 3
Source File: ExtendedQueryCommandCodec.java    From vertx-sql-client with Apache License 2.0
private void sendPrepexecRequest() {
  ChannelHandlerContext chctx = encoder.chctx;

  ByteBuf packet = chctx.alloc().ioBuffer();

  // packet header
  packet.writeByte(MessageType.RPC.value());
  packet.writeByte(MessageStatus.NORMAL.value() | MessageStatus.END_OF_MESSAGE.value());
  int packetLenIdx = packet.writerIndex();
  packet.writeShort(0); // set length later
  packet.writeShort(0x00);
  packet.writeByte(0x00); // FIXME packet ID
  packet.writeByte(0x00);

  int start = packet.writerIndex();
  packet.writeIntLE(0x00); // TotalLength for ALL_HEADERS
  encodeTransactionDescriptor(packet, 0, 1);
  // set TotalLength for ALL_HEADERS
  packet.setIntLE(start, packet.writerIndex() - start);

  /*
    RPCReqBatch
   */
  packet.writeShortLE(0xFFFF);
  packet.writeShortLE(ProcId.Sp_PrepExec);

  // Option flags
  packet.writeShortLE(0x0000);

  // Parameter

  // OUT Parameter
  packet.writeByte(0x00);
  packet.writeByte(0x01); // By reference
  packet.writeByte(MSSQLDataTypeId.INTNTYPE_ID);
  packet.writeByte(0x04);
  packet.writeByte(0x04);
  packet.writeIntLE(0x00);

  Tuple params = cmd.params();

  // Param definitions
  String paramDefinitions = parseParamDefinitions(params);
  encodeNVarcharParameter(packet, paramDefinitions);

  // SQL text
  encodeNVarcharParameter(packet, cmd.sql());

  // Param values
  for (int i = 0; i < params.size(); i++) {
    encodeParamValue(packet, params.getValue(i));
  }

  int packetLen = packet.writerIndex() - packetLenIdx + 2;
  packet.setShort(packetLenIdx, packetLen);

  chctx.writeAndFlush(packet);
}
 
Example 4
Source File: Packet.java    From riiablo with Apache License 2.0
static void setSEQ(ByteBuf bb, int value) {
  bb.setShort(SEQ_OFFSET, value);
}
 
Example 5
Source File: Packet.java    From riiablo with Apache License 2.0
static void setACK(ByteBuf bb, int value) {
  bb.setShort(ACK_OFFSET, value);
}
 
Example 6
Source File: Packet.java    From riiablo with Apache License 2.0
static void setContentSize(ByteBuf bb, int value) {
  Validate.isTrue(value <= 0xFFFF, "cannot encode content size as ushort, src.remaining()=" + value);
  bb.setShort(CONTENT_SIZE_OFFSET, value);
}
 
Example 7
Source File: Packet.java    From riiablo with Apache License 2.0
static void setFragmentSize(ByteBuf bb, int value) {
  bb.setShort(FRAGSIZE_OFFSET, value);
}
 
Example 8
Source File: ChunkManager.java    From ThinkMap with Apache License 2.0
private void gzipChunk(ChunkSnapshot chunk, ByteBuf out) {
    int mask = 0;
    int count = 0;
    for (int i = 0; i < 16; i++) {
        if (!chunk.isSectionEmpty(i)) {
            mask |= 1 << i;
            count++;
        }
    }
    ByteBuf data = allocator.buffer(16 * 16 * 16 * 4 * count + 3 + 256);
    data.writeByte(1); // The chunk exists
    data.writeShort(mask);
    int offset = 0;
    int blockDataOffset = 16 * 16 * 16 * 2 * count;
    int skyDataOffset = blockDataOffset + 16 * 16 * 16 * count;
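    // Buffer layout: [1-byte exists flag][2-byte section mask][2 bytes per block id/data]
    // [1 byte emitted light per block][1 byte sky light per block][256 biome bytes];
    // hence the "+ 3" below skips the flag and mask, and block entries use (offset << 1).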
    for (int i = 0; i < 16; i++) {
        if (!chunk.isSectionEmpty(i)) {
            for (int oy = 0; oy < 16; oy++) {
                for (int oz = 0; oz < 16; oz++) {
                    for (int ox = 0; ox < 16; ox++) {
                        int y = oy + (i << 4);
                        int id = chunk.getBlockTypeId(ox, y, oz);
                        int dValue = chunk.getBlockData(ox, y, oz);
                        data.setShort((offset << 1) + 3, (id << 4) | dValue);

                        data.setByte(blockDataOffset + offset + 3, chunk.getBlockEmittedLight(ox, y, oz));
                        data.setByte(skyDataOffset + offset + 3, chunk.getBlockSkyLight(ox, y, oz));

                        offset++;
                    }
                }
            }
        }
    }
    for (int x = 0; x < 16; x++) {
        for (int z = 0; z < 16; z++) {
            data.setByte(skyDataOffset + offset + 3 + x + z * 16, ThinkBiome.bukkitToId(chunk.getBiome(x, z)));
        }
    }
    data.writerIndex(data.capacity());
    try {
        GZIPOutputStream gzip = new GZIPOutputStream(new ByteBufOutputStream(out));
        byte[] bytes = new byte[data.readableBytes()];
        data.readBytes(bytes);
        gzip.write(bytes);
        gzip.close();
    } catch (IOException e) {
        throw new RuntimeException(e); // preserve the IOException as the cause
    } finally {
        data.release();
    }
}