Java Code Examples for io.netty.buffer.ByteBuf#readShortLE()

The following examples show how to use io.netty.buffer.ByteBuf#readShortLE(), which reads a 16-bit short integer in little-endian byte order at the current reader index and increases the reader index by 2. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
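
Before diving into the project code, a minimal, self-contained sketch of the call itself may help. The class name ReadShortLEDemo and the sample bytes are purely illustrative:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReadShortLEDemo {
    public static void main(String[] args) {
        // The bytes 0x34 0x12, read in little-endian order, form the short value 0x1234 (4660).
        ByteBuf buf = Unpooled.wrappedBuffer(new byte[] { 0x34, 0x12 });

        short value = buf.readShortLE(); // consumes 2 bytes; readerIndex advances from 0 to 2
        System.out.println(value);       // prints 4660

        // Several examples below use readUnsignedShortLE() instead, which widens the result
        // to an int so that values above Short.MAX_VALUE are not interpreted as negative.

        buf.release(); // ByteBuf is reference-counted; release it when done
    }
}
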
Example 1
Source File: LocalDateCodec.java    From r2dbc-mysql with Apache License 2.0
@Nullable
static LocalDate readDateBinary(ByteBuf buf, int bytes) {
    if (bytes < DateTimes.DATE_SIZE) {
        return null;
    }

    short year = buf.readShortLE();
    byte month = buf.readByte();
    byte day = buf.readByte();

    if (month == 0 || day == 0) {
        return null;
    }

    return LocalDate.of(year, month, day);
}
 
Example 2
Source File: ShortCodec.java    From r2dbc-mysql with Apache License 2.0
@Override
public Short decode(ByteBuf value, FieldInformation info, Class<?> target, boolean binary, CodecContext context) {
    if (binary) {
        boolean isUnsigned = (info.getDefinitions() & ColumnDefinitions.UNSIGNED) != 0;

        switch (info.getType()) {
            case DataTypes.SMALLINT: // Overflow is already checked in `doCanDecode`
            case DataTypes.YEAR:
                return value.readShortLE();
            default: // TINYINT
                if (isUnsigned) {
                    return value.readUnsignedByte();
                } else {
                    return (short) value.readByte();
                }
        }
    } else {
        return (short) IntegerCodec.parse(value);
    }
}
 
Example 3
Source File: MSSQLDataTypeCodec.java    From vertx-sql-client with Apache License 2.0
private static Object decodeIntN(ByteBuf buffer) {
  int intNDataTypeLength = buffer.readByte();
  switch (intNDataTypeLength) {
    case 0:
      // this means we read a NULL value (nullable data type).
      return null;
    case 1:
      return buffer.readUnsignedByte();
    case 2:
      return buffer.readShortLE();
    case 4:
      return buffer.readIntLE();
    case 8:
      return buffer.readLongLE();
    default:
      throw new UnsupportedOperationException(String.format("SEVERE: Unsupported length=[%d] for decoding IntNDataType row value.", intNDataTypeLength));
  }
}
 
Example 4
Source File: DefinitionMetadataMessage.java    From r2dbc-mysql with Apache License 2.0
private static DefinitionMetadataMessage decode320(ByteBuf buf, ConnectionContext context) {
    CharCollation collation = context.getClientCollation();
    Charset charset = collation.getCharset();
    String table = readVarIntSizedString(buf, charset);
    String column = readVarIntSizedString(buf, charset);

    buf.skipBytes(1); // Constant 0x3
    int size = buf.readUnsignedMediumLE();

    buf.skipBytes(1); // Constant 0x1
    short type = buf.readUnsignedByte();

    buf.skipBytes(1); // Constant 0x3
    short definitions = buf.readShortLE();
    short decimals = buf.readUnsignedByte();

    return new DefinitionMetadataMessage(
        null,
        table,
        null,
        column,
        null,
        collation.getId(),
        size,
        type,
        definitions,
        decimals
    );
}
 
Example 5
Source File: ResourcePackClientResponseSerializer_v361.java    From Protocol with Apache License 2.0
@Override
public void deserialize(ByteBuf buffer, ResourcePackClientResponsePacket packet) {
    Status status = Status.values()[buffer.readByte()];
    packet.setStatus(status);

    List<String> packIds = packet.getPackIds();
    int packIdsCount = buffer.readShortLE();
    for (int i = 0; i < packIdsCount; i++) {
        packIds.add(BedrockUtils.readString(buffer));
    }
}
 
Example 6
Source File: ResourcePackClientResponseSerializer_v332.java    From Protocol with Apache License 2.0
@Override
public void deserialize(ByteBuf buffer, ResourcePackClientResponsePacket packet) {
    Status status = Status.values()[buffer.readByte()];
    packet.setStatus(status);

    List<String> packIds = packet.getPackIds();
    int packIdsCount = buffer.readShortLE();
    for (int i = 0; i < packIdsCount; i++) {
        packIds.add(BedrockUtils.readString(buffer));
    }
}
 
Example 7
Source File: ResourcePackClientResponseSerializer_v388.java    From Protocol with Apache License 2.0
@Override
public void deserialize(ByteBuf buffer, ResourcePackClientResponsePacket packet) {
    Status status = Status.values()[buffer.readByte()];
    packet.setStatus(status);

    List<String> packIds = packet.getPackIds();
    int packIdsCount = buffer.readShortLE();
    for (int i = 0; i < packIdsCount; i++) {
        packIds.add(BedrockUtils.readString(buffer));
    }
}
 
Example 8
Source File: SQLBatchCommandCodec.java    From vertx-sql-client with Apache License 2.0
@Override
void decodeMessage(TdsMessage message, TdsMessageEncoder encoder) {
  ByteBuf messageBody = message.content();
  while (messageBody.isReadable()) {
    int tokenByte = messageBody.readUnsignedByte();
    switch (tokenByte) {
      case DataPacketStreamTokenType.COLMETADATA_TOKEN:
        MSSQLRowDesc rowDesc = decodeColmetadataToken(messageBody);
        rowResultDecoder = new RowResultDecoder<>(cmd.collector(), rowDesc);
        break;
      case DataPacketStreamTokenType.ROW_TOKEN:
        handleRow(messageBody);
        break;
      case DataPacketStreamTokenType.NBCROW_TOKEN:
        handleNbcRow(messageBody);
        break;
      case DataPacketStreamTokenType.DONE_TOKEN:
        short status = messageBody.readShortLE();
        short curCmd = messageBody.readShortLE();
        long doneRowCount = messageBody.readLongLE();
        handleResultSetDone((int) doneRowCount);
        handleDoneToken();
        break;
      case DataPacketStreamTokenType.INFO_TOKEN:
        int infoTokenLength = messageBody.readUnsignedShortLE();
        //TODO not used for now
        messageBody.skipBytes(infoTokenLength);
        break;
      case DataPacketStreamTokenType.ERROR_TOKEN:
        handleErrorToken(messageBody);
        break;
      default:
        throw new UnsupportedOperationException("Unsupported token: " + tokenByte);
    }
  }
}
 
Example 9
Source File: ResourcePackClientResponseSerializer_v291.java    From Protocol with Apache License 2.0
@Override
public void deserialize(ByteBuf buffer, ResourcePackClientResponsePacket packet) {
    Status status = Status.values()[buffer.readByte()];
    packet.setStatus(status);

    List<String> packIds = packet.getPackIds();
    int packIdsCount = buffer.readShortLE();
    for (int i = 0; i < packIdsCount; i++) {
        packIds.add(BedrockUtils.readString(buffer));
    }
}
 
Example 10
Source File: ExtendedQueryCommandCodec.java    From vertx-sql-client with Apache License 2.0
@Override
void decodeMessage(TdsMessage message, TdsMessageEncoder encoder) {
  ByteBuf messageBody = message.content();
  while (messageBody.isReadable()) {
    int tokenByte = messageBody.readUnsignedByte();
    switch (tokenByte) {
      case DataPacketStreamTokenType.COLMETADATA_TOKEN:
        MSSQLRowDesc rowDesc = decodeColmetadataToken(messageBody);
        rowResultDecoder = new RowResultDecoder<>(cmd.collector(), rowDesc);
        break;
      case DataPacketStreamTokenType.ROW_TOKEN:
        handleRow(messageBody);
        break;
      case DataPacketStreamTokenType.NBCROW_TOKEN:
        handleNbcRow(messageBody);
        break;
      case DataPacketStreamTokenType.DONE_TOKEN:
        messageBody.skipBytes(12); // this should only be after ERROR_TOKEN?
        handleDoneToken();
        break;
      case DataPacketStreamTokenType.INFO_TOKEN:
        int infoTokenLength = messageBody.readUnsignedShortLE();
        //TODO not used for now
        messageBody.skipBytes(infoTokenLength);
        break;
      case DataPacketStreamTokenType.ERROR_TOKEN:
        handleErrorToken(messageBody);
        break;
      case DataPacketStreamTokenType.DONEINPROC_TOKEN:
        short status = messageBody.readShortLE();
        short curCmd = messageBody.readShortLE();
        long doneRowCount = messageBody.readLongLE();
        handleResultSetDone((int) doneRowCount);
        handleDoneToken();
        break;
      case DataPacketStreamTokenType.RETURNSTATUS_TOKEN:
        messageBody.skipBytes(4);
        break;
      case DataPacketStreamTokenType.RETURNVALUE_TOKEN:
        messageBody.skipBytes(messageBody.readableBytes()); // FIXME
        break;
      default:
        throw new UnsupportedOperationException("Unsupported token: " + tokenByte);
    }
  }
}
 
Example 11
Source File: BedrockUtils.java    From Protocol with Apache License 2.0
public static void readEntityData(ByteBuf buffer, EntityDataMap entityDataMap) {
    Preconditions.checkNotNull(buffer, "buffer");
    Preconditions.checkNotNull(entityDataMap, "entityDataDictionary");

    int length = VarInts.readUnsignedInt(buffer);

    for (int i = 0; i < length; i++) {
        int metadataInt = VarInts.readUnsignedInt(buffer);
        EntityData entityData = METADATAS.get(metadataInt);
        EntityData.Type type = METADATA_TYPES.get(VarInts.readUnsignedInt(buffer));
        if (entityData != null && entityData.isFlags()) {
            if (type != Type.LONG) {
                throw new IllegalArgumentException("Expected long value for flags, got " + type.name());
            }
            type = Type.FLAGS;
        }

        Object object;
        switch (type) {
            case BYTE:
                object = buffer.readByte();
                break;
            case SHORT:
                object = buffer.readShortLE();
                break;
            case INT:
                object = VarInts.readInt(buffer);
                break;
            case FLOAT:
                object = buffer.readFloatLE();
                break;
            case STRING:
                object = BedrockUtils.readString(buffer);
                break;
            case NBT:
                object = BedrockUtils.readItemData(buffer);
                break;
            case VECTOR3I:
                object = BedrockUtils.readVector3i(buffer);
                break;
            case FLAGS:
                int index = entityData == FLAGS_2 ? 1 : 0;
                entityDataMap.getOrCreateFlags().set(VarInts.readLong(buffer), index, METADATA_FLAGS);
                continue;
            case LONG:
                object = VarInts.readLong(buffer);
                break;
            case VECTOR3F:
                object = BedrockUtils.readVector3f(buffer);
                break;
            default:
                throw new IllegalArgumentException("Unknown entity data type received");
        }
        if (entityData != null) {
            entityDataMap.put(entityData, object);
        } else {
            log.debug("Unknown entity data: {} type {} value {}", metadataInt, type, object);
        }
    }
}
 
Example 12
Source File: QueryCommandBaseCodec.java    From vertx-sql-client with Apache License 2.0
private MSSQLDataType decodeDataTypeMetadata(ByteBuf payload) {
  int typeInfo = payload.readUnsignedByte();
  switch (typeInfo) {
    /*
     * FixedLen DataType
     */
    case INT1TYPE_ID:
      return FixedLenDataType.INT1TYPE;
    case INT2TYPE_ID:
      return FixedLenDataType.INT2TYPE;
    case INT4TYPE_ID:
      return FixedLenDataType.INT4TYPE;
    case INT8TYPE_ID:
      return FixedLenDataType.INT8TYPE;
    case FLT4TYPE_ID:
      return FixedLenDataType.FLT4TYPE;
    case FLT8TYPE_ID:
      return FixedLenDataType.FLT8TYPE;
    case BITTYPE_ID:
      return FixedLenDataType.BITTYPE;
    /*
     * Variable Length Data Type
     */
    case NUMERICNTYPE_ID:
    case DECIMALNTYPE_ID:
      short numericTypeSize = payload.readUnsignedByte();
      byte numericPrecision = payload.readByte();
      byte numericScale = payload.readByte();
      return new NumericDataType(NUMERICNTYPE_ID, Numeric.class, numericPrecision, numericScale);
    case INTNTYPE_ID:
      byte intNTypeLength = payload.readByte();
      return IntNDataType.valueOf(intNTypeLength);
    case FLTNTYPE_ID:
      byte fltNTypeLength = payload.readByte();
      return FloatNDataType.valueOf(fltNTypeLength);
    case BITNTYPE_ID:
      payload.skipBytes(1); // should only be 1
      return BitNDataType.BIT_1_DATA_TYPE;
    case DATENTYPE_ID:
      return FixedLenDataType.DATENTYPE;
    case TIMENTYPE_ID:
      byte scale = payload.readByte();
      return new TimeNDataType(scale);
    case BIGCHARTYPE_ID:
    case BIGVARCHRTYPE_ID:
      int size = payload.readUnsignedShortLE();
      short collateCodepage = payload.readShortLE();
      short collateFlags = payload.readShortLE();
      byte collateCharsetId = payload.readByte();
      return new TextWithCollationDataType(BIGVARCHRTYPE_ID, String.class, null);
    default:
      throw new UnsupportedOperationException("Unsupported type with typeinfo: " + typeInfo);
  }
}
 
Example 13
Source File: OkMessage.java    From r2dbc-mysql with Apache License 2.0
static OkMessage decode(ByteBuf buf, ConnectionContext context) {
    buf.skipBytes(1); // OK message header, 0x00 or 0xFE

    int capabilities = context.getCapabilities();
    long affectedRows = VarIntUtils.readVarInt(buf);
    long lastInsertId = VarIntUtils.readVarInt(buf);
    short serverStatuses;
    int warnings;

    if ((capabilities & Capabilities.PROTOCOL_41) != 0) {
        serverStatuses = buf.readShortLE();
        warnings = buf.readUnsignedShortLE();
    } else if ((capabilities & Capabilities.TRANSACTIONS) != 0) {
        serverStatuses = buf.readShortLE();
        warnings = 0;
    } else {
        warnings = serverStatuses = 0;
    }

    if (buf.isReadable()) {
        Charset charset = context.getClientCollation().getCharset();
        int sizeAfterVarInt = VarIntUtils.checkNextVarInt(buf);

        if (sizeAfterVarInt < 0) {
            return new OkMessage(affectedRows, lastInsertId, serverStatuses, warnings, buf.toString(charset));
        } else {
            int readerIndex = buf.readerIndex();
            long size = VarIntUtils.readVarInt(buf);
            String information;

            if (size > sizeAfterVarInt) {
                information = buf.toString(readerIndex, buf.writerIndex() - readerIndex, charset);
            } else {
                information = buf.toString(buf.readerIndex(), (int) size, charset);
            }
            // Ignore state information of session track, it is not human readable and useless for R2DBC client.
            return new OkMessage(affectedRows, lastInsertId, serverStatuses, warnings, information);
        }
    } else { // maybe have no human-readable message
        return new OkMessage(affectedRows, lastInsertId, serverStatuses, warnings, "");
    }
}
 
Example 14
Source File: MSSQLDataTypeCodec.java    From vertx-sql-client with Apache License 2.0
private static short decodeSmallInt(ByteBuf in) {
  return in.readShortLE();
}
 
Example 15
Source File: PrepareStatementCodec.java    From vertx-sql-client with Apache License 2.0
@Override
void decodePayload(ByteBuf payload, int payloadLength) {
  switch (commandHandlerState) {
    case INIT:
      int firstByte = payload.getUnsignedByte(payload.readerIndex());
      if (firstByte == ERROR_PACKET_HEADER) {
        handleErrorPacketPayload(payload);
      } else {
        // handle COM_STMT_PREPARE response
        payload.readUnsignedByte(); // 0x00: OK
        long statementId = payload.readUnsignedIntLE();
        int numberOfColumns = payload.readUnsignedShortLE();
        int numberOfParameters = payload.readUnsignedShortLE();
        payload.readByte(); // [00] filler
        int numberOfWarnings = payload.readShortLE();

        // handle metadata here
        this.statementId = statementId;
        this.paramDescs = new ColumnDefinition[numberOfParameters];
        this.columnDescs = new ColumnDefinition[numberOfColumns];

        if (numberOfParameters != 0) {
          processingIndex = 0;
          this.commandHandlerState = CommandHandlerState.HANDLING_PARAM_COLUMN_DEFINITION;
        } else if (numberOfColumns != 0) {
          processingIndex = 0;
          this.commandHandlerState = CommandHandlerState.HANDLING_COLUMN_COLUMN_DEFINITION;
        } else {
          handleReadyForQuery();
          resetIntermediaryResult();
        }
      }
      break;
    case HANDLING_PARAM_COLUMN_DEFINITION:
      paramDescs[processingIndex++] = decodeColumnDefinitionPacketPayload(payload);
      if (processingIndex == paramDescs.length) {
        if (isDeprecatingEofFlagEnabled()) {
          // we enabled the DEPRECATED_EOF flag and don't need to accept an EOF_Packet
          handleParamDefinitionsDecodingCompleted();
        } else {
          // we need to decode an EOF_Packet before handling rows, to be compatible with MySQL version below 5.7.5
          commandHandlerState = CommandHandlerState.PARAM_DEFINITIONS_DECODING_COMPLETED;
        }
      }
      break;
    case PARAM_DEFINITIONS_DECODING_COMPLETED:
      skipEofPacketIfNeeded(payload);
      handleParamDefinitionsDecodingCompleted();
      break;
    case HANDLING_COLUMN_COLUMN_DEFINITION:
      columnDescs[processingIndex++] = decodeColumnDefinitionPacketPayload(payload);
      if (processingIndex == columnDescs.length) {
        if (isDeprecatingEofFlagEnabled()) {
          // we enabled the DEPRECATED_EOF flag and don't need to accept an EOF_Packet
          handleColumnDefinitionsDecodingCompleted();
        } else {
          // we need to decode an EOF_Packet before handling rows, to be compatible with MySQL version below 5.7.5
          commandHandlerState = CommandHandlerState.COLUMN_DEFINITIONS_DECODING_COMPLETED;
        }
      }
      break;
    case COLUMN_DEFINITIONS_DECODING_COMPLETED:
      handleColumnDefinitionsDecodingCompleted();
      break;
  }
}
 
Example 16
Source File: SourceQueryPacketAssembler.java    From async-gamequery-lib with MIT License
/**
 * Process split-packet data
 *
 * @param data
 *         The {@link ByteBuf} containing the split-packet data
 * @param allocator
 *         The {@link ByteBufAllocator} used to create/allocate pooled buffers
 *
 * @return Returns a non-null {@link ByteBuf} if the split-packets have been assembled. Null if the
 *
 * @throws Exception
 */
private ByteBuf processSplitPackets(ByteBuf data, ByteBufAllocator allocator, InetSocketAddress senderAddress) throws Exception {
    int packetCount, packetNumber, requestId, splitSize, packetChecksum = 0;
    boolean isCompressed;

    //Start processing
    requestId = data.readIntLE();
    //the most significant bit indicates whether the response is compressed
    isCompressed = ((requestId & 0x80000000) != 0);
    //The total number of packets in the response.
    packetCount = data.readByte();
    //The number of the packet. Starts at 0.
    packetNumber = data.readByte();

    //Create our key for this request (request id + sender ip)
    final SplitPacketKey key = new SplitPacketKey(requestId, senderAddress);

    log.debug("Processing split packet {}", key);

    log.debug("Split Packet Received = (AbstractRequest {}, Packet Number {}, Packet Count {}, Is Compressed: {})", requestId, packetNumber, packetCount, isCompressed);

    //Try to retrieve the split packet container for this request (if existing)
    //If request is not yet on the map, create and retrieve
    SplitPacketContainer splitPackets = this.requestMap.computeIfAbsent(key, k -> new SplitPacketContainer(packetCount));

    //As per protocol specs, the size is only present in the first packet of the response and only if the response is being compressed.
    //split size = Maximum size of packet before packet switching occurs. The default value is 1248 bytes (0x04E0).
    if (isCompressed) {
        splitSize = data.readIntLE();
        packetChecksum = data.readIntLE();
    } else {
        splitSize = data.readShortLE();
    }

    //TODO: Handle compressed split packets
    int bufferSize = Math.min(splitSize, data.readableBytes());
    byte[] splitPacket = new byte[bufferSize];
    data.readBytes(splitPacket); //transfer the split data into this buffer

    //Add the split packet to the container
    splitPackets.addPacket(packetNumber, splitPacket);

    //Have we received all packets for this request?
    if (splitPackets.isComplete()) {
        log.debug("Split Packets have all been successfully received from AbstractRequest {}. Re-assembling packets.", requestId);

        //Retrieve total split packets received based on their length
        int packetSize = splitPackets.getPacketSize();
        //Allocate a new buffer to store the re-assembled packets
        final ByteBuf packetBuffer = allocator.buffer(packetSize);
        boolean done = false;
        try {
            //Start re-assembling split-packets from the container
            done = reassembleSplitPackets(splitPackets, packetBuffer, isCompressed, splitSize, packetChecksum);
        } catch (Exception e) {
            //If an error occurs during re-assembly, make sure we release the allocated buffer
            packetBuffer.release();
            throw e;
        } finally {
            if (done)
                requestMap.remove(key);
        }

        return packetBuffer;
    }

    //Return null, indicating that we still don't have a complete packet
    return null;
}
 
Example 17
Source File: DefinitionMetadataMessage.java    From r2dbc-mysql with Apache License 2.0
private static DefinitionMetadataMessage decode41(ByteBuf buf, ConnectionContext context) {
    buf.skipBytes(4); // "def" which sized by var integer

    CharCollation collation = context.getClientCollation();
    Charset charset = collation.getCharset();
    String database = readVarIntSizedString(buf, charset);
    String table = readVarIntSizedString(buf, charset);
    String originTable = readVarIntSizedString(buf, charset);
    String column = readVarIntSizedString(buf, charset);
    String originColumn = readVarIntSizedString(buf, charset);

    VarIntUtils.readVarInt(buf); // skip constant 0x0c encoded by var integer

    int collationId = buf.readUnsignedShortLE();
    long size = buf.readUnsignedIntLE();
    short type = buf.readUnsignedByte();
    short definitions = buf.readShortLE();

    if (DataTypes.JSON == type && collationId == CharCollation.BINARY_ID) {
        collationId = collation.getId();
    }

    if ((definitions & ColumnDefinitions.SET) != 0) {
        // Maybe need to check if it is a string-like type?
        type = DataTypes.SET;
    } else if ((definitions & ColumnDefinitions.ENUMERABLE) != 0) {
        // Maybe need to check if it is a string-like type?
        type = DataTypes.ENUMERABLE;
    }

    return new DefinitionMetadataMessage(
        database,
        table,
        originTable,
        column,
        originColumn,
        collationId,
        size,
        type,
        definitions,
        buf.readUnsignedByte()
    );
}
 
Example 18
Source File: Eof41Message.java    From r2dbc-mysql with Apache License 2.0
static Eof41Message decode(ByteBuf buf) {
    buf.skipBytes(1); // skip generic header 0xFE of EOF messages

    int warnings = buf.readUnsignedShortLE();
    return new Eof41Message(warnings, buf.readShortLE());
}