Java Code Examples for com.datastax.driver.core.Row#isNull()

The following examples show how to use com.datastax.driver.core.Row#isNull(). They are taken from open source projects; the source file and originating project are noted above each example.
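 
As a quick orientation before the project examples, the following minimal sketch (not taken from any of the projects listed below) shows the two overloads of Row#isNull(): one accepting a zero-based column index and one accepting a column name. The check matters because the driver's primitive getters such as getLong() and getBool() return 0 or false rather than null for an unset column. The contact point, keyspace, table, and column names here are made up for illustration.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class RowIsNullSketch {
    public static void main(String[] args) {
        // Hypothetical contact point and keyspace, purely for illustration.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("demo_ks")) {
            // Hypothetical table with an id (uuid) and a nullable error_count (bigint) column.
            ResultSet rs = session.execute("SELECT id, error_count FROM events");
            for (Row row : rs) {
                // By name: distinguish a missing value from a stored zero,
                // since getLong(...) returns 0 when the column is null.
                Long errorCount = row.isNull("error_count") ? null : row.getLong("error_count");
                // By zero-based index: the same check using the column position.
                boolean hasErrorCount = !row.isNull(1);
                System.out.println(row.getUUID("id") + " -> " + errorCount + " (present=" + hasErrorCount + ")");
            }
        }
    }
}
 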
Example 1
Source File: VoiceDAO.java    From arcusplatform with Apache License 2.0
public Set<String> readAssistants(UUID placeId) {
   try(Timer.Context ctxt = readAssistantsTimer.time()) {
      ResultSet rs = session.execute(new BoundStatement(readAssistants).bind(placeId));
      Row r = rs.one();
      if(r == null) {
         return ImmutableSet.of();
      }

      // read repair google home
      Set<String> authorizations = new HashSet<>(r.getSet(Columns.voiceAssistants.name(), String.class));
      if(!r.isNull(Columns.googlehome.name()) && r.getBool(Columns.googlehome.name()) && !authorizations.contains(StartPlaceRequest.ASSISTANT_GOOGLE)) {
         recordAssistant(placeId, StartPlaceRequest.ASSISTANT_GOOGLE);
         authorizations.add(StartPlaceRequest.ASSISTANT_GOOGLE);
      }
      return authorizations;
   }
}
 
Example 2
Source File: BaseRuleEnvironmentDaoImpl.java    From arcusplatform with Apache License 2.0
protected int nextId(UUID placeId) {
   Preconditions.checkNotNull(placeId, "Must specify a place id");

   Integer currentId;
   try(Context c = metrics.startCurrentSequenceTimer()) {
      BoundStatement bs = currentSequenceNumber.bind(placeId);
      Row row = session.execute( bs ).one();
      if(row == null) {
         // TODO not found exception
         throw new IllegalArgumentException("No place with id [" + placeId + "] exists");
      }
      if(row.isNull(sequenceName)) {
         currentId = null;
      }
      else {
         currentId = row.getInt(sequenceName);
      }
   }
   return nextId(placeId, currentId);
}
 
Example 3
Source File: CassandraPairingDeviceDao.java    From arcusplatform with Apache License 2.0
@Override
protected PairingDevice toModel(Row row) {
   // strange side-effect of using a static column is that there is a special row with a null protocoladdress that holds static value when all other rows have been deleted
   if(row.isNull(PairingDeviceTable.Column.protocolAddress.name())) {
      return null;
   }

   Map<String, Object> attributes = decode( row.getMap(PairingDeviceTable.Column.attributes.name(), String.class, String.class) );
   // construct with attributes so those fields aren't marked as dirty
   // the existence of the mock attribute indicates this is a mock or not
   PairingDevice entity = attributes.containsKey(PairingDeviceMockCapability.ATTR_TARGETPRODUCTADDRESS) ? new PairingDeviceMock(attributes) : new PairingDevice(attributes);
   entity.setId( row.getUUID(PairingDeviceTable.Column.placeId.name()), row.getInt(PairingDeviceTable.Column.sequenceId.name()) );
   entity.setProtocolAddress( (DeviceProtocolAddress) Address.fromString( row.getString(PairingDeviceTable.Column.protocolAddress.name()) ) );
   entity.setModified( row.getTimestamp(PairingDeviceTable.Column.modified.name()) );
   entity.setCreated( row.getTimestamp(PairingDeviceTable.Column.created.name()) );
   entity.setPopulation(populationCacheMgr.getPopulationByPlaceId(entity.getPlaceId()));
   return entity;
}
 
Example 4
Source File: AggregateDaoImpl.java    From glowroot with Apache License 2.0
private ListenableFuture<?> rollupQueriesFromRows(RollupParams rollup, AggregateQuery query,
        Iterable<Row> rows) throws Exception {
    QueryCollector collector =
            new QueryCollector(rollup.maxQueryAggregatesPerTransactionAggregate());
    for (Row row : rows) {
        int i = 0;
        String queryType = checkNotNull(row.getString(i++));
        String truncatedText = checkNotNull(row.getString(i++));
        // full_query_text_sha1 cannot be null since it is used in clustering key
        String fullTextSha1 = Strings.emptyToNull(row.getString(i++));
        double totalDurationNanos = row.getDouble(i++);
        long executionCount = row.getLong(i++);
        boolean hasTotalRows = !row.isNull(i);
        long totalRows = row.getLong(i++);
        collector.mergeQuery(queryType, truncatedText, fullTextSha1, totalDurationNanos,
                executionCount, hasTotalRows, totalRows);
    }
    return insertQueries(collector.getSortedAndTruncatedQueries(), rollup.rollupLevel(),
            rollup.agentRollupId(), query.transactionType(), query.transactionName(),
            query.to(), rollup.adjustedTTL());
}
 
Example 5
Source File: AggregateDaoImpl.java    From glowroot with Apache License 2.0
@Override
public List<ThroughputAggregate> readThroughputAggregates(String agentRollupId,
        AggregateQuery query) throws Exception {
    ResultSet results = executeQuery(agentRollupId, query, throughputTable);
    List<ThroughputAggregate> throughputAggregates = new ArrayList<>();
    for (Row row : results) {
        int i = 0;
        long captureTime = checkNotNull(row.getTimestamp(i++)).getTime();
        long transactionCount = row.getLong(i++);
        boolean hasErrorCount = !row.isNull(i);
        long errorCount = row.getLong(i++);
        throughputAggregates.add(ImmutableThroughputAggregate.builder()
                .captureTime(captureTime)
                .transactionCount(transactionCount)
                .errorCount(hasErrorCount ? errorCount : null)
                .build());
    }
    return throughputAggregates;
}
 
Example 6
Source File: AggregateDaoImpl.java    From glowroot with Apache License 2.0
@Override
public void mergeQueriesInto(String agentRollupId, AggregateQuery query,
        QueryCollector collector) throws Exception {
    ResultSet results = executeQuery(agentRollupId, query, queryTable);
    long captureTime = Long.MIN_VALUE;
    for (Row row : results) {
        int i = 0;
        captureTime = Math.max(captureTime, checkNotNull(row.getTimestamp(i++)).getTime());
        String queryType = checkNotNull(row.getString(i++));
        String truncatedText = checkNotNull(row.getString(i++));
        // full_query_text_sha1 cannot be null since it is used in clustering key
        String fullTextSha1 = Strings.emptyToNull(row.getString(i++));
        double totalDurationNanos = row.getDouble(i++);
        long executionCount = row.getLong(i++);
        boolean hasTotalRows = !row.isNull(i);
        long totalRows = row.getLong(i++);
        collector.mergeQuery(queryType, truncatedText, fullTextSha1, totalDurationNanos,
                executionCount, hasTotalRows, totalRows);
        collector.updateLastCaptureTime(captureTime);
    }
}
 
Example 7
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void removeInvalidAgentRollupRows() throws Exception {
    ResultSet results =
            session.read("select agent_rollup_id, agent from agent_rollup");
    PreparedStatement deletePS =
            session.prepare("delete from agent_rollup where one = 1 and agent_rollup_id = ?");
    for (Row row : results) {
        if (row.isNull(1)) {
            BoundStatement boundStatement = deletePS.bind();
            boundStatement.setString(0, checkNotNull(row.getString(0)));
            session.write(boundStatement);
        }
    }
}
 
Example 8
Source File: CassandraAlarmIncidentDAO.java    From arcusplatform with Apache License 2.0
private AlarmIncident buildIncident(Row r) {
   if(r == null) {
      return null;
   }
   try {
      List<TrackerEvent> events = r.getList(Column.tracker.name(), String.class).stream().map((s) -> new TrackerEvent(JSON.fromJson(s, trackerMarker))).collect(Collectors.toList());
      AlarmIncident.Builder builder = AlarmIncident.builder()
         .withAlert(AlertType.valueOf(r.getString(Column.alert.name())))
         .withMonitoringState(AlarmIncident.MonitoringState.valueOf(r.getString(Column.monitoringstate.name())))
         .withId(r.getUUID(Column.incidentid.name()))
         .withAlertState(AlarmIncident.AlertState.valueOf(r.getString(Column.alertstate.name())))
         .withPrealertEndTime(r.getTimestamp(Column.prealertendtime.name()))
         .withEndTime(r.getTimestamp(Column.endtime.name()))
         .withPlaceId(r.getUUID(Column.placeid.name()))
         .withMonitored(r.getBool(Column.monitored.name()))
         .withMockIncident(r.getBool(Column.mockincident.name()))
         .addActiveAlertIds(r.getSet(Column.activealerts.name(), UUID.class))
         .addAdditionalAlerts(r.getSet(Column.additionalalerts.name(), String.class).stream().map(AlertType::valueOf).collect(Collectors.toSet()))
         .addTrackerEvents(events)
         .withConfirmed(r.getBool(Column.confirmed.name()));
      
      if(!r.isNull(Column.cancelledby.name())) {
         builder.withCancelledBy(Address.fromString(r.getString(Column.cancelledby.name())));
      }
      if(!r.isNull(Column.platformstate.name())) {
         builder.withPlatformAlertState(AlarmIncident.AlertState.valueOf(r.getString(Column.platformstate.name())));
      }
      if(!r.isNull(Column.hubstate.name())) {
         builder.withHubAlertState(AlarmIncident.AlertState.valueOf(r.getString(Column.hubstate.name())));
      }

      return builder.build();
   }
   catch(Exception e) {
      logger.warn("Invalid row [{}]", r, e);
      return null;
   }
}
 
Example 9
Source File: HubRegistrationDAOImpl.java    From arcusplatform with Apache License 2.0
@Override
protected void populateEntity(Row row, HubRegistration entity) {
   entity.setLastConnected(row.getTimestamp(EntityColumns.LAST_CONNECTED));
   if(!row.isNull(EntityColumns.STATE)) {
      entity.setState(HubRegistration.RegistrationState.valueOf(row.getString(EntityColumns.STATE)));
   }
   entity.setUpgradeRequestTime(row.getTimestamp(EntityColumns.UPGRADE_REQUEST_TIME));
   entity.setFirmwareVersion(row.getString(EntityColumns.FIRMWARE_VERSION));
   entity.setTargetVersion(row.getString(EntityColumns.TARGET_VERSION));
   entity.setUpgradeErrorCode(row.getString(EntityColumns.UPGRADE_ERROR_CODE));
   entity.setUpgradeErrorMessage(row.getString(EntityColumns.UPGRADE_ERROR_MSG));
   entity.setDownloadProgress(row.getInt(EntityColumns.DOWNLOAD_PROGRESS));
   entity.setUpgradeErrorTime(row.getTimestamp(EntityColumns.UPGRADE_ERROR_TIME));
}
 
Example 10
Source File: PlacePurgeRecordingTable.java    From arcusplatform with Apache License 2.0
public PlacePurgeRecord buildEntity(Row row) {
   PurgeMode mode = PurgeMode.ALL;
   if(!row.isNull(COL_MODE)) {
      mode = PurgeMode.valueOf(row.getString(COL_MODE));
   }
   return new PlacePurgeRecord(row.getUUID(COL_PLACEID), row.getTimestamp(COL_DELETE_TIME), mode);
}
 
Example 11
Source File: AggregateDaoImpl.java    From glowroot with Apache License 2.0
private ListenableFuture<?> rollupThroughputFromRows(RollupParams rollup, AggregateQuery query,
        Iterable<Row> rows) throws Exception {
    long transactionCount = 0;
    // error_count is null for data inserted prior to glowroot central 0.9.18
    // rolling up any interval with null error_count should result in null error_count
    boolean hasMissingErrorCount = false;
    long errorCount = 0;
    for (Row row : rows) {
        transactionCount += row.getLong(0);
        if (row.isNull(1)) {
            hasMissingErrorCount = true;
        } else {
            errorCount += row.getLong(1);
        }
    }
    BoundStatement boundStatement;
    if (query.transactionName() == null) {
        boundStatement = getInsertOverallPS(throughputTable, rollup.rollupLevel()).bind();
    } else {
        boundStatement = getInsertTransactionPS(throughputTable, rollup.rollupLevel()).bind();
    }
    int i = 0;
    boundStatement.setString(i++, rollup.agentRollupId());
    boundStatement.setString(i++, query.transactionType());
    if (query.transactionName() != null) {
        boundStatement.setString(i++, query.transactionName());
    }
    boundStatement.setTimestamp(i++, new Date(query.to()));
    boundStatement.setLong(i++, transactionCount);
    if (hasMissingErrorCount) {
        boundStatement.setToNull(i++);
    } else {
        boundStatement.setLong(i++, errorCount);
    }
    boundStatement.setInt(i++, rollup.adjustedTTL().generalTTL());
    return session.writeAsync(boundStatement);
}
 
Example 12
Source File: AbstractPlaceRecordingIndexV2Table.java    From arcusplatform with Apache License 2.0
public boolean isCompleteRecording(Row row) {
	return !row.isNull(COL_SIZE);
}
 
Example 13
Source File: QueryCassandra.java    From nifi with Apache License 2.0
/**
 * Converts a result set into an Avro record and writes it to the given stream.
 *
 * @param rs        The result set to convert
 * @param outStream The stream to which the Avro record will be written
 * @param timeout   The max number of timeUnits to wait for a result set fetch to complete
 * @param timeUnit  The unit of time (SECONDS, e.g.) associated with the timeout amount
 * @return The number of rows from the result set written to the stream
 * @throws IOException          If the Avro record cannot be written
 * @throws InterruptedException If a result set fetch is interrupted
 * @throws TimeoutException     If a result set fetch has taken longer than the specified timeout
 * @throws ExecutionException   If any error occurs during the result set fetch
 */
public static long convertToAvroStream(final ResultSet rs, final OutputStream outStream,
                                       long timeout, TimeUnit timeUnit)
        throws IOException, InterruptedException, TimeoutException, ExecutionException {

    final Schema schema = createSchema(rs);
    final GenericRecord rec = new GenericData.Record(schema);

    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, outStream);

        final ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
        long nrOfRows = 0;
        if (columnDefinitions != null) {
            do {

                // Grab the ones we have
                int rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
                if (rowsAvailableWithoutFetching == 0) {
                    // Get more
                    if (timeout <= 0 || timeUnit == null) {
                        rs.fetchMoreResults().get();
                    } else {
                        rs.fetchMoreResults().get(timeout, timeUnit);
                    }
                }

                for (Row row : rs) {

                    for (int i = 0; i < columnDefinitions.size(); i++) {
                        final DataType dataType = columnDefinitions.getType(i);

                        if (row.isNull(i)) {
                            rec.put(i, null);
                        } else {
                            rec.put(i, getCassandraObject(row, i, dataType));
                        }
                    }
                    dataFileWriter.append(rec);
                    nrOfRows += 1;

                }
            } while (!rs.isFullyFetched());
        }
        return nrOfRows;
    }
}
 
Example 14
Source File: TraceDaoImpl.java    From glowroot with Apache License 2.0
private List<Trace.Entry> readEntriesUsingPS(String agentId, String traceId,
        PreparedStatement readPS) throws Exception {
    BoundStatement boundStatement = readPS.bind();
    boundStatement.setString(0, agentId);
    boundStatement.setString(1, traceId);
    ResultSet results = session.read(boundStatement);
    List<Trace.Entry> entries = new ArrayList<>();
    while (!results.isExhausted()) {
        Row row = results.one();
        int i = 0;
        Trace.Entry.Builder entry = Trace.Entry.newBuilder()
                .setDepth(row.getInt(i++))
                .setStartOffsetNanos(row.getLong(i++))
                .setDurationNanos(row.getLong(i++))
                .setActive(row.getBool(i++));
        if (row.isNull(i + 1)) { // shared_query_text_index
            // message is null for trace entries added using addErrorEntry()
            entry.setMessage(Strings.nullToEmpty(row.getString(i++)));
            i++; // shared_query_text_index
            i++; // query_message_prefix
            i++; // query_message_suffix
        } else {
            i++; // message
            Trace.QueryEntryMessage queryEntryMessage = Trace.QueryEntryMessage.newBuilder()
                    .setSharedQueryTextIndex(row.getInt(i++))
                    .setPrefix(Strings.nullToEmpty(row.getString(i++)))
                    .setSuffix(Strings.nullToEmpty(row.getString(i++)))
                    .build();
            entry.setQueryEntryMessage(queryEntryMessage);
        }
        ByteBuffer detailBytes = row.getBytes(i++);
        if (detailBytes != null) {
            entry.addAllDetailEntry(
                    Messages.parseDelimitedFrom(detailBytes, Trace.DetailEntry.parser()));
        }
        ByteBuffer locationBytes = row.getBytes(i++);
        if (locationBytes != null) {
            entry.addAllLocationStackTraceElement(Messages.parseDelimitedFrom(locationBytes,
                    Proto.StackTraceElement.parser()));
        }
        ByteBuffer errorBytes = row.getBytes(i++);
        if (errorBytes != null) {
            entry.setError(Trace.Error.parseFrom(errorBytes));
        }
        entries.add(entry.build());
    }
    return entries;
}
 
Example 15
Source File: QueryCassandra.java    From localization_nifi with Apache License 2.0
/**
 * Converts a result set into an Avro record and writes it to the given stream.
 *
 * @param rs        The result set to convert
 * @param outStream The stream to which the Avro record will be written
 * @param timeout   The max number of timeUnits to wait for a result set fetch to complete
 * @param timeUnit  The unit of time (SECONDS, e.g.) associated with the timeout amount
 * @return The number of rows from the result set written to the stream
 * @throws IOException          If the Avro record cannot be written
 * @throws InterruptedException If a result set fetch is interrupted
 * @throws TimeoutException     If a result set fetch has taken longer than the specified timeout
 * @throws ExecutionException   If any error occurs during the result set fetch
 */
public static long convertToAvroStream(final ResultSet rs, final OutputStream outStream,
                                       long timeout, TimeUnit timeUnit)
        throws IOException, InterruptedException, TimeoutException, ExecutionException {

    final Schema schema = createSchema(rs);
    final GenericRecord rec = new GenericData.Record(schema);

    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, outStream);

        final ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
        long nrOfRows = 0;
        if (columnDefinitions != null) {
            do {

                // Grab the ones we have
                int rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
                if (rowsAvailableWithoutFetching == 0) {
                    // Get more
                    if (timeout <= 0 || timeUnit == null) {
                        rs.fetchMoreResults().get();
                    } else {
                        rs.fetchMoreResults().get(timeout, timeUnit);
                    }
                }

                for (Row row : rs) {

                    for (int i = 0; i < columnDefinitions.size(); i++) {
                        final DataType dataType = columnDefinitions.getType(i);

                        if (row.isNull(i)) {
                            rec.put(i, null);
                        } else {
                            rec.put(i, getCassandraObject(row, i, dataType));
                        }
                    }
                    dataFileWriter.append(rec);
                    nrOfRows += 1;

                }
            } while (!rs.isFullyFetched());
        }
        return nrOfRows;
    }
}
 
Example 16
Source File: CassandraQuery.java    From micro-integrator with Apache License 2.0
private DataEntry getDataEntryFromRow(Row row, ColumnDefinitions defs) throws DataServiceFault {
    boolean useColumnNumbers = this.isUsingColumnNumbers();
    DataType columnType;
    DataEntry entry = new DataEntry();
    ParamValue paramValue = null;
    for (int i = 0; i < defs.size(); i++) {
        columnType = defs.getType(i);
        if (columnType.getName().equals(DataType.Name.ASCII)) {
            paramValue = new ParamValue(row.getString(i));
        } else if (columnType.getName().equals(DataType.Name.VARCHAR)) {
            paramValue = new ParamValue(row.getString(i));
        } else if (columnType.getName().equals(DataType.Name.TEXT)) {
            paramValue = new ParamValue(row.getString(i));
        } else if (columnType.getName().equals(DataType.Name.BIGINT)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Long.toString(row.getLong(i)));
        } else if (columnType.getName().equals(DataType.Name.BLOB)) {
            paramValue = new ParamValue(this.base64EncodeByteBuffer(row.getBytes(i)));
        } else if (columnType.getName().equals(DataType.Name.BOOLEAN)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Boolean.toString(row.getBool(i)));
        } else if (columnType.getName().equals(DataType.Name.COUNTER)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Long.toString(row.getLong(i)));
        } else if (columnType.getName().equals(DataType.Name.CUSTOM)) {
            paramValue = new ParamValue(this.base64EncodeByteBuffer(row.getBytes(i)));
        } else if (columnType.getName().equals(DataType.Name.DECIMAL)) {
            paramValue = new ParamValue(row.isNull(i) ? null : row.getDecimal(i).toString());
        } else if (columnType.getName().equals(DataType.Name.DOUBLE)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Double.toString(row.getDouble(i)));
        } else if (columnType.getName().equals(DataType.Name.FLOAT)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Float.toString(row.getFloat(i)));
        } else if (columnType.getName().equals(DataType.Name.INET)) {
            paramValue = new ParamValue(row.isNull(i) ? null : row.getInet(i).toString());
        } else if (columnType.getName().equals(DataType.Name.INT)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Integer.toString(row.getInt(i)));
        } else if (columnType.getName().equals(DataType.Name.LIST)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Arrays.toString(row.getList(i, Object.class)
                                                                                  .toArray()));
        } else if (columnType.getName().equals(DataType.Name.MAP)) {
            paramValue = new ParamValue(row.getMap(i, Object.class, Object.class).toString());
        } else if (columnType.getName().equals(DataType.Name.SET)) {
            paramValue = new ParamValue(row.getSet(i, Object.class).toString());
        } else if (columnType.getName().equals(DataType.Name.TIMESTAMP)) {
            paramValue = new ParamValue(row.isNull(i) ? null : ConverterUtil.convertToString(row.getTimestamp(i)));
        } else if (columnType.getName().equals(DataType.Name.TIMEUUID)) {
            paramValue = new ParamValue(row.getUUID(i).toString());
        } else if (columnType.getName().equals(DataType.Name.UUID)) {
            paramValue = new ParamValue(row.getUUID(i).toString());
        } else if (columnType.getName().equals(DataType.Name.VARINT)) {
            paramValue = new ParamValue(row.getVarint(i).toString());
        } else if (columnType.getName().equals(DataType.Name.UDT)) {
            paramValue = new ParamValue(row.isNull(i) ? null : ConverterUtil.convertToString(row.getUDTValue(i)));
        } else if (columnType.getName().equals(DataType.Name.TUPLE)) {
            paramValue = new ParamValue(row.isNull(i) ? null : ConverterUtil.convertToString(row.getTupleValue(i)));
        } else if (columnType.getName().equals(DataType.Name.TINYINT)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Integer.toString(row.getByte(i)));
        } else if (columnType.getName().equals(DataType.Name.SMALLINT)) {
            paramValue = new ParamValue(row.isNull(i) ? null : Integer.toString(row.getShort(i)));
        } else if (columnType.getName().equals(DataType.Name.TIME)) {
            paramValue = new ParamValue(row.isNull(i) ? null : ConverterUtil.convertToString(row.getTime(i)));
        } else if (columnType.getName().equals(DataType.Name.DATE)) {
            paramValue = new ParamValue(row.isNull(i) ? null : ConverterUtil.convertToString(row.getDate(i)));
        }
        entry.addValue(useColumnNumbers ? Integer.toString(i) : defs.getName(i), paramValue);
    }
    return entry;
}
 
Example 17
Source File: CassandraType.java    From presto with Apache License 2.0
public String getColumnValueForCql(Row row, int position)
{
    if (row.isNull(position)) {
        return null;
    }

    switch (this) {
        case ASCII:
        case TEXT:
        case VARCHAR:
            return quoteStringLiteral(row.getString(position));
        case INT:
            return Integer.toString(row.getInt(position));
        case SMALLINT:
            return Short.toString(row.getShort(position));
        case TINYINT:
            return Byte.toString(row.getByte(position));
        case BIGINT:
        case COUNTER:
            return Long.toString(row.getLong(position));
        case BOOLEAN:
            return Boolean.toString(row.getBool(position));
        case DOUBLE:
            return Double.toString(row.getDouble(position));
        case FLOAT:
            return Float.toString(row.getFloat(position));
        case DECIMAL:
            return row.getDecimal(position).toString();
        case UUID:
        case TIMEUUID:
            return row.getUUID(position).toString();
        case TIMESTAMP:
            return Long.toString(row.getTimestamp(position).getTime());
        case DATE:
            return row.getDate(position).toString();
        case INET:
            return quoteStringLiteral(toAddrString(row.getInet(position)));
        case VARINT:
            return row.getVarint(position).toString();
        case BLOB:
        case CUSTOM:
            return Bytes.toHexString(row.getBytesUnsafe(position));
        default:
            throw new IllegalStateException("Handling of type " + this + " is not implemented");
    }
}
 
Example 18
Source File: CassandraType.java    From presto with Apache License 2.0
public NullableValue getColumnValue(Row row, int position)
{
    if (row.isNull(position)) {
        return NullableValue.asNull(prestoType);
    }

    switch (this) {
        case ASCII:
        case TEXT:
        case VARCHAR:
            return NullableValue.of(prestoType, utf8Slice(row.getString(position)));
        case INT:
            return NullableValue.of(prestoType, (long) row.getInt(position));
        case SMALLINT:
            return NullableValue.of(prestoType, (long) row.getShort(position));
        case TINYINT:
            return NullableValue.of(prestoType, (long) row.getByte(position));
        case BIGINT:
        case COUNTER:
            return NullableValue.of(prestoType, row.getLong(position));
        case BOOLEAN:
            return NullableValue.of(prestoType, row.getBool(position));
        case DOUBLE:
            return NullableValue.of(prestoType, row.getDouble(position));
        case FLOAT:
            return NullableValue.of(prestoType, (long) floatToRawIntBits(row.getFloat(position)));
        case DECIMAL:
            return NullableValue.of(prestoType, row.getDecimal(position).doubleValue());
        case UUID:
        case TIMEUUID:
            return NullableValue.of(prestoType, utf8Slice(row.getUUID(position).toString()));
        case TIMESTAMP:
            return NullableValue.of(prestoType, row.getTimestamp(position).getTime());
        case DATE:
            return NullableValue.of(prestoType, (long) row.getDate(position).getDaysSinceEpoch());
        case INET:
            return NullableValue.of(prestoType, utf8Slice(toAddrString(row.getInet(position))));
        case VARINT:
            return NullableValue.of(prestoType, utf8Slice(row.getVarint(position).toString()));
        case BLOB:
        case CUSTOM:
            return NullableValue.of(prestoType, wrappedBuffer(row.getBytesUnsafe(position)));
        case SET:
        case LIST:
            return NullableValue.of(prestoType, utf8Slice(buildArrayValue(row, position)));
        case MAP:
            return NullableValue.of(prestoType, utf8Slice(buildMapValue(row, position)));
        default:
            throw new IllegalStateException("Handling of type " + this + " is not implemented");
    }
}
 
Example 19
Source File: CassandraDataHandler.java    From micro-integrator with Apache License 2.0
/**
 * This method wraps result set data in to DataEntry and creates a list of DataEntry.
 *
 * @param tableName         Table Name
 * @param row               Row
 * @param columnDefinitions Column Definition
 * @return DataEntry
 * @throws ODataServiceFault
 */
private ODataEntry createDataEntryFromRow(String tableName, Row row, ColumnDefinitions columnDefinitions)
        throws ODataServiceFault {
    String paramValue;
    ODataEntry entry = new ODataEntry();
    //Creating a unique string to represent the
    try {
        for (int i = 0; i < columnDefinitions.size(); i++) {
            String columnName = columnDefinitions.getName(i);
            DataType columnType = columnDefinitions.getType(i);

            switch (columnType.getName()) {
                case ASCII:
                    paramValue = row.getString(i);
                    break;
                case BIGINT:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getLong(i));
                    break;
                case BLOB:
                    paramValue = this.base64EncodeByteBuffer(row.getBytes(i));
                    break;
                case BOOLEAN:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getBool(i));
                    break;
                case COUNTER:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getLong(i));
                    break;
                case DECIMAL:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getDecimal(i));
                    break;
                case DOUBLE:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getDouble(i));
                    break;
                case FLOAT:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getFloat(i));
                    break;
                case INET:
                    paramValue = row.getInet(i).toString();
                    break;
                case INT:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getInt(i));
                    break;
                case TEXT:
                    paramValue = row.getString(i);
                    break;
                case TIMESTAMP:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getDate(i));
                    break;
                case UUID:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getUUID(i));
                    break;
                case VARCHAR:
                    paramValue = row.getString(i);
                    break;
                case VARINT:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getVarint(i));
                    break;
                case TIMEUUID:
                    paramValue = row.isNull(i) ? null : ConverterUtil.convertToString(row.getUUID(i));
                    break;
                case LIST:
                    paramValue = row.isNull(i) ? null : Arrays.toString(row.getList(i, Object.class).toArray());
                    break;
                case SET:
                    paramValue = row.isNull(i) ? null : row.getSet(i, Object.class).toString();
                    break;
                case MAP:
                    paramValue = row.isNull(i) ? null : row.getMap(i, Object.class, Object.class).toString();
                    break;
                case UDT:
                    paramValue = row.isNull(i) ? null : row.getUDTValue(i).toString();
                    break;
                case TUPLE:
                    paramValue = row.isNull(i) ? null : row.getTupleValue(i).toString();
                    break;
                case CUSTOM:
                    paramValue = row.isNull(i) ? null : this.base64EncodeByteBuffer(row.getBytes(i));
                    break;
                default:
                    paramValue = row.getString(i);
                    break;
            }
            entry.addValue(columnName, paramValue);
        }
    } catch (DataServiceFault e) {
        throw new ODataServiceFault(e, "Error occurred when creating OData entry. :" + e.getMessage());
    }
    //Set E-Tag to the entity
    entry.addValue("ETag", ODataUtils.generateETag(this.configID, tableName, entry));
    return entry;
}