Java Code Examples for com.datastax.driver.core.ColumnMetadata#getName()

The following examples show how to use com.datastax.driver.core.ColumnMetadata#getName(). Each example is taken from an open source project; the source file and originating project are noted above its code.
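For context, ColumnMetadata#getName() returns the name of a column as declared in its table. Below is a minimal sketch of iterating a table's columns through the cluster metadata; the contact point, keyspace, and table names are placeholders, not values from the examples that follow.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.TableMetadata;

public class ColumnNamesSketch {
    public static void main(String[] args) {
        // Placeholder contact point, keyspace, and table names.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
            TableMetadata table = cluster.getMetadata()
                    .getKeyspace("my_keyspace")
                    .getTable("my_table");
            for (ColumnMetadata column : table.getColumns()) {
                // getName() yields the column name; getType() its CQL type.
                System.out.println(column.getName() + " : " + column.getType());
            }
        }
    }
}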
Example 1
Source File: CassandraDataHandler.java    From micro-integrator with Apache License 2.0
private Map<String, Map<String, DataColumn>> generateMetaData() {
    Map<String, Map<String, DataColumn>> metadata = new HashMap<>();
    for (String tableName : this.tableList) {
        Map<String, DataColumn> dataColumnMap = new HashMap<>();
        for (ColumnMetadata columnMetadata : this.session.getCluster().getMetadata().getKeyspace(this.keyspace)
                                                         .getTable(tableName).getColumns()) {
            // The third DataColumn argument is false for primary-key columns and true otherwise.
            boolean isPrimaryKey = this.primaryKeys.get(tableName).contains(columnMetadata.getName());
            DataColumn dataColumn = new DataColumn(columnMetadata.getName(),
                                                   getDataType(columnMetadata.getType().getName()),
                                                   !isPrimaryKey);
            dataColumnMap.put(dataColumn.getColumnName(), dataColumn);
        }
        metadata.put(tableName, dataColumnMap);
    }
    return metadata;
}
 
Example 2
Source File: SnapshotProcessor.java    From debezium-incubator with Apache License 2.0
/**
 * This function extracts the relevant row data from a {@link Row} and updates the maximum writetime for each row.
 */
private static RowData extractRowData(Row row, List<ColumnMetadata> columns, Set<String> partitionKeyNames, Set<String> clusteringKeyNames, Object executionTime) {
    RowData rowData = new RowData();

    for (ColumnMetadata columnMetadata : columns) {
        String name = columnMetadata.getName();
        Object value = readCol(row, name, columnMetadata);
        Object deletionTs = null;
        CellData.ColumnType type = getType(name, partitionKeyNames, clusteringKeyNames);

        if (type == CellData.ColumnType.REGULAR && value != null && !collectionTypes.contains(columnMetadata.getType().getName())) {
            Object ttl = readColTtl(row, name);
            if (ttl != null && executionTime != null) {
                deletionTs = calculateDeletionTs(executionTime, ttl);
            }
        }

        CellData cellData = new CellData(name, value, deletionTs, type);
        rowData.addCell(cellData);
    }

    return rowData;
}
 
Example 3
Source File: CassandraDeepJobConfig.java    From deep-spark with Apache License 2.0
private void validateAdditionalFilters(TableMetadata tableMetadata) {
    for (Map.Entry<String, Serializable> entry : additionalFilters.entrySet()) {
        /* check if there's an index specified on the provided column */
        ColumnMetadata columnMetadata = tableMetadata.getColumn(entry.getKey());

        if (columnMetadata == null) {
            throw new DeepNoSuchFieldException("No column with name " + entry.getKey() + " has been found on " +
                    "table " + this.catalog + "." + this.table);
        }

        if (columnMetadata.getIndex() == null) {
            throw new DeepIndexNotFoundException("No index has been found on column " + columnMetadata.getName()
                    + " on table " + this.catalog + "." + this.table);
        }
    }
}
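Note that ColumnMetadata#getIndex(), used above, belongs to the 2.x DataStax driver API; in driver 3.x, index metadata moved to TableMetadata#getIndexes(), so this code would need adjusting there.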
 
Example 4
Source File: CassandraTable.java    From ingestion with Apache License 2.0
public Map<String, Object> parse(final Event event) {
  // translate to lowercase for ignorecase option
  final Map<String, String> headers = ignoreCase ? processHeadersIgnoreCase(event.getHeaders())
      : event.getHeaders();
  final int maxValues = Math.min(headers.size(), totalColumns);
  final Map<String, Object> result = new HashMap<String, Object>(maxValues);

  for (final ColumnMetadata column : columns) {
    final String columnName = ignoreCase ? column.getName().toLowerCase() : column.getName();

    if (headers.containsKey(columnName) && !columnName.equals(bodyColumn)) {
      result.put(columnName, parseValue(column.getType(), headers.get(columnName)));
    } else if (columnName.equals(bodyColumn)) {
      result.put(columnName, parseValue(column.getType(), new String(event.getBody(), Charsets.UTF_8)));
    }
  }

  return result;
}
 
Example 5
Source File: CommitLogReadHandlerImpl.java    From debezium-incubator with Apache License 2.0
/**
 * Handle a valid deletion event resulting from a partition-level deletion by converting the Cassandra
 * representation of this event into a {@link Record} object and queuing the record in the {@link ChangeEventQueue}.
 * A valid deletion event means the partition has only a single row, which implies there are no clustering keys.
 *
 * The steps are:
 *      (1) Populate the "source" field for this event
 *      (2) Fetch the cached key/value schemas from {@link SchemaHolder}
 *      (3) Populate the "after" field for this event
 *          a. populate partition columns
 *          b. populate regular columns with null values
 *      (4) Assemble a {@link Record} object from the populated data and queue the record
 */
private void handlePartitionDeletion(PartitionUpdate pu, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable) {
    try {

        SchemaHolder.KeyValueSchema keyValueSchema = schemaHolder.getOrUpdateKeyValueSchema(keyspaceTable);
        Schema keySchema = keyValueSchema.keySchema();
        Schema valueSchema = keyValueSchema.valueSchema();

        RowData after = new RowData();

        populatePartitionColumns(after, pu);

        // For partition deletions, the PartitionUpdate only specifies the partition key; it does not
        // contain any info on regular (non-partition) columns, as if they were not modified. In order
        // to differentiate deleted columns from unmodified columns, we populate the deleted columns
        // with null values and deletion timestamps.
        TableMetadata tableMetadata = keyValueSchema.tableMetadata();
        List<ColumnMetadata> clusteringColumns = tableMetadata.getClusteringColumns();
        if (!clusteringColumns.isEmpty()) {
            throw new CassandraConnectorSchemaException("Uh-oh... clustering key should not exist for partition deletion");
        }
        List<ColumnMetadata> columns = tableMetadata.getColumns();
        columns.removeAll(tableMetadata.getPartitionKey());
        for (ColumnMetadata cm : columns) {
            String name = cm.getName();
            long deletionTs = pu.deletionInfo().getPartitionDeletion().markedForDeleteAt();
            CellData cellData = new CellData(name, null, deletionTs, CellData.ColumnType.REGULAR);
            after.addCell(cellData);
        }

        recordMaker.delete(DatabaseDescriptor.getClusterName(), offsetPosition, keyspaceTable, false,
                Conversions.toInstantFromMicros(pu.maxTimestamp()), after, keySchema, valueSchema,
                MARK_OFFSET, queue::enqueue);
    }
    catch (Exception e) {
        LOGGER.error("Fail to delete partition at {}. Reason: {}", offsetPosition, e);
    }
}
 
Example 6
Source File: DeepRecordReader.java    From deep-spark with Apache License 2.0
/**
 * Retrieve the name of the column that carries a custom (Lucene) index.
 *
 * @return the name of the Lucene-indexed column, or an empty string if none exists.
 */
private String getLuceneIndex() {
    String indexName = "";

    TableMetadata tableMetadata = config.fetchTableMetadata();
    List<ColumnMetadata> columns = tableMetadata.getColumns();
    for (ColumnMetadata column : columns) {
        if (column.getIndex() != null && column.getIndex().isCustomIndex()) {
            indexName = column.getName();
        }
    }
    return indexName;
}
 
Example 7
Source File: DatastaxColumnKey.java    From SimpleFlatMapper with MIT License
public static DatastaxColumnKey of(ColumnMetadata metaData, int column) {
	return new DatastaxColumnKey(metaData.getName(), column, metaData.getType());
}
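As a rough usage sketch (the keyspace and table names are placeholders, and using a column's position in the table's column list as its index is an illustrative assumption, not documented SimpleFlatMapper behavior):

// Hypothetical: build one key per column of a table, using the column's
// zero-based position in the column list as the index argument.
List<ColumnMetadata> columns = cluster.getMetadata()
        .getKeyspace("my_keyspace")
        .getTable("my_table")
        .getColumns();
for (int i = 0; i < columns.size(); i++) {
    DatastaxColumnKey key = DatastaxColumnKey.of(columns.get(i), i);
}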