Java Code Examples for com.datastax.driver.core.TableMetadata#getColumns()

The following examples show how to use com.datastax.driver.core.TableMetadata#getColumns(). Each example is taken from an open-source project; the line above each snippet names the source file, the project, and its license.
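
Before the project examples, here is a minimal, self-contained sketch of the call itself; the contact point, keyspace name, and table name below are placeholders rather than values from any of these projects:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.TableMetadata;

public class ListColumns {
    public static void main(String[] args) {
        // "127.0.0.1", "my_keyspace" and "my_table" are placeholders.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
            TableMetadata table = cluster.getMetadata()
                    .getKeyspace("my_keyspace")
                    .getTable("my_table");
            for (ColumnMetadata column : table.getColumns()) {
                System.out.println(column.getName() + " : " + column.getType());
            }
        }
    }
}
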
Example 1
Source File: CassandraConnectorTableService.java    From metacat with Apache License 2.0
private TableInfo getTableInfo(
    @Nonnull @NonNull final QualifiedName name,
    @Nonnull @NonNull final TableMetadata tableMetadata
) {
    final ImmutableList.Builder<FieldInfo> fieldInfoBuilder = ImmutableList.builder();
    // TODO: Ignores clustering, primary key, index, etc columns. We need to rework TableInfo to support
    for (final ColumnMetadata column : tableMetadata.getColumns()) {
        final String dataType = column.getType().toString();
        fieldInfoBuilder.add(
            FieldInfo.builder()
                .name(column.getName())
                .sourceType(dataType)
                .type(this.typeConverter.toMetacatType(dataType))
                .build()
        );
    }
    return TableInfo.builder()
        .name(QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableMetadata.getName()))
        .fields(fieldInfoBuilder.build())
        .build();
}
 
Example 2
Source File: DatastaxCrudFactory.java    From SimpleFlatMapper with MIT License
private static String insertQuery(TableMetadata tableMetadata, String... options) {
    Insert insert = QueryBuilder.insertInto(tableMetadata);

    if (options != null) {
        Insert.Options using = insert.using();
        for (String option : options) {
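            // "TTL" adds a USING TTL bind marker; any other option is treated as USING TIMESTAMP.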
            if ("TTL".equals(option)) {
                using.and(QueryBuilder.ttl(QueryBuilder.bindMarker()));
            } else {
                using.and(QueryBuilder.timestamp(QueryBuilder.bindMarker()));
            }
        }
    }

    List<ColumnMetadata> columns = tableMetadata.getColumns();

    for(ColumnMetadata column : columns) {
        insert.value(column.getName(), QueryBuilder.bindMarker());
    }

    return insert.toString();
}
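
The generated CQL can then be prepared once and reused; a hypothetical invocation, assuming a connected Session in scope:

// Hypothetical usage: one bind marker per column, plus the trailing TTL marker
// appended by the "TTL" option.
PreparedStatement insertWithTtl = session.prepare(insertQuery(tableMetadata, "TTL"));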
 
Example 3
Source File: CassandraTable.java    From ingestion with Apache License 2.0
public CassandraTable(
    final Session session,
    final TableMetadata table,
    final ConsistencyLevel consistencyLevel,
    final String bodyColumn,
    final boolean ignoreCase) {
  this.session = session;
  this.table = table;
  this.consistencyLevel = consistencyLevel;
  this.bodyColumn = bodyColumn;

  this.columns = table.getColumns();
  this.totalColumns = this.columns.size();
  this.primaryKeys = new ArrayList<String>();
  for (final ColumnMetadata column : table.getPrimaryKey()) {
    primaryKeys.add(column.getName());
  }

  this.ignoreCase = ignoreCase;
}
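
A hypothetical instantiation, assuming a connected Session and a TableMetadata already resolved from the cluster metadata (the body column name is a placeholder):

CassandraTable cassandraTable =
    new CassandraTable(session, table, ConsistencyLevel.QUORUM, "body", false);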
 
Example 4
Source File: CommitLogReadHandlerImpl.java    From debezium-incubator with Apache License 2.0
/**
 * Handle a valid deletion event resulting from a partition-level deletion by converting the Cassandra
 * representation of this event into a {@link Record} object and queuing the record to {@link ChangeEventQueue}.
 * A valid deletion event means the partition has only a single row, which implies there are no clustering keys.
 *
 * The steps are:
 *      (1) Populate the "source" field for this event
 *      (2) Fetch the cached key/value schemas from {@link SchemaHolder}
 *      (3) Populate the "after" field for this event
 *          a. populate partition columns
 *          b. populate regular columns with null values
 *      (4) Assemble a {@link Record} object from the populated data and queue the record
 */
private void handlePartitionDeletion(PartitionUpdate pu, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable) {
    try {

        SchemaHolder.KeyValueSchema keyValueSchema = schemaHolder.getOrUpdateKeyValueSchema(keyspaceTable);
        Schema keySchema = keyValueSchema.keySchema();
        Schema valueSchema = keyValueSchema.valueSchema();

        RowData after = new RowData();

        populatePartitionColumns(after, pu);

        // For partition deletions, the PartitionUpdate only specifies the partition key; it does not
        // contain any info on regular (non-partition) columns, as if they were not modified. In order
        // to differentiate deleted columns from unmodified columns, we populate the deleted columns
        // with null values and deletion timestamps.
        TableMetadata tableMetadata = keyValueSchema.tableMetadata();
        List<ColumnMetadata> clusteringColumns = tableMetadata.getClusteringColumns();
        if (!clusteringColumns.isEmpty()) {
            throw new CassandraConnectorSchemaException("Uh-oh... clustering key should not exist for partition deletion");
        }
        List<ColumnMetadata> columns = tableMetadata.getColumns();
        columns.removeAll(tableMetadata.getPartitionKey());
        for (ColumnMetadata cm : columns) {
            String name = cm.getName();
            long deletionTs = pu.deletionInfo().getPartitionDeletion().markedForDeleteAt();
            CellData cellData = new CellData(name, null, deletionTs, CellData.ColumnType.REGULAR);
            after.addCell(cellData);
        }

        recordMaker.delete(DatabaseDescriptor.getClusterName(), offsetPosition, keyspaceTable, false,
                Conversions.toInstantFromMicros(pu.maxTimestamp()), after, keySchema, valueSchema,
                MARK_OFFSET, queue::enqueue);
    }
    catch (Exception e) {
        LOGGER.error("Fail to delete partition at {}. Reason: {}", offsetPosition, e);
    }
}
 
Example 5
Source File: RowData.java    From debezium-incubator with Apache License 2.0
/**
 * Assemble the Kafka connect {@link Schema} for the "after" field of the change event
 * based on the Cassandra table schema.
 * @param tm metadata of a table that contains the Cassandra table schema
 * @return a schema for the "after" field of a change event
 */
static Schema rowSchema(TableMetadata tm) {
    SchemaBuilder schemaBuilder = SchemaBuilder.struct().name(Record.AFTER);
    for (ColumnMetadata cm : tm.getColumns()) {
        Schema optionalCellSchema = CellData.cellSchema(cm, true);
        if (optionalCellSchema != null) {
            schemaBuilder.field(cm.getName(), optionalCellSchema);
        }
    }
    return schemaBuilder.build();
}
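
A sketch of how the resulting schema might be consumed, assuming Kafka Connect's Struct (the field name and value below are hypothetical):

// Hypothetical usage: populate an "after" struct from the generated schema.
Schema afterSchema = rowSchema(tm);
Struct after = new Struct(afterSchema);
// after.put("some_column", someValue); // one entry per column kept by cellSchema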
 
Example 6
Source File: AbstractUpsertOutputOperator.java    From attic-apex-malhar with Apache License 2.0
private void registerNonPKColumnDefinitions(final TableMetadata tableMetadata)
{
  List<ColumnMetadata> colInfoForTable = tableMetadata.getColumns();
  for (ColumnMetadata aColumnDefinition : colInfoForTable) {
    if (aColumnDefinition.getType().isCollection()) {
      collectionColumns.add(aColumnDefinition.getName());
    }
    if (!pkColumnNames.contains(aColumnDefinition.getName())) {
      columnDefinitions.put(aColumnDefinition.getName(), aColumnDefinition.getType());
      regularColumns.add(aColumnDefinition.getName());
    }
    parseForSpecialDataType(aColumnDefinition);
  }
}
 
Example 7
Source File: DatastaxCrudFactory.java    From SimpleFlatMapper with MIT License
private static <T> DatastaxMapper<T> selectMapper(Type target, TableMetadata tableMetadata, DatastaxMapperFactory mapperFactory) {
    DatastaxMapperBuilder<T> mapperBuilder = mapperFactory.newBuilder(target);
    int i = 0;
    for(ColumnMetadata columnMetadata : tableMetadata.getColumns()) {
        mapperBuilder.addMapping(DatastaxColumnKey.of(columnMetadata, i++));
    }
    return mapperBuilder.mapper();
}
 
Example 8
Source File: DatastaxCrudFactory.java    From SimpleFlatMapper with MIT License
private static <T> BoundStatementMapper<T> insertSetter(Type target, TableMetadata tableMetadata, DatastaxMapperFactory mapperFactory, int offset) {
    SettableDataMapperBuilder<T> mapperBuilder = mapperFactory.newBuilderFrom(target);
    int i = offset;
    for(ColumnMetadata columnMetadata : tableMetadata.getColumns()) {
        mapperBuilder.addColumn(DatastaxColumnKey.of(columnMetadata, i++));
    }
    return new BoundStatementMapper<T>(mapperBuilder.mapper());
}
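
Examples 7 and 8 pair a read-side row mapper with a write-side statement setter. Inside DatastaxCrudFactory (both methods are private) they could be wired up along these lines, with MyEntity as a placeholder type:

// Hypothetical wiring (MyEntity is a placeholder):
DatastaxMapperFactory factory = DatastaxMapperFactory.newInstance();
DatastaxMapper<MyEntity> reader = selectMapper(MyEntity.class, tableMetadata, factory);
BoundStatementMapper<MyEntity> writer = insertSetter(MyEntity.class, tableMetadata, factory, 0);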
 
Example 9
Source File: DeepRecordReader.java    From deep-spark with Apache License 2.0
/**
 * Retrieve the name of the column backing the Lucene custom index, if any.
 *
 * @return the Lucene-indexed column name, or an empty string if no custom index exists.
 */
private String getLuceneIndex() {
    String indexName = "";

    TableMetadata tableMetadata = config.fetchTableMetadata();
    List<ColumnMetadata> columns = tableMetadata.getColumns();
    for (ColumnMetadata column : columns) {
        if (column.getIndex() != null && column.getIndex().isCustomIndex()) {
            indexName = column.getName();
        }
    }
    return indexName;
}
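
Note that ColumnMetadata#getIndex() exists only in driver 2.x; in driver 3.x, index metadata moved to the table level. A rough 3.x equivalent of the loop above (a sketch, not a drop-in replacement) would be:

for (IndexMetadata index : tableMetadata.getIndexes()) {
    if (index.getKind() == IndexMetadata.Kind.CUSTOM) {
        indexName = index.getTarget(); // the indexed column (or column expression)
    }
}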
 
Example 10
Source File: SaveToCassandraOperationsService.java    From Decision with Apache License 2.0
public void refreshTablenames() {
    Collection<TableMetadata> tableMetadatas = session.getCluster().getMetadata()
            .getKeyspace(STREAMING.STREAMING_KEYSPACE_NAME).getTables();
    tablenames = new HashMap<>();
    for (TableMetadata tableMetadata : tableMetadatas) {
        Set<String> columns = new HashSet<>();
        for (ColumnMetadata columnMetadata : tableMetadata.getColumns()) {
            columns.add(columnMetadata.getName());
        }
        tablenames.put(tableMetadata.getName(), columns.hashCode());
    }
}
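
Caching columns.hashCode() rather than the column set itself keeps the map small while still acting as a change marker; a hypothetical staleness check against a fresh snapshot (hash collisions make this a heuristic):

// Hypothetical check: recompute the column-name hash and compare with the cache.
boolean tableChanged(String tableName, Set<String> currentColumns) {
    Integer cached = tablenames.get(tableName);
    return cached == null || cached != currentColumns.hashCode();
}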
 
Example 11
Source File: CassandraClusterInfo.java    From hdfs2cass with Apache License 2.0
public void init(final String keyspace, final String columnFamily) {

    this.keyspace = keyspace;
    this.columnFamily = columnFamily;

    // connect to the cluster
    Cluster.Builder clusterBuilder = Cluster.builder();
    clusterBuilder.addContactPoints(host);
    if (port != -1) {
      clusterBuilder.withPort(port);
    }

    // ask for some metadata
    logger.info("getting cluster metadata for {}.{}", keyspace, columnFamily);
    final TableMetadata tableMetadata;
    try (final Cluster cluster = clusterBuilder.build()) {
      Metadata clusterMetadata = cluster.getMetadata();
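      // Quote the identifiers: metadata lookups are case-sensitive, so quoting
      // preserves the exact keyspace and table names.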
      KeyspaceMetadata keyspaceMetadata = clusterMetadata.getKeyspace('"' + keyspace + '"');
      tableMetadata = keyspaceMetadata.getTable('"' + columnFamily + '"');
      cqlSchema = tableMetadata.asCQLQuery();
      partitionerClass = clusterMetadata.getPartitioner();
      Class.forName(partitionerClass);
      numClusterNodes = clusterMetadata.getAllHosts().size();
      columns = tableMetadata.getColumns();
    } catch (ClassNotFoundException cnfe) {
      throw new CrunchRuntimeException("No such partitioner: " + partitionerClass, cnfe);
    } catch (NullPointerException npe) {
      String msg = String.format("No such keyspace/table: %s/%s", keyspace, columnFamily);
      throw new CrunchRuntimeException(msg, npe);
    }

    // map the partition key columns
    final List<ColumnMetadata> partitionKeyColumns = tableMetadata.getPartitionKey();
    partitionKeyIndexes = new int[partitionKeyColumns.size()];
    for (int i = 0; i < partitionKeyColumns.size(); i++) {
      final String keyColName = partitionKeyColumns.get(i).getName();
      int j;
      for (j = 0; j < columns.size(); j++) {
        if (columns.get(j).getName().equals(keyColName)) {
          partitionKeyIndexes[i] = j;
          logger.info("partition key column {} index {}", keyColName, j);
          break;
        }
      }
      if (j == columns.size()) {
        throw new CrunchRuntimeException("no matching column for key " + keyColName);
      }
    }
  }