org.apache.cassandra.thrift.KeySlice Java Examples

The following examples show how to use org.apache.cassandra.thrift.KeySlice. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: CassandraInputData.java    From learning-hadoop with Apache License 2.0
/**
 * Advance m_rowIndex past any rows in the current batch that contain no
 * columns, caching the columns of the first non-empty row in m_currentCols.
 * If every remaining row in the batch is empty, m_currentCols is set to
 * null to signal that there is nothing to output.
 */
private void advanceToNonEmptyRow() {
  KeySlice row = m_cassandraRows.get(m_rowIndex);
  m_currentCols = row.getColumns();

  // The original compared size() against a never-changing skipSize == 0;
  // isEmpty() states the intent directly.
  while (m_currentCols.isEmpty()
      && m_rowIndex < m_cassandraRows.size() - 1) {
    m_rowIndex++;
    row = m_cassandraRows.get(m_rowIndex);
    m_currentCols = row.getColumns();
  }

  if (m_currentCols.isEmpty()) {
    // we've been through the batch and there are no columns in any of these
    // rows -
    // so nothing to output! Indicate this by setting currentCols to null
    m_currentCols = null;
  }
}
 
Example #2
Source File: ThriftService.java    From Doradus with Apache License 2.0
/**
 * Returns up to {@code count} row keys from the given store, resuming the
 * scan at {@code continuationToken}.
 *
 * @param storeName         store (column family) to read from.
 * @param continuationToken row key at which to resume; encoded to bytes for
 *                          the key range start.
 * @param count             maximum number of row keys to return.
 * @return decoded row keys in the order Cassandra returned them.
 */
@Override
public List<String> getRows(String storeName, String continuationToken, int count) {
    DBConn dbConn = getDBConnection();
    try {
        List<KeySlice> slices = dbConn.getRangeSlices(
                CassandraDefs.columnParent(storeName),
                CassandraDefs.slicePredicateStartEndCol(null, null, 1),
                CassandraDefs.keyRangeStartRow(Utils.toBytes(continuationToken), count));
        List<String> rowKeys = new ArrayList<>(slices.size());
        for (KeySlice slice : slices) {
            rowKeys.add(Utils.toString(slice.getKey()));
        }
        return rowKeys;
    } finally {
        // Hand the connection back to the pool even if the query throws.
        returnDBConnection(dbConn);
    }
}
 
Example #3
Source File: ColumnFamilyWideRowRecordReader.java    From Hive-Cassandra with Apache License 2.0
/**
 * Produces the next (row key, columns) pair for the iterator, triggering a
 * fresh fetch via maybeInit() when the current buffer is consumed.
 *
 * @return the row key paired with its columns keyed (and sorted) by column
 *         name, or endOfData() once maybeInit() finds no more rows.
 */
@Override
protected Pair<ByteBuffer, SortedMap<ByteBuffer, IColumn>> computeNext() {
  maybeInit();
  if (rows == null) {
    // maybeInit() exhausted the split.
    return endOfData();
  }

  // Unthrift the buffered row's columns into a map sorted by the CF comparator.
  KeySlice ks = rows.get(0);
  SortedMap<ByteBuffer, IColumn> map = new TreeMap<ByteBuffer, IColumn>(comparator);
  for (ColumnOrSuperColumn cosc : ks.columns) {
    IColumn column = unthriftify(cosc);
    map.put(column.name(), column);
  }
  return Pair.create(ks.key, map);
}
 
Example #4
Source File: CassandraColumnMetaData.java    From learning-hadoop with Apache License 2.0
/**
 * Return the decoded key value of a row. Assumes that the supplied row comes
 * from the column family that this meta data represents!!
 * 
 * @param row a Cassandra row
 * @return the decoded key value
 * @throws KettleException if a deserializer can't be determined
 */
public Object getKeyValue(KeySlice row) throws KettleException {
  ByteBuffer key = row.bufferForKey();

  // BUG FIX: the original tested indexOf("BytesType") > 0, which fails when
  // the validator string *starts* with "BytesType" (indexOf returns 0).
  // contains() is the intended "mentions BytesType anywhere" check.
  if (m_keyValidator.contains("BytesType")) {
    return row.getKey();
  }

  return getColumnValue(key, m_keyValidator);
}
 
Example #5
Source File: ColumnFamilyWideRowRecordReader.java    From Hive-Cassandra with Apache License 2.0
/**
 * Ensures {@code rows} holds the next batch of data to read, issuing a
 * get_range_slices call when needed. Sets {@code rows} to null when the
 * split is exhausted or no further progress can be made, which callers
 * (computeNext) treat as end-of-data.
 */
private void maybeInit() {
  // check if we need another row
  // NOTE(review): columnsRead < rowPageSize presumably means the last slice
  // came back partial, i.e. the current wide row is fully consumed - advance
  // the start token past this row and reset the slice predicate. Confirm
  // against the fields' initialization, which is outside this view.
  if (rows != null && columnsRead < rowPageSize) {
    columnsRead = 0;
    startToken = partitioner.getTokenFactory().toString(partitioner.getToken(rows.get(0).key));
    predicate.getSlice_range().setStart(startSlicePredicate);
    rows = null;
    prevStartSlice = null;
    totalRead++;
  }

  if (startToken == null) {
    // first fetch for this split
    startToken = split.getStartToken();
  } else if (startToken.equals(split.getEndToken()) && rows == null) {
    // reached end of the split
    return;
  }

  KeyRange keyRange = new KeyRange(batchRowCount)
                            .setStart_token(startToken)
                            .setEnd_token(split.getEndToken());
  try {
    rows = client.get_range_slices(new ColumnParent(cfName),
                                           predicate,
                                           keyRange,
                                           consistencyLevel);

    // nothing new? reached the end
    if (rows.isEmpty()) {
      rows = null;
      return;
    }

    // detect infinite loop: if the slice start hasn't advanced since the
    // previous fetch we would re-read the same columns forever - bail out.
    if (prevStartSlice != null && ByteBufferUtil.compareUnsigned(prevStartSlice, predicate.slice_range.start) == 0) {
        rows = null;
        return;
    }

    // prepare for the next slice to be read
    KeySlice row = rows.get(0);

    if (row.getColumnsSize() > 0) {

      // Resume the next slice at the last column received. Exactly one of
      // the four ColumnOrSuperColumn variants is expected to be non-null.
      ColumnOrSuperColumn cosc = row.getColumns().get(row.getColumnsSize() - 1);

      prevStartSlice = predicate.slice_range.start;

      //prepare next slice
      if (cosc.column != null) {
        predicate.slice_range.start = cosc.column.name;
      }

      if (cosc.super_column != null) {
        predicate.slice_range.start = cosc.super_column.name;
      }

      if (cosc.counter_column != null) {
        predicate.slice_range.start = cosc.counter_column.name;
      }

      if (cosc.counter_super_column != null) {
        predicate.slice_range.start = cosc.counter_super_column.name;
      }

      columnsRead = row.getColumnsSize();

      //If we've hit the max columns then rm the last column
      //to make sure we don't know where to start next without overlap
      //NOTE(review): the next slice starts at (and includes) that column's
      //name, so dropping it here avoids emitting it twice.
      if (columnsRead == rowPageSize) {
        row.getColumns().remove(columnsRead - 1);
      }
    } 
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}