org.apache.cassandra.thrift.SlicePredicate Java Examples
The following examples show how to use
org.apache.cassandra.thrift.SlicePredicate.
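Before looking at the project-specific code, note that the examples below build a SlicePredicate in one of two ways: by naming the exact columns to return, or by attaching a SliceRange that describes a contiguous span of columns. A minimal, self-contained sketch of both patterns follows; the class name, method names, and parameter choices are illustrative placeholders, not code from any of the projects listed on this page.

import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;

public class SlicePredicateSketch {

    // Pattern 1: select explicitly named columns.
    static SlicePredicate byColumnNames(byte[]... colNames) {
        SlicePredicate predicate = new SlicePredicate();
        for (byte[] colName : colNames) {
            predicate.addToColumn_names(ByteBuffer.wrap(colName));
        }
        return predicate;
    }

    // Pattern 2: select a contiguous range of columns, capped at 'count' results.
    static SlicePredicate byRange(byte[] start, byte[] finish, boolean reversed, int count) {
        SliceRange range = new SliceRange(ByteBuffer.wrap(start), ByteBuffer.wrap(finish), reversed, count);
        SlicePredicate predicate = new SlicePredicate();
        predicate.setSlice_range(range);
        return predicate;
    }
}

By Thrift convention, an empty start buffer means "from the first column" and an empty finish buffer means "through the last column", which is why many of the examples below pass ByteBuffer.wrap(new byte[0]) or ArrayUtils.EMPTY_BYTE_ARRAY for both ends of the range.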
Example #1
Source File: CassandraBinaryInputFormat.java From titan1withtp3.1 with Apache License 2.0
@Override
public void setConf(final Configuration config) {
    super.setConf(config);

    // Copy some Titan configuration keys to the Hadoop Configuration keys used by Cassandra's ColumnFamilyInputFormat
    ConfigHelper.setInputInitialAddress(config, titanConf.get(GraphDatabaseConfiguration.STORAGE_HOSTS)[0]);
    if (titanConf.has(GraphDatabaseConfiguration.STORAGE_PORT))
        ConfigHelper.setInputRpcPort(config, String.valueOf(titanConf.get(GraphDatabaseConfiguration.STORAGE_PORT)));
    if (titanConf.has(GraphDatabaseConfiguration.AUTH_USERNAME))
        ConfigHelper.setInputKeyspaceUserName(config, titanConf.get(GraphDatabaseConfiguration.AUTH_USERNAME));
    if (titanConf.has(GraphDatabaseConfiguration.AUTH_PASSWORD))
        ConfigHelper.setInputKeyspacePassword(config, titanConf.get(GraphDatabaseConfiguration.AUTH_PASSWORD));

    // Copy keyspace, force the CF setting to edgestore, honor widerows when set
    final boolean wideRows = config.getBoolean(INPUT_WIDEROWS_CONFIG, false);
    // Use the setInputColumnFamily overload that includes a widerows argument;
    // using the overload without this argument forces it false
    ConfigHelper.setInputColumnFamily(config, titanConf.get(AbstractCassandraStoreManager.CASSANDRA_KEYSPACE),
            mrConf.get(TitanHadoopConfiguration.COLUMN_FAMILY_NAME), wideRows);
    log.debug("Set keyspace: {}", titanConf.get(AbstractCassandraStoreManager.CASSANDRA_KEYSPACE));

    // Set the column slice bounds via Faunus's vertex query filter
    final SlicePredicate predicate = new SlicePredicate();
    final int rangeBatchSize = config.getInt(RANGE_BATCH_SIZE_CONFIG, Integer.MAX_VALUE);
    predicate.setSlice_range(getSliceRange(TitanHadoopSetupCommon.DEFAULT_SLICE_QUERY, rangeBatchSize)); // TODO stop slicing the whole row
    ConfigHelper.setInputSlicePredicate(config, predicate);
}
Example #2
Source File: ColumnFamilyWideRowRecordReader.java From Hive-Cassandra with Apache License 2.0
static boolean isSliceRangePredicate(SlicePredicate predicate) {
    if (predicate == null) {
        return false;
    }
    if (predicate.isSetColumn_names() && predicate.getSlice_range() == null) {
        return false;
    }
    if (predicate.getSlice_range() == null) {
        return false;
    }
    byte[] start = predicate.getSlice_range().getStart();
    byte[] finish = predicate.getSlice_range().getFinish();
    if (start != null && finish != null) {
        return true;
    }
    return false;
}
Example #3
Source File: ThriftCounterGetter.java From stratio-cassandra with Apache License 2.0
public void run(final ThriftClient client) throws IOException {
    final SlicePredicate predicate = select().predicate();
    final ByteBuffer key = getKey();
    timeWithRetry(new RunOp() {
        @Override
        public boolean run() throws Exception {
            List<?> r = client.get_slice(key, new ColumnParent(type.table), predicate, settings.command.consistencyLevel);
            return r != null && r.size() > 0;
        }

        @Override
        public int partitionCount() {
            return 1;
        }

        @Override
        public int rowCount() {
            return 1;
        }
    });
}
Example #4
Source File: InputFormatCQL.java From grakn with GNU Affero General Public License v3.0
@Override
public void setConf(Configuration config) {
    this.hadoopConf = config;
    HadoopPoolsConfigurable.super.setConf(config);
    ModifiableConfigurationHadoop mrConf = ModifiableConfigurationHadoop.of(ModifiableConfigurationHadoop.MAPRED_NS, config);
    BasicConfiguration janusgraphConf = mrConf.getJanusGraphConf();

    // Copy some JanusGraph configuration keys to the Hadoop Configuration keys used by Cassandra's ColumnFamilyInputFormat
    ConfigHelper.setInputInitialAddress(config, janusgraphConf.get(GraphDatabaseConfiguration.STORAGE_HOSTS)[0]);
    if (janusgraphConf.has(GraphDatabaseConfiguration.STORAGE_PORT)) {
        ConfigHelper.setInputRpcPort(config, String.valueOf(janusgraphConf.get(GraphDatabaseConfiguration.STORAGE_PORT)));
    }
    if (janusgraphConf.has(GraphDatabaseConfiguration.AUTH_USERNAME) && janusgraphConf.has(GraphDatabaseConfiguration.AUTH_PASSWORD)) {
        String username = janusgraphConf.get(GraphDatabaseConfiguration.AUTH_USERNAME);
        if (StringUtils.isNotBlank(username)) {
            config.set(INPUT_NATIVE_AUTH_PROVIDER, PlainTextAuthProvider.class.getName());
            config.set(USERNAME, username);
            config.set(PASSWORD, janusgraphConf.get(GraphDatabaseConfiguration.AUTH_PASSWORD));
        }
    }

    // Copy keyspace, force the CF setting to edgestore, honor widerows when set
    boolean wideRows = config.getBoolean(INPUT_WIDEROWS_CONFIG, false);
    // Use the setInputColumnFamily overload that includes a widerows argument;
    // using the overload without this argument forces it false
    ConfigHelper.setInputColumnFamily(config, janusgraphConf.get(CQLConfigOptions.KEYSPACE),
            mrConf.get(ModifiableConfigurationHadoop.COLUMN_FAMILY_NAME), wideRows);
    LOG.debug("Set keyspace: {}", janusgraphConf.get(CQLConfigOptions.KEYSPACE));

    // Set the column slice bounds via Faunus' vertex query filter
    SlicePredicate predicate = new SlicePredicate();
    int rangeBatchSize = config.getInt(RANGE_BATCH_SIZE_CONFIG, Integer.MAX_VALUE);
    predicate.setSlice_range(getSliceRange(rangeBatchSize)); // TODO stop slicing the whole row
    ConfigHelper.setInputSlicePredicate(config, predicate);
}
Example #5
Source File: CassandraTransaction.java From Doradus with Apache License 2.0
private static Mutation createDeleteColumnMutation(byte[] colName, long timestamp) {
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.addToColumn_names(ByteBuffer.wrap(colName));

    Deletion deletion = new Deletion();
    deletion.setPredicate(slicePred);
    deletion.setTimestamp(timestamp);

    Mutation mutation = new Mutation();
    mutation.setDeletion(deletion);
    return mutation;
}
Example #6
Source File: DBConn.java From Doradus with Apache License 2.0
private static String toString(SlicePredicate slicePred) {
    StringBuilder buffer = new StringBuilder();
    if (slicePred.isSetColumn_names()) {
        buffer.append("Columns(");
        buffer.append(slicePred.getColumn_names().size());
        buffer.append(" total)");
    } else if (slicePred.isSetSlice_range()) {
        SliceRange sliceRange = slicePred.getSlice_range();

        ByteBuffer startCol = sliceRange.start;
        String startColStr = "<null>";
        if (startCol != null) {
            startColStr = Utils.toString(startCol.array(), startCol.arrayOffset(), startCol.limit());
        }
        if (startColStr.length() == 0) {
            startColStr = "<first>";
        }

        ByteBuffer endCol = sliceRange.finish;
        String endColStr = "<null>";
        if (endCol != null) {
            endColStr = Utils.toString(endCol.array(), endCol.arrayOffset(), endCol.limit());
        }
        if (endColStr.length() == 0) {
            endColStr = "<last>";
        }

        if (startColStr.equals("<first>") && endColStr.equals("<last>")) {
            buffer.append("Slice(<all>)");
        } else {
            buffer.append("Slice('");
            buffer.append(startColStr);
            buffer.append("' to '");
            buffer.append(endColStr);
            buffer.append("')");
        }
    }
    return buffer.toString();
}
Example #7
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a SlicePredicate that selects the given column names.
 *
 * @param colNames A collection of column names as byte[]s.
 * @return         SlicePredicate that selects the given column names only.
 */
static SlicePredicate slicePredicateColNames(Collection<byte[]> colNames) {
    SlicePredicate slicePred = new SlicePredicate();
    for (byte[] colName : colNames) {
        slicePred.addToColumn_names(ByteBuffer.wrap(colName));
    }
    return slicePred;
}
Example #8
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a SlicePredicate that starts at the given column name, selecting up to
 * {@link #MAX_COLS_BATCH_SIZE} columns.
 *
 * @param startColName Starting column name as a byte[].
 * @param endColName   Ending column name as a byte[].
 * @return             SlicePredicate that starts at the given starting column name,
 *                     ends at the given ending column name, selecting up to
 *                     {@link #MAX_COLS_BATCH_SIZE} columns.
 */
static SlicePredicate slicePredicateStartEndCol(byte[] startColName, byte[] endColName, boolean reversed) {
    if (startColName == null) startColName = EMPTY_BYTES;
    if (endColName == null) endColName = EMPTY_BYTES;
    SliceRange sliceRange = new SliceRange(
            ByteBuffer.wrap(startColName),
            ByteBuffer.wrap(endColName),
            reversed,
            CassandraDefs.MAX_COLS_BATCH_SIZE);
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.setSlice_range(sliceRange);
    return slicePred;
}
Example #9
Source File: CassandraEmbeddedKeyColumnValueStore.java From titan1withtp3.1 with Apache License 2.0
/**
 * Create a RangeSliceCommand and run it against the StorageProxy.
 * <p>
 * To match the behavior of the standard Cassandra thrift API endpoint, the
 * {@code nowMillis} argument should be the number of milliseconds since the
 * UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
 * through a {@link TimestampProvider}). This is per
 * {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
 * which passes the server's System.currentTimeMillis() to the
 * {@code RangeSliceCommand} constructor.
 */
private List<Row> getKeySlice(Token start, Token end, @Nullable SliceQuery sliceQuery, int pageSize, long nowMillis) throws BackendException {
    IPartitioner partitioner = StorageService.getPartitioner();

    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);

    // DAVID CASSANDRA
    // Old cassandra code did not use partitioner anyway in this call... so new code removed it as a parameter
    // RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition startPosition = start.minKeyBound();
    // DAVID CASSANDRA
    // RowPosition endPosition = end.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound();

    List<Row> rows;
    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);
        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter,
                new Bounds<RowPosition>(startPosition, endPosition), pageSize);
        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }
    return rows;
}
Example #10
Source File: HiveCassandraStandardColumnInputFormat.java From Hive-Cassandra with Apache License 2.0
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    String ks = jobConf.get(AbstractColumnSerDe.CASSANDRA_KEYSPACE_NAME);
    String cf = jobConf.get(AbstractColumnSerDe.CASSANDRA_CF_NAME);
    int slicePredicateSize = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_SIZE,
            AbstractColumnSerDe.DEFAULT_SLICE_PREDICATE_SIZE);
    int sliceRangeSize = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_RANGE_BATCH_SIZE,
            AbstractColumnSerDe.DEFAULT_RANGE_BATCH_SIZE);
    int splitSize = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_SPLIT_SIZE,
            AbstractColumnSerDe.DEFAULT_SPLIT_SIZE);
    String cassandraColumnMapping = jobConf.get(AbstractColumnSerDe.CASSANDRA_COL_MAPPING);
    int rpcPort = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_PORT, 9160);
    String host = jobConf.get(AbstractColumnSerDe.CASSANDRA_HOST);
    String partitioner = jobConf.get(AbstractColumnSerDe.CASSANDRA_PARTITIONER);

    if (cassandraColumnMapping == null) {
        throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
    }

    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setReversed(false);
    range.setCount(slicePredicateSize);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);

    ConfigHelper.setInputRpcPort(jobConf, "" + rpcPort);
    ConfigHelper.setInputInitialAddress(jobConf, host);
    ConfigHelper.setInputPartitioner(jobConf, partitioner);
    ConfigHelper.setInputSlicePredicate(jobConf, predicate);
    ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
    ConfigHelper.setRangeBatchSize(jobConf, sliceRangeSize);
    ConfigHelper.setInputSplitSize(jobConf, splitSize);

    Job job = new Job(jobConf);
    JobContext jobContext = new JobContext(job.getConfiguration(), job.getJobID());

    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(jobContext);
    InputSplit[] results = new InputSplit[splits.size()];

    for (int i = 0; i < splits.size(); ++i) {
        HiveCassandraStandardSplit csplit = new HiveCassandraStandardSplit(
                (ColumnFamilySplit) splits.get(i), cassandraColumnMapping, tablePaths[0]);
        csplit.setKeyspace(ks);
        csplit.setColumnFamily(cf);
        csplit.setRangeBatchSize(sliceRangeSize);
        csplit.setSplitSize(splitSize);
        csplit.setHost(host);
        csplit.setPort(rpcPort);
        csplit.setSlicePredicateSize(slicePredicateSize);
        csplit.setPartitioner(partitioner);
        csplit.setColumnMapping(cassandraColumnMapping);
        results[i] = csplit;
    }
    return results;
}
Example #11
Source File: HiveCassandraStandardColumnInputFormat.java From Hive-Cassandra with Apache License 2.0
@Override
public RecordReader<BytesWritable, MapWritable> getRecordReader(InputSplit split, JobConf jobConf,
        final Reporter reporter) throws IOException {
    HiveCassandraStandardSplit cassandraSplit = (HiveCassandraStandardSplit) split;

    List<String> columns = AbstractColumnSerDe.parseColumnMapping(cassandraSplit.getColumnMapping());
    isTransposed = AbstractColumnSerDe.isTransposed(columns);

    List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);

    if (columns.size() < readColIDs.size()) {
        throw new IOException("Cannot read more columns than the given table contains.");
    }

    org.apache.cassandra.hadoop.ColumnFamilySplit cfSplit = cassandraSplit.getSplit();
    Job job = new Job(jobConf);

    TaskAttemptContext tac = new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID()) {
        @Override
        public void progress() {
            reporter.progress();
        }
    };

    SlicePredicate predicate = new SlicePredicate();

    if (isTransposed || readColIDs.size() == columns.size() || readColIDs.size() == 0) {
        SliceRange range = new SliceRange();
        AbstractType comparator = BytesType.instance;

        String comparatorType = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_COMPARATOR);
        if (comparatorType != null && !comparatorType.equals("")) {
            try {
                comparator = TypeParser.parse(comparatorType);
            } catch (Exception ex) {
                throw new IOException("Comparator class not found.");
            }
        }

        String sliceStart = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_START);
        String sliceEnd = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_FINISH);
        String reversed = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_REVERSED);

        range.setStart(comparator.fromString(sliceStart == null ? "" : sliceStart));
        range.setFinish(comparator.fromString(sliceEnd == null ? "" : sliceEnd));
        range.setReversed(reversed == null ? false : reversed.equals("true"));
        range.setCount(cassandraSplit.getSlicePredicateSize());
        predicate.setSlice_range(range);
    } else {
        int iKey = columns.indexOf(AbstractColumnSerDe.CASSANDRA_KEY_COLUMN);
        predicate.setColumn_names(getColumnNames(iKey, columns, readColIDs));
    }

    try {
        ConfigHelper.setInputColumnFamily(tac.getConfiguration(),
                cassandraSplit.getKeyspace(), cassandraSplit.getColumnFamily());
        ConfigHelper.setInputSlicePredicate(tac.getConfiguration(), predicate);
        ConfigHelper.setRangeBatchSize(tac.getConfiguration(), cassandraSplit.getRangeBatchSize());
        ConfigHelper.setInputRpcPort(tac.getConfiguration(), cassandraSplit.getPort() + "");
        ConfigHelper.setInputInitialAddress(tac.getConfiguration(), cassandraSplit.getHost());
        ConfigHelper.setInputPartitioner(tac.getConfiguration(), cassandraSplit.getPartitioner());
        // Set Split Size
        ConfigHelper.setInputSplitSize(tac.getConfiguration(), cassandraSplit.getSplitSize());

        CassandraHiveRecordReader rr = null;
        if (isTransposed && tac.getConfiguration().getBoolean(AbstractColumnSerDe.CASSANDRA_ENABLE_WIDEROW_ITERATOR, true)) {
            rr = new CassandraHiveRecordReader(new ColumnFamilyWideRowRecordReader(), isTransposed);
        } else {
            rr = new CassandraHiveRecordReader(new ColumnFamilyRecordReader(), isTransposed);
        }
        rr.initialize(cfSplit, tac);
        return rr;
    } catch (Exception ie) {
        throw new IOException(ie);
    }
}
Example #12
Source File: ColumnFamilyStoreTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable {
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String keyspaceName = "Keyspace1";
    String cfName = "Standard1";
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("f-flush-resurrection");

    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    // insert
    putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
    assertRowAndColCount(1, 2, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush.
    cfs.forceBlockingFlush();

    // insert, don't flush
    putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
    assertRowAndColCount(1, 4, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // delete (from sstable and memtable)
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.delete(cfs.name, 2);
    rm.apply();

    // verify delete
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete.
    // first breakage is right here because of CASSANDRA-1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
    putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));

    // should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // make sure that new writes are recognized.
    putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // and it remains so after flush. (this wasn't failing before, but it's good to check.)
    cfs.forceBlockingFlush();
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
}
Example #13
Source File: ColumnFamilyStoreTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testDeleteSuperRowSticksAfterFlush() throws Throwable {
    String keyspaceName = "Keyspace1";
    String cfName = "Super1";
    ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper");
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("flush-resurrection");

    // create an isolated sstable.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(1L), ByteBufferUtil.bytes("val1"), 1),
            new BufferCell(cellname(2L), ByteBufferUtil.bytes("val2"), 1),
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 1));
    cfs.forceBlockingFlush();

    // insert, don't flush.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1),
            new BufferCell(cellname(5L), ByteBufferUtil.bytes("val5"), 1),
            new BufferCell(cellname(6L), ByteBufferUtil.bytes("val6"), 1));

    // verify insert.
    final SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    assertRowAndColCount(1, 6, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // delete
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.deleteRange(cfName, SuperColumns.startOf(scfName), SuperColumns.endOf(scfName), 2);
    rm.apply();

    // verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // late insert.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1L),
            new BufferCell(cellname(7L), ByteBufferUtil.bytes("val7"), 1L));

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // make sure new writes are recognized.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 3),
            new BufferCell(cellname(8L), ByteBufferUtil.bytes("val8"), 3),
            new BufferCell(cellname(9L), ByteBufferUtil.bytes("val9"), 3));
    assertRowAndColCount(1, 3, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));
}
Example #14
Source File: CassandraInputData.java From learning-hadoop with Apache License 2.0
public void sliceModeInit(CassandraColumnMetaData meta, List<String> colNames, int maxRows,
        int maxCols, int rowBatchSize, int colBatchSize) throws KettleException {
    m_newSliceQuery = true;
    m_requestedCols = colNames;
    m_sliceRowsMax = maxRows;
    m_sliceColsMax = maxCols;
    m_sliceRowsBatchSize = rowBatchSize;
    m_sliceColsBatchSize = colBatchSize;
    m_rowIndex = 0;
    m_colIndex = 0;

    if (m_sliceColsBatchSize <= 0) {
        m_sliceColsBatchSize = Integer.MAX_VALUE;
    }

    if (m_sliceRowsBatchSize <= 0) {
        m_sliceRowsBatchSize = Integer.MAX_VALUE;
    }

    List<ByteBuffer> specificCols = null;
    if (m_requestedCols != null && m_requestedCols.size() > 0) {
        specificCols = new ArrayList<ByteBuffer>();

        // encode the textual column names
        for (String colName : m_requestedCols) {
            ByteBuffer encoded = meta.columnNameToByteBuffer(colName);
            specificCols.add(encoded);
        }
    }

    m_slicePredicate = new SlicePredicate();

    if (specificCols == null) {
        m_sliceRange = new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]),
                false, m_sliceColsBatchSize);
        m_slicePredicate.setSlice_range(m_sliceRange);
    } else {
        m_slicePredicate.setColumn_names(specificCols);
    }

    m_keyRange = new KeyRange(m_sliceRowsBatchSize);
    m_keyRange.setStart_key(new byte[0]);
    m_keyRange.setEnd_key(new byte[0]);

    m_colParent = new ColumnParent(meta.getColumnFamilyName());
    m_converted = new ArrayList<Object[]>();
}
Example #15
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a SlicePredicate that starts at the given starting column name and ends
 * at the given ending column name, selecting up to the given number of columns.
 *
 * @param startColName Starting column name as a byte[].
 * @param endColName   Ending column name as a byte[].
 * @param count        Maximum number of columns to select.
 * @return             SlicePredicate that starts at the given starting column name,
 *                     ends at the given ending column name, selecting up to count
 *                     columns.
 */
static SlicePredicate slicePredicateStartEndCol(byte[] startColName, byte[] endColName, int count) {
    if (startColName == null) startColName = EMPTY_BYTES;
    if (endColName == null) endColName = EMPTY_BYTES;
    SliceRange sliceRange = new SliceRange(ByteBuffer.wrap(startColName), ByteBuffer.wrap(endColName), false, count);
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.setSlice_range(sliceRange);
    return slicePred;
}
Example #16
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a SlicePredicate that starts at the given column name, selecting up to
 * {@link #MAX_COLS_BATCH_SIZE} columns.
 *
 * @param startColName Starting column name as a byte[].
 * @return             SlicePredicate that starts at the given column name,
 *                     open-ended, selecting up to {@link #MAX_COLS_BATCH_SIZE}
 *                     columns.
 */
static SlicePredicate slicePredicateStartCol(byte[] startColName) {
    if (startColName == null) startColName = EMPTY_BYTES;
    SliceRange sliceRange = new SliceRange(ByteBuffer.wrap(startColName), EMPTY_BYTE_BUFFER, false, CassandraDefs.MAX_COLS_BATCH_SIZE);
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.setSlice_range(sliceRange);
    return slicePred;
}
Example #17
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a SlicePredicate that starts at the given starting column name and ends
 * at the given ending column name, selecting up to {@link #MAX_COLS_BATCH_SIZE}
 * columns.
 *
 * @param startColName Starting column name as a byte[].
 * @param endColName   Ending column name as a byte[].
 * @return             SlicePredicate that starts at the given starting column name,
 *                     ends at the given ending column name, selecting up to
 *                     {@link #MAX_COLS_BATCH_SIZE} columns.
 */
static SlicePredicate slicePredicateStartEndCol(byte[] startColName, byte[] endColName) {
    return slicePredicateStartEndCol(startColName, endColName, false);
}
Example #18
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a SlicePredicate that selects a single column.
 *
 * @param colName Column name as a byte[].
 * @return        SlicePredicate that selects the given column name only.
 */
static SlicePredicate slicePredicateColName(byte[] colName) {
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.addToColumn_names(ByteBuffer.wrap(colName));
    return slicePred;
}