org.apache.cassandra.thrift.ConsistencyLevel Java Examples

The following examples show how to use org.apache.cassandra.thrift.ConsistencyLevel, the Thrift-generated enum that tells each Cassandra read or write how many replicas must acknowledge the operation before it is considered successful. Each example is taken from an open-source project, named along with the source file above the code.
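Every example below passes a ConsistencyLevel as the final argument of a Thrift client call. As a quick orientation, here is a minimal hypothetical sketch of that pattern; the keyspace, row key, column path, and the connection helper are placeholders, not taken from any project on this page.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnPath;
import org.apache.cassandra.thrift.ConsistencyLevel;

// Hypothetical sketch: "connect()" stands in for whatever setup produces a
// connected Cassandra.Client with set_keyspace(...) already called.
Cassandra.Client client = connect();

ColumnPath path = new ColumnPath("Standard1");
path.setColumn("col1".getBytes(StandardCharsets.UTF_8));
ByteBuffer rowKey = ByteBuffer.wrap("row1".getBytes(StandardCharsets.UTF_8));

// ONE waits for a single replica, QUORUM for a majority of replicas,
// ALL for every replica; LOCAL_QUORUM restricts the quorum to the local DC.
ColumnOrSuperColumn got = client.get(rowKey, path, ConsistencyLevel.QUORUM);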
Example #1
Source File: CassandraThriftFacade.java    From emodb with Apache License 2.0
public void executeCql3Script(String script) {
    try {
        for (String cqlStatement : toCqlStatements(script)) {
            if (StringUtils.isNotBlank(cqlStatement)) {
                cqlStatement += ";";
                _log.info("executing cql3 statement: " + cqlStatement);
                _client.execute_cql3_query(ByteBuffer.wrap(cqlStatement.getBytes("UTF-8")), Compression.NONE, ConsistencyLevel.LOCAL_QUORUM);
            }
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
 
Example #2
Source File: CassandraInputData.java    From learning-hadoop with Apache License 2.0
private void getNextBatchOfRows(CassandraConnection conn) throws Exception {

    // reset the column range (if necessary)
    if (m_requestedCols == null) {
      m_sliceRange = m_sliceRange.setStart(ByteBuffer.wrap(new byte[0]));
      m_sliceRange = m_sliceRange.setFinish(ByteBuffer.wrap(new byte[0]));

      m_slicePredicate.setSlice_range(m_sliceRange);
    }

    // set the key range start to the last key from the last batch of rows
    m_keyRange.setStart_key(m_cassandraRows.get(m_cassandraRows.size() - 1)
        .getKey());
    m_cassandraRows = conn.getClient().get_range_slices(m_colParent,
        m_slicePredicate, m_keyRange, ConsistencyLevel.ONE);

    m_colCount = 0;

    // key ranges are *inclusive* of the start key - we will already have
    // processed the first row in the previous batch, hence start at index 1
    // of this batch
    m_rowIndex = 1;
    if (m_cassandraRows == null || m_cassandraRows.size() <= 1
        || m_rowCount == m_sliceRowsMax) {
      // indicate done
      m_currentCols = null;
      m_cassandraRows = null;
    } else {
      advanceToNonEmptyRow();
    }
  }
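The index-1 skip above is the standard Thrift paging idiom: get_range_slices is inclusive of its start key, so every batch after the first repeats the last row of the previous one. A condensed, hypothetical sketch of the same loop (client, parent, predicate, batchSize and process() are placeholders; the types come from org.apache.cassandra.thrift and java.util):

KeyRange range = new KeyRange().setCount(batchSize);
range.setStart_key(new byte[0]);
range.setEnd_key(new byte[0]);

List<KeySlice> batch = client.get_range_slices(parent, predicate, range, ConsistencyLevel.ONE);
for (KeySlice row : batch) {
    process(row);                          // first batch: every row is new
}
while (batch.size() > 1) {
    // restart the range at the last key seen; being inclusive, that key
    // comes back as element 0 of the next batch
    range.setStart_key(batch.get(batch.size() - 1).getKey());
    batch = client.get_range_slices(parent, predicate, range, ConsistencyLevel.ONE);
    for (int i = 1; i < batch.size(); i++) {
        process(batch.get(i));             // skip the duplicate at index 0
    }
}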
 
Example #3
Source File: AbstractCassandraInputAdapter.java    From OpenRate with Apache License 2.0
private void insertColumnValue(String parentName, String id, String name, String value, long ts) throws TException, TimedOutException, UnavailableException, InvalidRequestException, UnsupportedEncodingException {
  ColumnParent parent = new ColumnParent(parentName);
  Column column = new Column(toByteBuffer(name));
  column.setValue(toByteBuffer(value));
  column.setTimestamp(ts);
  getClient().insert(toByteBuffer(id), parent, column, ConsistencyLevel.ONE);
}
 
Example #4
Source File: ThriftColumnFamilyTest.java    From stratio-cassandra with Apache License 2.0
private String getColumnValue(String ks, String cf, String colName, String key, String validator)
throws AuthenticationException, AuthorizationException, InvalidRequestException, UnavailableException, TimedOutException, TException, NotFoundException, IOException
{
    Cassandra.Client client = getClient();
    client.set_keyspace(ks);

    ByteBuffer key_user_id = ByteBufferUtil.bytes(key);
    ColumnPath cp = new ColumnPath(cf);
    cp.column = ByteBufferUtil.bytes(colName);

    // read
    ColumnOrSuperColumn got = client.get(key_user_id, cp, ConsistencyLevel.ONE);
    return parseType(validator).getString(got.getColumn().value);
}
 
Example #5
Source File: CassandraAbstractPut.java    From Hive-Cassandra with Apache License 2.0
/**
 * Parse the consistency level from the job configuration. If none is defined, or if the specified value is not a valid
 * <code>ConsistencyLevel</code>, return the default consistency level, ONE.
 *
 * @param jc job configuration
 * @return cassandra consistency level
 */
protected static ConsistencyLevel getConsistencyLevel(JobConf jc) {
  String consistencyLevel = jc.get(AbstractColumnSerDe.CASSANDRA_CONSISTENCY_LEVEL,
      AbstractColumnSerDe.DEFAULT_CONSISTENCY_LEVEL);
  ConsistencyLevel level = null;
  try {
    level = ConsistencyLevel.valueOf(consistencyLevel);
  } catch (IllegalArgumentException e) {
    level = ConsistencyLevel.ONE;
  }

  return level;
}
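Because getConsistencyLevel() swallows the IllegalArgumentException, a misspelled level silently degrades to ONE rather than failing the job. A hypothetical sketch of that fallback (the property values are made up, and the call is assumed to happen in a subclass, since the method is protected):

JobConf jc = new JobConf();
jc.set(AbstractColumnSerDe.CASSANDRA_CONSISTENCY_LEVEL, "QUORUM");
ConsistencyLevel level = getConsistencyLevel(jc);   // ConsistencyLevel.QUORUM

jc.set(AbstractColumnSerDe.CASSANDRA_CONSISTENCY_LEVEL, "NOT_A_LEVEL");
level = getConsistencyLevel(jc);                    // invalid name, falls back to ONE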
 
Example #6
Source File: CassandraThriftKeyColumnValueStore.java    From titan1withtp3.1 with Apache License 2.0
private List<KeySlice> getRangeSlices(org.apache.cassandra.thrift.KeyRange keyRange, @Nullable SliceQuery sliceQuery) throws BackendException {
    SliceRange sliceRange = new SliceRange();

    if (sliceQuery == null) {
        sliceRange.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        sliceRange.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount((sliceQuery.hasLimit()) ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }


    CTConnection connection = null;
    try {
        connection = pool.borrowObject(keyspace);

        List<KeySlice> slices =
                connection.getClient().get_range_slices(new ColumnParent(columnFamily),
                        new SlicePredicate()
                                .setSlice_range(sliceRange),
                        keyRange,
                        ConsistencyLevel.QUORUM);

        for (KeySlice s : slices) {
            logger.debug("Key {}", ByteBufferUtil.toString(s.key, "-"));
        }

        /* Note: we need to fetch columns for each row as well to remove "range ghosts",
           i.e. keys of deleted rows that still appear in range scans with no live columns */
        List<KeySlice> result = new ArrayList<>(slices.size());
        KeyIterationPredicate pred = new KeyIterationPredicate();
        for (KeySlice ks : slices)
            if (pred.apply(ks))
                result.add(ks);
        return result;
    } catch (Exception e) {
        throw convertException(e);
    } finally {
        if (connection != null)
            pool.returnObjectUnsafe(keyspace, connection);
    }
}
 
Example #7
Source File: CassandraInputData.java    From learning-hadoop with Apache License 2.0
private void getNextBatchOfColumns(CassandraConnection conn) throws Exception {
  m_sliceRange = m_sliceRange.setStart(m_currentCols
      .get(m_currentCols.size() - 1).getColumn().bufferForName());
  m_slicePredicate.setSlice_range(m_sliceRange);

  // fetch the next bunch of columns for the current row
  m_currentCols = conn.getClient().get_slice(
      m_cassandraRows.get(m_rowIndex).bufferForKey(), m_colParent,
      m_slicePredicate, ConsistencyLevel.ONE);

  // slice ranges are inclusive of the start element, so we skip the first
  // element because it was already processed in the previous batch of columns
  if (m_currentCols == null || m_currentCols.size() <= 1) {
    // no more columns in the current row - move to the next row
    m_rowCount++;
    m_rowIndex++;
    m_colCount = 0;

    if (m_rowIndex == m_cassandraRows.size()) {
      getNextBatchOfRows(conn);

      while (m_cassandraRows != null && m_currentCols == null) {
        // keep going until we get some rows with columns!
        getNextBatchOfRows(conn);
      }
    } else {
      advanceToNonEmptyRow();

      while (m_cassandraRows != null && m_currentCols == null) {
        // keep going until we get some rows with columns!
        getNextBatchOfRows(conn);
      }
    }
  } else {
    // discard the first column in the list since it was already processed
    // in the previous batch
    m_currentCols.remove(0);
  }
}
 
Example #8
Source File: SettingsCommand.java    From stratio-cassandra with Apache License 2.0
public SettingsCommand(Command type, Options options, Count count, Duration duration, Uncertainty uncertainty)
{
    this.type = type;
    this.consistencyLevel = ConsistencyLevel.valueOf(options.consistencyLevel.value().toUpperCase());
    this.noWarmup = options.noWarmup.setByUser();
    this.truncate = TruncateWhen.valueOf(options.truncate.value().toUpperCase());

    if (count != null)
    {
        this.count = OptionDistribution.parseLong(count.count.value());
        this.duration = 0;
        this.durationUnits = null;
        this.targetUncertainty = -1;
        this.minimumUncertaintyMeasurements = -1;
        this.maximumUncertaintyMeasurements = -1;
    }
    else if (duration != null)
    {
        this.count = -1;
        this.duration = Long.parseLong(duration.duration.value().substring(0, duration.duration.value().length() - 1));
        switch (duration.duration.value().toLowerCase().charAt(duration.duration.value().length() - 1))
        {
            case 's':
                this.durationUnits = TimeUnit.SECONDS;
                break;
            case 'm':
                this.durationUnits = TimeUnit.MINUTES;
                break;
            case 'h':
                this.durationUnits = TimeUnit.HOURS;
                break;
            default:
                throw new IllegalStateException();
        }
        this.targetUncertainty = -1;
        this.minimumUncertaintyMeasurements = -1;
        this.maximumUncertaintyMeasurements = -1;
    }
    else
    {
        this.count = -1;
        this.duration = 0;
        this.durationUnits = null;
        this.targetUncertainty = Double.parseDouble(uncertainty.uncertainty.value());
        this.minimumUncertaintyMeasurements = Integer.parseInt(uncertainty.minMeasurements.value());
        this.maximumUncertaintyMeasurements = Integer.parseInt(uncertainty.maxMeasurements.value());
    }
}
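The duration branch above splits a value such as "30s", "5m" or "2h" into a numeric count and a TimeUnit taken from its final character. A standalone, hypothetical rendering of just that parsing step:

// Hypothetical standalone version of the duration parsing above.
String raw = "5m";                                  // e.g. a command-line value
long duration = Long.parseLong(raw.substring(0, raw.length() - 1));
TimeUnit durationUnits;
switch (raw.toLowerCase().charAt(raw.length() - 1))
{
    case 's': durationUnits = TimeUnit.SECONDS; break;
    case 'm': durationUnits = TimeUnit.MINUTES; break;
    case 'h': durationUnits = TimeUnit.HOURS; break;
    default: throw new IllegalStateException("unrecognised duration unit in " + raw);
}
// duration == 5, durationUnits == TimeUnit.MINUTES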
 
Example #9
Source File: TestRingCache.java    From stratio-cassandra with Apache License 2.0
/**
 * usage: java -cp <configpath> org.apache.cassandra.client.TestRingCache [keyspace row-id-prefix row-id-int]
 * to test a single keyspace/row, supply the parameters; row-id-prefix and row-id-int are appended together to form a
 * single row id.  If you supply no parameters, 'Keyspace1' is assumed and 9 rows ('row1' through 'row9') will be checked.
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Throwable
{
    int minRow;
    int maxRow;
    String rowPrefix, keyspace = "Keyspace1";

    if (args.length > 0)
    {
        keyspace = args[0];
        rowPrefix = args[1];
        minRow = Integer.parseInt(args[2]);
        maxRow = minRow + 1;
    }
    else
    {
        minRow = 1;
        maxRow = 10;
        rowPrefix = "row";
    }

    TestRingCache tester = new TestRingCache(keyspace);

    for (int nRows = minRow; nRows < maxRow; nRows++)
    {
        ByteBuffer row = ByteBufferUtil.bytes((rowPrefix + nRows));
        ColumnPath col = new ColumnPath("Standard1").setSuper_column((ByteBuffer)null).setColumn("col1".getBytes());
        ColumnParent parent = new ColumnParent("Standard1").setSuper_column((ByteBuffer)null);

        Collection<InetAddress> endpoints = tester.ringCache.getEndpoint(row);
        InetAddress firstEndpoint = endpoints.iterator().next();
        System.out.printf("hosts with key %s : %s; choose %s%n",
                          new String(row.array()), StringUtils.join(endpoints, ","), firstEndpoint);

        // now, read the row back directly from the host owning the row locally
        tester.setup(firstEndpoint.getHostAddress(), DatabaseDescriptor.getRpcPort());
        tester.thriftClient.set_keyspace(keyspace);
        tester.thriftClient.insert(row, parent, new Column(ByteBufferUtil.bytes("col1")).setValue(ByteBufferUtil.bytes("val1")).setTimestamp(1), ConsistencyLevel.ONE);
        Column column = tester.thriftClient.get(row, col, ConsistencyLevel.ONE).column;
        System.out.println("read row " + new String(row.array()) + " " + new String(column.name.array()) + ":" + new String(column.value.array()) + ":" + column.timestamp);
    }

    System.exit(1);
}
 
Example #10
Source File: CassandraPut.java    From Hive-Cassandra with Apache License 2.0
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
  ConsistencyLevel flevel = getConsistencyLevel(jc);
  int batchMutation = getBatchMutationSize(jc);
  Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();

  Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

  int count = 0;
  // TODO check for counter
  for (CassandraColumn col : columns) {
    Column cassCol = new Column();
    cassCol.setValue(col.getValue());
    cassCol.setTimestamp(col.getTimeStamp());
    cassCol.setName(col.getColumn());

    ColumnOrSuperColumn thisCol = new ColumnOrSuperColumn();
    thisCol.setColumn(cassCol);

    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(thisCol);

    List<Mutation> mutList = maps.get(col.getColumnFamily());
    if (mutList == null) {
      mutList = new ArrayList<Mutation>();
      maps.put(col.getColumnFamily(), mutList);
    }

    mutList.add(mutation);
    count++;

    if (count == batchMutation) {
      mutation_map.put(key, maps);

      commitChanges(keySpace, client, flevel, mutation_map);

      // reset mutation map, maps, and count
      mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
      maps = new HashMap<String, List<Mutation>>();
      count = 0;
    }
  }

  if (count > 0) {
    mutation_map.put(key, maps);
    commitChanges(keySpace, client, flevel, mutation_map);
  }
}
 
Example #11
Source File: CassandraSuperPut.java    From Hive-Cassandra with Apache License 2.0
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
  ConsistencyLevel flevel = getConsistencyLevel(jc);
  int batchMutation = getBatchMutationSize(jc);
  Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
  Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

  int count = 0;
  for (CassandraPut c : subColumns) {
    List<Column> columns = new ArrayList<Column>();
    for (CassandraColumn col : c.getColumns()) {
      Column cassCol = new Column();
      cassCol.setValue(col.getValue());
      cassCol.setTimestamp(col.getTimeStamp());
      cassCol.setName(col.getColumn());
      columns.add(cassCol);

      ColumnOrSuperColumn thisSuperCol = new ColumnOrSuperColumn();
      thisSuperCol.setSuper_column(new SuperColumn(c.getKey(), columns));

      Mutation mutation = new Mutation();
      mutation.setColumn_or_supercolumn(thisSuperCol);

      List<Mutation> mutList = maps.get(col.getColumnFamily());
      if (mutList == null) {
        mutList = new ArrayList<Mutation>();
        maps.put(col.getColumnFamily(), mutList);
      }

      mutList.add(mutation);

      count++;

      if (count == batchMutation) {
        mutation_map.put(key, maps);

        commitChanges(keySpace, client, flevel, mutation_map);

        // reset mutation map, maps, and count
        mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
        maps = new HashMap<String, List<Mutation>>();
        count = 0;
      }

    }
  }

  if (count > 0) {
    mutation_map.put(key, maps);
    commitChanges(keySpace, client, flevel, mutation_map);
  }
}