org.apache.cassandra.thrift.ColumnOrSuperColumn Java Examples

The following examples show how to use org.apache.cassandra.thrift.ColumnOrSuperColumn. They are drawn from several open-source projects; the source file and originating project are noted above each example.
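ColumnOrSuperColumn is the Thrift union-style struct that reads such as get and get_slice return, and that a Mutation wraps for writes: exactly one of its four fields (column, super_column, counter_column, counter_super_column) is set at a time. The sketch below illustrates that pattern; the class CoscExample and its method names are illustrative only, not part of the Cassandra API.

import java.nio.ByteBuffer;

import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;

public class CoscExample {
    // Wrap a regular column so a Mutation can carry it.
    static ColumnOrSuperColumn wrap(ByteBuffer name, ByteBuffer value, long timestamp) {
        Column col = new Column();
        col.setName(name);
        col.setValue(value);
        col.setTimestamp(timestamp);
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        cosc.setColumn(col); // leaves the other three union fields unset
        return cosc;
    }

    // Dispatch on whichever union field is set; Thrift generates an
    // isSetXxx() probe per field.
    static String kind(ColumnOrSuperColumn cosc) {
        if (cosc.isSetCounter_column())       return "counter_column";
        if (cosc.isSetCounter_super_column()) return "counter_super_column";
        if (cosc.isSetSuper_column())         return "super_column";
        return "column";
    }
}
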
Example #1
Source File: ThriftService.java    From Doradus with Apache License 2.0
@Override
public List<DColumn> getColumns(String storeName, String rowKey, String startColumn, String endColumn, int count) {
    DBConn dbConn = getDBConnection();
    try {
        List<ColumnOrSuperColumn> columns = dbConn.getSlice(
                CassandraDefs.columnParent(storeName),
                CassandraDefs.slicePredicateStartEndCol(Utils.toBytes(startColumn), Utils.toBytes(endColumn), count),
                Utils.toByteBuffer(rowKey));
        List<DColumn> result = new ArrayList<>(columns.size());
        for(ColumnOrSuperColumn column: columns) {
            result.add(new DColumn(column.getColumn().getName(), column.getColumn().getValue()));
        }
        return result;
    } finally {
        returnDBConnection(dbConn);
    }
}
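The slice predicate here selects a contiguous range: in the Thrift API both the start and end column names are inclusive, and count caps the number of columns returned per call.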
 
Example #2
Source File: ThriftService.java    From Doradus with Apache License 2.0
@Override
public List<DColumn> getColumns(String storeName, String rowKey, Collection<String> columnNames) {
    DBConn dbConn = getDBConnection();
    try {
        List<byte[]> colNameList = new ArrayList<>(columnNames.size());
        for (String colName : columnNames) {
            colNameList.add(Utils.toBytes(colName));
        }

        List<ColumnOrSuperColumn> columns = dbConn.getSlice(
                CassandraDefs.columnParent(storeName),
                CassandraDefs.slicePredicateColNames(colNameList),
                Utils.toByteBuffer(rowKey));
        List<DColumn> result = new ArrayList<>(columns.size());
        for(ColumnOrSuperColumn column: columns) {
            result.add(new DColumn(column.getColumn().getName(), column.getColumn().getValue()));
        }
        return result;
    } finally {
        returnDBConnection(dbConn);
    }
}
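This variant selects columns by explicit name rather than by range; apart from the predicate, the flow matches Example #1.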
 
Example #3
Source File: CassandraTransaction.java    From Doradus with Apache License 2.0
private static Mutation createMutation(byte[] colName, byte[] colValue, long timestamp) {
    if (colValue == null) {
        colValue = EMPTY_BYTES;
    }
    Column col = new Column();
    col.setName(colName);
    col.setValue(colValue);
    col.setTimestamp(timestamp);
    
    ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
    cosc.setColumn(col);
    
    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(cosc);
    return mutation;
}
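Substituting EMPTY_BYTES for a null value presumably keeps Thrift serialization safe: the generated byte[] setters wrap or copy the value, so a null would throw before the mutation ever reached the server.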
 
Example #4
Source File: ColumnFamilyWideRowRecordReader.java    From Hive-Cassandra with Apache License 2.0
@Override
protected Pair<ByteBuffer, SortedMap<ByteBuffer, IColumn>> computeNext() {
  maybeInit();
  if (rows == null) {
    return endOfData();
  }

  KeySlice ks = rows.get(0);
  SortedMap<ByteBuffer, IColumn> map = new TreeMap<ByteBuffer, IColumn>(comparator);
  for (ColumnOrSuperColumn cosc : ks.columns) {
    IColumn column = unthriftify(cosc);
    map.put(column.name(), column);
  }
  return Pair.create(ks.key, map);
}
 
Example #5
Source File: ThriftCounterAdder.java    From stratio-cassandra with Apache License 2.0
public void run(final ThriftClient client) throws IOException
{
    List<CounterColumn> columns = new ArrayList<>();
    for (ByteBuffer name : select().select(settings.columns.names))
        columns.add(new CounterColumn(name, counteradd.next()));

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (CounterColumn c : columns)
    {
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(cosc));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final ByteBuffer key = getKey();
    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
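Note that CounterColumn carries only a name and a delta, no timestamp: counter increments are commutative and are resolved server-side.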
 
Example #6
Source File: ThriftReader.java    From stratio-cassandra with Apache License 2.0
public void run(final ThriftClient client) throws IOException
{
    final ColumnSelection select = select();
    final ByteBuffer key = getKey();
    final List<ByteBuffer> expect = getColumnValues(select);
    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            List<ColumnOrSuperColumn> row = client.get_slice(key, new ColumnParent(type.table), select.predicate(), settings.command.consistencyLevel);
            if (row == null)
                return false;
            if (expect == null)
                return !row.isEmpty();
            if (row.size() != expect.size())
                return false;
            for (int i = 0 ; i < row.size() ; i++)
                if (!row.get(i).getColumn().bufferForValue().equals(expect.get(i)))
                    return false;
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
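Note that the expected values are computed via getColumnValues before timeWithRetry begins, keeping that work outside the measured operation.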
 
Example #7
Source File: ThriftInserter.java    From stratio-cassandra with Apache License 2.0
public void run(final ThriftClient client) throws IOException
{
    final ByteBuffer key = getKey();
    final List<Column> columns = getColumns();

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (Column c : columns)
    {
        ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(column));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
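The nested map handed to batch_mutate is keyed by row key and then by column family name, so a single call can carry mutations for many rows and tables; this example populates exactly one of each.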
 
Example #8
Source File: ThriftColumnFamilyTest.java    From stratio-cassandra with Apache License 2.0
private String getColumnValue(String ks, String cf, String colName, String key, String validator)
throws AuthenticationException, AuthorizationException, InvalidRequestException, UnavailableException, TimedOutException, TException, NotFoundException, IOException
{
    Cassandra.Client client = getClient();
    client.set_keyspace(ks);

    ByteBuffer key_user_id = ByteBufferUtil.bytes(key);
    ColumnPath cp = new ColumnPath(cf);
    cp.column = ByteBufferUtil.bytes(colName);

    // read
    ColumnOrSuperColumn got = client.get(key_user_id, cp, ConsistencyLevel.ONE);
    return parseType(validator).getString(got.getColumn().value);
}
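Unlike get_slice, a single-column get throws NotFoundException when the column is absent, which is why that exception appears in the method's throws clause.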
 
Example #9
Source File: ColumnFamilyWideRowRecordReader.java    From Hive-Cassandra with Apache License 2.0
private IColumn unthriftify(ColumnOrSuperColumn cosc) {
  if (cosc.counter_column != null) {
    return unthriftifyCounter(cosc.counter_column);
  }
  if (cosc.counter_super_column != null) {
    return unthriftifySuperCounter(cosc.counter_super_column);
  }
  if (cosc.super_column != null) {
    return unthriftifySuper(cosc.super_column);
  }
  assert cosc.column != null;
  return unthriftifySimple(cosc.column);
}
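At most one of the four union fields is non-null, so the order of these checks is a matter of style; the assert documents the invariant that a plain column is the only remaining possibility.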
 
Example #10
Source File: CassandraRecordUtils.java    From hdfs2cass with Apache License 2.0
public static Mutation createMutation(Object name, Object value, long timestamp, int ttl) {
  Column column = new Column();
  column.setName(toByteBuffer(name));
  column.setValue(toByteBuffer(value));
  column.setTimestamp(timestamp);
  if (ttl > 0) {
    column.setTtl(ttl);
  }

  Mutation mutation = new Mutation();
  mutation.column_or_supercolumn = new ColumnOrSuperColumn();
  mutation.column_or_supercolumn.column = column;
  return mutation;
}
 
Example #11
Source File: ThriftByFieldNamesFn.java    From hdfs2cass with Apache License 2.0
private List<Mutation> getMutations(final T input) {
  List<Mutation> mutations = Lists.newArrayList();

  long timestamp = getTimestamp(input);
  Optional<Integer> ttl = getTtl(input);

  for (Schema.Field field : input.getSchema().getFields()) {
    int fieldPos = field.pos();
    if (fieldPos == rowKeyIndex || fieldPos == ttlIndex || fieldPos == timestampIndex) {
      continue;
    }

    Object fieldValue = input.get(fieldPos);

    Column column = new Column();
    column.setName(ByteBufferUtil.bytes(field.name()));
    column.setTimestamp(timestamp);
    if (ttl.isPresent()) {
      column.setTtl(ttl.get());
    }
    column.setValue(CassandraRecordUtils.toByteBuffer(fieldValue));

    Mutation mutation = new Mutation();
    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
    mutation.column_or_supercolumn.column = column;

    mutations.add(mutation);
  }


  return mutations;
}
 
Example #12
Source File: CassandraOutputData.java    From learning-hadoop with Apache License 2.0
/**
 * Adds a Kettle row to a Thrift-based batch (builds the map of keys to
 * mutations).
 * 
 * @param thriftBatch
 *            the map of keys to mutations
 * @param colFamilyName
 *            the name of the column family (table) to insert into
 * @param inputMeta
 *            Kettle input row metadata
 * @param keyIndex
 *            the index of the incoming field to use as the key for
 *            inserting
 * @param row
 *            the Kettle row
 * @param cassandraMeta
 *            metadata on the columns in the Cassandra column family
 *            (table)
 * @param insertFieldsNotInMetaData
 *            true if Kettle fields that are missing from the Cassandra
 *            column family (table) metadata should still be inserted;
 *            irrelevant if the user has opted to have the step update
 *            the Cassandra metadata first for unknown incoming fields
 * @param log
 *            the step's log channel
 * @param isAsIndexColumn
 *            true to write the placeholder value "-" instead of the
 *            field value
 * 
 * @return true if the row was added to the batch
 * 
 * @throws KettleException
 *             if a problem occurs
 */
public static boolean addRowToThriftBatch(
		Map<ByteBuffer, Map<String, List<Mutation>>> thriftBatch,
		String colFamilyName, RowMetaInterface inputMeta, int keyIndex,
		Object[] row, CassandraColumnMetaData cassandraMeta,
		boolean insertFieldsNotInMetaData, LogChannelInterface log,
		boolean isAsIndexColumn) throws KettleException {

	if (!preAddChecks(inputMeta, keyIndex, row, log)) {
		return false;
	}
	ValueMetaInterface keyMeta = inputMeta.getValueMeta(keyIndex);
	ByteBuffer keyBuff = cassandraMeta.kettleValueToByteBuffer(keyMeta,
			row[keyIndex], true);

	Map<String, List<Mutation>> mapCF = thriftBatch.get(keyBuff);
	List<Mutation> mutList = null;

	// check whether we already have mutations for this key in the batch
	if (mapCF != null) {
		mutList = mapCF.get(colFamilyName);
	} else {
		mapCF = new HashMap<String, List<Mutation>>(1);
		mutList = new ArrayList<Mutation>();
	}

	for (int i = 0; i < inputMeta.size(); i++) {
		if (i != keyIndex) {
			ValueMetaInterface colMeta = inputMeta.getValueMeta(i);
			String colName = colMeta.getName();
			if (!cassandraMeta.columnExistsInSchema(colName)
					&& !insertFieldsNotInMetaData) {
				continue;
			}

			// don't insert if null!
			if (colMeta.isNull(row[i])) {
				continue;
			}

			Column col = new Column(
					cassandraMeta.columnNameToByteBuffer(colName));
			if (isAsIndexColumn) {
				col = col.setValue(cassandraMeta.kettleValueToByteBuffer(
						colMeta, "-", false));
			} else {
				col = col.setValue(cassandraMeta.kettleValueToByteBuffer(
						colMeta, row[i], false));
			}
	
			col = col.setTimestamp(System.currentTimeMillis());
			ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
			cosc.setColumn(col);
			Mutation mut = new Mutation();
			mut.setColumn_or_supercolumn(cosc);
			mutList.add(mut);
		}
	}

	// column family name -> mutations
	mapCF.put(colFamilyName, mutList);

	// row key -> column family - > mutations
	thriftBatch.put(keyBuff, mapCF);

	return true;
}
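This method only accumulates mutations into thriftBatch; deciding when the batch is large enough to commit to Cassandra is left to the caller.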
 
Example #13
Source File: CassandraPut.java    From Hive-Cassandra with Apache License 2.0
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
  ConsistencyLevel flevel = getConsistencyLevel(jc);
  int batchMutation = getBatchMutationSize(jc);
  Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();

  Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

  int count = 0;
  // TODO check for counter
  for (CassandraColumn col : columns) {
    Column cassCol = new Column();
    cassCol.setValue(col.getValue());
    cassCol.setTimestamp(col.getTimeStamp());
    cassCol.setName(col.getColumn());

    ColumnOrSuperColumn thisCol = new ColumnOrSuperColumn();
    thisCol.setColumn(cassCol);

    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(thisCol);

    List<Mutation> mutList = maps.get(col.getColumnFamily());
    if (mutList == null) {
      mutList = new ArrayList<Mutation>();
      maps.put(col.getColumnFamily(), mutList);
    }

    mutList.add(mutation);
    count++;

    if (count == batchMutation) {
      mutation_map.put(key, maps);

      commitChanges(keySpace, client, flevel, mutation_map);

      // reset mutation map, maps and count
      mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
      maps = new HashMap<String, List<Mutation>>();
      count = 0;
    }
  }

  if (count > 0) {
    mutation_map.put(key, maps);
    commitChanges(keySpace, client, flevel, mutation_map);
  }
}
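Mutations are committed whenever the accumulated count reaches the configured batch-mutation size, with one final commit for any remainder.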
 
Example #14
Source File: CassandraSuperPut.java    From Hive-Cassandra with Apache License 2.0
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
  ConsistencyLevel flevel = getConsistencyLevel(jc);
  int batchMutation = getBatchMutationSize(jc);
  Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
  Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

  int count = 0;
  for (CassandraPut c : subColumns) {
    List<Column> columns = new ArrayList<Column>();
    for (CassandraColumn col : c.getColumns()) {
      Column cassCol = new Column();
      cassCol.setValue(col.getValue());
      cassCol.setTimestamp(col.getTimeStamp());
      cassCol.setName(col.getColumn());
      columns.add(cassCol);

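      // NOTE: each pass wraps the shared, still-growing 'columns' list in a
      // new SuperColumn, so every mutation added below references the same
      // list; by commit time each mutation for this key carries all columns
      // accumulated so far.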
      ColumnOrSuperColumn thisSuperCol = new ColumnOrSuperColumn();
      thisSuperCol.setSuper_column(new SuperColumn(c.getKey(), columns));

      Mutation mutation = new Mutation();
      mutation.setColumn_or_supercolumn(thisSuperCol);

      List<Mutation> mutList = maps.get(col.getColumnFamily());
      if (mutList == null) {
        mutList = new ArrayList<Mutation>();
        maps.put(col.getColumnFamily(), mutList);
      }

      mutList.add(mutation);

      count++;

      if (count == batchMutation) {
        mutation_map.put(key, maps);

        commitChanges(keySpace, client, flevel, mutation_map);

        // reset mutation map, maps and count
        mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
        maps = new HashMap<String, List<Mutation>>();
        count = 0;
      }

    }
  }

  if (count > 0) {
    mutation_map.put(key, maps);
    commitChanges(keySpace, client, flevel, mutation_map);
  }
}
 
Example #15
Source File: ColumnFamilyWideRowRecordReader.java    From Hive-Cassandra with Apache License 2.0
private void maybeInit() {
  // check if we need another row
  if (rows != null && columnsRead < rowPageSize) {
    columnsRead = 0;
    startToken = partitioner.getTokenFactory().toString(partitioner.getToken(rows.get(0).key));
    predicate.getSlice_range().setStart(startSlicePredicate);
    rows = null;
    prevStartSlice = null;
    totalRead++;
  }

  if (startToken == null) {
    startToken = split.getStartToken();
  } else if (startToken.equals(split.getEndToken()) && rows == null) {
    // reached end of the split
    return;
  }

  KeyRange keyRange = new KeyRange(batchRowCount)
                            .setStart_token(startToken)
                            .setEnd_token(split.getEndToken());
  try {
    rows = client.get_range_slices(new ColumnParent(cfName),
                                           predicate,
                                           keyRange,
                                           consistencyLevel);

    // nothing new? reached the end
    if (rows.isEmpty()) {
      rows = null;
      return;
    }

    // detect infinite loop
    if (prevStartSlice != null && ByteBufferUtil.compareUnsigned(prevStartSlice, predicate.slice_range.start) == 0) {
        rows = null;
        return;
    }

    // prepare for the next slice to be read
    KeySlice row = rows.get(0);

    if (row.getColumnsSize() > 0) {

      ColumnOrSuperColumn cosc = row.getColumns().get(row.getColumnsSize() - 1);

      prevStartSlice = predicate.slice_range.start;

      //prepare next slice
      if (cosc.column != null) {
        predicate.slice_range.start = cosc.column.name;
      }

      if (cosc.super_column != null) {
        predicate.slice_range.start = cosc.super_column.name;
      }

      if (cosc.counter_column != null) {
        predicate.slice_range.start = cosc.counter_column.name;
      }

      if (cosc.counter_super_column != null) {
        predicate.slice_range.start = cosc.counter_super_column.name;
      }

      columnsRead = row.getColumnsSize();

      // If we've filled a whole page, drop the last column: the next slice
      // starts (inclusively) at that column's name, so keeping it here would
      // return it twice.
      if (columnsRead == rowPageSize) {
        row.getColumns().remove(columnsRead - 1);
      }
    } 
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
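The reader pages through a wide row by restarting the slice at the name of the last column returned. Because slice starts are inclusive, the last column of a full page is removed above so it is not emitted twice, and prevStartSlice catches the case where the start fails to advance, which would otherwise loop forever.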