org.apache.cassandra.db.marshal.TypeParser Java Examples

The following examples show how to use org.apache.cassandra.db.marshal.TypeParser. Each example notes its source file, originating project, and license.
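
Before the project examples, here is a minimal standalone sketch of the API. The type strings are illustrative, and the imports assume the Cassandra 2.x package layout (org.apache.cassandra.exceptions) used by the stratio-cassandra examples below.

import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.TypeParser;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.exceptions.SyntaxException;

public class TypeParserSketch {
    public static void main(String[] args) throws SyntaxException, ConfigurationException {
        // Fully qualified marshal class names parse directly.
        AbstractType<?> utf8 = TypeParser.parse("org.apache.cassandra.db.marshal.UTF8Type");

        // Unqualified names resolve against org.apache.cassandra.db.marshal,
        // which is why several examples below can default to plain "BytesType".
        AbstractType<?> bytes = TypeParser.parse("BytesType");

        // Parameterized comparator strings parse recursively.
        AbstractType<?> composite = TypeParser.parse("CompositeType(UTF8Type,Int32Type)");

        System.out.println(utf8 + " / " + bytes + " / " + composite);
    }
}
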
Example #1
Source File: CassandraColumnSerDe.java    From Hive-Cassandra with Apache License 2.0
/**
 * Parses the Cassandra column mapping to resolve each column's validator type.
 * One of the Hive table columns maps to the Cassandra row key, by default the
 * first column.
 *
 * @param columnList a list of column validator types in String format
 * @return a list of Cassandra validator types
 */
private List<AbstractType> parseValidatorType(List<String> columnList)
  throws SerDeException {
  List<AbstractType> types = new ArrayList<AbstractType>();

  for (String str : columnList) {
    if (StringUtils.isBlank(str)) {
      types.add(DEFAULT_VALIDATOR_TYPE);
    } else {
      try {
        types.add(TypeParser.parse(str));
      } catch (Exception e) {
        throw new SerDeException("Invalid Cassandra validator type '" + str + "'");
      }
    }
  }

  return types;
}
 
Example #2
Source File: CassSSTableReducer.java    From aegisthus with Apache License 2.0
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);

    maxRowSize = context.getConfiguration().getLong(Aegisthus.Feature.CONF_MAXCOLSIZE, Long.MAX_VALUE);

    String columnType = context.getConfiguration().get(Aegisthus.Feature.CONF_COLUMNTYPE, "BytesType");
    String rowKeyType = context.getConfiguration().get(Aegisthus.Feature.CONF_KEYTYPE, "BytesType");

    try {
        columnComparator = TypeParser.parse(columnType);
        rowKeyComparator = TypeParser.parse(rowKeyType);
    } catch (SyntaxException | ConfigurationException e) {
        throw new RuntimeException(e);
    }
}
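
The two keys read in setup() come straight from the job configuration. A sketch of the producing side, with illustrative type values (the Aegisthus.Feature constants are the same ones used above):

Configuration conf = new Configuration(); // org.apache.hadoop.conf.Configuration
// Unqualified names work because TypeParser resolves them against
// org.apache.cassandra.db.marshal, matching the "BytesType" defaults above.
conf.set(Aegisthus.Feature.CONF_COLUMNTYPE, "UTF8Type");
conf.set(Aegisthus.Feature.CONF_KEYTYPE, "LongType");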
 
Example #3
Source File: CqlRecordWriter.java    From stratio-cassandra with Apache License 2.0
private AbstractType<?> parseType(String type) throws ConfigurationException
{
    try
    {
        // always treat counters like longs; specifically, CounterColumnType.serialize is not what we need
        if (type != null && type.equals("org.apache.cassandra.db.marshal.CounterColumnType"))
            return LongType.instance;
        return TypeParser.parse(type);
    }
    catch (SyntaxException e)
    {
        throw new ConfigurationException(e.getMessage(), e);
    }
}
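
For context on the special case above: parsing a counter validator string resolves to CounterColumnType, whose serialize (per the comment) is not what the writer needs, so counter values go through LongType instead. A one-line illustration, assuming the usual marshal singleton:

// The unguarded parse would return the CounterColumnType singleton.
AbstractType<?> parsed = TypeParser.parse("org.apache.cassandra.db.marshal.CounterColumnType");
assert parsed == CounterColumnType.instance;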
 
Example #4
Source File: JsonOutputFormat.java    From aegisthus with Apache License 2.0
@SuppressWarnings("unchecked")
private AbstractType<ByteBuffer> getConverter(Configuration conf, String key) {
    String converterType = conf.get(key);
    // Fall back to raw bytes when the looked-up value (not the key) is missing.
    if (Strings.isNullOrEmpty(converterType)) {
        return BytesType.instance;
    }

    try {
        return (AbstractType<ByteBuffer>) TypeParser.parse(converterType);
    } catch (SyntaxException | ConfigurationException e) {
        throw Throwables.propagate(e);
    }
}
 
Example #5
Source File: AegisthusKeySortingComparator.java    From aegisthus with Apache License 2.0
@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    String columnType = conf.get(Aegisthus.Feature.CONF_COLUMNTYPE, "BytesType");
    legacyColumnNameFormatting = conf.getBoolean(Aegisthus.Feature.CONF_LEGACY_COLUMN_NAME_FORMATTING, false);
    sortColumnsByName = conf.getBoolean(Aegisthus.Feature.CONF_SORT_COLUMNS_BY_NAME, false);
    try {
        //noinspection unchecked
        columnNameConverter = (AbstractType<ByteBuffer>) TypeParser.parse(columnType);
    } catch (SyntaxException | ConfigurationException e) {
        throw new RuntimeException(e);
    }
}
 
Example #6
Source File: CassandraRunner.java    From staash with Apache License 2.0
private List<CFMetaData> extractColumnFamily(RequiresColumnFamily rcf) {
  List<CFMetaData> cfms = new ArrayList<CFMetaData>();
  // Check rcf for null before dereferencing it.
  if (rcf != null) {
    logger.debug("RequiresColumnFamily has name: {} for ks: {}", rcf.cfName(), rcf.ksName());
    try {
      cfms.add(new CFMetaData(rcf.ksName(), rcf.cfName(),
              ColumnFamilyType.Standard, TypeParser.parse(rcf.comparator()), null));

    } catch (Exception ex) {
      throw new RuntimeException("unable to create column family for: " + rcf.cfName(), ex);
    }
  }
  return cfms;
}
 
Example #7
Source File: CreateColumnFamilyStatement.java    From stratio-cassandra with Apache License 2.0
/**
 * Returns a CFMetaData instance based on the parameters parsed from this
 * <code>CREATE</code> statement, or defaults where applicable.
 *
 * @param keyspace keyspace to apply this column family to
 * @return a CFMetaData instance corresponding to the values parsed from this statement
 * @throws InvalidRequestException on failure to validate parsed parameters
 */
public CFMetaData getCFMetaData(String keyspace, List<ByteBuffer> variables) throws InvalidRequestException
{
    validate(variables);

    try
    {
        boolean isDense = columns.isEmpty();
        CFMetaData newCFMD = new CFMetaData(keyspace,
                                            name,
                                            ColumnFamilyType.Standard,
                                            CellNames.fromAbstractType(cfProps.getComparator(), isDense));

        if (CFMetaData.DEFAULT_COMPRESSOR != null && cfProps.compressionParameters.isEmpty())
            cfProps.compressionParameters.put(CompressionParameters.SSTABLE_COMPRESSION, CFMetaData.DEFAULT_COMPRESSOR);
        int maxCompactionThreshold = getPropertyInt(CFPropDefs.KW_MAXCOMPACTIONTHRESHOLD, CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD);
        int minCompactionThreshold = getPropertyInt(CFPropDefs.KW_MINCOMPACTIONTHRESHOLD, CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD);
        if (minCompactionThreshold <= 0 || maxCompactionThreshold <= 0)
            throw new ConfigurationException("Disabling compaction by setting compaction thresholds to 0 has been deprecated, set the compaction option 'enabled' to false instead.");

        newCFMD.isDense(isDense)
               .addAllColumnDefinitions(getColumns(newCFMD))
               .comment(cfProps.getProperty(CFPropDefs.KW_COMMENT))
               .readRepairChance(getPropertyDouble(CFPropDefs.KW_READREPAIRCHANCE, CFMetaData.DEFAULT_READ_REPAIR_CHANCE))
               .dcLocalReadRepairChance(getPropertyDouble(CFPropDefs.KW_DCLOCALREADREPAIRCHANCE, CFMetaData.DEFAULT_DCLOCAL_READ_REPAIR_CHANCE))
               .gcGraceSeconds(getPropertyInt(CFPropDefs.KW_GCGRACESECONDS, CFMetaData.DEFAULT_GC_GRACE_SECONDS))
               .defaultValidator(cfProps.getValidator())
               .minCompactionThreshold(minCompactionThreshold)
               .maxCompactionThreshold(maxCompactionThreshold)
               .keyValidator(TypeParser.parse(CFPropDefs.comparators.get(getKeyType())))
               .compactionStrategyClass(cfProps.compactionStrategyClass)
               .compactionStrategyOptions(cfProps.compactionStrategyOptions)
               .compressionParameters(CompressionParameters.create(cfProps.compressionParameters))
               .caching(CachingOptions.fromString(getPropertyString(CFPropDefs.KW_CACHING, CFMetaData.DEFAULT_CACHING_STRATEGY.toString())))
               .speculativeRetry(CFMetaData.SpeculativeRetry.fromString(getPropertyString(CFPropDefs.KW_SPECULATIVE_RETRY, CFMetaData.DEFAULT_SPECULATIVE_RETRY.toString())))
               .bloomFilterFpChance(getPropertyDouble(CFPropDefs.KW_BF_FP_CHANCE, null))
               .memtableFlushPeriod(getPropertyInt(CFPropDefs.KW_MEMTABLE_FLUSH_PERIOD, 0))
               .defaultTimeToLive(getPropertyInt(CFPropDefs.KW_DEFAULT_TIME_TO_LIVE, CFMetaData.DEFAULT_DEFAULT_TIME_TO_LIVE));

        // CQL2 can have null keyAliases
        if (keyAlias != null)
            newCFMD.addColumnDefinition(ColumnDefinition.partitionKeyDef(newCFMD, keyAlias, newCFMD.getKeyValidator(), null));

        return newCFMD.rebuild();
    }
    catch (ConfigurationException | SyntaxException e)
    {
        throw new InvalidRequestException(e.toString());
    }
}
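
Note that the keyValidator line above hands TypeParser whatever string the CFPropDefs.comparators table yields for the key type; parse also accepts nested parameterized strings. An illustrative one-liner:

// Parameterized types parse recursively; ReversedType inverts the wrapped sort order.
AbstractType<?> keyType = TypeParser.parse("ReversedType(TimeUUIDType)");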
 
Example #8
Source File: CFPropDefs.java    From stratio-cassandra with Apache License 2.0
public AbstractType<?> getComparator() throws ConfigurationException, SyntaxException
{
    // Resolve the CQL shorthand (e.g. "text") via the comparators table when
    // known; otherwise hand the raw property string to TypeParser.
    String property = getPropertyString(KW_COMPARATOR, "text");
    String className = comparators.get(property);
    return TypeParser.parse(className != null ? className : property);
}
 
Example #9
Source File: CFPropDefs.java    From stratio-cassandra with Apache License 2.0
public AbstractType<?> getValidator() throws ConfigurationException, SyntaxException
{
    // Same lookup-then-fallback as getComparator, for the default validator.
    String property = getPropertyString(KW_DEFAULTVALIDATION, "text");
    String className = comparators.get(property);
    return TypeParser.parse(className != null ? className : property);
}
 
Example #10
Source File: HiveCassandraStandardColumnInputFormat.java    From Hive-Cassandra with Apache License 2.0
@Override
public RecordReader<BytesWritable, MapWritable> getRecordReader(InputSplit split,
    JobConf jobConf, final Reporter reporter) throws IOException {
  HiveCassandraStandardSplit cassandraSplit = (HiveCassandraStandardSplit) split;

  List<String> columns = AbstractColumnSerDe.parseColumnMapping(cassandraSplit.getColumnMapping());
  isTransposed = AbstractColumnSerDe.isTransposed(columns);


  List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);

  if (columns.size() < readColIDs.size()) {
    throw new IOException("Cannot read more columns than the given table contains.");
  }

  org.apache.cassandra.hadoop.ColumnFamilySplit cfSplit = cassandraSplit.getSplit();
  Job job = new Job(jobConf);

  TaskAttemptContext tac = new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID()) {
    @Override
    public void progress() {
      reporter.progress();
    }
  };

  SlicePredicate predicate = new SlicePredicate();

  if (isTransposed || readColIDs.size() == columns.size() || readColIDs.size() == 0) {
    SliceRange range = new SliceRange();
    AbstractType comparator = BytesType.instance;

    String comparatorType = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_COMPARATOR);
    if (comparatorType != null && !comparatorType.equals("")) {
      try {
        comparator = TypeParser.parse(comparatorType);
      } catch (Exception ex) {
        throw new IOException("Unable to parse comparator type '" + comparatorType + "'", ex);
      }
    }

    String sliceStart = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_START);
    String sliceEnd = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_FINISH);
    String reversed = jobConf.get(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_REVERSED);

    range.setStart(comparator.fromString(sliceStart == null ? "" : sliceStart));
    range.setFinish(comparator.fromString(sliceEnd == null ? "" : sliceEnd));
    range.setReversed(reversed == null ? false : reversed.equals("true"));
    range.setCount(cassandraSplit.getSlicePredicateSize());
    predicate.setSlice_range(range);
  } else {
    int iKey = columns.indexOf(AbstractColumnSerDe.CASSANDRA_KEY_COLUMN);
    predicate.setColumn_names(getColumnNames(iKey, columns, readColIDs));
  }


  try {
    ConfigHelper.setInputColumnFamily(tac.getConfiguration(),
        cassandraSplit.getKeyspace(), cassandraSplit.getColumnFamily());

    ConfigHelper.setInputSlicePredicate(tac.getConfiguration(), predicate);
    ConfigHelper.setRangeBatchSize(tac.getConfiguration(), cassandraSplit.getRangeBatchSize());
    ConfigHelper.setInputRpcPort(tac.getConfiguration(), cassandraSplit.getPort() + "");
    ConfigHelper.setInputInitialAddress(tac.getConfiguration(), cassandraSplit.getHost());
    ConfigHelper.setInputPartitioner(tac.getConfiguration(), cassandraSplit.getPartitioner());
    // Set Split Size
    ConfigHelper.setInputSplitSize(tac.getConfiguration(), cassandraSplit.getSplitSize());

    CassandraHiveRecordReader rr = null;

    if(isTransposed && tac.getConfiguration().getBoolean(AbstractColumnSerDe.CASSANDRA_ENABLE_WIDEROW_ITERATOR, true)) {
      rr = new CassandraHiveRecordReader(new ColumnFamilyWideRowRecordReader(), isTransposed);
    } else {
      rr = new CassandraHiveRecordReader(new ColumnFamilyRecordReader(), isTransposed);
    }
    rr.initialize(cfSplit, tac);

    return rr;

  } catch (Exception ie) {
    throw new IOException(ie);
  }
}
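
The fromString calls above are the inverse of getString: they turn the human-readable slice bounds from the job configuration into the ByteBuffer encoding that the Thrift SliceRange expects. A short sketch with assumed values:

import java.nio.ByteBuffer;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.TypeParser;

class SliceBoundSketch {
    static void bounds() throws Exception {
        AbstractType<?> comparator = TypeParser.parse("LongType");
        // fromString converts the readable bound into the comparator's binary form.
        ByteBuffer start = comparator.fromString("100");
        // An empty source yields an empty buffer, which SliceRange treats as an
        // unbounded end; the null-handling defaults above rely on this.
        ByteBuffer finish = comparator.fromString("");
    }
}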