com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException Java Examples

The following examples show how to use com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException, the exception the DataStax Java driver throws when the server rejects a query because of an invalid configuration (for example, unsupported table or keyspace options). Each example is taken from an open-source project; the source file and license are noted above the code.
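Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of where this exception typically appears: executing a schema statement whose options the server rejects. The contact point, keyspace name, and the exact statement are illustrative assumptions; any statement the server rejects with a configuration error surfaces the same exception.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException;

public class InvalidConfigurationExample {
    public static void main(String[] args) {
        // hypothetical local contact point
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect()) {
            try {
                // a replication map without a 'class' is typically rejected by the server
                // as a configuration error, which the driver surfaces as
                // InvalidConfigurationInQueryException
                session.execute("create keyspace demo_ks with replication ="
                        + " {'replication_factor': 1}");
            } catch (InvalidConfigurationInQueryException e) {
                System.err.println("server rejected the configuration: " + e.getMessage());
            }
        }
    }
}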
Example #1
Source File: ErrorResultIntegrationTest.java    From simulacron with Apache License 2.0
@Test
public void testShouldReturnConfigurationError() throws Exception {
  String message = "This is a config error";
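  // prime the simulated server to respond to the test query with a configuration error,
  // then verify the driver surfaces it as InvalidConfigurationInQueryException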
  server.prime(when(query).then(configurationError(message)));

  thrown.expect(InvalidConfigurationInQueryException.class);
  thrown.expectMessage(endsWith(message));
  query();
}
 
Example #2
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void revertCompressionChunkLength() throws Exception {
    try {
        // try with compression options for Cassandra 3.x
        // see https://docs.datastax.com/en/cql/3.3/cql/cql_reference/compressSubprop.html
        session.updateSchemaWithRetry("alter table trace_entry with compression = {'class':"
                + " 'org.apache.cassandra.io.compress.LZ4Compressor', 'chunk_length_kb' :"
                + " 64};");
    } catch (InvalidConfigurationInQueryException e) {
        logger.debug(e.getMessage(), e);
        // try with compression options for Cassandra 2.x
        // see https://docs.datastax.com/en/cql/3.1/cql/cql_reference/compressSubprop.html
        session.updateSchemaWithRetry("alter table trace_entry with compression"
                + " = {'sstable_compression': 'SnappyCompressor', 'chunk_length_kb' : 64};");
    }
}
 
Example #3
Source File: CassandraCqlMapState.java    From storm-cassandra-cql with Apache License 2.0
protected void checkCassandraException(Exception e) {
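    // count the failure, then rethrow: recognized Cassandra/driver exceptions are wrapped
    // in Storm's ReportedFailedException, anything else is rethrown as a RuntimeException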
    _mexceptions.incr();
    if (e instanceof AlreadyExistsException ||
            e instanceof AuthenticationException ||
            e instanceof DriverException ||
            e instanceof DriverInternalError ||
            e instanceof InvalidConfigurationInQueryException ||
            e instanceof InvalidQueryException ||
            e instanceof InvalidTypeException ||
            e instanceof QueryExecutionException ||
            e instanceof QueryValidationException ||
            e instanceof ReadTimeoutException ||
            e instanceof SyntaxError ||
            e instanceof TraceRetrievalException ||
            e instanceof TruncateException ||
            e instanceof UnauthorizedException ||
            e instanceof UnavailableException ||
            e instanceof WriteTimeoutException ||
            e instanceof ReadFailureException ||
            e instanceof WriteFailureException ||
            e instanceof FunctionExecutionException) {
        throw new ReportedFailedException(e);
    } else {
        throw new RuntimeException(e);
    }
}
 
Example #4
Source File: Session.java    From glowroot with Apache License 2.0
public void createTableWithTWCS(String createTableQuery, int expirationHours,
        boolean useAndInsteadOfWith, boolean fallbackToSTCS) throws InterruptedException {
    // as long as gc_grace_seconds is less than TTL, then tombstones can be collected
    // immediately (https://issues.apache.org/jira/browse/CASSANDRA-4917)
    //
    // not using gc_grace_seconds of 0 since that disables hinted handoff
    // (http://www.uberobert.com/cassandra_gc_grace_disables_hinted_handoff)
    //
    // it seems any value over max_hint_window_in_ms (which defaults to 3 hours) is good
    long gcGraceSeconds = HOURS.toSeconds(4);

    // using unchecked_tombstone_compaction=true for better tombstone purging
    // see http://thelastpickle.com/blog/2016/12/08/TWCS-part1.html
    String term = useAndInsteadOfWith ? "and" : "with";
    try {
        // using small min_sstable_size to avoid scenario where small sstables get written and
        // continually merged with "large" (but under default min_sstable_size of 50mb) sstable,
        // essentially recompacting the data in that "large" sstable over and over until it
        // finally reaches the default min_sstable_size of 50mb
        //
        // it's ok if a few smaller sstables don't get compacted due to reduced min_sstable_size
        // since major compaction is never too far away at the end of the window
        // bucket_high is increased a bit to compensate for lower min_sstable_size so that worst
        // case number of sstables will be three 5mb sstables, three 10mb sstables, three 20mb
        // sstables, three 40mb sstables, etc
        createTableWithTracking(createTableQuery + " " + term + " "
                + getTwcsCompactionClause(expirationHours) + " and gc_grace_seconds = "
                + gcGraceSeconds);
    } catch (InvalidConfigurationInQueryException e) {
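        // the server rejected the TWCS compaction options (e.g. on a Cassandra version
        // without TimeWindowCompactionStrategy), so fall back to STCS or DTCS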
        logger.debug(e.getMessage(), e);
        if (fallbackToSTCS) {
            createTableWithTracking(createTableQuery + " " + term + " compaction = { 'class' :"
                    + " 'SizeTieredCompactionStrategy', 'unchecked_tombstone_compaction' :"
                    + " true } and gc_grace_seconds = " + gcGraceSeconds);
        } else {
            createTableWithTracking(createTableQuery + " " + term + " compaction = { 'class' :"
                    + " 'DateTieredCompactionStrategy', 'unchecked_tombstone_compaction' :"
                    + " true } and gc_grace_seconds = " + gcGraceSeconds);
        }
    }
}