org.apache.cassandra.dht.RandomPartitioner Java Examples

The following examples show how to use org.apache.cassandra.dht.RandomPartitioner. They are drawn from open-source projects; the source file, project, and license are noted above each example.
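As background for the examples below, a minimal standalone sketch of what RandomPartitioner does: it MD5-hashes each key into a token between 0 and 2^127 - 1, and decorateKey pairs that token with the original key bytes. This sketch assumes the Cassandra 2.x API that these projects build against (getToken returning a BigIntegerToken whose public token field is a BigInteger); it is an illustration, not code from any of the projects.

import java.math.BigInteger;
import java.nio.ByteBuffer;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.utils.ByteBufferUtil;

public class RandomPartitionerSketch
{
    public static void main(String[] args)
    {
        RandomPartitioner p = new RandomPartitioner();

        // The token is derived from the MD5 hash of the key bytes
        ByteBuffer key = ByteBufferUtil.bytes("some-row-key");
        BigInteger token = p.getToken(key).token;
        System.out.println("token: " + token);

        // A DecoratedKey pairs the token with the raw key bytes; SSTables
        // and index summaries order keys by token, not by the bytes themselves
        DecoratedKey dk = p.decorateKey(key);
        System.out.println("decorated key: " + dk);
    }
}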
Example #1
Source File: SerializationsTest.java    From stratio-cassandra with Apache License 2.0
private void testValidationCompleteWrite() throws IOException
{
    IPartitioner p = new RandomPartitioner();
    // empty validation
    MerkleTree mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, 15));
    Validator v0 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c0 = new ValidationComplete(DESC, mt);

    // validation with a tree
    mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    for (int i = 0; i < 10; i++)
        mt.split(p.getRandomToken());
    Validator v1 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c1 = new ValidationComplete(DESC, mt);

    // validation failed
    ValidationComplete c3 = new ValidationComplete(DESC);

    testRepairMessageWrite("service.ValidationComplete.bin", c0, c1, c3);
}
 
Example #2
Source File: IndexSummaryTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testAddEmptyKey() throws Exception
{
    IPartitioner p = new RandomPartitioner();
    try (IndexSummaryBuilder builder = new IndexSummaryBuilder(1, 1, BASE_SAMPLING_LEVEL))
    {
        builder.maybeAddEntry(p.decorateKey(ByteBufferUtil.EMPTY_BYTE_BUFFER), 0);
        IndexSummary summary = builder.build(p);
        assertEquals(1, summary.size());
        assertEquals(0, summary.getPosition(0));
        assertArrayEquals(new byte[0], summary.getKey(0));

        DataOutputBuffer dos = new DataOutputBuffer();
        IndexSummary.serializer.serialize(summary, dos, false);
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
        IndexSummary loaded = IndexSummary.serializer.deserialize(dis, p, false, 1, 1);

        assertEquals(1, loaded.size());
        assertEquals(summary.getPosition(0), loaded.getPosition(0));
        // compare against the deserialized summary, not the original with itself
        assertArrayEquals(summary.getKey(0), loaded.getKey(0));
        summary.close();
        loaded.close();
    }
}
 
Example #3
Source File: VerifyRandomPartitioner.java    From emodb with Apache License 2.0
@Override
public void start() throws Exception {
    // Not using the RandomPartitioner could lead to hotspots in the Cassandra ring.
    _keyspace.warnIfPartitionerMismatch(RandomPartitioner.class);
}
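The comment explains the rationale: an order-preserving partitioner maps adjacent keys to adjacent tokens, so sequential workloads concentrate on a few nodes, whereas RandomPartitioner's MD5 hashing scatters them around the ring. A hypothetical snippet (not emodb code) that makes the scattering visible:

// Hypothetical illustration, not emodb code: consecutive keys receive
// widely scattered MD5 tokens under RandomPartitioner, which is what
// the partitioner-mismatch check above is protecting.
RandomPartitioner p = new RandomPartitioner();
for (int i = 0; i < 3; i++)
    System.out.println("user" + i + " -> " + p.getToken(ByteBufferUtil.bytes("user" + i)).token);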
 
Example #4
Source File: ThriftRangeUtilsTest.java    From deep-spark with Apache License 2.0
@Test
public void testFetchSortedTokensWithRandomPartitioner() {
    testStringComparableConversion(new RandomPartitioner());
}
 
Example #5
Source File: ThriftRangeUtilsTest.java    From deep-spark with Apache License 2.0
@Test
public void testDeepTokenRangesWithRandomPartitioner() {
    testDeepTokenRanges(new RandomPartitioner(), BigInteger.valueOf(10), BigInteger.valueOf(20));
}
 
Example #6
Source File: MetadataSerializerTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testSerialization() throws IOException
{
    EstimatedHistogram rowSizes = new EstimatedHistogram(new long[] { 1L, 2L },
                                                         new long[] { 3L, 4L, 5L });
    EstimatedHistogram columnCounts = new EstimatedHistogram(new long[] { 6L, 7L },
                                                             new long[] { 8L, 9L, 10L });
    ReplayPosition rp = new ReplayPosition(11L, 12);
    long minTimestamp = 2162517136L;
    long maxTimestamp = 4162517136L;

    MetadataCollector collector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance))
                                                  .estimatedRowSize(rowSizes)
                                                  .estimatedColumnCount(columnCounts)
                                                  .replayPosition(rp);
    collector.updateMinTimestamp(minTimestamp);
    collector.updateMaxTimestamp(maxTimestamp);

    Set<Integer> ancestors = Sets.newHashSet(1, 2, 3, 4);
    for (int i : ancestors)
        collector.addAncestor(i);

    String partitioner = RandomPartitioner.class.getCanonicalName();
    double bfFpChance = 0.1;
    Map<MetadataType, MetadataComponent> originalMetadata = collector.finalizeMetadata(partitioner, bfFpChance, 0);

    MetadataSerializer serializer = new MetadataSerializer();
    // Serialize to tmp file
    File statsFile = File.createTempFile(Component.STATS.name, null);
    try (DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(statsFile)))
    {
        serializer.serialize(originalMetadata, out);
    }

    Descriptor desc = new Descriptor(Descriptor.Version.CURRENT, statsFile.getParentFile(), "", "", 0, Descriptor.Type.FINAL);
    try (RandomAccessReader in = RandomAccessReader.open(statsFile))
    {
        Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));

        for (MetadataType type : MetadataType.values())
        {
            assertEquals(originalMetadata.get(type), deserialized.get(type));
        }
    }
}
 
Example #7
Source File: SSTableRecordReader.java    From hadoop-sstable with Apache License 2.0
// The partitioner used to decorate keys while reading SSTables offline;
// it must match the partitioner of the cluster that wrote the SSTables,
// here hard-coded to RandomPartitioner.
protected AbstractPartitioner getPartitioner() {
    return new RandomPartitioner();
}
 
Example #8
Source File: CassandraParams.java    From hdfs2cass with Apache License 2.0
public GroupingOptions createGroupingOptions() {
  logger.info("GroupingOptions.numReducers: " + this.getReducers());
  GroupingOptions.Builder builder = GroupingOptions.builder()
      .partitionerClass(CassandraPartitioner.class)
      .sortComparatorClass(CassandraKeyComparator.class)
      .numReducers(this.getReducers());

  final BigInteger maxToken;
  final BigInteger minToken;
  switch (clusterInfo.getPartitionerClass()) {
    case "org.apache.cassandra.dht.RandomPartitioner":
      maxToken = RandomPartitioner.MAXIMUM.subtract(BigInteger.ONE);
      minToken = RandomPartitioner.ZERO;
      break;
    case "org.apache.cassandra.dht.Murmur3Partitioner":
      maxToken = BigInteger.valueOf(Murmur3Partitioner.MAXIMUM);
      minToken = BigInteger.valueOf(Murmur3Partitioner.MINIMUM.token);
      break;
    default:
      throw new IllegalArgumentException("Unknown partitioner class: " + clusterInfo.getPartitionerClass());
  }

  // Ceiling-divide the ring size by the reducer count so that
  // numReducers * rangePerReducer covers the entire token range.
  final BigInteger[] rangeWidth = maxToken
      .subtract(minToken)
      .add(BigInteger.ONE)
      .divideAndRemainder(BigInteger.valueOf(this.getReducers()));
  if (!rangeWidth[1].equals(BigInteger.ZERO)) {
    rangeWidth[0] = rangeWidth[0].add(BigInteger.ONE);
  }
  BigInteger rangePerReducer = rangeWidth[0];

  ArrayList<Integer> reducerList = new ArrayList<>(this.getReducers());
  for (int i = 0; i < this.getReducers(); i++) {
    reducerList.add(i);
  }

  // Randomize which reducer ends up owning which token range.
  Collections.shuffle(reducerList, new Random());

  builder.conf(SCRUB_CASSANDRACLUSTER_PARTITIONER_CONFIG, clusterInfo.getPartitionerClass());
  builder.conf(SCRUB_CASSANDRACLUSTER_RANGE_PER_REDUCER_CONFIG, rangePerReducer.toString());
  builder.conf(SCRUB_CASSANDRACLUSTER_REDUCERS_CONFIG, StringUtils.join(reducerList, ","));
  if (this.getDistributeRandomly()) {
    builder.conf(SCRUB_CASSANDRACLUSTER_DISTRIBUTE_RANDOMLY_CONFIG, Boolean.TRUE.toString());
  }

  return builder.build();
}
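The divideAndRemainder block above is a ceiling division: rounding rangePerReducer up whenever there is a remainder guarantees that numReducers * rangePerReducer covers the whole ring. A hypothetical helper (not part of hdfs2cass) showing the bucketing this enables:

// Hypothetical helper, not hdfs2cass code: with the ceiling-divided
// rangePerReducer computed above, every token in [minToken, maxToken]
// maps to a bucket in [0, numReducers).
static int reducerFor(BigInteger token, BigInteger minToken, BigInteger rangePerReducer) {
  return token.subtract(minToken).divide(rangePerReducer).intValue();
}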