org.apache.cassandra.io.sstable.Component Java Examples

The following examples show how to use org.apache.cassandra.io.sstable.Component. Each example lists its source file and the project and license it was taken from.
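Component identifies one of the physical files that together make up an sstable (Data.db, Index.db, Filter.db, Statistics.db, and so on). The recurring pattern in the examples below is pairing a Component with a Descriptor to resolve a concrete file path. Here is a minimal sketch of that pattern; the sstable path is hypothetical, and the imports shown are the ones the snippets below leave implicit:

import java.io.File;

import org.apache.cassandra.io.sstable.Component;
import org.apache.cassandra.io.sstable.Descriptor;

public class ComponentPathExample
{
    public static void main(String[] args)
    {
        // parse a hypothetical sstable data file path back into a Descriptor
        Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks/cf/ks-cf-ka-1-Data.db");

        // resolve sibling component files belonging to the same sstable
        String dataPath  = desc.filenameFor(Component.DATA);
        String indexPath = desc.filenameFor(Component.PRIMARY_INDEX);
        String statsPath = desc.filenameFor(Component.STATS);

        System.out.println(new File(dataPath).exists() ? "data file present" : "data file missing");
        System.out.println(indexPath);
        System.out.println(statsPath);
    }
}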
Example #1
Source File: ActiveRepairService.java    From stratio-cassandra with Apache License 2.0
public synchronized Refs<SSTableReader> getAndReferenceSSTables(UUID cfId)
{
    Set<SSTableReader> sstables = sstableMap.get(cfId);
    Iterator<SSTableReader> sstableIterator = sstables.iterator();
    ImmutableMap.Builder<SSTableReader, Ref<SSTableReader>> references = ImmutableMap.builder();
    while (sstableIterator.hasNext())
    {
        SSTableReader sstable = sstableIterator.next();
        if (!new File(sstable.descriptor.filenameFor(Component.DATA)).exists())
        {
            sstableIterator.remove();
        }
        else
        {
            Ref<SSTableReader> ref = sstable.tryRef();
            if (ref == null)
                sstableIterator.remove();
            else
                references.put(sstable, ref);
        }
    }
    return new Refs<>(references.build());
}
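A hedged sketch of how a caller might consume the returned Refs; session stands in for whatever object declares the method, and cfId for the column family's UUID. Every reference acquired by tryRef() above must eventually be released, otherwise the sstables can never be cleaned up:

Refs<SSTableReader> refs = session.getAndReferenceSSTables(cfId);
try
{
    for (SSTableReader sstable : refs)
        System.out.println(sstable.descriptor.filenameFor(Component.DATA));
}
finally
{
    refs.release(); // releases every Ref acquired above
}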
 
Example #2
Source File: DataIntegrityMetadata.java    From stratio-cassandra with Apache License 2.0
public void writeFullChecksum(Descriptor descriptor)
{
    File outFile = new File(descriptor.filenameFor(Component.DIGEST));
    BufferedWriter out = null;
    try
    {
        out = Files.newBufferedWriter(outFile.toPath(), Charsets.UTF_8);
        out.write(String.valueOf(fullChecksum.getValue()));
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, outFile);
    }
    finally
    {
        FileUtils.closeQuietly(out);
    }
}
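Since the digest file written above holds the checksum as a plain decimal string, reading it back for verification can be sketched with little more than the JDK. This sketch assumes an Adler32 full checksum (see Example #11 for the version-dependent choice) and a file small enough to read whole; error handling is elided:

long expected = Long.parseLong(new String(
        Files.readAllBytes(new File(descriptor.filenameFor(Component.DIGEST)).toPath()),
        Charsets.UTF_8).trim());

Adler32 actual = new Adler32();
actual.update(Files.readAllBytes(new File(descriptor.filenameFor(Component.DATA)).toPath()));

if (expected != actual.getValue())
    throw new IOException("Digest mismatch for " + descriptor.filenameFor(Component.DATA));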
 
Example #3
Source File: MetadataCollector.java    From stratio-cassandra with Apache License 2.0
public MetadataCollector(Collection<SSTableReader> sstables, CellNameType columnNameComparator, int level)
{
    this(columnNameComparator);

    replayPosition(ReplayPosition.getReplayPosition(sstables));
    sstableLevel(level);
    // Get the max timestamp of the pre-compacted sstables
    // and add the generations of live ancestors
    for (SSTableReader sstable : sstables)
    {
        addAncestor(sstable.descriptor.generation);
        for (Integer i : sstable.getAncestors())
            if (new File(sstable.descriptor.withGeneration(i).filenameFor(Component.DATA)).exists())
                addAncestor(i);
    }
}
 
Example #4
Source File: MetadataSerializer.java    From stratio-cassandra with Apache License 2.0
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.debug("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        logger.debug("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile))
        {
            components = deserialize(descriptor, r, types);
        }
    }
    return components;
}
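When only one metadata type is needed, the same API can be called with a narrower EnumSet. A minimal sketch, mirroring the pattern the tools in Examples #13 and #20 rely on:

MetadataSerializer serializer = new MetadataSerializer();
Map<MetadataType, MetadataComponent> components =
        serializer.deserialize(descriptor, EnumSet.of(MetadataType.STATS));
StatsMetadata stats = (StatsMetadata) components.get(MetadataType.STATS);
System.out.println("sstable level: " + stats.sstableLevel);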
 
Example #5
Source File: Compact.java    From sstable-tools with Apache License 2.0
public void run() {
    try {
        Descriptor desc = new Descriptor(SSTableFormat.Type.BIG.info.getLatestVersion(),
                new File("."),
                "keyspace",
                "table",
                0,
                SSTableFormat.Type.BIG,
                Component.digestFor(BigFormat.latestVersion.uncompressedChecksumType()));

        SSTableTxnWriter out = SSTableTxnWriter.create(metadata,
                desc,
                0,
                ActiveRepairService.UNREPAIRED_SSTABLE,
                0,
                SerializationHeader.make(metadata, sstables),
                Collections.emptySet());

        System.out.println("Merging " + sstables.size() + " sstables to " + desc.filenameFor(Component.DATA));

        UnfilteredPartitionIterator merged =
                UnfilteredPartitionIterators.mergeLazily(
                        sstables.stream()
                                .map(SSTableReader::getScanner)
                                .collect(Collectors.toList()),
                        FBUtilities.nowInSeconds());
        while (merged.hasNext()) {
            out.append(merged.next());
        }
        out.finish(false);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example #6
Source File: ColumnIndex.java    From sasi with Apache License 2.0
public ColumnIndex(AbstractType<?> keyValidator, ColumnDefinition column, AbstractType<?> comparator)
{
    this.column = column;
    this.comparator = comparator;
    this.mode = IndexMode.getMode(column);
    this.tracker = new DataTracker(keyValidator, this);
    this.component = new Component(Component.Type.SECONDARY_INDEX, String.format(FILE_NAME_FORMAT, column.getIndexName()));
}
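The component built above is what ties the index file to its sstable: Component's (Type, name) constructor allows arbitrary per-sstable files under the SECONDARY_INDEX type. A sketch of resolving such an index file for a given sstable; the "SI_%s.db" format and index name here are illustrative stand-ins for SASI's FILE_NAME_FORMAT:

// hypothetical name format; SASI defines its own FILE_NAME_FORMAT constant
Component component = new Component(Component.Type.SECONDARY_INDEX,
                                    String.format("SI_%s.db", "my_index"));
String indexPath = sstable.descriptor.filenameFor(component);
System.out.println("index file for this sstable: " + indexPath);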
 
Example #7
Source File: DirectoriesTest.java    From stratio-cassandra with Apache License 2.0
private static void createFakeSSTable(File dir, String cf, int gen, boolean temp, List<File> addTo) throws IOException
{
    Descriptor desc = new Descriptor(dir, KS, cf, gen, temp ? Descriptor.Type.TEMP : Descriptor.Type.FINAL);
    for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER })
    {
        File f = new File(desc.filenameFor(c));
        f.createNewFile();
        addTo.add(f);
    }
}
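A usage sketch for the helper above; the directory is hypothetical:

List<File> created = new ArrayList<>();
createFakeSSTable(new File("build/test/cassandra/data"), "Standard1", 1, false, created);
// created now holds the empty Data.db, Index.db and Filter.db files
for (File f : created)
    System.out.println(f.getAbsolutePath());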
 
Example #8
Source File: ColumnFamilyStoreTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testBackupAfterFlush() throws Throwable
{
    ColumnFamilyStore cfs = insertKey1Key2();

    for (int version = 1; version <= 2; ++version)
    {
        Descriptor existing = new Descriptor(cfs.directories.getDirectoryForNewSSTables(), "Keyspace2", "Standard1", version, Descriptor.Type.FINAL);
        Descriptor desc = new Descriptor(Directories.getBackupsDirectory(existing), "Keyspace2", "Standard1", version, Descriptor.Type.FINAL);
        for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.STATS })
            assertTrue("can not find backedup file:" + desc.filenameFor(c), new File(desc.filenameFor(c)).exists());
    }
}
 
Example #9
Source File: DataIntegrityMetadata.java    From stratio-cassandra with Apache License 2.0
public void validate(byte[] bytes, int start, int end) throws IOException
{
    checksum.update(bytes, start, end);
    int current = (int) checksum.getValue();
    checksum.reset();
    int actual = reader.readInt();
    if (current != actual)
        throw new IOException("Corrupted SSTable : " + descriptor.filenameFor(Component.DATA));
}
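A sketch of driving this validator over an entire data file, one chunk at a time. chunkSize comes from the CRC file header (see Example #11); plain java.io is used here for brevity, whereas real callers read exact chunk-sized blocks:

ChecksumValidator validator = DataIntegrityMetadata.checksumValidator(descriptor);
byte[] chunk = new byte[validator.chunkSize];
try (RandomAccessFile data = new RandomAccessFile(descriptor.filenameFor(Component.DATA), "r"))
{
    int read;
    while ((read = data.read(chunk)) > 0)
        validator.validate(chunk, 0, read); // throws IOException on a corrupt chunk
}
finally
{
    FileUtils.closeQuietly(validator);
}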
 
Example #10
Source File: StreamReader.java    From stratio-cassandra with Apache License 2.0
protected SSTableWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt) throws IOException
{
    Directories.DataDirectory localDir = cfs.directories.getWriteableLocation(totalSize);
    if (localDir == null)
        throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
    desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));

    return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys, repairedAt);
}
 
Example #11
Source File: DataIntegrityMetadata.java    From stratio-cassandra with Apache License 2.0
public ChecksumValidator(Descriptor descriptor) throws IOException
{
    this.descriptor = descriptor;
    checksum = descriptor.version.hasAllAdlerChecksums ? new Adler32() : new PureJavaCrc32();
    reader = RandomAccessReader.open(new File(descriptor.filenameFor(Component.CRC)));
    chunkSize = reader.readInt();
}
 
Example #12
Source File: MetadataSerializer.java    From stratio-cassandra with Apache License 2.0
private void rewriteSSTableMetadata(Descriptor descriptor, Map<MetadataType, MetadataComponent> currentComponents) throws IOException
{
    Descriptor tmpDescriptor = descriptor.asType(Descriptor.Type.TEMP);

    try (DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(tmpDescriptor.filenameFor(Component.STATS))))
    {
        serialize(currentComponents, out);
        out.flush();
    }
    // we can't move a file on top of another file on Windows:
    if (FBUtilities.isWindows())
        FileUtils.delete(descriptor.filenameFor(Component.STATS));
    FileUtils.renameWithConfirm(tmpDescriptor.filenameFor(Component.STATS), descriptor.filenameFor(Component.STATS));

}
 
Example #13
Source File: MetadataSerializer.java    From stratio-cassandra with Apache License 2.0
public void mutateLevel(Descriptor descriptor, int newLevel) throws IOException
{
    logger.debug("Mutating {} to level {}", descriptor.filenameFor(Component.STATS), newLevel);
    Map<MetadataType, MetadataComponent> currentComponents = deserialize(descriptor, EnumSet.allOf(MetadataType.class));
    StatsMetadata stats = (StatsMetadata) currentComponents.remove(MetadataType.STATS);
    // mutate level
    currentComponents.put(MetadataType.STATS, stats.mutateLevel(newLevel));
    rewriteSSTableMetadata(descriptor, currentComponents);
}
 
Example #14
Source File: MetadataSerializer.java    From stratio-cassandra with Apache License 2.0
public void mutateRepairedAt(Descriptor descriptor, long newRepairedAt) throws IOException
{
    logger.debug("Mutating {} to repairedAt time {}", descriptor.filenameFor(Component.STATS), newRepairedAt);
    Map<MetadataType, MetadataComponent> currentComponents = deserialize(descriptor, EnumSet.allOf(MetadataType.class));
    StatsMetadata stats = (StatsMetadata) currentComponents.remove(MetadataType.STATS);
    // mutate repairedAt
    currentComponents.put(MetadataType.STATS, stats.mutateRepairedAt(newRepairedAt));
    rewriteSSTableMetadata(descriptor, currentComponents);
}
 
Example #15
Source File: MetadataSerializerTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testSerialization() throws IOException
{
    EstimatedHistogram rowSizes = new EstimatedHistogram(new long[] { 1L, 2L },
                                                         new long[] { 3L, 4L, 5L });
    EstimatedHistogram columnCounts = new EstimatedHistogram(new long[] { 6L, 7L },
                                                             new long[] { 8L, 9L, 10L });
    ReplayPosition rp = new ReplayPosition(11L, 12);
    long minTimestamp = 2162517136L;
    long maxTimestamp = 4162517136L;

    MetadataCollector collector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance))
                                                  .estimatedRowSize(rowSizes)
                                                  .estimatedColumnCount(columnCounts)
                                                  .replayPosition(rp);
    collector.updateMinTimestamp(minTimestamp);
    collector.updateMaxTimestamp(maxTimestamp);

    Set<Integer> ancestors = Sets.newHashSet(1, 2, 3, 4);
    for (int i : ancestors)
        collector.addAncestor(i);

    String partitioner = RandomPartitioner.class.getCanonicalName();
    double bfFpChance = 0.1;
    Map<MetadataType, MetadataComponent> originalMetadata = collector.finalizeMetadata(partitioner, bfFpChance, 0);

    MetadataSerializer serializer = new MetadataSerializer();
    // Serialize to tmp file
    File statsFile = File.createTempFile(Component.STATS.name, null);
    try (DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(statsFile)))
    {
        serializer.serialize(originalMetadata, out);
    }

    Descriptor desc = new Descriptor(Descriptor.Version.CURRENT, statsFile.getParentFile(), "", "", 0, Descriptor.Type.FINAL);
    try (RandomAccessReader in = RandomAccessReader.open(statsFile))
    {
        Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));

        for (MetadataType type : MetadataType.values())
        {
            assertEquals(originalMetadata.get(type), deserialized.get(type));
        }
    }
}
 
Example #16
Source File: ColumnFamilyStoreTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testRemoveUnfinishedCompactionLeftovers() throws Throwable
{
    String ks = "Keyspace1";
    String cf = "Standard3"; // should be empty

    final CFMetaData cfmeta = Schema.instance.getCFMetaData(ks, cf);
    Directories dir = new Directories(cfmeta);
    ByteBuffer key = bytes("key");

    // 1st sstable
    SSTableSimpleWriter writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(), cfmeta, StorageService.getPartitioner());
    writer.newRow(key);
    writer.addColumn(bytes("col"), bytes("val"), 1);
    writer.close();

    Map<Descriptor, Set<Component>> sstables = dir.sstableLister().list();
    assertEquals(1, sstables.size());

    Map.Entry<Descriptor, Set<Component>> sstableToOpen = sstables.entrySet().iterator().next();
    final SSTableReader sstable1 = SSTableReader.open(sstableToOpen.getKey());

    // simulate incomplete compaction
    writer = new SSTableSimpleWriter(dir.getDirectoryForNewSSTables(),
                                     cfmeta, StorageService.getPartitioner())
    {
        protected SSTableWriter getWriter()
        {
            MetadataCollector collector = new MetadataCollector(cfmeta.comparator);
            collector.addAncestor(sstable1.descriptor.generation); // add ancestor from previously written sstable
            return new SSTableWriter(makeFilename(directory, metadata.ksName, metadata.cfName),
                                     0,
                                     ActiveRepairService.UNREPAIRED_SSTABLE,
                                     metadata,
                                     StorageService.getPartitioner(),
                                     collector);
        }
    };
    writer.newRow(key);
    writer.addColumn(bytes("col"), bytes("val"), 1);
    writer.close();

    // should have 2 sstables now
    sstables = dir.sstableLister().list();
    assertEquals(2, sstables.size());

    SSTableReader sstable2 = SSTableReader.open(sstable1.descriptor);
    UUID compactionTaskID = SystemKeyspace.startCompaction(
            Keyspace.open(ks).getColumnFamilyStore(cf),
            Collections.singleton(sstable2));

    Map<Integer, UUID> unfinishedCompaction = new HashMap<>();
    unfinishedCompaction.put(sstable1.descriptor.generation, compactionTaskID);
    ColumnFamilyStore.removeUnfinishedCompactionLeftovers(cfmeta, unfinishedCompaction);

    // the 2nd sstable should be removed; only the 1st sstable should remain
    sstables = dir.sstableLister().list();
    assertEquals(1, sstables.size());
    assertTrue(sstables.containsKey(sstable1.descriptor));

    Map<Pair<String, String>, Map<Integer, UUID>> unfinished = SystemKeyspace.getUnfinishedCompactions();
    assertTrue(unfinished.isEmpty());
    sstable1.selfRef().release();
    sstable2.selfRef().release();
}
 
Example #17
Source File: LegacyMetadataSerializer.java    From stratio-cassandra with Apache License 2.0
/**
 * The legacy serializer deserializes all components regardless of which types are specified.
 */
@Override
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components = Maps.newHashMap();

    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists() && types.contains(MetadataType.STATS))
    {
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(statsFile))))
        {
            EstimatedHistogram rowSizes = EstimatedHistogram.serializer.deserialize(in);
            EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
            ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in);
            long minTimestamp = in.readLong();
            long maxTimestamp = in.readLong();
            int maxLocalDeletionTime = in.readInt();
            double bloomFilterFPChance = in.readDouble();
            double compressionRatio = in.readDouble();
            String partitioner = in.readUTF();
            int nbAncestors = in.readInt();
            Set<Integer> ancestors = new HashSet<>(nbAncestors);
            for (int i = 0; i < nbAncestors; i++)
                ancestors.add(in.readInt());
            StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
            int sstableLevel = 0;
            if (in.available() > 0)
                sstableLevel = in.readInt();

            int colCount = in.readInt();
            List<ByteBuffer> minColumnNames = new ArrayList<>(colCount);
            for (int i = 0; i < colCount; i++)
                minColumnNames.add(ByteBufferUtil.readWithShortLength(in));

            colCount = in.readInt();
            List<ByteBuffer> maxColumnNames = new ArrayList<>(colCount);
            for (int i = 0; i < colCount; i++)
                maxColumnNames.add(ByteBufferUtil.readWithShortLength(in));

            if (types.contains(MetadataType.VALIDATION))
                components.put(MetadataType.VALIDATION,
                               new ValidationMetadata(partitioner, bloomFilterFPChance));
            if (types.contains(MetadataType.STATS))
                components.put(MetadataType.STATS,
                               new StatsMetadata(rowSizes,
                                                 columnCounts,
                                                 replayPosition,
                                                 minTimestamp,
                                                 maxTimestamp,
                                                 maxLocalDeletionTime,
                                                 compressionRatio,
                                                 tombstoneHistogram,
                                                 sstableLevel,
                                                 minColumnNames,
                                                 maxColumnNames,
                                                 true,
                                                 ActiveRepairService.UNREPAIRED_SSTABLE));
            if (types.contains(MetadataType.COMPACTION))
                components.put(MetadataType.COMPACTION,
                               new CompactionMetadata(ancestors, null));
        }
    }
    return components;
}
 
Example #18
Source File: StreamWriter.java    From stratio-cassandra with Apache License 2.0
/**
 * Streams the specified sections of the sstable data file to the given channel.
 *
 * StreamWriter uses LZF compression on the wire to reduce the amount of data transferred.
 *
 * @param channel where this writes data to
 * @throws IOException on any I/O error
 */
public void write(WritableByteChannel channel) throws IOException
{
    long totalSize = totalSize();
    RandomAccessReader file = sstable.openDataReader();
    ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
                                : null;
    transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

    // setting up data compression stream
    compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
    long progress = 0L;

    try
    {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections)
        {
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int readOffset = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);

            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesRead = 0;
            while (bytesRead < length)
            {
                long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
                bytesRead += lastBytesRead;
                progress += (lastBytesRead - readOffset);
                session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
                readOffset = 0;
            }

            // make sure that the current section is sent
            compressedOutput.flush();
        }
    }
    finally
    {
        // close the file no matter what happens
        FileUtils.closeQuietly(file);
        FileUtils.closeQuietly(validator);
    }
}
 
Example #19
Source File: SSTableRepairedAtSetter.java    From stratio-cassandra with Apache License 2.0
/**
 * @param args a list of sstables whose metadata we are changing
 */
public static void main(final String[] args) throws IOException
{
    PrintStream out = System.out;
    if (args.length == 0)
    {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstablerepairedset [--is-repaired | --is-unrepaired] [-f <sstable-list> | <sstables>]");
        System.exit(1);
    }

    if (args.length < 3 || !args[0].equals("--really-set") || (!args[1].equals("--is-repaired") && !args[1].equals("--is-unrepaired")))
    {
        out.println("This command should be run with Cassandra stopped, otherwise you will get very strange behavior");
        out.println("Verify that Cassandra is not running and then execute the command like this:");
        out.println("Usage: sstablerepairedset --really-set [--is-repaired | --is-unrepaired] [-f <sstable-list> | <sstables>]");
        System.exit(1);
    }

    boolean setIsRepaired = args[1].equals("--is-repaired");

    List<String> fileNames;
    if (args[2].equals("-f"))
    {
        fileNames = Files.readAllLines(Paths.get(args[3]), Charset.defaultCharset());
    }
    else
    {
        fileNames = Arrays.asList(args).subList(2, args.length);
    }

    for (String fname: fileNames)
    {
        Descriptor descriptor = Descriptor.fromFilename(fname);
        if (descriptor.version.hasRepairedAt)
        {
            if (setIsRepaired)
            {
                FileTime f = Files.getLastModifiedTime(new File(descriptor.filenameFor(Component.DATA)).toPath());
                descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, f.toMillis());
            }
            else
            {
                descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, ActiveRepairService.UNREPAIRED_SSTABLE);
            }
        }
        else
        {
            System.err.println("SSTable " + fname + " does not have repaired property, run upgradesstables");
        }
    }
}
 
Example #20
Source File: SSTableLevelResetter.java    From stratio-cassandra with Apache License 2.0
/**
 * @param args the --really-reset flag followed by the keyspace and column family whose sstable levels we are resetting
 */
public static void main(String[] args) throws IOException
{
    PrintStream out = System.out;
    if (args.length == 0)
    {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstablelevelreset <keyspace> <columnfamily>");
        System.exit(1);
    }

    if (!args[0].equals("--really-reset") || args.length != 3)
    {
        out.println("This command should be run with Cassandra stopped, otherwise you will get very strange behavior");
        out.println("Verify that Cassandra is not running and then execute the command like this:");
        out.println("Usage: sstablelevelreset --really-reset <keyspace> <columnfamily>");
        System.exit(1);
    }

    // TODO several daemon threads will run from here.
    // So we have to explicitly call System.exit.
    try
    {
        // load keyspace descriptions.
        DatabaseDescriptor.loadSchemas();

        String keyspaceName = args[1];
        String columnfamily = args[2];
        // validate columnfamily
        if (Schema.instance.getCFMetaData(keyspaceName, columnfamily) == null)
        {
            System.err.println("ColumnFamily not found: " + keyspaceName + "/" + columnfamily);
            System.exit(1);
        }

        Keyspace keyspace = Keyspace.openWithoutSSTables(keyspaceName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnfamily);
        boolean foundSSTable = false;
        for (Map.Entry<Descriptor, Set<Component>> sstable : cfs.directories.sstableLister().list().entrySet())
        {
            if (sstable.getValue().contains(Component.STATS))
            {
                foundSSTable = true;
                Descriptor descriptor = sstable.getKey();
                StatsMetadata metadata = (StatsMetadata) descriptor.getMetadataSerializer().deserialize(descriptor, MetadataType.STATS);
                if (metadata.sstableLevel > 0)
                {
                    out.println("Changing level from " + metadata.sstableLevel + " to 0 on " + descriptor.filenameFor(Component.DATA));
                    descriptor.getMetadataSerializer().mutateLevel(descriptor, 0);
                }
                else
                {
                    out.println("Skipped " + descriptor.filenameFor(Component.DATA) + " since it is already on level 0");
                }
            }
        }

        if (!foundSSTable)
        {
            out.println("Found no sstables, did you give the correct keyspace/columnfamily?");
        }
    }
    catch (Throwable t)
    {
        JVMStabilityInspector.inspectThrowable(t);
        t.printStackTrace();
        System.exit(1);
    }
    System.exit(0);
}
 
Example #21
Source File: SSTableOfflineRelevel.java    From stratio-cassandra with Apache License 2.0
/**
 * @param args an optional --dry-run flag followed by the keyspace and column family to relevel
 */
public static void main(String[] args) throws IOException
{
    PrintStream out = System.out;
    if (args.length < 2)
    {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstableofflinerelevel [--dry-run] <keyspace> <columnfamily>");
        System.exit(1);
    }
    boolean dryRun = args[0].equals("--dry-run");
    String keyspace = args[args.length - 2];
    String columnfamily = args[args.length - 1];
    DatabaseDescriptor.loadSchemas(false);

    if (Schema.instance.getCFMetaData(keyspace, columnfamily) == null)
        throw new IllegalArgumentException(String.format("Unknown keyspace/columnFamily %s.%s",
                keyspace,
                columnfamily));

    Keyspace ks = Keyspace.openWithoutSSTables(keyspace);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnfamily);
    Directories.SSTableLister lister = cfs.directories.sstableLister().skipTemporary(true);
    Set<SSTableReader> sstables = new HashSet<>();
    for (Map.Entry<Descriptor, Set<Component>> sstable : lister.list().entrySet())
    {
        if (sstable.getKey() != null)
        {
            try
            {
                SSTableReader reader = SSTableReader.open(sstable.getKey());
                sstables.add(reader);
            }
            catch (Throwable t)
            {
                out.println("Couldn't open sstable: "+sstable.getKey().filenameFor(Component.DATA));
                Throwables.propagate(t);
            }
        }
    }
    if (sstables.isEmpty())
    {
        out.println("No sstables to relevel for "+keyspace+"."+columnfamily);
        System.exit(1);
    }
    Relevel rl = new Relevel(sstables);
    rl.relevel(dryRun);
    System.exit(0);

}
 
Example #22
Source File: AbstractCompactionStrategy.java    From stratio-cassandra with Apache License 2.0
/**
 * Check if the given sstable is worth compacting in order to drop tombstones at gcBefore.
 * The check is skipped (and false returned) if tombstone_compaction_interval has not yet elapsed since sstable creation.
 *
 * @param sstable SSTable to check
 * @param gcBefore time to drop tombstones
 * @return true if given sstable's tombstones are expected to be removed
 */
protected boolean worthDroppingTombstones(SSTableReader sstable, int gcBefore)
{
    // since we rely on estimates, there is a chance that compaction will not actually drop any tombstones.
    // if that happens we would end up in an infinite compaction loop, so first we check whether enough
    // time has elapsed since the SSTable was created.
    if (System.currentTimeMillis() < sstable.getCreationTimeFor(Component.DATA) + tombstoneCompactionInterval * 1000)
        return false;

    double droppableRatio = sstable.getEstimatedDroppableTombstoneRatio(gcBefore);
    if (droppableRatio <= tombstoneThreshold)
        return false;

    // sstable range overlap check is disabled. See CASSANDRA-6563.
    if (uncheckedTombstoneCompaction)
        return true;

    Collection<SSTableReader> overlaps = cfs.getOverlappingSSTables(Collections.singleton(sstable));
    if (overlaps.isEmpty())
    {
        // there is no overlap, tombstones are safely droppable
        return true;
    }
    else if (CompactionController.getFullyExpiredSSTables(cfs, Collections.singleton(sstable), overlaps, gcBefore).size() > 0)
    {
        return true;
    }
    else
    {
        // what percentage of columns do we expect to compact outside of overlap?
        if (sstable.getIndexSummarySize() < 2)
        {
            // we have too few samples to estimate correct percentage
            return false;
        }
        // first, calculate estimated keys that do not overlap
        long keys = sstable.estimatedKeys();
        Set<Range<Token>> ranges = new HashSet<Range<Token>>(overlaps.size());
        for (SSTableReader overlap : overlaps)
            ranges.add(new Range<Token>(overlap.first.getToken(), overlap.last.getToken(), overlap.partitioner));
        long remainingKeys = keys - sstable.estimatedKeysForRanges(ranges);
        // next, calculate what percentage of columns we have within those keys
        long columns = sstable.getEstimatedColumnCount().mean() * remainingKeys;
        double remainingColumnsRatio = ((double) columns) / (sstable.getEstimatedColumnCount().count() * sstable.getEstimatedColumnCount().mean());

        // return whether we still expect droppable tombstones in the remaining columns
        return remainingColumnsRatio * droppableRatio > tombstoneThreshold;
    }
}
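A back-of-the-envelope illustration of that final check, with made-up numbers: suppose 1,000 estimated keys of which 400 fall outside all overlapping ranges, a droppable ratio of 0.4, and a tombstoneThreshold of 0.2:

// with a uniform per-key column mean, the column terms cancel out of the ratio
double remainingColumnsRatio = 400.0 / 1000.0;                                // 0.4
double droppableRatio        = 0.4;
double tombstoneThreshold    = 0.2;
boolean worth = remainingColumnsRatio * droppableRatio > tombstoneThreshold;  // 0.16 > 0.2 -> false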
 
Example #23
Source File: ColumnIndex.java    From sasi with Apache License 2.0
public Component getComponent()
{
    return component;
}
 
Example #24
Source File: CassandraUtils.java    From sstable-tools with Apache License 2.0
@SuppressWarnings("unchecked")
public static CFMetaData tableFromSSTable(File path) throws IOException, NoSuchFieldException, IllegalAccessException {
    Preconditions.checkNotNull(path);
    Descriptor desc = Descriptor.fromFilename(path.getAbsolutePath());

    EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.STATS, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
    Preconditions.checkNotNull(validationMetadata, "Validation Metadata could not be resolved, accompanying Statistics.db file must be missing.");
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    Preconditions.checkNotNull(header, "Metadata could not be resolved, accompanying Statistics.db file must be missing.");

    IPartitioner partitioner = validationMetadata.partitioner.endsWith("LocalPartitioner") ?
            new LocalPartitioner(header.getKeyType()) :
            FBUtilities.newPartitioner(validationMetadata.partitioner);

    DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    AbstractType<?> keyType = header.getKeyType();
    List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
    Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
    Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();
    int id = cfCounter.incrementAndGet();
    CFMetaData.Builder builder = CFMetaData.Builder.create("turtle" + id, "turtles" + id);
    staticColumns.entrySet().stream()
            .forEach(entry ->
                    builder.addStaticColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    regularColumns.entrySet().stream()
            .forEach(entry ->
                    builder.addRegularColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    List<AbstractType<?>> partTypes = keyType.getComponents();
    for(int i = 0; i < partTypes.size(); i++) {
        builder.addPartitionKey("partition" + (i > 0 ? i : ""), partTypes.get(i));
    }
    for (int i = 0; i < clusteringTypes.size(); i++) {
        builder.addClusteringColumn("row" + (i > 0 ? i : ""), clusteringTypes.get(i));
    }
    CFMetaData metaData = builder.build();
    Schema.instance.setKeyspaceMetadata(KeyspaceMetadata.create(metaData.ksName, KeyspaceParams.local(),
            Tables.of(metaData), Views.none(), getTypes(), Functions.none()));
    return metaData;
}
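A hedged usage sketch; the sstable path is hypothetical:

File dataFile = new File("/path/to/data/ks/table/mc-1-big-Data.db"); // hypothetical 3.x sstable
CFMetaData metadata = CassandraUtils.tableFromSSTable(dataFile);
System.out.println(metadata.ksName + "." + metadata.cfName);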
 
Example #25
Source File: CompressionMetadata.java    From stratio-cassandra with Apache License 2.0
/**
 * Create metadata about given compressed file including uncompressed data length, chunk size
 * and list of the chunk offsets of the compressed data.
 *
 * This is an expensive operation! Don't create more than one for each
 * sstable.
 *
 * @param dataFilePath Path to the compressed file
 *
 * @return metadata about given compressed file.
 */
public static CompressionMetadata create(String dataFilePath)
{
    Descriptor desc = Descriptor.fromFilename(dataFilePath);
    return new CompressionMetadata(desc.filenameFor(Component.COMPRESSION_INFO), new File(dataFilePath).length(), desc.version.hasPostCompressionAdlerChecksums);
}
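And a usage sketch, reusing the Descriptor/Component pattern from the earlier examples; the path is hypothetical, and per the warning above the result should be created once and cached:

String dataFilePath = "/var/lib/cassandra/data/ks/cf/ks-cf-ka-1-Data.db"; // hypothetical
CompressionMetadata meta = CompressionMetadata.create(dataFilePath);
System.out.println("uncompressed length: " + meta.dataLength);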