org.apache.cassandra.io.FSReadError Java Examples

The following examples show how to use org.apache.cassandra.io.FSReadError. Each example is taken from an open-source project; the source file, project, and license are noted above it.
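
Before the examples, here is a minimal sketch of the pattern they all share: a filesystem read that fails with an IOException wraps it in an FSReadError carrying the offending path, so higher layers can apply the configured disk failure policy. The ChecksumProbe class and its readFirstByte helper below are hypothetical and exist only for illustration.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

import org.apache.cassandra.io.FSReadError;

public class ChecksumProbe
{
    /**
     * Hypothetical helper: read the first byte of a file, wrapping any
     * IOException in an FSReadError that records the failing path.
     */
    public static int readFirstByte(File file)
    {
        try (RandomAccessFile raf = new RandomAccessFile(file, "r"))
        {
            return raf.read();
        }
        catch (IOException e)
        {
            // FSReadError is unchecked, so callers need not declare it;
            // the path it carries feeds the disk failure policy handling.
            throw new FSReadError(e, file);
        }
    }
}
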
Example #1
Source File: RandomAccessReader.java    From hadoop-sstable with Apache License 2.0
/**
 * Read data from the file, starting at the current offset, to populate the buffer.
 */
protected void reBuffer() {
    resetBuffer();

    try {
        if (bufferOffset >= fs.getFileStatus(inputPath).getLen()) // TODO: is this equivalent?
            return;

        input.seek(bufferOffset);

        int read = 0;

        while (read < buffer.length) {
            int n = input.read(buffer, read, buffer.length - read);
            if (n < 0)
                break;
            read += n;
        }

        validBufferBytes = read;
        bytesSinceCacheFlush += read;
    } catch (IOException e) {
        throw new FSReadError(e, filePath);
    }
}
 
Example #2
Source File: CompressionMetadata.java    From hadoop-sstable with Apache License 2.0
/**
 * Get a chunk offset by its index.
 *
 * @param chunkIndex Index of the chunk.
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex) {
    if (dataLengthOffset == -1)
        throw new IllegalStateException("writeHeader wasn't called");

    try {
        long position = getFilePointer();

        // seek to the position of the given chunk
        seek(dataLengthOffset
                + 8 // size reserved for uncompressed data length
                + 4 // size reserved for chunk count
                + (chunkIndex * 8L));

        try {
            return readLong();
        } finally {
            // back to the original position
            seek(position);
        }
    } catch (IOException e) {
        throw new FSReadError(e, filePath);
    }
}
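
For readers skimming the seek arithmetic, it can be restated as a standalone helper. The method below is hypothetical and merely mirrors the offsets used in the example: 8 bytes reserved for the uncompressed data length, 4 for the chunk count, then one 8-byte offset per chunk.

private static long chunkEntryPosition(long dataLengthOffset, int chunkIndex) {
    return dataLengthOffset
            + 8                  // size reserved for uncompressed data length
            + 4                  // size reserved for chunk count
            + (chunkIndex * 8L); // one 8-byte offset per preceding chunk
}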
 
Example #3
Source File: FileUtils.java    From stratio-cassandra with Apache License 2.0
public static void handleFSError(FSError e)
{
    JVMStabilityInspector.inspectThrowable(e);
    switch (DatabaseDescriptor.getDiskFailurePolicy())
    {
        case stop_paranoid:
        case stop:
            StorageService.instance.stopTransports();
            break;
        case best_effort:
            // for both read and write errors mark the path as unwritable.
            BlacklistedDirectories.maybeMarkUnwritable(e.path);
            if (e instanceof FSReadError)
            {
                File directory = BlacklistedDirectories.maybeMarkUnreadable(e.path);
                if (directory != null)
                    Keyspace.removeUnreadableSSTables(directory);
            }
            break;
        case ignore:
            // already logged, so there is nothing left to do
            break;
        default:
            throw new IllegalStateException();
    }
}
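
As a usage sketch (the loadIndexSummary call is hypothetical), a caller typically catches FSError at an I/O boundary and delegates to the handler above, so the configured disk_failure_policy runs before the error propagates.

public void readWithPolicy()
{
    try
    {
        loadIndexSummary(); // hypothetical read path that may throw FSReadError
    }
    catch (FSError e)
    {
        // Apply the configured disk_failure_policy (stop, best_effort, ignore, ...)
        // before letting the error continue up the stack.
        FileUtils.handleFSError(e);
        throw e;
    }
}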
 
Example #4
Source File: SegmentedFile.java    From stratio-cassandra with Apache License 2.0
public FileDataInput next()
{
    long position = nextpos;
    if (position >= length)
        throw new NoSuchElementException();

    FileDataInput segment = getSegment(nextpos);
    try
    {
        nextpos = nextpos + segment.bytesRemaining();
    }
    catch (IOException e)
    {
        throw new FSReadError(e, path);
    }
    return segment;
}
 
Example #5
Source File: RandomAccessReader.java    From stratio-cassandra with Apache License 2.0
/**
 * Read data from the file, starting at the current offset, to populate the buffer.
 */
protected void reBuffer()
{
    resetBuffer();

    try
    {
        int read = buffer.length;
        if (bufferOffset + read > fileLength)
        {
            if (bufferOffset >= fileLength)
                return;
            read = (int) (fileLength - bufferOffset);
        }

        channel.position(bufferOffset); // setting channel position

        int offset = 0;
        while (read > 0)
        {
            int n = super.read(buffer, offset, read);
            if (n < 0)
                throw new IllegalStateException();
            read -= n;
            offset += n;
        }

        validBufferBytes = offset;
    }
    catch (IOException e)
    {
        throw new FSReadError(e, filePath);
    }
}
 
Example #6
Source File: SegmentedFile.java    From hadoop-sstable with Apache License 2.0
public FileDataInput next() {
    long position = nextpos;
    if (position >= length)
        throw new NoSuchElementException();

    FileDataInput segment = getSegment(nextpos);
    try {
        nextpos = nextpos + segment.bytesRemaining();
    } catch (IOException e) {
        throw new FSReadError(e, path);
    }
    return segment;
}
 
Example #7
Source File: RandomAccessReader.java    From hadoop-sstable with Apache License 2.0
public void deallocate() {
    buffer = null; // makes sure we don't use this after it's ostensibly closed

    try {
        input.close();
    } catch (IOException e) {
        throw new FSReadError(e, filePath);
    }
}
 
Example #8
Source File: JVMStabilityInspectorTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testKill() throws Exception
{
    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);

    Config.DiskFailurePolicy oldPolicy = DatabaseDescriptor.getDiskFailurePolicy();
    Config.CommitFailurePolicy oldCommitPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        killerForTests.reset();
        JVMStabilityInspector.inspectThrowable(new IOException());
        assertFalse(killerForTests.wasKilled());

        killerForTests.reset();
        JVMStabilityInspector.inspectThrowable(new OutOfMemoryError());
        assertTrue(killerForTests.wasKilled());

        DatabaseDescriptor.setDiskFailurePolicy(Config.DiskFailurePolicy.die);
        killerForTests.reset();
        JVMStabilityInspector.inspectThrowable(new FSReadError(new IOException(), "blah"));
        assertTrue(killerForTests.wasKilled());

        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
        killerForTests.reset();
        JVMStabilityInspector.inspectCommitLogThrowable(new Throwable());
        assertTrue(killerForTests.wasKilled());
    }
    finally
    {
        JVMStabilityInspector.replaceKiller(originalKiller);
        DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
        DatabaseDescriptor.setCommitFailurePolicy(oldCommitPolicy);
    }
}
 
Example #9
Source File: SequentialWriter.java    From stratio-cassandra with Apache License 2.0
public long length()
{
    try
    {
        return Math.max(Math.max(current, out.length()), bufferOffset + validBufferBytes);
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }
}
 
Example #10
Source File: RandomAccessReader.java    From stratio-cassandra with Apache License 2.0
public void deallocate()
{
    buffer = null; // makes sure we don't use this after it's ostensibly closed

    try
    {
        super.close();
    }
    catch (IOException e)
    {
        throw new FSReadError(e, filePath);
    }
}
 
Example #11
Source File: RandomAccessReader.java    From stratio-cassandra with Apache License 2.0
protected RandomAccessReader(File file, int bufferSize, long overrideLength, PoolingSegmentedFile owner) throws FileNotFoundException
{
    super(file, "r");

    this.owner = owner;

    channel = super.getChannel();
    filePath = file.getAbsolutePath();

    // allocate a buffer of the required size
    if (bufferSize <= 0)
        throw new IllegalArgumentException("bufferSize must be positive");

    buffer = new byte[bufferSize];

    // we can cache file length in read-only mode
    long fileLength = overrideLength;
    if (fileLength <= 0)
    {
        try
        {
            fileLength = channel.size();
        }
        catch (IOException e)
        {
            throw new FSReadError(e, filePath);
        }
    }

    this.fileLength = fileLength;
    validBufferBytes = -1; // this will trigger reBuffer() on demand for read/seek operations
}
 
Example #12
Source File: FileUtils.java    From stratio-cassandra with Apache License 2.0
public static String getCanonicalPath(File file)
{
    try
    {
        return file.getCanonicalPath();
    }
    catch (IOException e)
    {
        throw new FSReadError(e, file);
    }
}
 
Example #13
Source File: FileUtils.java    From stratio-cassandra with Apache License 2.0
public static String getCanonicalPath(String filename)
{
    try
    {
        return new File(filename).getCanonicalPath();
    }
    catch (IOException e)
    {
        throw new FSReadError(e, filename);
    }
}
 
Example #14
Source File: CompressedSequentialWriter.java    From stratio-cassandra with Apache License 2.0
/**
 * Seek to the offset where the next compressed data chunk should be stored.
 */
private void seekToChunkStart()
{
    if (getOnDiskFilePointer() != chunkOffset)
    {
        try
        {
            out.seek(chunkOffset);
        }
        catch (IOException e)
        {
            throw new FSReadError(e, getPath());
        }
    }
}
 
Example #15
Source File: CompressedSequentialWriter.java    From stratio-cassandra with Apache License 2.0
@Override
public long getOnDiskFilePointer()
{
    try
    {
        return out.getFilePointer();
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }
}
 
Example #16
Source File: SSTableIndex.java    From sasi with Apache License 2.0
@Override
public DecoratedKey apply(Long offset)
{
    try
    {
        return sstable.keyAt(offset);
    }
    catch (IOException e)
    {
        throw new FSReadError(new IOException("Failed to read key from " + sstable.descriptor, e), sstable.getFilename());
    }
}
 
Example #17
Source File: OnDiskIndex.java    From sasi with Apache License 2.0
public T getBlock(int idx) throws FSReadError
{
    assert idx >= 0 && idx < blockCount;

    // calculate block offset and move there
    // (long is intentional; we just need an mmap implementation that supports long positions)
    long blockOffset = indexFile.getLong(blockOffsets + idx * 8);
    return cast(indexFile.duplicate().position(blockOffset));
}
 
Example #18
Source File: SSTableAttachedSecondaryIndex.java    From sasi with Apache License 2.0
@Override
public void build()
{
    for (Map.Entry<SSTableReader, Map<ByteBuffer, ColumnIndex>> e : sstables.entrySet())
    {
        SSTableReader sstable = e.getKey();
        Map<ByteBuffer, ColumnIndex> indexes = e.getValue();

        if (!sstable.acquireReference())
        {
            bytesProcessed += getPrimaryIndexLength(sstable);
            continue;
        }

        try
        {
            PerSSTableIndexWriter indexWriter = newWriter(sstable.descriptor.asTemporary(true), indexes, Source.COMPACTION);

            long previousKeyPosition = 0;
            try (KeyIterator keys = new KeyIterator(sstable.descriptor))
            {
                while (keys.hasNext())
                {
                    if (isStopRequested())
                        throw new CompactionInterruptedException(getCompactionInfo());

                    final DecoratedKey key = keys.next();
                    final long keyPosition = keys.getKeyPosition();

                    indexWriter.startRow(key, keyPosition);
                    try (SSTableSliceIterator row = new SSTableSliceIterator(sstable, key, ColumnSlice.ALL_COLUMNS_ARRAY, false))
                    {
                        while (row.hasNext())
                        {
                            OnDiskAtom atom = row.next();
                            if (atom != null && atom instanceof Column)
                                indexWriter.nextColumn((Column) atom);
                        }
                    }
                    catch (IOException ex)
                    {
                        throw new FSReadError(ex, sstable.getFilename());
                    }

                    bytesProcessed += keyPosition - previousKeyPosition;
                    previousKeyPosition = keyPosition;
                }

                completeSSTable(indexWriter, sstable, indexes.values());
            }
        }
        finally
        {
            sstable.releaseReference();
        }
    }
}