org.apache.hadoop.hbase.io.hfile.HFileContext Java Examples

The following examples show how to use org.apache.hadoop.hbase.io.hfile.HFileContext. They are drawn from open source projects; the source file and project for each example are noted in its header.
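Before the examples, here is a minimal self-contained sketch of the pattern they all share: configure an HFileContextBuilder, then build the immutable HFileContext that describes how HFile blocks should be written. The option values below are illustrative assumptions, not recommendations.

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class HFileContextSketch {
  public static void main(String[] args) {
    // Build an immutable HFileContext: 64 KB blocks, no compression,
    // FAST_DIFF data block encoding, MVCC read points kept, no tags.
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(64 * 1024)
        .withCompression(Compression.Algorithm.NONE)
        .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
        .withIncludesMvcc(true)
        .withIncludesTags(false)
        .build();
    System.out.println("block size = " + context.getBlocksize());
  }
}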
Example #1
Source File: SpliceDefaultCompactor.java    From spliceengine with GNU Affero General Public License v3.0
/**
 * Creates the HFileContext used when writing store files during compaction.
 * This is borrowed from DefaultCompactor.
 *
 * @param compression compression algorithm to use, or null to fall back to the HFile default
 * @param includeMVCCReadpoint whether MVCC read points should be written into cells
 * @param includesTag whether cell tags should be written
 * @param cryptoContext encryption context for the file, or Encryption.Context.NONE
 * @return the HFileContext describing how the store file should be written
 */
private HFileContext createFileContext(Compression.Algorithm compression,
                                       boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
            .withIncludesMvcc(includeMVCCReadpoint)
            .withIncludesTags(includesTag)
            .withCompression(compression)
            .withCompressTags(store.getColumnFamilyDescriptor().isCompressTags())
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(store.getColumnFamilyDescriptor().getBlocksize())
            .withHBaseCheckSum(true)
            .withDataBlockEncoding(store.getColumnFamilyDescriptor().getDataBlockEncoding())
            .withEncryptionContext(cryptoContext)
            .withCreateTime(EnvironmentEdgeManager.currentTime())
            .build();
    return hFileContext;
}
 
Example #2
Source File: TestHRegionServerBulkLoad.java    From hbase with Apache License 2.0
/**
 * Create an HFile with the given number of rows, each carrying the specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows rows, each with the same family, qualifier, value and timestamp
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
 
Example #3
Source File: TestStoreScannerClosure.java    From hbase with Apache License 2.0
private Path writeStoreFile() throws IOException {
  Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "TestHFile");
  HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build();
  StoreFileWriter sfw = new StoreFileWriter.Builder(CONF, fs).withOutputDir(storeFileParentDir)
      .withFileContext(meta).build();

  final int rowLen = 32;
  Random RNG = new Random();
  for (int i = 0; i < 1000; ++i) {
    byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i);
    byte[] v = RandomKeyValueUtil.randomValue(RNG);
    int cfLen = RNG.nextInt(k.length - rowLen + 1);
    KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, RNG.nextLong(), generateKeyType(RNG), v, 0, v.length);
    sfw.append(kv);
  }

  sfw.close();
  return sfw.getPath();
}
 
Example #4
Source File: TestHStore.java    From hbase with Apache License 2.0
private void addStoreFile() throws IOException {
  HStoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId().orElse(0L);
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c),
      fs)
          .withOutputDir(storedir)
          .withFileContext(fileContext)
          .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
 
Example #5
Source File: TestSeekToBlockWithEncoders.java    From hbase with Apache License 2.0
private void seekToTheKey(KeyValue expected, List<KeyValue> kvs, Cell toSeek)
    throws IOException {
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
        .withIncludesMvcc(false).withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE).build();
    HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
        HFILEBLOCK_DUMMY_HEADER, meta);
    ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
        encodingContext, this.useOffheapData);
    DataBlockEncoder.EncodedSeeker seeker =
      encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    encodedSeekers.add(seeker);
  }
  // seek all the seekers to the key and verify they return consistent results
  checkSeekingConsistency(encodedSeekers, toSeek, expected);
}
 
Example #6
Source File: TestHStoreFile.java    From hbase with Apache License 2.0
/**
 * Write a file and then assert that we can read from top and bottom halves using two
 * HalfMapFiles.
 */
@Test
public void testBasicHalfMapFile() throws Exception {
  final RegionInfo hri =
    RegionInfoBuilder.newBuilder(TableName.valueOf("testBasicHalfMapFileTb")).build();
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
    new Path(testDir, hri.getTable().getNameAsString()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
  writeStoreFile(writer);

  Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  HStoreFile sf = new HStoreFile(this.fs, sfPath, conf, cacheConf, BloomType.NONE, true);
  checkHalfHFile(regionFs, sf);
}
 
Example #7
Source File: TestHeapSize.java    From hbase with Apache License 2.0
@Test
public void testHFileBlockSize() throws IOException {
  long expected;
  long actual;

  actual = HFileContext.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(HFileContext.class, false);
  assertEquals(expected, actual);

  actual = HFileBlock.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(HFileBlock.class, false);
  assertEquals(expected, actual);

  actual = ExclusiveMemHFileBlock.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(ExclusiveMemHFileBlock.class, false);
  assertEquals(expected, actual);

  actual = SharedMemHFileBlock.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(SharedMemHFileBlock.class, false);
  assertEquals(expected, actual);
}
 
Example #8
Source File: TestDataBlockEncoders.java    From hbase with Apache License 2.0
@Test
public void testRowIndexWithTagsButNoTagsInCell() throws IOException {
  List<KeyValue> kvList = new ArrayList<>();
  byte[] row = new byte[0];
  byte[] family = new byte[0];
  byte[] qualifier = new byte[0];
  byte[] value = new byte[0];
  KeyValue expectedKV = new KeyValue(row, family, qualifier, 1L, Type.Put, value);
  kvList.add(expectedKV);
  DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1;
  DataBlockEncoder encoder = encoding.getEncoder();
  ByteBuffer encodedBuffer =
      encodeKeyValues(encoding, kvList, getEncodingContext(Algorithm.NONE, encoding), false);
  HFileContext meta =
      new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
          .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
  DataBlockEncoder.EncodedSeeker seeker =
    encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
  seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
  Cell cell = seeker.getCell();
  Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength());
}
 
Example #9
Source File: TestRegionObserverInterface.java    From hbase with Apache License 2.0
private static void createHFile(Configuration conf, FileSystem fs, Path path, byte[] family,
    byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
      .withFileContext(context).create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv =
          new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Example #10
Source File: HFilePerformanceEvaluation.java    From hbase with Apache License 2.0
@Override
void setUp() throws Exception {

  HFileContextBuilder builder = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(codec))
      .withBlockSize(RFILE_BLOCKSIZE);
  
  if ("aes".equals(cipher)) {
    byte[] cipherKey = new byte[AES.KEY_LENGTH];
    new SecureRandom().nextBytes(cipherKey);
    builder.withEncryptionContext(Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, cipher))
        .setKey(cipherKey));
  } else if (!"none".equals(cipher)) {
    throw new IOException("Cipher " + cipher + " not supported.");
  }
  
  HFileContext hFileContext = builder.build();

  writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, mf)
      .withFileContext(hFileContext)
      .create();
}
 
Example #11
Source File: TestCachedMobFile.java    From hbase with Apache License 2.0
@SuppressWarnings("SelfComparison")
@Test
public void testCompare() throws Exception {
  String caseName = testName.getMethodName();
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  Path outputDir1 = new Path(testDir, FAMILY1);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(outputDir1).withFileContext(meta).build();
  MobTestUtil.writeStoreFile(writer1, caseName);
  CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf);
  Path outputDir2 = new Path(testDir, FAMILY2);
  StoreFileWriter writer2 = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(outputDir2)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer2, caseName);
  CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf);
  cachedMobFile1.access(1);
  cachedMobFile2.access(2);
  assertEquals(1, cachedMobFile1.compareTo(cachedMobFile2));
  assertEquals(-1, cachedMobFile2.compareTo(cachedMobFile1));
  assertEquals(0, cachedMobFile1.compareTo(cachedMobFile1));
}
 
Example #12
Source File: TestMobFile.java    From hbase with Apache License 2.0
@Test
public void testGetScanner() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
          .withOutputDir(testDir)
          .withFileContext(meta)
          .build();
  MobTestUtil.writeStoreFile(writer, testName.getMethodName());

  MobFile mobFile =
      new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
 
Example #13
Source File: TestAccessController.java    From hbase with Apache License 2.0
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {
  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
        .withFileContext(context).create();
    // request numRows - 2 splits: iterateOnSplits(..., true, ...) also returns both boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
 
Example #14
Source File: HStore.java    From hbase with Apache License 2.0
private HFileContext createFileContext(Compression.Algorithm compression,
    boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
  if (compression == null) {
    compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  HFileContext hFileContext = new HFileContextBuilder()
                              .withIncludesMvcc(includeMVCCReadpoint)
                              .withIncludesTags(includesTag)
                              .withCompression(compression)
                              .withCompressTags(family.isCompressTags())
                              .withChecksumType(checksumType)
                              .withBytesPerCheckSum(bytesPerChecksum)
                              .withBlockSize(blocksize)
                              .withHBaseCheckSum(true)
                              .withDataBlockEncoding(family.getDataBlockEncoding())
                              .withEncryptionContext(cryptoContext)
                              .withCreateTime(EnvironmentEdgeManager.currentTime())
                              .withColumnFamily(family.getName())
                              .withTableName(region.getTableDescriptor()
                                  .getTableName().getName())
                              .withCellComparator(this.comparator)
                              .build();
  return hFileContext;
}
 
Example #15
Source File: TestDataBlockEncoders.java    From hbase with Apache License 2.0
private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf,
    DataBlockEncoder encoder) throws IOException {
  // decode
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
      encodedData.length - ENCODED_DATA_OFFSET);
  DataInputStream dis = new DataInputStream(bais);
  ByteBuffer actualDataset;
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE).build();
  actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
  actualDataset.rewind();

  // compare string forms: in the prefix tree case the decoded stream will not
  // have the mvcc in it
  assertEquals("Encoding -> decoding gives different results for " + encoder,
      Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}
 
Example #16
Source File: TestHStoreFile.java    From hbase with Apache License 2.0
/**
 * Check if data block encoding information is saved correctly in HFile's file info.
 */
@Test
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");

  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  cacheConf = new CacheConfig(conf);
  HFileContext meta =
    new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES).withDataBlockEncoding(dataBlockEncoderAlgo).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build();
  writer.close();

  HStoreFile storeFile =
    new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
  storeFile.initReader();
  StoreFileReader reader = storeFile.getReader();

  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
 
Example #17
Source File: TestCachedMobFile.java    From hbase with Apache License 2.0
@Test
public void testOpenClose() throws Exception {
  String caseName = testName.getMethodName();
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir).withFileContext(meta).build();
  MobTestUtil.writeStoreFile(writer, caseName);
  CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
  assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
  cachedMobFile.open();
  assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount());
  cachedMobFile.open();
  assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile.getReferenceCount());
  cachedMobFile.close();
  assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount());
  cachedMobFile.close();
  assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
}
 
Example #18
Source File: BoundedRecoveredHFilesOutputSink.java    From hbase with Apache License 2.0
/**
 * @return a writer for a plain HFile with no compression or encoding; good enough for recovery,
 *   given the hfile carries metadata on how it was written.
 */
private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String regionName,
    long seqId, String familyName, boolean isMetaTable) throws IOException {
  Path outputDir = WALSplitUtil.tryCreateRecoveredHFilesDir(walSplitter.rootFS, walSplitter.conf,
    tableName, regionName, familyName);
  StoreFileWriter.Builder writerBuilder =
      new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS)
          .withOutputDir(outputDir);
  HFileContext hFileContext = new HFileContextBuilder()
      .withChecksumType(HStore.getChecksumType(walSplitter.conf))
      .withBytesPerCheckSum(HStore.getBytesPerChecksum(walSplitter.conf))
      .withCellComparator(isMetaTable
          ? CellComparatorImpl.META_COMPARATOR : CellComparatorImpl.COMPARATOR)
      .build();
  return writerBuilder.withFileContext(hFileContext).build();
}
 
Example #19
Source File: TestDataBlockEncoders.java    From hbase with Apache License 2.0
@Test
public void testNextOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);

  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
        getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(false)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTags)
                        .withCompression(Compression.Algorithm.NONE)
                        .build();
    DataBlockEncoder.EncodedSeeker seeker =
      encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    int i = 0;
    do {
      KeyValue expectedKeyValue = sampleKv.get(i);
      Cell cell = seeker.getCell();
      if (PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, expectedKeyValue,
        cell) != 0) {
        int commonPrefix = PrivateCellUtil
            .findCommonPrefixInFlatKey(expectedKeyValue, cell, false, true);
        fail(String.format(
            "next() produces wrong results for encoder %s at i=%d (commonPrefix=%d)"
                + "\n expected %s\n actual   %s",
            encoder.toString(), i, commonPrefix,
            Bytes.toStringBinary(expectedKeyValue.getBuffer(), expectedKeyValue.getKeyOffset(),
                expectedKeyValue.getKeyLength()),
            CellUtil.toString(cell, false)));
      }
      i++;
    } while (seeker.next());
  }
}
 
Example #20
Source File: TestBucketCache.java    From hbase with Apache License 2.0
@Test
public void testRAMCache() {
  int size = 100;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  byte[] byteArr = new byte[length];
  ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
  HFileContext meta = new HFileContextBuilder().build();

  RAMCache cache = new RAMCache();
  BlockCacheKey key1 = new BlockCacheKey("file-1", 1);
  BlockCacheKey key2 = new BlockCacheKey("file-2", 2);
  HFileBlock blk1 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, -1, 52, -1, meta, ByteBuffAllocator.HEAP);
  HFileBlock blk2 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, -1, -1, -1, meta, ByteBuffAllocator.HEAP);
  RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false, ByteBuffAllocator.NONE);
  RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false, ByteBuffAllocator.NONE);

  assertFalse(cache.containsKey(key1));
  assertNull(cache.putIfAbsent(key1, re1));
  assertEquals(2, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());

  assertNotNull(cache.putIfAbsent(key1, re2));
  assertEquals(2, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(1, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());

  assertNull(cache.putIfAbsent(key2, re2));
  assertEquals(2, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(2, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());

  cache.remove(key1);
  assertEquals(1, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(2, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());

  cache.clear();
  assertEquals(1, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(1, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());
}
 
Example #21
Source File: TestBucketCache.java    From hbase with Apache License 2.0
@Test
public void testFreeBlockWhenIOEngineWriteFailure() throws IOException {
  // initialize a block.
  int size = 100, offset = 20;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  ByteBuffer buf = ByteBuffer.allocate(length);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, offset, 52, -1, meta, ByteBuffAllocator.HEAP);

  // initialize a mocked IOEngine.
  IOEngine ioEngine = Mockito.mock(IOEngine.class);
  Mockito.when(ioEngine.usesSharedMemory()).thenReturn(false);
  // Mockito.doNothing().when(ioEngine).write(Mockito.any(ByteBuffer.class), Mockito.anyLong());
  Mockito.doThrow(RuntimeException.class).when(ioEngine).write(Mockito.any(ByteBuffer.class),
    Mockito.anyLong());
  Mockito.doThrow(RuntimeException.class).when(ioEngine).write(Mockito.any(ByteBuff.class),
    Mockito.anyLong());

  // create a bucket allocator.
  long availableSpace = 1024 * 1024 * 1024L;
  BucketAllocator allocator = new BucketAllocator(availableSpace, null);

  BlockCacheKey key = new BlockCacheKey("dummy", 1L);
  RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, ByteBuffAllocator.NONE);

  Assert.assertEquals(0, allocator.getUsedSize());
  try {
    re.writeToCache(ioEngine, allocator, null);
    Assert.fail();
  } catch (Exception e) {
    // expected: the mocked IOEngine throws on write
  }
  Assert.assertEquals(0, allocator.getUsedSize());
}
 
Example #22
Source File: TestDataBlockEncoders.java    From hbase with Apache License 2.0
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTags)
                      .withCompression(algo).build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
 
Example #23
Source File: TestHalfStoreFileReader.java    From hbase with Apache License 2.0
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API demands that seekTo and
 * reseekTo() only return < 0 if the key lies before the start of the file (leaving the scanner
 * unpositioned). They return 0 on a perfect match (rare) and 1 on an imperfect match, the most
 * common case; after a return of 1 there may or may not be a 'next' in the scanner/file. A bug in
 * the half file scanner was returning -1 at the end of the bottom half, and that was causing the
 * infrastructure above to go null, causing NPEs and other problems. This test reproduces that
 * failure, and also tests both the bottom and top of the file while we are at it. A sketch of the
 * seekTo contract in isolation follows this example.
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");

  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withFileContext(meta).create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf);
  Cell midKV = r.midKey().get();
  byte[] midkey = CellUtil.cloneRow(midKV);

  // System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
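As a hedged illustration of the seekTo contract described in the comment above (the class and method names here are assumptions made for this sketch, not part of the test):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

class SeekContractSketch {
  // Demonstrates the return-value contract of HFileScanner.seekTo(Cell).
  static void demonstrateSeek(HFileScanner scanner, Cell key) throws IOException {
    int result = scanner.seekTo(key);
    if (result < 0) {
      // key lies before the first key in the file; the scanner is left unpositioned
    } else if (result == 0) {
      // perfect match (rare); the scanner is positioned exactly at key
    } else {
      // imperfect match (the common case): positioned at the last key at or
      // before the requested key; there may or may not be a next() available
    }
  }
}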
 
Example #24
Source File: EncodedDataBlock.java    From hbase with Apache License 2.0
/**
 * Create a buffer which will be encoded using dataBlockEncoder.
 * @param dataBlockEncoder the encoder used to serialize the key/values
 * @param encoding encoding type used
 * @param rawKVs raw, unencoded KeyValue bytes
 * @param meta HFile meta data (the HFileContext)
 */
public EncodedDataBlock(DataBlockEncoder dataBlockEncoder, DataBlockEncoding encoding,
    byte[] rawKVs, HFileContext meta) {
  Preconditions.checkNotNull(encoding,
      "Cannot create encoded data block with null encoder");
  this.dataBlockEncoder = dataBlockEncoder;
  this.encoding = encoding;
  encodingCtx = dataBlockEncoder.newDataBlockEncodingContext(encoding,
      HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  this.rawKVs = rawKVs;
  this.meta = meta;
}
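A hedged usage sketch for this constructor, matching the four-argument signature shown above (the choice of PREFIX encoding and the rawKVs argument are assumptions):

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.EncodedDataBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

class EncodedDataBlockSketch {
  // Wraps already-serialized KeyValue bytes in an EncodedDataBlock using PREFIX encoding.
  static EncodedDataBlock prefixEncoded(byte[] rawKVs) {
    DataBlockEncoding encoding = DataBlockEncoding.PREFIX;
    HFileContext meta = new HFileContextBuilder()
        .withIncludesMvcc(false)
        .withIncludesTags(false)
        .build();
    return new EncodedDataBlock(encoding.getEncoder(), encoding, rawKVs, meta);
  }
}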
 
Example #25
Source File: HFileBlockDefaultEncodingContext.java    From hbase with Apache License 2.0
/**
 * @param encoding encoding used
 * @param headerBytes dummy header bytes
 * @param fileContext HFile meta data
 */
public HFileBlockDefaultEncodingContext(DataBlockEncoding encoding, byte[] headerBytes,
    HFileContext fileContext) {
  this.encodingAlgo = encoding;
  this.fileContext = fileContext;
  Compression.Algorithm compressionAlgorithm =
      fileContext.getCompression() == null ? NONE : fileContext.getCompression();
  if (compressionAlgorithm != NONE) {
    compressor = compressionAlgorithm.getCompressor();
    compressedByteStream = new ByteArrayOutputStream();
    try {
      compressionStream =
          compressionAlgorithm.createPlainCompressionStream(
              compressedByteStream, compressor);
    } catch (IOException e) {
      throw new RuntimeException(
          "Could not create compression stream for algorithm "
              + compressionAlgorithm, e);
    }
  }

  Encryption.Context cryptoContext = fileContext.getEncryptionContext();
  if (cryptoContext != Encryption.Context.NONE) {
    cryptoByteStream = new ByteArrayOutputStream();
    iv = new byte[cryptoContext.getCipher().getIvLength()];
    new SecureRandom().nextBytes(iv);
  }

  dummyHeader = Preconditions.checkNotNull(headerBytes,
    "Please pass HConstants.HFILEBLOCK_DUMMY_HEADER instead of null for param headerBytes");
}
 
Example #26
Source File: TestDataBlockEncoders.java    From hbase with Apache License 2.0
private void testEncodersOnDataset(List<KeyValue> kvList, boolean includesMemstoreTS,
    boolean includesTags) throws IOException {
  ByteBuffer unencodedDataBuf = RedundantKVGenerator.convertKvToByteBuffer(kvList,
      includesMemstoreTS);
  HFileContext fileContext = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags).build();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    DataBlockEncoder encoder = encoding.getEncoder();
    if (encoder == null) {
      continue;
    }
    HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(encoding,
        HFILEBLOCK_DUMMY_HEADER, fileContext);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    baos.write(HFILEBLOCK_DUMMY_HEADER);
    DataOutputStream dos = new DataOutputStream(baos);
    encoder.startBlockEncoding(encodingContext, dos);
    for (KeyValue kv : kvList) {
      encoder.encode(kv, encodingContext, dos);
    }
    encoder.endBlockEncoding(encodingContext, dos, baos.getBuffer());
    byte[] encodedData = baos.toByteArray();

    testAlgorithm(encodedData, unencodedDataBuf, encoder);
  }
}
 
Example #27
Source File: TestHStoreFile.java    From hbase with Apache License 2.0
/**
 * Test for HBASE-8012
 */
@Test
public void testReseek() throws Exception {
  // write the file
  Path f = new Path(ROOT_DIR, name.getMethodName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
    .withFileContext(meta).build();

  writeStoreFile(writer);
  writer.close();

  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  StoreFileReader reader =
    new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
  fileInfo.initMetaAndIndex(reader.getHFileReader());

  // Now do reseek with empty KV to position to the beginning of the file

  KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
  StoreFileScanner s = getStoreFileScanner(reader, false, false);
  s.reseek(k);

  assertNotNull("Intial reseek should position at the beginning of the file", s.peek());
}
 
Example #28
Source File: TestRAMCache.java    From hbase with Apache License 2.0
MockHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
    int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer b, boolean fillHeader,
    long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader,
    HFileContext fileContext, ByteBuffAllocator allocator) {
  super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset,
      ByteBuff.wrap(b), fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader,
      fileContext, allocator);
}
 
Example #29
Source File: TestCompoundBloomFilter.java    From hbase with Apache License 2.0
private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs)
    throws IOException {
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZES[t]);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
  cacheConf = new CacheConfig(conf, blockCache);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
  StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs)
          .withOutputDir(TEST_UTIL.getDataTestDir())
          .withBloomType(bt)
          .withFileContext(meta)
          .build();

  assertTrue(w.hasGeneralBloom());
  assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter);
  CompoundBloomFilterWriter cbbf =
      (CompoundBloomFilterWriter) w.getGeneralBloomWriter();

  int keyCount = 0;
  KeyValue prev = null;
  LOG.debug("Total keys/values to insert: " + kvs.size());
  for (KeyValue kv : kvs) {
    w.append(kv);

    // Validate the key count in the Bloom filter.
    boolean newKey = true;
    if (prev != null) {
      newKey = !(bt == BloomType.ROW ? CellUtil.matchingRows(kv,
          prev) : CellUtil.matchingRowColumn(kv, prev));
    }
    if (newKey) {
      ++keyCount;
    }
    assertEquals(keyCount, cbbf.getKeyCount());

    prev = kv;
  }
  w.close();

  return w.getPath();
}
 
Example #30
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
@Test
public void testReversibleStoreFileScanner() throws IOException {
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(
      TEST_UTIL.getDataTestDir("testReversibleStoreFileScanner"),
      "regionname"), "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    HFileContextBuilder hcBuilder = new HFileContextBuilder();
    hcBuilder.withBlockSize(2 * 1024);
    hcBuilder.withDataBlockEncoding(encoding);
    HFileContext hFileContext = hcBuilder.build();
    StoreFileWriter writer = new StoreFileWriter.Builder(
        TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(hfilePath)
        .withFileContext(hFileContext).build();
    writeStoreFile(writer);

    HStoreFile sf = new HStoreFile(fs, writer.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
        BloomType.NONE, true);

    List<StoreFileScanner> scanners = StoreFileScanner
        .getScannersForStoreFiles(Collections.singletonList(sf),
            false, true, false, false, Long.MAX_VALUE);
    StoreFileScanner scanner = scanners.get(0);
    seekTestOfReversibleKeyValueScanner(scanner);
    for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
      LOG.info("Setting read point to " + readPoint);
      scanners = StoreFileScanner.getScannersForStoreFiles(
          Collections.singletonList(sf), false, true, false, false, readPoint);
      seekTestOfReversibleKeyValueScannerWithMVCC(scanners, readPoint);
    }
  }
}