Java Code Examples for org.apache.hadoop.hbase.io.hfile.HFileContextBuilder#build()

The following examples show how to use org.apache.hadoop.hbase.io.hfile.HFileContextBuilder#build(). Each example comes from an open-source project; the source file and its license are noted above the snippet.
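
Before the project examples, here is a minimal sketch of the common pattern: configure an HFileContextBuilder, call build(), and pass the resulting HFileContext to a writer builder. The sketch is not taken from any of the projects below, and the block size, compression, and encoding values are purely illustrative.

// Minimal sketch: configure a context, build it, and hand it to a writer.
// The chosen values are illustrative, not recommendations.
HFileContext context = new HFileContextBuilder()
    .withBlockSize(64 * 1024)                      // data block size in bytes
    .withCompression(Compression.Algorithm.NONE)   // compression algorithm
    .withDataBlockEncoding(DataBlockEncoding.NONE) // on-disk data block encoding
    .build();
// The context is then supplied to a writer builder, for example
// new StoreFileWriter.Builder(conf, cacheConf, fs).withFileContext(context),
// as in the examples below.
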
Example 1
Source File: HFilePerformanceEvaluation.java    From hbase with Apache License 2.0
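// Builds an HFileContext with the configured compression codec and block size,
// optionally attaching an AES encryption context, then creates an HFile writer with it.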
@Override
void setUp() throws Exception {

  HFileContextBuilder builder = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(codec))
      .withBlockSize(RFILE_BLOCKSIZE);
  
  if ("aes".equals(cipher)) {
    byte[] cipherKey = new byte[AES.KEY_LENGTH];
    new SecureRandom().nextBytes(cipherKey);
    builder.withEncryptionContext(Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, cipher))
        .setKey(cipherKey));
  } else if (!"none".equals(cipher)) {
    throw new IOException("Cipher " + cipher + " not supported.");
  }
  
  HFileContext hFileContext = builder.build();

  writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, mf)
      .withFileContext(hFileContext)
      .create();
}
 
Example 2
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
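// For every DataBlockEncoding, builds an HFileContext with a 2 KB block size and that
// encoding, writes a store file with it, and runs reverse seek tests against the
// resulting StoreFileScanner, with and without MVCC read points.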
@Test
public void testReversibleStoreFileScanner() throws IOException {
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(
      TEST_UTIL.getDataTestDir("testReversibleStoreFileScanner"),
      "regionname"), "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    HFileContextBuilder hcBuilder = new HFileContextBuilder();
    hcBuilder.withBlockSize(2 * 1024);
    hcBuilder.withDataBlockEncoding(encoding);
    HFileContext hFileContext = hcBuilder.build();
    StoreFileWriter writer = new StoreFileWriter.Builder(
        TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(hfilePath)
        .withFileContext(hFileContext).build();
    writeStoreFile(writer);

    HStoreFile sf = new HStoreFile(fs, writer.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
        BloomType.NONE, true);

    List<StoreFileScanner> scanners = StoreFileScanner
        .getScannersForStoreFiles(Collections.singletonList(sf),
            false, true, false, false, Long.MAX_VALUE);
    StoreFileScanner scanner = scanners.get(0);
    seekTestOfReversibleKeyValueScanner(scanner);
    for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
      LOG.info("Setting read point to " + readPoint);
      scanners = StoreFileScanner.getScannersForStoreFiles(
          Collections.singletonList(sf), false, true, false, false, readPoint);
      seekTestOfReversibleKeyValueScannerWithMVCC(scanners, readPoint);
    }
  }

}
 
Example 3
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
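// Builds a single HFileContext (2 KB block size) shared by two StoreFileWriters, writes
// data to a memstore and both store files, and exercises ReversedKeyValueHeap seeking,
// first without and then with MVCC read points.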
@Test
public void testReversibleKeyValueHeap() throws IOException {
  // write data to one memstore and two store files
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(
      TEST_UTIL.getDataTestDir("testReversibleKeyValueHeap"), "regionname"),
      "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  HFileContextBuilder hcBuilder = new HFileContextBuilder();
  hcBuilder.withBlockSize(2 * 1024);
  HFileContext hFileContext = hcBuilder.build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();
  StoreFileWriter writer2 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();

  MemStore memstore = new DefaultMemStore();
  writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1,
      writer2 });

  HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);
  /**
   * Test without MVCC
   */
  int startRowNum = ROWSIZE / 2;
  ReversedKeyValueHeap kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2,
      ROWS[startRowNum], MAXMVCC);
  internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap, startRowNum);

  startRowNum = ROWSIZE - 1;
  kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2,
      HConstants.EMPTY_START_ROW, MAXMVCC);
  internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap, startRowNum);

  /**
   * Test with MVCC
   */
  for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
    LOG.info("Setting read point to " + readPoint);
    startRowNum = ROWSIZE - 1;
    kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2,
        HConstants.EMPTY_START_ROW, readPoint);
    for (int i = startRowNum; i >= 0; i--) {
      if (i - 2 < 0) break;
      i = i - 2;
      kvHeap.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[i + 1]));
      Pair<Integer, Integer> nextReadableNum = getNextReadableNumWithBackwardScan(
          i, 0, readPoint);
      if (nextReadableNum == null) break;
      KeyValue expectedKey = makeKV(nextReadableNum.getFirst(),
          nextReadableNum.getSecond());
      assertEquals(expectedKey, kvHeap.peek());
      i = nextReadableNum.getFirst();
      int qualNum = nextReadableNum.getSecond();
      if (qualNum + 1 < QUALSIZE) {
        kvHeap.backwardSeek(makeKV(i, qualNum + 1));
        nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1,
            readPoint);
        if (nextReadableNum == null) break;
        expectedKey = makeKV(nextReadableNum.getFirst(),
            nextReadableNum.getSecond());
        assertEquals(expectedKey, kvHeap.peek());
        i = nextReadableNum.getFirst();
        qualNum = nextReadableNum.getSecond();
      }

      kvHeap.next();

      if (qualNum + 1 >= QUALSIZE) {
        nextReadableNum = getNextReadableNumWithBackwardScan(i - 1, 0,
            readPoint);
      } else {
        nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1,
            readPoint);
      }
      if (nextReadableNum == null) break;
      expectedKey = makeKV(nextReadableNum.getFirst(),
          nextReadableNum.getSecond());
      assertEquals(expectedKey, kvHeap.peek());
      i = nextReadableNum.getFirst();
    }
  }
}
 
Example 4
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
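// Builds a shared HFileContext (2 KB block size) for two store files plus a memstore and
// verifies reversed StoreScanner output for a full scan, a scan with a start row, a scan
// with selected qualifiers, and scans at different MVCC read points.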
@Test
public void testReversibleStoreScanner() throws IOException {
  // write data to one memstore and two store files
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(
      TEST_UTIL.getDataTestDir("testReversibleStoreScanner"), "regionname"),
      "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  HFileContextBuilder hcBuilder = new HFileContextBuilder();
  hcBuilder.withBlockSize(2 * 1024);
  HFileContext hFileContext = hcBuilder.build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();
  StoreFileWriter writer2 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();

  MemStore memstore = new DefaultMemStore();
  writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1,
      writer2 });

  HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  ScanInfo scanInfo =
      new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
          KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);

  // Case 1: Test a full reversed scan
  Scan scan = new Scan();
  scan.setReversed(true);
  StoreScanner storeScanner =
      getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, QUALSIZE * ROWSIZE, ROWSIZE, false);

  // Case 2: Test a reversed scan with a specified start row
  int startRowNum = ROWSIZE / 2;
  byte[] startRow = ROWS[startRowNum];
  scan.withStartRow(startRow);
  storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, QUALSIZE * (startRowNum + 1),
      startRowNum + 1, false);

  // Case 3: Test a reversed scan with a specified start row and specified
  // qualifiers
  assertTrue(QUALSIZE > 2);
  scan.addColumn(FAMILYNAME, QUALS[0]);
  scan.addColumn(FAMILYNAME, QUALS[2]);
  storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, 2 * (startRowNum + 1), startRowNum + 1,
      false);

  // Case 4: Test a reversed scan with MVCC, based on case 3
  for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
    LOG.info("Setting read point to " + readPoint);
    storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, readPoint);
    int expectedRowCount = 0;
    int expectedKVCount = 0;
    for (int i = startRowNum; i >= 0; i--) {
      int kvCount = 0;
      if (makeMVCC(i, 0) <= readPoint) {
        kvCount++;
      }
      if (makeMVCC(i, 2) <= readPoint) {
        kvCount++;
      }
      if (kvCount > 0) {
        expectedRowCount++;
        expectedKVCount += kvCount;
      }
    }
    verifyCountAndOrder(storeScanner, expectedKVCount, expectedRowCount,
        false);
  }
}
 
Example 5
Source File: HFileGenerationFunction.java    From spliceengine with GNU Affero General Public License v3.0
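// Builds an HFileContext carrying compression, checksum settings, block size, optional
// tag support (for HFile format v3+), and data block encoding, then uses it to configure
// a bulk-import StoreFileWriter, optionally pinned to a favored node.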
private StoreFileWriter getNewWriter(Configuration conf, BulkImportPartition partition)
        throws IOException {

    Compression.Algorithm compression = Compression.getCompressionAlgorithmByName(compressionAlgorithm);
    BloomType bloomType = BloomType.ROW;
    int blockSize = HConstants.DEFAULT_BLOCKSIZE;
    DataBlockEncoding encoding = DataBlockEncoding.NONE;
    Configuration tempConf = new Configuration(conf);
    tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
    HFileContextBuilder contextBuilder = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(blockSize);

    if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
        contextBuilder.withIncludesTags(true);
    }

    contextBuilder.withDataBlockEncoding(encoding);
    HFileContext hFileContext = contextBuilder.build();
    try {
        Path familyPath = new Path(partition.getFilePath());
        // Get favored nodes as late as possible. This is the best we can do. If the region gets moved after this
        // point, locality is not guaranteed.
        InetSocketAddress favoredNode = getFavoredNode(partition);
        StoreFileWriter.Builder builder =
                new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
                        .withOutputDir(familyPath).withBloomType(bloomType)
                        .withFileContext(hFileContext);

        if (favoredNode != null) {
            InetSocketAddress[] favoredNodes = new InetSocketAddress[1];
            favoredNodes[0] = favoredNode;
            builder.withFavoredNodes(favoredNodes);
        }

        return builder.build();
    } catch (Exception e) {
        throw new IOException(e);
    }
}