Java Code Examples for org.apache.hadoop.hbase.HConstants#DEFAULT_BLOCKSIZE

The following examples show how to use org.apache.hadoop.hbase.HConstants#DEFAULT_BLOCKSIZE. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
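Before the examples, a minimal sketch of the constant itself (illustrative only, not taken from any of the projects below): HConstants.DEFAULT_BLOCKSIZE is the default HFile block size, 64 * 1024 bytes, and is typically passed wherever an explicit block size is required, for instance when building an HFileContext as Examples 11 and 12 do.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class DefaultBlocksizeSketch {
  public static void main(String[] args) {
    // HConstants.DEFAULT_BLOCKSIZE is 64 * 1024 (65536) bytes.
    System.out.println("Default HFile block size: " + HConstants.DEFAULT_BLOCKSIZE);

    // The constant is typically fed to an HFileContextBuilder; the resulting
    // context is then handed to a writer builder (see Examples 11 and 12).
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(HConstants.DEFAULT_BLOCKSIZE)
        .build();
    System.out.println("Context built with block size " + HConstants.DEFAULT_BLOCKSIZE);
  }
}
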
Example 1
Source File: TestCompactionScanQueryMatcher.java    From hbase with Apache License 2.0
private void testDropDeletes(byte[] from, byte[] to, byte[][] rows, MatchCode... expected)
    throws IOException {
  long now = EnvironmentEdgeManager.currentTime();
  // Set time to purge deletes to negative value to avoid it ever happening.
  ScanInfo scanInfo = new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
      HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false);

  CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
    ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
    HConstants.OLDEST_TIMESTAMP, now, from, to, null);
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(rows.length);
  byte[] prevRow = null;
  for (byte[] row : rows) {
    if (prevRow == null || !Bytes.equals(prevRow, row)) {
      qm.setToNewRow(KeyValueUtil.createFirstOnRow(row));
      prevRow = row;
    }
    actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
  }

  assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
    assertEquals(expected[i], actual.get(i));
  }
}
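
Most of the test examples here pass DEFAULT_BLOCKSIZE as the block-size argument of the ScanInfo constructor. For orientation, here is the same call with its positional arguments labelled; the labels are inferred from the inline comments in Example 10 and may not match the exact parameter names of your HBase version.

ScanInfo scanInfo = new ScanInfo(
    conf,                          // Configuration
    family,                        // column family name
    0,                             // minVersions
    1,                             // maxVersions
    ttl,                           // ttl
    KeepDeletedCells.FALSE,        // keepDeletedCells
    HConstants.DEFAULT_BLOCKSIZE,  // block size
    0,                             // timeToPurgeDeletes
    comparator,                    // CellComparator
    false);                        // new-version-behavior flag (assumed)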
 
Example 2
Source File: TestStoreScanner.java    From hbase with Apache License 2.0
/**
 * Ensure that expired delete family markers don't override valid puts
 */
@Test
public void testExpiredDeleteFamily() throws Exception {
  long now = System.currentTimeMillis();
  KeyValue[] kvs = new KeyValue[] {
    new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000,
      KeyValue.Type.DeleteFamily),
    create("R1", "cf", "a", now-10, KeyValue.Type.Put,
      "dont-care"),
  };
  List<KeyValueScanner> scanners = scanFixture(kvs);
  Scan scan = new Scan();
  scan.readVersions(1);
  // scanner with ttl equal to 500
  ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
      HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
  try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
    List<Cell> results = new ArrayList<>();
    assertEquals(true, scanner.next(results));
    assertEquals(1, results.size());
    assertEquals(kvs[1], results.get(0));
    results.clear();

    assertEquals(false, scanner.next(results));
  }
}
 
Example 3
Source File: TestStoreScanner.java    From hbase with Apache License 2.0
@Test
public void testReadVersionWithRawAndFilter() throws IOException {
  ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE,
      KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0,
      CellComparator.getInstance(), false);
  KeyValue [] kvs = new KeyValue[] {
          create("R1", "cf", "a", 3, KeyValue.Type.Put, "dont-care"),
          create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"),
          create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care")
  };
  List<KeyValueScanner> scanners = Arrays.asList(
    new KeyValueScanner[]{
      new KeyValueScanFixture(CellComparator.getInstance(), kvs)
    });

  BinaryComparator comp = new BinaryComparator(Bytes.toBytes("a"));
  Filter filter = new QualifierFilter(CompareOperator.EQUAL, comp);
  Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")).readVersions(2).setRaw(true);
  scanSpec.setFilter(filter);
  try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, null, scanners)) {
    List<Cell> results = new ArrayList<>();
    assertEquals(true, scan.next(results));
    assertEquals(2, results.size());
  }
}
 
Example 4
Source File: FSUtils.java    From hbase with Apache License 2.0
/**
 * Check if the short-circuit read buffer size is set and, if not, set it to the HBase default.
 * @param conf configuration to check and update in place if the buffer size is unset
 */
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
  final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
  final int notSet = -1;
  // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
  final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
  int size = conf.getInt(dfsKey, notSet);
  // If a size is set, return -- we will use it.
  if (size != notSet) return;
  // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.
  int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
  conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
}
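
A brief usage sketch (assumed, not part of the source above): calling the helper on a Configuration leaves any explicit dfs.client.read.shortcircuit.buffer.size untouched and otherwise applies the HBase default of 2 * DEFAULT_BLOCKSIZE, unless hbase.dfs.client.read.shortcircuit.buffer.size overrides it.

Configuration conf = HBaseConfiguration.create();
FSUtils.checkShortCircuitReadBufferSize(conf);
// Unless the key was already set (or overridden via the hbase.-prefixed key),
// the short-circuit buffer size is now 2 * HConstants.DEFAULT_BLOCKSIZE = 131072.
int bufferSize = conf.getInt("dfs.client.read.shortcircuit.buffer.size", -1);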
 
Example 5
Source File: TestStoreScanner.java    From hbase with Apache License 2.0
@Test
public void testWildCardTtlScan() throws IOException {
  long now = System.currentTimeMillis();
  KeyValue [] kvs = new KeyValue[] {
      create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"),
      create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
      create("R1", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"),
      create("R1", "cf", "d", now-10000, KeyValue.Type.Put, "dont-care"),
      create("R2", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
      create("R2", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
      create("R2", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"),
      create("R2", "cf", "c", now-1000, KeyValue.Type.Put, "dont-care")
  };
  List<KeyValueScanner> scanners = scanFixture(kvs);
  Scan scan = new Scan();
  scan.readVersions(1);
  ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
      HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
  try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
    List<Cell> results = new ArrayList<>();
    assertEquals(true, scanner.next(results));
    assertEquals(2, results.size());
    assertEquals(kvs[1], results.get(0));
    assertEquals(kvs[2], results.get(1));
    results.clear();

    assertEquals(true, scanner.next(results));
    assertEquals(3, results.size());
    assertEquals(kvs[4], results.get(0));
    assertEquals(kvs[5], results.get(1));
    assertEquals(kvs[6], results.get(2));
    results.clear();

    assertEquals(false, scanner.next(results));
  }
}
 
Example 6
Source File: TestStoreScanner.java    From hbase with Apache License 2.0
@Test
public void testPreadNotEnabledForCompactionStoreScanners() throws Exception {
  long now = System.currentTimeMillis();
  KeyValue[] kvs = new KeyValue[] {
    new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000,
      KeyValue.Type.DeleteFamily),
    create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), };
  List<KeyValueScanner> scanners = scanFixture(kvs);
  ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
      HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
  try (StoreScanner storeScanner = new StoreScanner(scanInfo, -1,
    ScanType.COMPACT_RETAIN_DELETES, scanners)) {
    assertFalse(storeScanner.isScanUsePread());
  }
}
 
Example 7
Source File: CommonFSUtils.java    From hbase with Apache License 2.0
/**
 * Check if short circuit read buffer size is set and if not, set it to hbase value.
 * @param conf must not be null
 */
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
  final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
  final int notSet = -1;
  // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
  final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
  int size = conf.getInt(dfsKey, notSet);
  // If a size is set, return -- we will use it.
  if (size != notSet) {
    return;
  }
  // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.
  int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
  conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
}
 
Example 8
Source File: TestDefaultMemStore.java    From hbase with Apache License 2.0
/** Test getNextRow from memstore
 * @throws InterruptedException
 */
@Test
public void testGetNextRow() throws Exception {
  addRows(this.memstore);
  // Add more versions to make it a little more interesting.
  Thread.sleep(1);
  addRows(this.memstore);
  Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY);
  assertTrue(CellComparatorImpl.COMPARATOR.compareRows(closestToEmpty,
      new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
  for (int i = 0; i < ROW_COUNT; i++) {
    Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
        System.currentTimeMillis()));
    if (i + 1 == ROW_COUNT) {
      assertNull(nr);
    } else {
      assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr,
          new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
    }
  }
  // Starting from each row, validate that the results contain the starting row
  Configuration conf = HBaseConfiguration.create();
  for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) {
    ScanInfo scanInfo =
        new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, KeepDeletedCells.FALSE,
            HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false);
    try (InternalScanner scanner =
        new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null,
            memstore.getScanners(0))) {
      List<Cell> results = new ArrayList<>();
      for (int i = 0; scanner.next(results); i++) {
        int rowId = startRowId + i;
        Cell left = results.get(0);
        byte[] row1 = Bytes.toBytes(rowId);
        assertTrue("Row name",
          CellComparatorImpl.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0);
        assertEquals("Count of columns", QUALIFIER_COUNT, results.size());
        List<Cell> row = new ArrayList<>();
        for (Cell kv : results) {
          row.add(kv);
        }
        isExpectedRowWithoutTimestamps(rowId, row);
        // Clear out set. Otherwise row results accumulate.
        results.clear();
      }
    }
  }
}
 
Example 9
Source File: TestCompactingMemStore.java    From hbase with Apache License 2.0
/** Test getNextRow from memstore
 * @throws InterruptedException
 */
@Override
@Test
public void testGetNextRow() throws Exception {
  addRows(this.memstore);
  // Add more versions to make it a little more interesting.
  Thread.sleep(1);
  addRows(this.memstore);
  Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY);
  assertTrue(CellComparator.getInstance().compareRows(closestToEmpty,
      new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
  for (int i = 0; i < ROW_COUNT; i++) {
    Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
        System.currentTimeMillis()));
    if (i + 1 == ROW_COUNT) {
      assertNull(nr);
    } else {
      assertTrue(CellComparator.getInstance().compareRows(nr,
          new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
    }
  }
  // Starting from each row, validate that the results contain the starting row
  Configuration conf = HBaseConfiguration.create();
  for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) {
    ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE,
        KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false);
    try (InternalScanner scanner =
        new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null,
            memstore.getScanners(0))) {
      List<Cell> results = new ArrayList<>();
      for (int i = 0; scanner.next(results); i++) {
        int rowId = startRowId + i;
        Cell left = results.get(0);
        byte[] row1 = Bytes.toBytes(rowId);
        assertTrue("Row name",
          CellComparator.getInstance().compareRows(left, row1, 0, row1.length) == 0);
        assertEquals("Count of columns", QUALIFIER_COUNT, results.size());
        List<Cell> row = new ArrayList<>();
        for (Cell kv : results) {
          row.add(kv);
        }
        isExpectedRowWithoutTimestamps(rowId, row);
        // Clear out set. Otherwise row results accumulate.
        results.clear();
      }
    }
  }
}
 
Example 10
Source File: TestStoreScanner.java    From hbase with Apache License 2.0
@Test
public void testDeleteMarkerLongevity() throws Exception {
  try {
    final long now = System.currentTimeMillis();
    EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
      @Override
      public long currentTime() {
        return now;
      }
    });
    KeyValue[] kvs = new KeyValue[]{
      /*0*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
      now - 100, KeyValue.Type.DeleteFamily), // live
      /*1*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
      now - 1000, KeyValue.Type.DeleteFamily), // expired
      /*2*/ create("R1", "cf", "a", now - 50,
      KeyValue.Type.Put, "v3"), // live
      /*3*/ create("R1", "cf", "a", now - 55,
      KeyValue.Type.Delete, "dontcare"), // live
      /*4*/ create("R1", "cf", "a", now - 55,
      KeyValue.Type.Put, "deleted-version v2"), // deleted
      /*5*/ create("R1", "cf", "a", now - 60,
      KeyValue.Type.Put, "v1"), // live
      /*6*/ create("R1", "cf", "a", now - 65,
      KeyValue.Type.Put, "v0"), // max-version reached
      /*7*/ create("R1", "cf", "a",
      now - 100, KeyValue.Type.DeleteColumn, "dont-care"), // max-version
      /*8*/ create("R1", "cf", "b", now - 600,
      KeyValue.Type.DeleteColumn, "dont-care"), //expired
      /*9*/ create("R1", "cf", "b", now - 70,
      KeyValue.Type.Put, "v2"), //live
      /*10*/ create("R1", "cf", "b", now - 750,
      KeyValue.Type.Put, "v1"), //expired
      /*11*/ create("R1", "cf", "c", now - 500,
      KeyValue.Type.Delete, "dontcare"), //expired
      /*12*/ create("R1", "cf", "c", now - 600,
      KeyValue.Type.Put, "v1"), //expired
      /*13*/ create("R1", "cf", "c", now - 1000,
      KeyValue.Type.Delete, "dontcare"), //expired
      /*14*/ create("R1", "cf", "d", now - 60,
      KeyValue.Type.Put, "expired put"), //live
      /*15*/ create("R1", "cf", "d", now - 100,
      KeyValue.Type.Delete, "not-expired delete"), //live
    };
    List<KeyValueScanner> scanners = scanFixture(kvs);
    ScanInfo scanInfo = new ScanInfo(CONF, Bytes.toBytes("cf"),
      0 /* minVersions */,
      2 /* maxVersions */, 500 /* ttl */,
      KeepDeletedCells.FALSE /* keepDeletedCells */,
      HConstants.DEFAULT_BLOCKSIZE /* block size */,
      200, /* timeToPurgeDeletes */
      CellComparator.getInstance(), false);
    try (StoreScanner scanner =
        new StoreScanner(scanInfo, 2, ScanType.COMPACT_DROP_DELETES, scanners)) {
      List<Cell> results = new ArrayList<>();
      assertEquals(true, scanner.next(results));
      assertEquals(kvs[0], results.get(0));
      assertEquals(kvs[2], results.get(1));
      assertEquals(kvs[3], results.get(2));
      assertEquals(kvs[5], results.get(3));
      assertEquals(kvs[9], results.get(4));
      assertEquals(kvs[14], results.get(5));
      assertEquals(kvs[15], results.get(6));
      assertEquals(7, results.size());
    }
  } finally {
    EnvironmentEdgeManagerTestHelper.reset();
  }
}
 
Example 11
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
@Test
public void testReversibleStoreScanner() throws IOException {
  // write data to one memstore and two store files
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(
      TEST_UTIL.getDataTestDir("testReversibleStoreScanner"), "regionname"),
      "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  HFileContextBuilder hcBuilder = new HFileContextBuilder();
  hcBuilder.withBlockSize(2 * 1024);
  HFileContext hFileContext = hcBuilder.build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();
  StoreFileWriter writer2 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();

  MemStore memstore = new DefaultMemStore();
  writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1,
      writer2 });

  HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  ScanInfo scanInfo =
      new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
          KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);

  // Case 1: Test a full reversed scan
  Scan scan = new Scan();
  scan.setReversed(true);
  StoreScanner storeScanner =
      getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, QUALSIZE * ROWSIZE, ROWSIZE, false);

  // Case 2: Test reversed scan with a specified start row
  int startRowNum = ROWSIZE / 2;
  byte[] startRow = ROWS[startRowNum];
  scan.withStartRow(startRow);
  storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, QUALSIZE * (startRowNum + 1),
      startRowNum + 1, false);

  // Case 3: Test reversed scan with a specified start row and specified qualifiers
  assertTrue(QUALSIZE > 2);
  scan.addColumn(FAMILYNAME, QUALS[0]);
  scan.addColumn(FAMILYNAME, QUALS[2]);
  storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, 2 * (startRowNum + 1), startRowNum + 1,
      false);

  // Case 4: Test reversed scan with MVCC based on case 3
  for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
    LOG.info("Setting read point to " + readPoint);
    storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, readPoint);
    int expectedRowCount = 0;
    int expectedKVCount = 0;
    for (int i = startRowNum; i >= 0; i--) {
      int kvCount = 0;
      if (makeMVCC(i, 0) <= readPoint) {
        kvCount++;
      }
      if (makeMVCC(i, 2) <= readPoint) {
        kvCount++;
      }
      if (kvCount > 0) {
        expectedRowCount++;
        expectedKVCount += kvCount;
      }
    }
    verifyCountAndOrder(storeScanner, expectedKVCount, expectedRowCount,
        false);
  }
}
 
Example 12
Source File: HFileGenerationFunction.java    From spliceengine with GNU Affero General Public License v3.0
private StoreFileWriter getNewWriter(Configuration conf, BulkImportPartition partition)
        throws IOException {

    Compression.Algorithm compression = Compression.getCompressionAlgorithmByName(compressionAlgorithm);
    BloomType bloomType = BloomType.ROW;
    int blockSize = HConstants.DEFAULT_BLOCKSIZE;
    DataBlockEncoding encoding = DataBlockEncoding.NONE;
    Configuration tempConf = new Configuration(conf);
    tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
    HFileContextBuilder contextBuilder = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(blockSize);

    if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
        contextBuilder.withIncludesTags(true);
    }

    contextBuilder.withDataBlockEncoding(encoding);
    HFileContext hFileContext = contextBuilder.build();
    try {
        Path familyPath = new Path(partition.getFilePath());
        // Get favored nodes as late as possible. This is the best we can do. If the region gets moved after this
        // point, locality is not guaranteed.
        InetSocketAddress favoredNode = getFavoredNode(partition);
        StoreFileWriter.Builder builder =
                new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
                        .withOutputDir(familyPath).withBloomType(bloomType)
                        .withFileContext(hFileContext);

        if (favoredNode != null) {
            InetSocketAddress[] favoredNodes = new InetSocketAddress[1];
            favoredNodes[0] = favoredNode;
            builder.withFavoredNodes(favoredNodes);
        }

        return builder.build();
    } catch (Exception e) {
        throw new IOException(e);
    }
}
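
A hypothetical follow-up to this example (sortedCells is an illustrative name, not from the source): once the writer is built with the DEFAULT_BLOCKSIZE-backed HFileContext, cells are appended in sorted order and the writer is closed to flush the HFile.

StoreFileWriter writer = getNewWriter(conf, partition);
try {
    for (Cell cell : sortedCells) {
        // Cells must arrive in the store file's sort order.
        writer.append(cell);
    }
} finally {
    writer.close();
}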