org.apache.hadoop.hbase.io.hfile.CacheConfig Java Examples

The following examples show how to use org.apache.hadoop.hbase.io.hfile.CacheConfig. They are drawn from several open-source projects; the source file and originating project are noted above each example.
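Most of the examples share one basic pattern: construct a CacheConfig from a Hadoop Configuration and pass it to an HFile (or store file) reader or writer factory. The sketch below illustrates that pattern in isolation; it is not taken from any of the projects that follow, the path is a hypothetical placeholder, and imports are elided as in the other snippets.

private static void openReaderWithCacheConfig() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  Path path = new Path("/tmp/example.hfile"); // hypothetical HFile location
  FileSystem fs = path.getFileSystem(conf);
  // CacheConfig picks up the block cache settings (cache-on-read/write, etc.) from conf.
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf);
  try {
    // ... scan or inspect the file, as in Examples #18, #19 and #27 below ...
  } finally {
    reader.close();
  }
}
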
Example #1
Source File: RegionCoprocessorHost.java    From hbase with Apache License 2.0
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper} wrapping the file's input stream
 * @param size full size of the file
 * @param cacheConf the cache configuration to use
 * @param r original reference file. This is non-null only when reading a split file.
 * @return a Reader instance to use instead of the base reader if overriding
 * default behavior, null otherwise
 * @throws IOException if a coprocessor hook fails
 */
public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p,
    final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
    final Reference r) throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return null;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, null) {
        @Override
        public StoreFileReader call(RegionObserver observer) throws IOException {
          return observer.preStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r,
              getResult());
        }
      });
}
 
Example #2
Source File: IndexHalfStoreFileReader.java    From phoenix with Apache License 2.0
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param cacheConf the cache configuration to use
 * @param in {@link FSDataInputStreamWrapper} wrapping the file's input stream
 * @param size full size of the file
 * @param r original reference file, used to determine the split key and the half (top or bottom)
 * @param conf configuration
 * @param indexMaintainers the index maintainers
 * @param viewConstants the view constants
 * @param regionInfo the child region's info
 * @param regionStartKeyInHFile start key of the region as stored in the HFile
 * @param splitKey the split key; if null, the reference file's split key is used
 * @param primaryReplicaStoreFile whether this is a primary replica store file
 * @param refCount reference count for the store file
 * @param currentRegion the current region
 * @throws IOException if the underlying reader cannot be created
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final FSDataInputStreamWrapper in, long size, final Reference r,
        final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final RegionInfo regionInfo,
        byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile,
        AtomicInteger refCount, RegionInfo currentRegion) throws IOException {
    super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, false,
            conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(new KeyValue.KeyOnlyKeyValue(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.childRegionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
    this.refCount = refCount;
    this.currentRegion = currentRegion;
}
 
Example #3
Source File: TestHStoreFile.java    From hbase with Apache License 2.0
/**
 * Check if data block encoding information is saved correctly in HFile's file info.
 */
@Test
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");

  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  cacheConf = new CacheConfig(conf);
  HFileContext meta =
    new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES).withDataBlockEncoding(dataBlockEncoderAlgo).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build();
  writer.close();

  HStoreFile storeFile =
    new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
  storeFile.initReader();
  StoreFileReader reader = storeFile.getReader();

  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
 
Example #4
Source File: RangerAuthorizationCoprocessor.java    From ranger with Apache License 2.0
@Override
public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size,
											  CacheConfig cacheConf, Reference r, StoreFileReader reader) throws IOException {
	final StoreFileReader ret;

	if(LOG.isDebugEnabled()) {
		LOG.debug("==> RangerAuthorizationCoprocessor.preStoreFileReaderOpen()");
	}

	try {
		activatePluginClassLoader();
		ret = implRegionObserver.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, reader);
	} finally {
		deactivatePluginClassLoader();
	}

	if(LOG.isDebugEnabled()) {
		LOG.debug("<== RangerAuthorizationCoprocessor.preStoreFileReaderOpen()");
	}

	return ret;
}
 
Example #5
Source File: IndexHalfStoreFileReader.java    From phoenix with Apache License 2.0
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param cacheConf the cache configuration to use
 * @param in {@link FSDataInputStreamWrapper} wrapping the file's input stream
 * @param size full size of the file
 * @param r original reference file, used to determine the split key and the half (top or bottom)
 * @param conf configuration
 * @param indexMaintainers the index maintainers
 * @param viewConstants the view constants
 * @param regionInfo the region's info
 * @param regionStartKeyInHFile start key of the region as stored in the HFile
 * @param splitKey the split key; if null, the reference file's split key is used
 * @throws IOException if the underlying reader cannot be created
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final FSDataInputStreamWrapper in, long size, final Reference r,
        final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final HRegionInfo regionInfo,
        byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
    super(fs, p, in, size, cacheConf, conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.regionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
}
 
Example #6
Source File: IndexHalfStoreFileReader.java    From phoenix with Apache License 2.0
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param cacheConf the cache configuration to use
 * @param r original reference file, used to determine the split key and the half (top or bottom)
 * @param conf configuration
 * @param indexMaintainers the index maintainers
 * @param viewConstants the view constants
 * @param regionInfo the region's info
 * @param regionStartKeyInHFile start key of the region as stored in the HFile
 * @param splitKey the split key; if null, the reference file's split key is used
 * @throws IOException if the underlying reader cannot be created
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final Reference r, final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final HRegionInfo regionInfo,
        final byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
    super(fs, p, cacheConf, conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.regionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
}
 
Example #7
Source File: TestRegionObserverInterface.java    From hbase with Apache License 2.0
private static void createHFile(Configuration conf, FileSystem fs, Path path, byte[] family,
    byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
      .withFileContext(context).create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv =
          new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Example #8
Source File: RegionCoprocessorHost.java    From hbase with Apache License 2.0
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper} wrapping the file's input stream
 * @param size full size of the file
 * @param cacheConf the cache configuration to use
 * @param r original reference file. This is non-null only when reading a split file.
 * @param reader the base reader instance
 * @return the reader to use
 * @throws IOException if a coprocessor hook fails
 */
public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p,
    final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
    final Reference r, final StoreFileReader reader) throws IOException {
  if (this.coprocEnvironments.isEmpty()) {
    return reader;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, reader) {
        @Override
        public StoreFileReader call(RegionObserver observer) throws IOException {
          return observer.postStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r,
              getResult());
        }
      });
}
 
Example #9
Source File: TestAccessController.java    From hbase with Apache License 2.0
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {
  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
        .withFileContext(context).create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
 
Example #10
Source File: OnlineOfflineStateModelFactory.java    From terrapin with Apache License 2.0
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message,
                                      NotificationContext context) {
  Pair<String, String> hdfsPathAndPartition = getHdfsPathAndPartitionNum(message);
  String hdfsPath = hdfsPathAndPartition.getLeft();
  LOG.info("Opening " + hdfsPath);
  try {
    // TODO(varun): Maybe retry here.
    HColumnDescriptor family = new HColumnDescriptor(Constants.HFILE_COLUMN_FAMILY);
    family.setBlockCacheEnabled(isBlockCacheEnabled);
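    // The two-argument CacheConfig honors the per-family block cache setting above.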
    Reader r = readerFactory.createHFileReader(hdfsPath, new CacheConfig(conf, family));
    resourcePartitionMap.addReader(
        message.getResourceName(), hdfsPathAndPartition.getRight(), r);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example #11
Source File: TestMobStoreCompaction.java    From hbase with Apache License 2.0
private long countMobCellsInMetadata() throws IOException {
  long mobCellsCount = 0;
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableDescriptor.getTableName(),
    familyDescriptor.getNameAsString());
  Configuration copyOfConf = new Configuration(conf);
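  // Zero out the block cache size so these metadata reads bypass the cache entirely.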
  copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
  CacheConfig cacheConfig = new CacheConfig(copyOfConf);
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
    for (FileStatus file : files) {
      HStoreFile sf = new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true);
      sf.initReader();
      Map<byte[], byte[]> fileInfo = sf.getReader().loadFileInfo();
      byte[] count = fileInfo.get(MOB_CELLS_COUNT);
      assertTrue(count != null);
      mobCellsCount += Bytes.toLong(count);
    }
  }
  return mobCellsCount;
}
 
Example #12
Source File: HFileOutputFormat.java    From terrapin with Apache License 2.0
public RecordWriter<BytesWritable, BytesWritable> getRecordWriter(
        TaskAttemptContext context) throws IOException {
  // Get the path of the temporary output file
  final Path outputPath = FileOutputFormat.getOutputPath(context);
  final Path outputDir = new FileOutputCommitter(outputPath, context).getWorkPath();
  final Configuration conf = context.getConfiguration();
  final FileSystem fs = outputDir.getFileSystem(conf);

  int blockSize = conf.getInt(Constants.HFILE_BLOCKSIZE, 16384);
  // Default to snappy.
  Compression.Algorithm compressionAlgorithm = getAlgorithm(
      conf.get(Constants.HFILE_COMPRESSION));
  final StoreFile.Writer writer =
      new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs, blockSize)
          .withFilePath(hfilePath(outputPath, context.getTaskAttemptID().getTaskID().getId()))
          .withCompression(compressionAlgorithm)
          .build();
  return new HFileRecordWriter(writer);
}
 
Example #13
Source File: BloomFilterFactory.java    From hbase with Apache License 2.0
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
 * @param conf configuration
 * @param cacheConf cache configuration; determines whether Bloom blocks are cached on write
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or one could not be created.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      null, BloomType.ROW);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Example #14
Source File: TestHStore.java    From hbase with Apache License 2.0
private void addStoreFile() throws IOException {
  HStoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId().orElse(0L);
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
      .withOutputDir(storedir)
      .withFileContext(fileContext)
      .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
 
Example #15
Source File: SimpleRegionObserver.java    From hbase with Apache License 2.0
@Override
public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, StoreFileReader reader) throws IOException {
  ctPreStoreFileReaderOpen.incrementAndGet();
  return null;
}
 
Example #16
Source File: StoreFileInfo.java    From hbase with Apache License 2.0
StoreFileReader postStoreFileReaderOpen(ReaderContext context, CacheConfig cacheConf,
    StoreFileReader reader) throws IOException {
  StoreFileReader res = reader;
  if (this.coprocessorHost != null) {
    res = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(),
      context.getInputStreamWrapper(), context.getFileSize(), cacheConf, reference, reader);
  }
  return res;
}
 
Example #17
Source File: TestMobStoreCompaction.java    From hbase with Apache License 2.0
/**
 * Create an HFile containing a single cell built from the given dummy data.
 */
private void createHFile(Path path, int rowIdx, byte[] dummyData) throws IOException {
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
      .withFileContext(meta).create();
  long now = System.currentTimeMillis();
  try {
    KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY,
        Bytes.toBytes("colX"), now, dummyData);
    writer.append(kv);
  } finally {
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
 
Example #18
Source File: TestEncryptionRandomKeying.java    From hbase with Apache License 2.0
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), true, conf);
  try {
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Example #19
Source File: TestBulkLoadHFiles.java    From hbase with Apache License 2.0
private int verifyHFile(Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader =
    HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf);
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  assertTrue(count > 0);
  reader.close();
  return count;
}
 
Example #20
Source File: TestBlockEvictionFromClient.java    From hbase with Apache License 2.0
private BlockCache setCacheProperties(HRegion region) {
  Iterator<HStore> strItr = region.getStores().iterator();
  BlockCache cache = null;
  while (strItr.hasNext()) {
    HStore store = strItr.next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    // Use the last one
    cache = cacheConf.getBlockCache().get();
  }
  return cache;
}
 
Example #21
Source File: TestCacheOnWriteInSchema.java    From hbase with Apache License 2.0
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache().get();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      final BlockType blockType = block.getBlockType();

      if (shouldBeCached != isCached &&
          (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
 
Example #22
Source File: StoreFileInfo.java    From hbase with Apache License 2.0
StoreFileReader preStoreFileReaderOpen(ReaderContext context, CacheConfig cacheConf)
    throws IOException {
  StoreFileReader reader = null;
  if (this.coprocessorHost != null) {
    reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(),
      context.getInputStreamWrapper(), context.getFileSize(), cacheConf, reference);
  }
  return reader;
}
 
Example #23
Source File: HalfStoreFileReader.java    From hbase with Apache License 2.0
/**
 * Creates a half-file reader for an HFile referred to by an HFileLink.
 * @param context Reader context info
 * @param fileInfo HFile info
 * @param cacheConf CacheConfig
 * @param r original reference file (contains top or bottom)
 * @param refCount reference count
 * @param conf Configuration
 */
public HalfStoreFileReader(final ReaderContext context, final HFileInfo fileInfo,
    final CacheConfig cacheConf, final Reference r,
    AtomicInteger refCount, final Configuration conf) throws IOException {
  super(context, fileInfo, cacheConf, refCount, conf);
  // This is not the actual midkey for this half-file; it's just the border
  // around which the top and bottom halves are split. We have to look in the
  // files to find the actual first and last keys of each half. Half-files
  // don't have a midkey of their own; the absence of a midkey is how we
  // indicate that a file is not splittable.
  this.splitkey = r.getSplitKey();
  this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
  // Is it top or bottom half?
  this.top = Reference.isTopFileRegion(r.getFileRegion());
}
 
Example #24
Source File: SimpleRegionObserver.java    From hbase with Apache License 2.0
@Override
public StoreFileReader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, StoreFileReader reader) throws IOException {
  ctPostStoreFileReaderOpen.incrementAndGet();
  return reader;
}
 
Example #25
Source File: TestHalfStoreFileReader.java    From hbase with Apache License 2.0
private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell seekBefore,
    CacheConfig cacheConfig) throws IOException {
  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build();
  HFileInfo fileInfo = new HFileInfo(context, TEST_UTIL.getConfiguration());
  final HalfStoreFileReader halfreader = new HalfStoreFileReader(context, fileInfo, cacheConfig,
      bottom, new AtomicInteger(0), TEST_UTIL.getConfiguration());
  fileInfo.initMetaAndIndex(halfreader.getHFileReader());
  halfreader.loadFileInfo();
  final HFileScanner scanner = halfreader.getScanner(false, false);
  scanner.seekBefore(seekBefore);
  return scanner.getCell();
}
 
Example #26
Source File: HFileCorruptionChecker.java    From hbase-operator-tools with Apache License 2.0
public HFileCorruptionChecker(Configuration conf, ExecutorService executor,
    boolean quarantine) throws IOException {
  this.conf = conf;
  this.fs = FileSystem.get(conf);
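  // A disabled cache config keeps corruption-check reads out of the block cache.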
  this.cacheConf = CacheConfig.DISABLED;
  this.executor = executor;
  this.inQuarantineMode = quarantine;
}
 
Example #27
Source File: TestHStore.java    From hbase with Apache License 2.0
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family)
      .setCompressionType(Compression.Algorithm.GZ).setDataBlockEncoding(DataBlockEncoding.DIFF)
      .build();
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFileWriter writer =
      store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf);
  assertEquals(hcd.getCompressionType(), reader.getTrailer().getCompressionCodec());
  assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Example #28
Source File: TestAtomicOperation.java    From hbase with Apache License 2.0
@After
public void teardown() throws IOException {
  if (region != null) {
    CacheConfig cacheConfig = region.getStores().get(0).getCacheConfig();
    region.close();
    WAL wal = region.getWAL();
    if (wal != null) {
      wal.close();
    }
    cacheConfig.getBlockCache().ifPresent(BlockCache::shutdown);
    region = null;
  }
}
 
Example #29
Source File: HFileCorruptionChecker.java    From hbase with Apache License 2.0
public HFileCorruptionChecker(Configuration conf, ExecutorService executor,
    boolean quarantine) throws IOException {
  this.conf = conf;
  this.fs = FileSystem.get(conf);
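  // A disabled cache config keeps corruption-check reads out of the block cache.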
  this.cacheConf = CacheConfig.DISABLED;
  this.executor = executor;
  this.inQuarantineMode = quarantine;
}
 
Example #30
Source File: TestHStore.java    From hbase with Apache License 2.0
/**
 * Test for HBASE-1686: an empty store file should be handled correctly when
 * the store is reopened.
 */
@Test
public void testEmptyStoreFile() throws IOException {
  init(this.name.getMethodName());
  // Write a store file.
  this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
  this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null);
  flush(1);
  // Now put in place an empty store file. It's a little tricky; we have to
  // do it manually with a hacked-in sequence id.
  HStoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = f.getMaxSequenceId();
  Configuration c = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(c);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
      .withOutputDir(storedir)
      .withFileContext(meta)
      .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  this.store.close();
  // Reopen it... should pick up two files
  this.store =
      new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c, false);
  assertEquals(2, this.store.getStorefilesCount());

  result = HBaseTestingUtility.getFromStoreFile(store,
      get.getRow(),
      qualifiers);
  assertEquals(1, result.size());
}