org.apache.hadoop.hbase.io.hfile.HFile Java Examples

The following examples show how to use org.apache.hadoop.hbase.io.hfile.HFile. They are drawn from open source projects; the source file, project, and license are noted above each example.
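Most of the examples follow the same pattern: obtain an HFile.Writer from HFile.getWriterFactory (or getWriterFactoryNoCache), configure it with an HFileContext, append KeyValues in ascending key order, and later open the file with HFile.createReader and iterate it with an HFileScanner. The sketch below is assembled from the calls used in the examples that follow; it is an illustration rather than code from any one project, and the conf, fs, and path parameters are assumed to be set up by the caller.

private static void writeAndReadHFile(Configuration conf, FileSystem fs, Path path)
    throws IOException {
  // Write a single cell into a new HFile.
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  try {
    // Cells must be appended in ascending key order.
    writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), System.currentTimeMillis(), Bytes.toBytes("value1")));
  } finally {
    writer.close();
  }

  // Read the file back and walk every cell.
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf);
  try {
    HFileScanner scanner = reader.getScanner(false, false); // no block caching, no pread
    if (scanner.seekTo()) {
      do {
        // Process the current cell here (e.g. scanner.getCell() on recent HBase versions).
      } while (scanner.next());
    }
  } finally {
    reader.close();
  }
}
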
Example #1
Source File: TestBulkLoad.java    From hbase with Apache License 2.0
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContextBuilder().build());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
        .setRow(randomBytes)
        .setFamily(family)
        .setQualifier(randomBytes)
        .setTimestamp(0L)
        .setType(KeyValue.Type.Put.getCode())
        .setValue(randomBytes)
        .build()));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
 
Example #2
Source File: HFileSortedOplog.java    From gemfirexd-oss with Apache License 2.0
public HFileSortedOplogWriter() throws IOException {
      writer = HFile.getWriterFactory(hconf, hcache)
          .withPath(fs, path)
          .withBlockSize(sopConfig.getBlockSize())
          .withBytesPerChecksum(sopConfig.getBytesPerChecksum())
          .withChecksumType(HFileSortedOplogFactory.convertChecksum(sopConfig.getChecksum()))
//          .withComparator(sopConfig.getComparator())
          .withCompression(HFileSortedOplogFactory.convertCompression(sopConfig.getCompression()))
          .withDataBlockEncoder(HFileSortedOplogFactory.convertEncoding(sopConfig.getKeyEncoding()))
          .create();
      
      bfw = sopConfig.isBloomFilterEnabled() ?
//          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
//              0, writer, sopConfig.getComparator())
          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
              0, writer)
          : null;
    }
 
Example #3
Source File: HFileSortedOplog.java    From gemfirexd-oss with Apache License 2.0
public HFileSortedOplogWriter(int keys) throws IOException {
      try {
        int hfileBlockSize = Integer.getInteger(
            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
            HoplogConfig.COMPRESSION_DEFAULT));

//        ByteComparator bc = new ByteComparator();
        writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fsProvider.getFS(), path)
            .withBlockSize(hfileBlockSize)
//            .withComparator(bc)
            .withCompression(compress)
            .create();
        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
            writer);

        logger.fine("Created hoplog writer with compression " + compress);
      } catch (IOException e) {
        logger.fine("IO Error while creating writer");
        throw e;
      }
    }
 
Example #4
Source File: SpliceDefaultCompactor.java    From spliceengine with GNU Affero General Public License v3.0
/**
 * Creates the HFileContext for a compaction writer. This is borrowed from DefaultCompactor.
 *
 * @param compression the compression algorithm to use, or null to fall back to the HFile default
 * @param includeMVCCReadpoint whether to include the MVCC read point in written cells
 * @param includesTag whether written cells carry tags
 * @param cryptoContext the encryption context for the new file
 * @return the constructed HFileContext
 */
private HFileContext createFileContext(Compression.Algorithm compression,
                                       boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
            .withIncludesMvcc(includeMVCCReadpoint)
            .withIncludesTags(includesTag)
            .withCompression(compression)
            .withCompressTags(store.getColumnFamilyDescriptor().isCompressTags())
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(store.getColumnFamilyDescriptor().getBlocksize())
            .withHBaseCheckSum(true)
            .withDataBlockEncoding(store.getColumnFamilyDescriptor().getDataBlockEncoding())
            .withEncryptionContext(cryptoContext)
            .withCreateTime(EnvironmentEdgeManager.currentTime())
            .build();
    return hFileContext;
}
 
Example #5
Source File: IntegrationTestIngestWithEncryption.java    From hbase with Apache License 2.0
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  if (!util.isDistributedCluster()) {
    // Inject required configuration if we are not running in distributed mode
    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
      Reader.class);
    conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
      Writer.class);
    conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
  }
  // Check if the cluster configuration can support this test
  try {
    EncryptionTest.testEncryption(conf, "AES", null);
  } catch (Exception e) {
    LOG.warn("Encryption configuration test did not pass, skipping test", e);
    return;
  }
  super.setUpCluster();
  initialized = true;
}
 
Example #6
Source File: VisibilityController.java    From hbase with Apache License 2.0
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();

  authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("The VisibilityController has been loaded with authorization checks disabled.");
  }

  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }

  // Do not create for master CPs
  if (!(env instanceof MasterCoprocessorEnvironment)) {
    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
        .getVisibilityLabelService(this.conf);
  }
}
 
Example #7
Source File: BloomFilterFactory.java    From hbase with Apache License 2.0
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
 * @param conf the configuration to read Bloom filter settings from
 * @param cacheConf the cache configuration
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or one could not be created.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      null, BloomType.ROW);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
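
The returned Bloom filter writer is already registered with the HFile writer via addInlineBlockWriter, so a caller only needs to keep the reference (to add entries to it) and handle the null case when Bloom filters are disabled. A hypothetical call site, with conf, cacheConf, expectedKeys, and writer assumed to exist, might look like this:

BloomFilterWriter deleteFamilyBloomWriter =
    BloomFilterFactory.createDeleteBloomAtWrite(conf, cacheConf, expectedKeys, writer);
if (deleteFamilyBloomWriter == null) {
  // Delete Family Bloom filters are disabled by configuration; continue without one.
}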
 
Example #8
Source File: DeleteDataFilesAction.java    From hbase with Apache License 2.0
@Override
public void perform() throws Exception {
  getLogger().info("Start deleting data files");
  FileSystem fs = CommonFSUtils.getRootDirFileSystem(getConf());
  Path rootDir = CommonFSUtils.getRootDir(getConf());
  Path defaultDir = rootDir.suffix("/data/default");
  RemoteIterator<LocatedFileStatus> iterator =  fs.listFiles(defaultDir, true);
  while (iterator.hasNext()){
    LocatedFileStatus status = iterator.next();
    if(!HFile.isHFileFormat(fs, status.getPath())){
      continue;
    }
    if(RandomUtils.nextFloat(0, 100) > chance){
      continue;
    }
    fs.delete(status.getPath(), true);
    getLogger().info("Deleting {}", status.getPath());
  }
  getLogger().info("Done deleting data files");
}
 
Example #9
Source File: HStore.java    From hbase with Apache License 2.0
private HFileContext createFileContext(Compression.Algorithm compression,
    boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
  if (compression == null) {
    compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  HFileContext hFileContext = new HFileContextBuilder()
                              .withIncludesMvcc(includeMVCCReadpoint)
                              .withIncludesTags(includesTag)
                              .withCompression(compression)
                              .withCompressTags(family.isCompressTags())
                              .withChecksumType(checksumType)
                              .withBytesPerCheckSum(bytesPerChecksum)
                              .withBlockSize(blocksize)
                              .withHBaseCheckSum(true)
                              .withDataBlockEncoding(family.getDataBlockEncoding())
                              .withEncryptionContext(cryptoContext)
                              .withCreateTime(EnvironmentEdgeManager.currentTime())
                              .withColumnFamily(family.getName())
                              .withTableName(region.getTableDescriptor()
                                  .getTableName().getName())
                              .withCellComparator(this.comparator)
                              .build();
  return hFileContext;
}
 
Example #10
Source File: TestAccessController.java    From hbase with Apache License 2.0
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {
  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
        .withFileContext(context).create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
 
Example #11
Source File: HFilePerformanceEvaluation.java    From hbase with Apache License 2.0
@Override
void setUp() throws Exception {

  HFileContextBuilder builder = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(codec))
      .withBlockSize(RFILE_BLOCKSIZE);
  
  if ("aes".equals(cipher)) {
    byte[] cipherKey = new byte[AES.KEY_LENGTH];
    new SecureRandom().nextBytes(cipherKey);
    builder.withEncryptionContext(Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, cipher))
        .setKey(cipherKey));
  } else if (!"none".equals(cipher)) {
    throw new IOException("Cipher " + cipher + " not supported.");
  }
  
  HFileContext hFileContext = builder.build();

  writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, mf)
      .withFileContext(hFileContext)
      .create();
}
 
Example #12
Source File: TestRegionObserverInterface.java    From hbase with Apache License 2.0
private static void createHFile(Configuration conf, FileSystem fs, Path path, byte[] family,
    byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
      .withFileContext(context).create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv =
          new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Example #13
Source File: TestHRegionServerBulkLoad.java    From hbase with Apache License 2.0
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows cells, all carrying the same value
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
 
Example #14
Source File: TestEncryptionRandomKeying.java    From hbase with Apache License 2.0
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), true, conf);
  try {
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Example #15
Source File: TestCacheOnWriteInSchema.java    From hbase with Apache License 2.0
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache().get();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      final BlockType blockType = block.getBlockType();

      if (shouldBeCached != isCached &&
          (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
 
Example #16
Source File: MizoHFileIterator.java    From mizo with Apache License 2.0
/**
 * Creates an inner HFileScanner object for a given HFile path
 */
public static HFileScanner createScanner(FileSystem fs, Path path) throws IOException {
    Configuration config = fs.getConf();
    HFile.Reader reader = HFile.createReader(fs, path, getCacheConfig(config), config);

    HFileScanner scanner = reader.getScanner(false, false);
    scanner.seekTo();

    return scanner;
}
 
Example #17
Source File: TestEncryptionKeyRotation.java    From hbase with Apache License 2.0
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), true, conf);
  try {
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    assertNotNull("Crypto context has no key", key);
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Example #18
Source File: TestHMobStore.java    From hbase with Apache License 2.0
private void checkMobHFileEncryption(Collection<HStoreFile> storefiles) {
  HStoreFile storeFile = storefiles.iterator().next();
  HFile.Reader reader = storeFile.getReader().getHFileReader();
  byte[] encryptionKey = reader.getTrailer().getEncryptionKey();
  Assert.assertTrue(null != encryptionKey);
  Assert.assertTrue(reader.getFileContext().getEncryptionContext().getCipher().getName()
      .equals(HConstants.CIPHER_AES));
}
 
Example #19
Source File: TestHalfStoreFileReader.java    From hbase with Apache License 2.0
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API demands that seekTo and
 * reseekTo() return < 0 only if the key lies before the start of the file (with no position on
 * the scanner), 0 on a perfect match (rare), and 1 on an imperfect match. The latter case is the
 * most common, so we should generally return 1, and when we do there may or may not be a 'next'
 * in the scanner/file. A bug in the half file scanner returned -1 at the end of the bottom half,
 * which caused the infrastructure above to go null, leading to NPEs and other problems. This test
 * reproduces that failure, and also exercises both the bottom and top halves of the file.
 * @throws IOException
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");

  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withFileContext(meta).create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, true, conf);
  Cell midKV = r.midKey().get();
  byte[] midkey = CellUtil.cloneRow(midKV);

  // System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
 
Example #20
Source File: TestHStore.java    From hbase with Apache License 2.0
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family)
      .setCompressionType(Compression.Algorithm.GZ).setDataBlockEncoding(DataBlockEncoding.DIFF)
      .build();
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFileWriter writer =
      store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf);
  assertEquals(hcd.getCompressionType(), reader.getTrailer().getCompressionCodec());
  assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Example #21
Source File: TestScannerWithBulkload.java    From hbase with Apache License 2.0
private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nativeHFile)
    throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  final Path hfilePath = new Path(hFilePath);
  fs.mkdirs(hfilePath);
  Path path = new Path(pathStr);
  HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  Assert.assertNotNull(wf);
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version2"));

  // Set cell seq id to test bulk load native hfiles.
  if (nativeHFile) {
    // Set a big seq id. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip
    // this kv.
    kv.setSequenceId(9999999);
  }

  writer.append(kv);

  if (nativeHFile) {
    // Set a big MAX_SEQ_ID_KEY. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip its
    // kv.
    writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(9999999L));
  } else {
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
  }
  writer.close();
  return hfilePath;
}
 
Example #22
Source File: TestImportTSVWithVisibilityLabels.java    From hbase with Apache License 2.0
/**
 * Method returns the total KVs in given hfile
 * @param fs File System
 * @param p HFile path
 * @return KV count in the given hfile
 * @throws IOException
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  reader.close();
  return count;
}
 
Example #23
Source File: HFileTestUtil.java    From hbase with Apache License 2.0
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key @ family:qualifier.
 * If withTag is true, we add the rowKey as the tag value for
 * tagtype MOB_TABLE_NAME_TAG_TYPE
 */
public static void createHFile(
    Configuration configuration,
    FileSystem fs, Path path, DataBlockEncoding encoding,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows, boolean withTag) throws IOException {
  HFileContext meta = new HFileContextBuilder()
      .withIncludesTags(withTag)
      .withDataBlockEncoding(encoding)
      .withColumnFamily(family)
      .build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
      Cell kv = new KeyValue(key, family, qualifier, now, key);
      if (withTag) {
        // add a tag.  Arbitrarily chose mob tag since we have a helper already.
        Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key);
        kv = MobUtils.createMobRefCell(kv, key, tableNameTag);

        // verify that the kv has the tag.
        Optional<Tag> tag = PrivateCellUtil.getTag(kv, TagType.MOB_TABLE_NAME_TAG_TYPE);
        if (!tag.isPresent()) {
          throw new IllegalStateException("Tag didn't stick to KV " + kv.toString());
        }
      }
      writer.append(kv);
    }
  } finally {
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
 
Example #24
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
 
Example #25
Source File: IntegrationTestIngestWithACL.java    From hbase with Apache License 2.0
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
  conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
  conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
  conf.setBoolean("hbase.security.access.early_out", false);
  // conf.set("hbase.superuser", "admin");
  super.setUpCluster();
}
 
Example #26
Source File: HFileGeneratorTest.java    From terrapin with Apache License 2.0
@Test
public void testGenerateHFiles() throws IOException {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  int numOfPart = 10;
  int numOfKeys = 1000;
  HFileGenerator.generateHFiles(fs, conf, outputDir,
      PartitionerType.CASCADING, numOfPart, numOfKeys);
  FilenameFilter hfileFilter = new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith(Constants.FILE_PREFIX);
    }
  };
  File[] hfiles = outputDir.listFiles(hfileFilter);
  assertEquals(numOfPart, hfiles.length);

  int count = 0;
  for(File hfile : hfiles) {
    HColumnDescriptor columnDescriptor = new HColumnDescriptor();
    columnDescriptor.setBlockCacheEnabled(false);
    HFile.Reader reader =
        HFile.createReader(fs, new Path(hfile.toURI()), new CacheConfig(conf, columnDescriptor));
    count += reader.getEntries();
    reader.close();
  }
  assertEquals(numOfKeys, count);
}
 
Example #27
Source File: HFileSortedOplog.java    From gemfirexd-oss with Apache License 2.0
public HFileReader() throws IOException {
  try {
    FileSystem fs = fsProvider.getFS();
    reader = HFile.createReader(fs, path, cacheConf);
    fileInfo = reader.loadFileInfo();
    closed = new AtomicBoolean(false);

    validate();
    if (reader.getComparator() instanceof DelegatingSerializedComparator) {
      loadComparators((DelegatingSerializedComparator) reader.getComparator());
    }

    // read the old HLL if it exists so that a CardinalityMergeException will trigger a Major Compaction
    byte[] hll = fileInfo.get(Meta.LOCAL_CARDINALITY_ESTIMATE.toBytes());
    if (hll != null) {
      entryCountEstimate = estimator = HyperLogLog.Builder.build(hll);
    } else if ((hll = fileInfo.get(Meta.LOCAL_CARDINALITY_ESTIMATE_V2.toBytes())) != null) {
      entryCountEstimate = estimator = HyperLogLog.Builder.build(hll);
    } else {
      estimator = new HyperLogLog(HdfsSortedOplogOrganizer.HLL_CONSTANT);
    }
    
    previousFS = fs;
  } catch (IOException e) {
    logger.fine("IO Error while creating reader", e);
    throw e;
  }
}
 
Example #28
Source File: SecureTestUtil.java    From hbase with Apache License 2.0
public static void enableSecurity(Configuration conf) throws IOException {
  conf.set("hadoop.security.authorization", "false");
  conf.set("hadoop.security.authentication", "simple");
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName() +
    "," + MasterSyncObserver.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  // Need HFile V3 for tags for security features
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
  conf.set(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, "true");
  configureSuperuser(conf);
}
 
Example #29
Source File: TestStripeCompactionPolicy.java    From hbase with Apache License 2.0
private static HStoreFile createFile(long size) throws Exception {
  HStoreFile sf = mock(HStoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFileReader r = mock(StoreFileReader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(),
    anyBoolean())).thenReturn(mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.getBulkLoadTimestamp()).thenReturn(OptionalLong.empty());
  return sf;
}