Java Code Examples for org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled()

The following examples show how to use org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(). These examples are extracted from open source projects; the source project and file are noted above each example where available.
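
Before the examples, a minimal sketch of the typical call site may help: setBlockCacheEnabled() is set on the column family descriptor before the table is created, assuming the HBase 1.x client API used throughout these examples. The table name "example_table", the family name "d", and the surrounding scaffolding are placeholders, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example_table"));
    HColumnDescriptor family = new HColumnDescriptor("d");
    // Cache data blocks read from this family in the block cache (true is the default).
    family.setBlockCacheEnabled(true);
    table.addFamily(family);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.createTable(table);
    }
  }
}

Several of the examples below disable the cache instead (setBlockCacheEnabled(false)) so that readers work against the actual file content rather than cached blocks.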
Example 1
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message,
                                      NotificationContext context) {
  Pair<String, String> hdfsPathAndPartition = getHdfsPathAndPartitionNum(message);
  String hdfsPath = hdfsPathAndPartition.getLeft();
  LOG.info("Opening " + hdfsPath);
  try {
    // TODO(varun): Maybe retry here.
    HColumnDescriptor family = new HColumnDescriptor(Constants.HFILE_COLUMN_FAMILY);
    family.setBlockCacheEnabled(isBlockCacheEnabled);
    Reader r = readerFactory.createHFileReader(hdfsPath, new CacheConfig(conf, family));
    resourcePartitionMap.addReader(
        message.getResourceName(), hdfsPathAndPartition.getRight(), r);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example 2
Source Project: terrapin   File: BaseUploader.java    License: Apache License 2.0
/**
 * Validates that each non-empty partition HFile was written with the right partitioning
 * function. It reads the first key of each partition and calculates the partition number
 * according to the partitioning function supplied by the client. If the calculated partition
 * number differs from the actual partition number, an exception is thrown. If all partition
 * HFiles are empty, an exception is thrown.
 *
 * @param parts full absolute paths for all partitions
 * @param partitionerType type of partitioning function
 * @param numShards total number of partitions
 * @throws IOException if something goes wrong when reading the hfiles
 * @throws IllegalArgumentException if the partitioner type is wrong or all partitions are empty
 */
public void validate(List<Path> parts, PartitionerType partitionerType, int numShards)
    throws IOException {
  boolean hasNonEmptyPartition = false;
  HColumnDescriptor columnDescriptor = new HColumnDescriptor();
  // Disable block cache to ensure it reads the actual file content.
  columnDescriptor.setBlockCacheEnabled(false);
  for (int shardIndex = 0; shardIndex < parts.size(); shardIndex++) {
    Path fileToBeValidated = parts.get(shardIndex);
    HFile.Reader reader = null;
    try {
      FileSystem fs = FileSystem.newInstance(fileToBeValidated.toUri(), conf);
      CacheConfig cc = new CacheConfig(conf, columnDescriptor);
      reader = HFile.createReader(fs, fileToBeValidated, cc);
      Partitioner partitioner = PartitionerFactory.getPartitioner(partitionerType);
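      // The first key of the partition is enough to check whether this file maps to its shard.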
      byte[] rowKey = reader.getFirstRowKey();
      if (rowKey == null) {
        LOG.warn(String.format("empty partition %s", fileToBeValidated.toString()));
        reader.close();
        continue;
      }
      hasNonEmptyPartition = true;
      BytesWritable key = new BytesWritable(rowKey);
      int partition = partitioner.getPartition(key, null, numShards);
      if (partition != shardIndex) {
        throw new IllegalArgumentException(
            String.format("wrong partition type %s for key %s in partition %d, expected %d",
                partitionerType.toString(), new String(key.getBytes()), shardIndex, partition)
        );
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }
  if (!hasNonEmptyPartition) {
    throw new IllegalArgumentException("all partitions are empty");
  }
}
 
Example 3
Source Project: terrapin   File: HFileGeneratorTest.java    License: Apache License 2.0
@Test
public void testGenerateHFiles() throws IOException {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  int numOfPart = 10;
  int numOfKeys = 1000;
  HFileGenerator.generateHFiles(fs, conf, outputDir,
      PartitionerType.CASCADING, numOfPart, numOfKeys);
  FilenameFilter hfileFilter = new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith(Constants.FILE_PREFIX);
    }
  };
  File[] hfiles = outputDir.listFiles(hfileFilter);
  assertEquals(numOfPart, hfiles.length);

  int count = 0;
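  // Read back every generated HFile with the block cache disabled and sum up their entries.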
  for (File hfile : hfiles) {
    HColumnDescriptor columnDescriptor = new HColumnDescriptor();
    columnDescriptor.setBlockCacheEnabled(false);
    HFile.Reader reader =
        HFile.createReader(fs, new Path(hfile.toURI()), new CacheConfig(conf, columnDescriptor));
    count += reader.getEntries();
    reader.close();
  }
  assertEquals(numOfKeys, count);
}
 
Example 4
private static HTableDescriptor generateTransactionTable() throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("splice",HConfiguration.TRANSACTION_TABLE));
    desc.addCoprocessor(TxnLifecycleEndpoint.class.getName());

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES);
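    // Default family for the transaction table: up to five versions, no compression,
    // kept in memory with the block cache enabled, and a ROWCOL bloom filter.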
    columnDescriptor.setMaxVersions(5);
    columnDescriptor.setCompressionType(Compression.Algorithm.NONE);
    columnDescriptor.setInMemory(true);
    columnDescriptor.setBlockCacheEnabled(true);
    columnDescriptor.setBloomFilterType(BloomType.ROWCOL);
    desc.addFamily(columnDescriptor);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes(SIConstants.SI_PERMISSION_FAMILY)));
    return desc;
}
 
Example 5
public static HColumnDescriptor createDataFamily() {
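    // Data family: unlimited versions, no compression, kept in memory with the
    // block cache enabled, and a ROW bloom filter.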
    HColumnDescriptor snapshot = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES);
    snapshot.setMaxVersions(Integer.MAX_VALUE);
    snapshot.setCompressionType(Compression.Algorithm.NONE);
    snapshot.setInMemory(true);
    snapshot.setBlockCacheEnabled(true);
    snapshot.setBloomFilterType(BloomType.ROW);
    return snapshot;
}
 
Example 6
Source Project: terrapin   File: HFileRecordWriterTest.java    License: Apache License 2.0
@Test
public void testWrite() throws Exception {
  Configuration conf = new Configuration();
  HColumnDescriptor columnDescriptor = new HColumnDescriptor();
  // Disable block cache to ensure it reads the actual file content.
  columnDescriptor.setBlockCacheEnabled(false);
  FileSystem fs = FileSystem.get(conf);
  int blockSize = conf.getInt(Constants.HFILE_BLOCKSIZE, 16384);
  final StoreFile.Writer writer =
      new StoreFile.WriterBuilder(conf, new CacheConfig(conf, columnDescriptor), fs, blockSize)
          .withFilePath(new Path(tempFile.toURI()))
          .build();
  /* Create our RecordWriter */
  RecordWriter<BytesWritable, BytesWritable> hfileWriter =
      new HFileRecordWriter(writer);

  List<String> keys = Lists.newArrayList();
  List<String> values = Lists.newArrayList();
  for (int i = 0; i < 100; ++i) {
    String key = String.format("%03d", i);
    String val = "value " + i;
    keys.add(key);
    values.add(val);
    hfileWriter.write(new BytesWritable(key.getBytes()), new BytesWritable(val.getBytes()));
  }
  /* This internally closes the StoreFile.Writer */
  hfileWriter.close(null);

  HFile.Reader reader = HFile.createReader(fs, new Path(tempFile.toURI()),
      new CacheConfig(conf, columnDescriptor));
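  // Scan the written file and verify that every key/value pair round-trips.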
  HFileScanner scanner = reader.getScanner(false, false, false);
  boolean valid = scanner.seekTo();
  List<String> gotKeys = Lists.newArrayListWithCapacity(keys.size());
  List<String> gotValues = Lists.newArrayListWithCapacity(values.size());
  while (valid) {
    KeyValue keyValue = scanner.getKeyValue();
    gotKeys.add(new String(keyValue.getRow()));
    gotValues.add(new String(keyValue.getValue()));
    valid = scanner.next();
  }
  assertEquals(keys, gotKeys);
  assertEquals(values, gotValues);
  reader.close();
}
 
Example 7
Source Project: pinpoint   File: TableCommand.java    License: Apache License 2.0
private HColumnDescriptor newColumnDescriptor(ColumnFamilyChange columnFamilyChange) {
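    // Copy each configured column family setting onto the descriptor, skipping values that were not set.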
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilyChange.getName());
    ColumnFamilyConfiguration columnFamilyConfiguration = columnFamilyChange.getColumnFamilyConfiguration();
    Boolean blockCacheEnabled = columnFamilyConfiguration.getBlockCacheEnabled();
    if (blockCacheEnabled != null) {
        hcd.setBlockCacheEnabled(blockCacheEnabled);
    }
    Integer replicationScope = columnFamilyConfiguration.getReplicationScope();
    if (replicationScope != null) {
        hcd.setScope(replicationScope);
    }
    Boolean inMemory = columnFamilyConfiguration.getInMemory();
    if (inMemory != null) {
        hcd.setInMemory(inMemory);
    }
    Integer timeToLive = columnFamilyConfiguration.getTimeToLive();
    if (timeToLive != null) {
        hcd.setTimeToLive(timeToLive);
    }
    ColumnFamilyConfiguration.DataBlockEncoding dataBlockEncoding =
            columnFamilyConfiguration.getDataBlockEncoding();
    if (dataBlockEncoding != null) {
        hcd.setDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncoding.name()));
    }
    Integer blockSize = columnFamilyConfiguration.getBlockSize();
    if (blockSize != null) {
        hcd.setBlocksize(blockSize);
    }
    Integer maxVersions = columnFamilyConfiguration.getMaxVersions();
    if (maxVersions != null) {
        hcd.setMaxVersions(maxVersions);
    }
    Integer minVersions = columnFamilyConfiguration.getMinVersions();
    if (minVersions != null) {
        hcd.setMinVersions(minVersions);
    }
    ColumnFamilyConfiguration.BloomFilter bloomFilter = columnFamilyConfiguration.getBloomFilter();
    if (bloomFilter != null) {
        hcd.setBloomFilterType(BloomType.valueOf(bloomFilter.name()));
    }
    if (compressionAlgorithm != Compression.Algorithm.NONE) {
        hcd.setCompressionType(compressionAlgorithm);
    }
    return hcd;
}