Java Code Examples for org.apache.hadoop.hbase.util.Bytes#add()

The following examples show how to use org.apache.hadoop.hbase.util.Bytes#add(), which returns a new byte array containing the concatenation of its arguments. The examples are extracted from open source projects; where known, the original project and source file are noted above each example.
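
Before the project examples, here is a minimal, self-contained sketch (ours, not taken from any project below) of what Bytes#add() does. It has both two- and three-argument overloads, which is why it shows up wherever HBase row keys and qualifiers are composed.

import org.apache.hadoop.hbase.util.Bytes;

public class BytesAddDemo {
  public static void main(String[] args) {
    byte[] prefix = Bytes.toBytes("user:");
    byte[] id = Bytes.toBytes("42");

    // two-argument overload: prefix + id
    byte[] rowKey = Bytes.add(prefix, id);
    System.out.println(Bytes.toString(rowKey));    // prints user:42

    // three-argument overload: prefix + separator + id
    byte[] composite = Bytes.add(prefix, Bytes.toBytes("|"), id);
    System.out.println(Bytes.toString(composite)); // prints user:|42
  }
}
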
Example 1
/**
 * Test that we don't cover other columns when we have a delete column.
 */
@Test
public void testDeleteColumnCorrectlyCoversColumns() {
  ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
  KeyValue d = createKvForType(Type.DeleteColumn, 12);
  byte[] qual2 = Bytes.add(qualifier, Bytes.toBytes("-other"));
  KeyValue put = new KeyValue(row, family, qual2, 11, Type.Put, value);

  assertEquals("Didn't filter out delete column", ReturnCode.SKIP, filter.filterKeyValue(d));
  // different column put should still be visible
  assertEquals("Filtered out put with different column than the delete", ReturnCode.INCLUDE,
    filter.filterKeyValue(put));

  // set a delete family, but in the past
  d = createKvForType(Type.DeleteFamily, 10);
  assertEquals("Didn't filter out delete column", ReturnCode.SKIP, filter.filterKeyValue(d));
  // add back in the original delete column
  d = createKvForType(Type.DeleteColumn, 11);
  assertEquals("Didn't filter out delete column", ReturnCode.SKIP, filter.filterKeyValue(d));
  // onto a different family, so that must be visible too
  assertEquals("Filtered out put with different column than the delete", ReturnCode.INCLUDE,
    filter.filterKeyValue(put));
}
 
Example 2
public Result[] getByIndex(byte[] value) throws IOException {
  try {
    transactionContext.start();
    // stop row is exclusive, so append a zero byte to include rows equal to `value`
    Scan scan = new Scan(value, Bytes.add(value, new byte[] { 0 }));
    scan.addColumn(secondaryIndexFamily, secondaryIndexQualifier);
    ResultScanner indexScanner = secondaryIndexTable.getScanner(scan);

    ArrayList<Get> gets = new ArrayList<>();
    for (Result result : indexScanner) {
      for (Cell cell : result.listCells()) {
        // CellUtil.cloneValue copies the value bytes (Cell#getValue was removed in HBase 2.0)
        gets.add(new Get(CellUtil.cloneValue(cell)));
      }
    }
    Result[] results = transactionAwareHTable.get(gets);
    transactionContext.finish();
    return results;
  } catch (Exception e) {
    try {
      transactionContext.abort();
    } catch (TransactionFailureException e1) {
      throw new IOException("Could not rollback transaction", e1);
    }
    // rethrow instead of silently returning null after the rollback
    throw new IOException("Indexed read failed and was rolled back", e);
  }
}
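
The stop row above deserves a note: HBase stop rows are exclusive, and appending a single zero byte with Bytes.add produces the smallest key strictly greater than the start key. A minimal sketch of the idiom, assuming index rows are keyed exactly by the indexed value (the helper name is ours, not from the project):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: scan exactly the rows whose key equals `key`.
static Scan exactRowScan(byte[] key) {
  // key + 0x00 is the immediate successor of `key` in byte order,
  // so the half-open range [key, key + 0x00) matches only `key` itself.
  return new Scan(key, Bytes.add(key, new byte[] { 0 }));
}
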
 
Example 3
Source Project: spork   File: HBaseTableInputFormat.java    License: Apache License 2.0
@Override
public void setScan(Scan scan) {
    super.setScan(scan);

    startRow_ = scan.getStartRow();
    endRow_ = scan.getStopRow();
    byte[] startPadded;
    byte[] endPadded;
    if (startRow_.length < endRow_.length) {
        startPadded = Bytes.padTail(startRow_, endRow_.length - startRow_.length);
        endPadded = endRow_;
    } else if (endRow_.length < startRow_.length) {
        startPadded = startRow_;
        endPadded = Bytes.padTail(endRow_, startRow_.length - endRow_.length);
    } else {
        startPadded = startRow_;
        endPadded = endRow_;
    }
    currRow_ = startRow_;
    // a fixed nonzero header keeps both BigIntegers positive (BigInteger reads
    // the bytes as signed two's complement) and at the same scale
    byte[] prependHeader = {1, 0};
    bigStart_ = new BigInteger(Bytes.add(prependHeader, startPadded));
    bigEnd_ = new BigInteger(Bytes.add(prependHeader, endPadded));
    bigRange_ = new BigDecimal(bigEnd_.subtract(bigStart_));
    LOG.info("setScan with ranges: " + bigStart_ + " - " + bigEnd_ + " (" + bigRange_ + ")");
}
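
The pad-then-prepend pattern above is what makes variable-length row keys comparable as numbers. A rough, hypothetical distillation of the same idea (names are ours, not from spork); Bytes.padTail appends the given number of zero bytes:

import java.math.BigInteger;
import org.apache.hadoop.hbase.util.Bytes;

// Pad a row key with trailing zeros up to `width` bytes, then prepend a
// positive header so the result can be compared as a BigInteger.
static BigInteger toComparable(byte[] row, int width) {
  byte[] padded = row.length < width ? Bytes.padTail(row, width - row.length) : row;
  return new BigInteger(Bytes.add(new byte[] {1, 0}, padded));
}
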
 
Example 4
Source Project: hraven   File: JobHistoryService.java    License: Apache License 2.0
/**
 * Sets the hRaven queue name in the job {@link Put} so that it's independent
 * of hadoop1/hadoop2 queue/pool names.
 *
 * @param jobConf job configuration to read the queue/pool name from
 * @param jobPut the put that the queue column is added to
 * @param jobKey the job key, whose user name is used as a fallback
 * @param jobConfColumnPrefix column qualifier prefix for job config properties
 *
 * @throws IllegalArgumentException if neither config param is found
 */
static void setHravenQueueNamePut(Configuration jobConf, Put jobPut,
    JobKey jobKey, byte[] jobConfColumnPrefix) {

  String hRavenQueueName = HadoopConfUtil.getQueueName(jobConf);
  if (hRavenQueueName.equalsIgnoreCase(Constants.DEFAULT_VALUE_QUEUENAME)) {
    // due to a bug in hadoop2, the queue name value is the string "default"
    // hence set it to username
    hRavenQueueName = jobKey.getUserName();
  }

  // set the "queue" property defined by hRaven
  // this makes it independent of hadoop version config parameters
  byte[] column =
      Bytes.add(jobConfColumnPrefix, Constants.HRAVEN_QUEUE_BYTES);
  jobPut.addColumn(Constants.INFO_FAM_BYTES, column,
      Bytes.toBytes(hRavenQueueName));
}
 
Example 5
Source Project: hraven   File: FlowEventService.java    License: Apache License 2.0
/**
 * Retrieves all the event rows matching a single
 * {@link com.twitter.hraven.Flow}.
 * @param flowKey key of the flow whose events should be returned
 * @return the matching {@link FlowEvent}s, in row order
 * @throws IOException if the scan of the flow event table fails
 */
public List<FlowEvent> getFlowEvents(FlowKey flowKey) throws IOException {
  byte[] startKey =
      Bytes.add(flowKeyConverter.toBytes(flowKey), Constants.SEP_BYTES);
  Scan scan = new Scan(startKey);
  scan.setFilter(new WhileMatchFilter(new PrefixFilter(startKey)));

  List<FlowEvent> results = new ArrayList<FlowEvent>();
  ResultScanner scanner = null;
  Table eventTable = null;
  try {
    eventTable = hbaseConnection
        .getTable(TableName.valueOf(Constants.FLOW_EVENT_TABLE));
    scanner = eventTable.getScanner(scan);
    for (Result r : scanner) {
      FlowEvent event = createEventFromResult(r);
      if (event != null) {
        results.add(event);
      }
    }
  } finally {
    try {
      if (scanner != null) {
        scanner.close();
      }
    } finally {
      if (eventTable != null) {
        eventTable.close();
      }
    }
  }
  return results;
}
 
Example 6
@Override
public void run() {
    try {
        Table htable = connection.getTable(TableName.valueOf(DemoSchema.MESSAGE_TABLE));

        while (true) {
            List<Put> puts = new ArrayList<Put>();
            for (int i = 0; i < batchSize; i++) {
                int userId = random.nextInt(userCount);
                byte[] rowkey = Bytes.toBytes(String.valueOf(userId));
                Put put = new Put(rowkey);

                // reverse the timestamp so that newer messages sort first
                long timestamp = Long.MAX_VALUE - System.currentTimeMillis();
                int nonce = random.nextInt();

                byte[] qualifier = Bytes.add(Bytes.toBytes(timestamp), Bytes.toBytes("_"), Bytes.toBytes(nonce));
                put.addColumn(contentCf, qualifier, Bytes.toBytes(createMessageText()));
                puts.add(put);
            }

            htable.put(puts);
            totalPuts.addAndGet(puts.size());
        }
    } catch (Throwable t) {
        System.err.println("Thread " + name + " dying because of an error");
        t.printStackTrace(System.err);
    }
}
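
Because each qualifier starts with the reversed timestamp, a plain column scan returns messages newest first. A sketch of decoding a qualifier built with the layout above (decoder names are ours; the layout is an 8-byte reversed timestamp, a one-byte "_" separator, then a 4-byte nonce):

import org.apache.hadoop.hbase.util.Bytes;

// Recover the wall-clock time from the reversed-timestamp qualifier prefix.
static long decodeTimestamp(byte[] qualifier) {
  return Long.MAX_VALUE - Bytes.toLong(qualifier, 0);
}

// The nonce sits after the 8-byte timestamp and the 1-byte "_" separator.
static int decodeNonce(byte[] qualifier) {
  return Bytes.toInt(qualifier, 8 + 1);
}
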
 
Example 7
Source Project: phoenix-tephra   File: DataJanitorState.java    License: Apache License 2.0
private byte[] makeInactiveTransactionBoundTimeKey(byte[] time) {
  return Bytes.add(INACTIVE_TRANSACTION_BOUND_TIME_KEY_PREFIX, time);
}
 
Example 8
Source Project: phoenix-tephra   File: DataJanitorState.java    License: Apache License 2.0
private byte[] makeEmptyRegionTimeKey(byte[] time, byte[] regionId) {
  return Bytes.add(EMPTY_REGION_TIME_KEY_PREFIX, time, regionId);
}
 
Example 9
Source Project: hbase   File: QuotaTableUtil.java    License: Apache License 2.0
protected static byte[] getSnapshotSizeQualifier(String snapshotName) {
  return Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshotName));
}
 
Example 10
Source Project: hraven   File: JobHistoryService.java    License: Apache License 2.0
/**
 * Returns the {@link Flow} runs' stats, summed up per flow. If the
 * {@code version} parameter is non-null, the returned results will be
 * restricted to those matching this app version.
 *
 * <p>
 * <strong>Note:</strong> this retrieval method will omit the configuration
 * data from all of the returned jobs.
 * </p>
 *
 * @param cluster the cluster where the jobs were run
 * @param user the user running the jobs
 * @param appId the application identifier for the jobs
 * @param version if non-null, only flows matching this application version
 *          will be returned
 * @param startTime the start time for the flows to be looked at
 * @param endTime the end time for the flows to be looked at
 * @param limit the maximum number of flows to return
 * @param startRow if non-null, the row to resume scanning from
 * @return the matching flows with their aggregated per-flow stats
 */
public List<Flow> getFlowTimeSeriesStats(String cluster, String user,
    String appId, String version, long startTime, long endTime, int limit,
    byte[] startRow) throws IOException {

  // app portion of row key
  byte[] rowPrefix = Bytes.toBytes((cluster + Constants.SEP + user
      + Constants.SEP + appId + Constants.SEP));
  byte[] scanStartRow;

  if (startRow != null) {
    scanStartRow = startRow;
  } else {
    if (endTime != 0) {
      // use end time in start row, if present
      long endRunId = FlowKey.encodeRunId(endTime);
      scanStartRow =
          Bytes.add(rowPrefix, Bytes.toBytes(endRunId), Constants.SEP_BYTES);
    } else {
      scanStartRow = rowPrefix;
    }
  }

  // TODO: use RunMatchFilter to limit scan on the server side
  Scan scan = new Scan();
  scan.setStartRow(scanStartRow);
  FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);

  if (startTime != 0) {
    // if limited by start time, early out as soon as we hit it
    long startRunId = FlowKey.encodeRunId(startTime);
    // zero byte at the end makes the startRunId inclusive
    byte[] scanEndRow = Bytes.add(rowPrefix, Bytes.toBytes(startRunId),
        Constants.ZERO_SINGLE_BYTE);
    scan.setStopRow(scanEndRow);
  } else {
    // require that all rows match the app prefix we're looking for
    filters.addFilter(new WhileMatchFilter(new PrefixFilter(rowPrefix)));
  }

  // if version is passed, restrict the rows returned to that version
  if (version != null && version.length() > 0) {
    filters.addFilter(new SingleColumnValueFilter(Constants.INFO_FAM_BYTES,
        Constants.VERSION_COLUMN_BYTES, CompareFilter.CompareOp.EQUAL,
        Bytes.toBytes(version)));
  }

  // filter out all config columns except the queue name
  filters.addFilter(new QualifierFilter(CompareFilter.CompareOp.NOT_EQUAL,
      new RegexStringComparator(
          "^c\\!((?!" + Constants.HRAVEN_QUEUE + ").)*$")));

  scan.setFilter(filters);

  LOG.info("scan : \n " + scan.toJSON() + " \n");
  return createFromResults(scan, false, limit);
}
 
Example 11
Source Project: hbase   File: TestMobStoreCompaction.java    License: Apache License 2.0
private Put createPut(int rowIdx, byte[] dummyData) throws IOException {
  Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)));
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(COLUMN_FAMILY, Bytes.toBytes("colX"), dummyData);
  return p;
}
 
Example 12
Source Project: phoenix-tephra   File: DataJanitorState.java    License: Apache License 2.0
private byte[] makeRegionKey(byte[] regionId) {
  return Bytes.add(REGION_KEY_PREFIX, regionId);
}
 
Example 13
@Test
// TODO : check how block index works here
public void testGetsWithMultiColumnsAndExplicitTracker()
    throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KV that will give you two blocks
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
        CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
        TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    BlockCache cache = setCacheProperties(region);
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    for (int i = 1; i < 10; i++) {
      put = new Put(ROW);
      put.addColumn(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
      table.put(put);
      if (i % 2 == 0) {
        region.flush(true);
      }
    }
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // flush the data
    System.out.println("Flushing cache");
    // Should create one Hfile with 2 blocks
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, true, false);
    Thread.sleep(200);
    Iterator<CachedBlock> iterator = cache.iterator();
    boolean usedBlocksFound = false;
    int refCount = 0;
    int noOfBlocksWithRef = 0;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Blocks will be with count 3
        System.out.println("The refCount is " + refCount);
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
        noOfBlocksWithRef++;
      }
    }
    assertTrue(usedBlocksFound);
    // the number of blocks referred
    assertEquals(10, noOfBlocksWithRef);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that it had
    CustomInnerRegionObserver.waitForGets.set(true);
    // giving some time for the block to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    System.out.println("Gets should have returned the bloks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
 
Example 14
Source Project: phoenix-tephra   File: DataJanitorState.java    License: Apache License 2.0
private byte[] makeTimeRegionKey(byte[] time, byte[] regionId) {
  return Bytes.add(REGION_TIME_KEY_PREFIX, time, regionId);
}
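
All of the DataJanitorState key builders above share one scheme: Bytes.add joins a fixed key-type prefix with the variable parts, so every entry of one kind lives under a common prefix and can be fetched with a prefix scan. A minimal sketch of that retrieval side (ours, not from the project), using the same WhileMatchFilter idiom as Example 5:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;

// Hypothetical: scan every key built as Bytes.add(keyTypePrefix, ...).
static Scan scanKeyType(byte[] keyTypePrefix) {
  Scan scan = new Scan(keyTypePrefix); // start at the first key with this prefix
  // WhileMatchFilter ends the scan as soon as a row stops matching the prefix
  scan.setFilter(new WhileMatchFilter(new PrefixFilter(keyTypePrefix)));
  return scan;
}
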
 