Java Code Examples for org.apache.hadoop.hbase.CellUtil#cloneRow()

The following examples show how to use org.apache.hadoop.hbase.CellUtil#cloneRow(). CellUtil.cloneRow(Cell) copies the row key portion of a Cell into a fresh byte array, which is the usual way to get a row key that can be held onto independently of the cell's backing buffer (for example, to build a new Put, Get, or KeyValue). Each example is taken from an open source project; the source file, project, and license are noted above the code.
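Before the project examples, here is a minimal stand-alone sketch of the typical pattern: scan a table and copy each cell's row key out with CellUtil.cloneRow(). The connection and the table name ("example_table") are placeholders for illustration, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CloneRowSketch {

    // Prints the row key of every cell returned by a full table scan.
    static void printRowKeys(Connection connection) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf("example_table"));
             ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result result : scanner) {
                for (Cell cell : result.listCells()) {
                    // cloneRow copies the row bytes out of the cell's backing array,
                    // so the returned byte[] stays valid on its own.
                    byte[] row = CellUtil.cloneRow(cell);
                    System.out.println(Bytes.toStringBinary(row));
                }
            }
        }
    }
}

Since every cell in a Result shares the same row key, code that only needs the row once per result can also use Result.getRow() instead of cloning it from an individual cell.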
Example 1
Source File: IndexHalfStoreFileReader.java    From phoenix with Apache License 2.0
/**
 * @param fs the file system
 * @param p path to the store file
 * @param cacheConf the cache configuration
 * @param in input stream wrapper for the store file
 * @param size size of the store file
 * @param r the half-file Reference (top or bottom)
 * @param conf the configuration
 * @param indexMaintainers the index maintainers
 * @param viewConstants the view constants
 * @param regionInfo region info of the child region
 * @param regionStartKeyInHFile start key of the region as written in the HFile
 * @param splitKey the split key; if null, the Reference's split key is used
 * @param primaryReplicaStoreFile whether this store file belongs to a primary replica
 * @param refCount reference count for the store file
 * @param currentRegion the current region
 * @throws IOException if the underlying reader cannot be opened
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final FSDataInputStreamWrapper in, long size, final Reference r,
        final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final RegionInfo regionInfo,
        byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile,
        AtomicInteger refCount, RegionInfo currentRegion) throws IOException {
    super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, false,
            conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(new KeyValue.KeyOnlyKeyValue(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.childRegionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
    this.refCount = refCount;
    this.currentRegion = currentRegion;
}
 
Example 2
Source File: TransactionVisibilityFilter.java    From phoenix-tephra with Apache License 2.0
@Override
public Cell transformCell(Cell cell) throws IOException {
  // Convert Tephra deletes back into HBase deletes
  if (tx.getVisibilityLevel() == Transaction.VisibilityLevel.SNAPSHOT_ALL) {
    if (DeleteTracker.isFamilyDelete(cell)) {
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), null, cell.getTimestamp(),
                          KeyValue.Type.DeleteFamily);
    } else if (isColumnDelete(cell)) {
      // Note: in some cases KeyValue.Type.Delete is used in Delete object,
      // and in some other cases KeyValue.Type.DeleteColumn is used.
      // Since Tephra cannot distinguish between the two, we return KeyValue.Type.DeleteColumn.
      // KeyValue.Type.DeleteColumn makes both CellUtil.isDelete and CellUtil.isDeleteColumns return true, and will
      // work in both cases.
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
                          cell.getTimestamp(), KeyValue.Type.DeleteColumn);
    }
  }
  return cell;
}
 
Example 3
Source File: HBaseCLI.java    From cloud-bigtable-examples with Apache License 2.0
public void run(Connection connection, List<String> args) throws InvalidArgsException, IOException {
    if (args.size() != 2) {
        throw new InvalidArgsException(args);
    }

    String tableName = args.get(0);
    String rowId = args.get(1);

    Table table = connection.getTable(TableName.valueOf(tableName));

    // Create a new Get request and specify the rowId passed by the user.
    Result result = table.get(new Get(rowId.getBytes()));

    // Iterate over the results. Each Cell is a value for a single column,
    // so multiple Cells will be processed for each row.
    for (Cell cell : result.listCells()) {
        // We use the CellUtil class to clone values
        // from the returned cells.
        String row = new String(CellUtil.cloneRow(cell));
        String family = new String(CellUtil.cloneFamily(cell));
        String column = new String(CellUtil.cloneQualifier(cell));
        String value = new String(CellUtil.cloneValue(cell));
        long timestamp = cell.getTimestamp();
        System.out.printf("%-20s column=%s:%s, timestamp=%s, value=%s\n", row, family, column, timestamp, value);
    }
}
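A side note on decoding: new String(byte[]) above uses the platform default charset. When cells are known to hold UTF-8 text, HBase's Bytes.toString(...) is the more common way to turn the cloned arrays into strings; the same loop, under that assumption, would look like this:

    for (Cell cell : result.listCells()) {
        String row = Bytes.toString(CellUtil.cloneRow(cell));
        String family = Bytes.toString(CellUtil.cloneFamily(cell));
        String column = Bytes.toString(CellUtil.cloneQualifier(cell));
        String value = Bytes.toString(CellUtil.cloneValue(cell));
        System.out.printf("%-20s column=%s:%s, timestamp=%s, value=%s%n",
                row, family, column, cell.getTimestamp(), value);
    }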
 
Example 4
Source File: RegionReplicaReplicationEndpoint.java    From hbase with Apache License 2.0
private CompletableFuture<Long> replicate(TableDescriptor tableDesc, byte[] encodedRegionName,
    List<Entry> entries) {
  if (disabledTableCache.getIfPresent(tableDesc.getTableName()) != null) {
    logSkipped(tableDesc.getTableName(), entries, "cached as a disabled table");
    return CompletableFuture.completedFuture(Long.valueOf(entries.size()));
  }
  byte[] row = CellUtil.cloneRow(entries.get(0).getEdit().getCells().get(0));
  CompletableFuture<RegionLocations> locateFuture = new CompletableFuture<>();
  getRegionLocations(locateFuture, tableDesc, encodedRegionName, row, false);
  CompletableFuture<Long> future = new CompletableFuture<>();
  FutureUtils.addListener(locateFuture, (locs, error) -> {
    if (error != null) {
      future.completeExceptionally(error);
    } else if (locs.getDefaultRegionLocation() == null) {
      future.completeExceptionally(
        new HBaseIOException("No location found for default replica of table=" +
          tableDesc.getTableName() + " row='" + Bytes.toStringBinary(row) + "'"));
    } else {
      replicate(future, locs, tableDesc, encodedRegionName, row, entries);
    }
  });
  return future;
}
 
Example 5
Source File: PcapGetterHBaseImpl.java    From opensoc-streaming with Apache License 2.0
/**
 * Adds the scanned cells to the response.
 * 
 * @param pcapsResponse
 *          the pcaps response
 * @param scannedCells
 *          the scanned cells
 * @param maxResultSize
 *          the max result size
 */
private void addToResponse(PcapsResponse pcapsResponse,
    List<Cell> scannedCells, long maxResultSize) {
  String lastKeyFromCurrentScan = null;
  if (scannedCells != null && scannedCells.size() > 0) {
    lastKeyFromCurrentScan = new String(CellUtil.cloneRow(scannedCells
        .get(scannedCells.size() - 1)));
  }
  // 4. calculate the response size
  Collections.sort(scannedCells, PcapHelper.getCellTimestampComparator());
  for (Cell sortedCell : scannedCells) {
    pcapsResponse.addPcaps(CellUtil.cloneValue(sortedCell));
  }
  if (!pcapsResponse.isResonseSizeWithinLimit(maxResultSize)) {
    pcapsResponse.setStatus(PcapsResponse.Status.PARTIAL); // response size
                                                           // reached
    pcapsResponse.setLastRowKey(new String(lastKeyFromCurrentScan));
  }
}
 
Example 6
Source File: BackupSystemTable.java    From hbase with Apache License 2.0
/**
 * Get the region servers' log information after the last log roll from the backup system table.
 * @param backupRoot root directory path to backup
 * @return RS log info
 * @throws IOException exception
 */
public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
    throws IOException {
  LOG.trace("read region server last roll log result to backup system table");

  Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot);

  try (Table table = connection.getTable(tableName);
      ResultScanner scanner = table.getScanner(scan)) {
    Result res;
    HashMap<String, Long> rsTimestampMap = new HashMap<>();
    while ((res = scanner.next()) != null) {
      res.advance();
      Cell cell = res.current();
      byte[] row = CellUtil.cloneRow(cell);
      String server = getServerNameForReadRegionServerLastLogRollResult(row);
      byte[] data = CellUtil.cloneValue(cell);
      rsTimestampMap.put(server, Bytes.toLong(data));
    }
    return rsTimestampMap;
  }
}
 
Example 7
Source File: BackupSystemTable.java    From hbase with Apache License 2.0
Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
  Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
  try (Table table = connection.getTable(bulkLoadTableName);
      ResultScanner scanner = table.getScanner(scan)) {
    Result res = null;
    Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    while ((res = scanner.next()) != null) {
      res.advance();
      byte[] row = CellUtil.cloneRow(res.listCells().get(0));
      for (Cell cell : res.listCells()) {
        if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
          BackupSystemTable.PATH_COL.length) == 0) {
          map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
        }
      }
    }
    return map;
  }
}
 
Example 8
Source File: SnapshotFilterImpl.java    From phoenix-omid with Apache License 2.0
private void healShadowCell(Cell cell, long commitTimestamp) {
    Put put = new Put(CellUtil.cloneRow(cell));
    byte[] family = CellUtil.cloneFamily(cell);
    byte[] shadowCellQualifier = CellUtils.addShadowCellSuffixPrefix(cell.getQualifierArray(),
                                                               cell.getQualifierOffset(),
                                                               cell.getQualifierLength());
    put.addColumn(family, shadowCellQualifier, cell.getTimestamp(), Bytes.toBytes(commitTimestamp));
    try {
        tableAccessWrapper.put(put);
    } catch (IOException e) {
        LOG.warn("Failed healing shadow cell for kv {}", cell, e);
    }
}
 
Example 9
Source File: IndexRegionObserver.java    From phoenix with Apache License 2.0
/**
 * Retrieve the last committed data row state. This method is called only for regular data mutations, since for
 * rebuild (i.e., index replay) the mutations already include all row versions.
 */

private void getCurrentRowStates(ObserverContext<RegionCoprocessorEnvironment> c,
                                 BatchMutateContext context) throws IOException {
    Set<KeyRange> keys = new HashSet<KeyRange>(context.rowsToLock.size());
    for (ImmutableBytesPtr rowKeyPtr : context.rowsToLock) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get()));
    }
    Scan scan = new Scan();
    ScanRanges scanRanges = ScanRanges.createPointLookup(new ArrayList<KeyRange>(keys));
    scanRanges.initializeScan(scan);
    SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
    scan.setFilter(skipScanFilter);
    context.dataRowStates = new HashMap<ImmutableBytesPtr, Pair<Put, Put>>(context.rowsToLock.size());
    try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
        boolean more = true;
        while(more) {
            List<Cell> cells = new ArrayList<Cell>();
            more = scanner.next(cells);
            if (cells.isEmpty()) {
                continue;
            }
            byte[] rowKey = CellUtil.cloneRow(cells.get(0));
            Put put = new Put(rowKey);
            for (Cell cell : cells) {
                put.add(cell);
            }
            context.dataRowStates.put(new ImmutableBytesPtr(rowKey), new Pair<Put, Put>(put, new Put(put)));
        }
    }
}
 
Example 10
Source File: TestTags.java    From hbase with Apache License 2.0
private void updateMutationAddingTags(final Mutation m) {
  byte[] attribute = m.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = CellUtil.cloneFamily(kv);
        }
        Tag tag = new ArrayBackedTag((byte) 1, attribute);
        List<Tag> tagList = new ArrayList<>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(),
            CellUtil.cloneFamily(kv), 0, kv.getFamilyLength(), CellUtil.cloneQualifier(kv), 0,
            kv.getQualifierLength(), kv.getTimestamp(),
            KeyValue.Type.codeToType(kv.getTypeByte()), CellUtil.cloneValue(kv), 0,
            kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newKV);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Example 11
Source File: TestFiltersWithBinaryComponentComparator.java    From hbase with Apache License 2.0
@Test
public void testRowAndValueFilterWithBinaryComponentComparator() throws IOException {
  //SELECT * from table where a=1 and b > 10 and b < 20 and c > 90 and c < 100 and d=1
  //and value has 'y' at position 1"
  tableName = TableName.valueOf(name.getMethodName());
  Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(ht, family, qf);
  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  setRowFilters(filterList);
  setValueFilters(filterList);
  Scan scan = new Scan();
  scan.setFilter(filterList);
  List<Cell> result = getResults(ht,scan);
  for(Cell cell: result){
    byte[] key = CellUtil.cloneRow(cell);
    int a = Bytes.readAsInt(key,aOffset,4);
    int b = Bytes.readAsInt(key,bOffset,4);
    int c = Bytes.readAsInt(key,cOffset,4);
    int d = Bytes.readAsInt(key,dOffset,4);
    assertTrue(a == 1 &&
               b > 10 &&
               b < 20 &&
               c > 90 &&
               c < 100 &&
               d == 1);
    byte[] value = CellUtil.cloneValue(cell);
    assertTrue(Bytes.toString(value).charAt(1) == 'y');
  }
  ht.close();
}
 
Example 12
Source File: StripeMultiFileWriter.java    From hbase with Apache License 2.0
@Override
public void append(Cell cell) throws IOException {
  // If we are waiting for opportunity to close and we started writing different row,
  // discard the writer and stop waiting.
  boolean doCreateWriter = false;
  if (currentWriter == null) {
    // First append ever, do a sanity check.
    sanityCheckLeft(left, cell);
    doCreateWriter = true;
  } else if (lastRowInCurrentWriter != null
      && !PrivateCellUtil.matchingRows(cell, lastRowInCurrentWriter, 0,
        lastRowInCurrentWriter.length)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stopping to use a writer after [" + Bytes.toString(lastRowInCurrentWriter)
          + "] row; wrote out " + cellsInCurrentWriter + " kvs");
    }
    lastRowInCurrentWriter = null;
    cellsInCurrentWriter = 0;
    cellsSeenInPrevious += cellsSeen;
    doCreateWriter = true;
  }
  if (doCreateWriter) {
    // make a copy
    byte[] boundary = existingWriters.isEmpty() ? left : CellUtil.cloneRow(cell);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Creating new writer starting at [" + Bytes.toString(boundary) + "]");
    }
    currentWriter = writerFactory.createWriter();
    boundaries.add(boundary);
    existingWriters.add(currentWriter);
  }

  currentWriter.append(cell);
  lastCell = cell; // for the sanity check
  ++cellsInCurrentWriter;
  cellsSeen = cellsInCurrentWriter;
  if (this.sourceScanner != null) {
    cellsSeen =
        Math.max(cellsSeen, this.sourceScanner.getEstimatedNumberOfKvsScanned()
            - cellsSeenInPrevious);
  }

  // If we are not already waiting for opportunity to close, start waiting if we can
  // create any more writers and if the current one is too big.
  if (lastRowInCurrentWriter == null && existingWriters.size() < targetCount
      && cellsSeen >= targetCells) {
    lastRowInCurrentWriter = CellUtil.cloneRow(cell); // make a copy
    if (LOG.isDebugEnabled()) {
      LOG.debug("Preparing to start a new writer after ["
          + Bytes.toString(lastRowInCurrentWriter) + "] row; observed " + cellsSeen
          + " kvs and wrote out " + cellsInCurrentWriter + " kvs");
    }
  }
}
 
Example 13
Source File: HBaseCLI.java    From cloud-bigtable-examples with Apache License 2.0
public void run(Connection connection, List<String> args) throws InvalidArgsException, IOException {
    if (args.size() < 1 || args.size() > 2) {
        throw new InvalidArgsException(args);
    }
    String tableName = args.get(0);
    String filterVal = null;
    if (args.size() > 1) {
        filterVal = args.get(1);
    }

    Table table = connection.getTable(TableName.valueOf(tableName));

    // Create a new Scan instance.
    Scan scan = new Scan();

    // This command supports using a column value filter.
    // The filter takes the form of <columnfamily>:<column><operator><value>
    // An example would be cf:col>=10
    if (filterVal != null) {
        String splitVal = "=";
        CompareFilter.CompareOp op = CompareFilter.CompareOp.EQUAL;

        if (filterVal.contains(">=")) {
             op = CompareFilter.CompareOp.GREATER_OR_EQUAL;
             splitVal = ">=";
        } else if (filterVal.contains("<=")) {
             op = CompareFilter.CompareOp.LESS_OR_EQUAL;
             splitVal = "<=";
        } else if (filterVal.contains(">")) {
             op = CompareFilter.CompareOp.GREATER;
             splitVal = ">";
        } else if (filterVal.contains("<")) {
             op = CompareFilter.CompareOp.LESS;
             splitVal = "<";
        }
        String[] filter = filterVal.split(splitVal);
        String[] filterCol = filter[0].split(":");
        scan.setFilter(new SingleColumnValueFilter(filterCol[0].getBytes(), filterCol[1].getBytes(), op, filter[1].getBytes()));
    }
    ResultScanner resultScanner = table.getScanner(scan);
    for (Result result : resultScanner) {
        for (Cell cell : result.listCells()) {
            String row = new String(CellUtil.cloneRow(cell));
            String family = new String(CellUtil.cloneFamily(cell));
            String column = new String(CellUtil.cloneQualifier(cell));
            String value = new String(CellUtil.cloneValue(cell));
            long timestamp = cell.getTimestamp();
            System.out.printf("%-20s column=%s:%s, timestamp=%s, value=%s\n", row, family, column, timestamp, value);
        }
    }
}
 
Example 14
Source File: HStore.java    From hbase with Apache License 2.0
/**
 * This throws a WrongRegionException if the HFile does not fit in this region, or an
 * InvalidHFileException if the HFile is not valid.
 */
public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
  HFile.Reader reader  = null;
  try {
    LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this);
    FileSystem srcFs = srcPath.getFileSystem(conf);
    srcFs.access(srcPath, FsAction.READ_WRITE);
    reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf);

    Optional<byte[]> firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
    Optional<Cell> lk = reader.getLastKey();
    Preconditions.checkState(lk.isPresent(), "Last key can not be null");
    byte[] lastKey =  CellUtil.cloneRow(lk.get());

    if (LOG.isDebugEnabled()) {
      LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) +
          " last=" + Bytes.toStringBinary(lastKey));
      LOG.debug("Region bounds: first=" +
          Bytes.toStringBinary(getRegionInfo().getStartKey()) +
          " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));
    }

    if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
      throw new WrongRegionException(
          "Bulk load file " + srcPath.toString() + " does not fit inside region "
          + this.getRegionInfo().getRegionNameAsString());
    }

    if(reader.length() > conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE)) {
      LOG.warn("Trying to bulk load hfile " + srcPath + " with size: " +
          reader.length() + " bytes can be problematic as it may lead to oversplitting.");
    }

    if (verifyBulkLoads) {
      long verificationStartTime = EnvironmentEdgeManager.currentTime();
      LOG.info("Full verification started for bulk load hfile: {}", srcPath);
      Cell prevCell = null;
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      do {
        Cell cell = scanner.getCell();
        if (prevCell != null) {
          if (comparator.compareRows(prevCell, cell) > 0) {
            throw new InvalidHFileException("Previous row is greater than"
                + " current row: path=" + srcPath + " previous="
                + CellUtil.getCellKeyAsString(prevCell) + " current="
                + CellUtil.getCellKeyAsString(cell));
          }
          if (CellComparator.getInstance().compareFamilies(prevCell, cell) != 0) {
            throw new InvalidHFileException("Previous key had different"
                + " family compared to current key: path=" + srcPath
                + " previous="
                + Bytes.toStringBinary(prevCell.getFamilyArray(), prevCell.getFamilyOffset(),
                    prevCell.getFamilyLength())
                + " current="
                + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
                    cell.getFamilyLength()));
          }
        }
        prevCell = cell;
      } while (scanner.next());
      LOG.info("Full verification complete for bulk load hfile: " + srcPath.toString() +
        " took " + (EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms");
    }
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
}
 
Example 15
Source File: PermissionStorage.java    From hbase with Apache License 2.0
/**
 * Loads all of the permission grants stored in a region of the {@code _acl_}
 * table.
 *
 * @param aclRegion the acl region
 * @return a map of the permissions for this table.
 * @throws IOException if an error occurs
 */
static Map<byte[], ListMultimap<String, UserPermission>> loadAll(Region aclRegion)
    throws IOException {
  if (!isAclRegion(aclRegion)) {
    throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
  }

  Map<byte[], ListMultimap<String, UserPermission>> allPerms =
    new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

  // do a full scan of _acl_ table

  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  InternalScanner iScanner = null;
  try {
    iScanner = aclRegion.getScanner(scan);

    while (true) {
      List<Cell> row = new ArrayList<>();

      boolean hasNext = iScanner.next(row);
      ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
      byte[] entry = null;
      for (Cell kv : row) {
        if (entry == null) {
          entry = CellUtil.cloneRow(kv);
        }
        Pair<String, Permission> permissionsOfUserOnTable =
            parsePermissionRecord(entry, kv, null, null, false, null);
        if (permissionsOfUserOnTable != null) {
          String username = permissionsOfUserOnTable.getFirst();
          Permission permission = permissionsOfUserOnTable.getSecond();
          perms.put(username, new UserPermission(username, permission));
        }
      }
      if (entry != null) {
        allPerms.put(entry, perms);
      }
      if (!hasNext) {
        break;
      }
    }
  } finally {
    if (iScanner != null) {
      iScanner.close();
    }
  }

  return allPerms;
}
 
Example 16
Source File: WriteHeavyIncrementObserver.java    From hbase with Apache License 2.0
private InternalScanner wrap(byte[] family, InternalScanner scanner) {
  return new InternalScanner() {

    private List<Cell> srcResult = new ArrayList<>();

    private byte[] row;

    private byte[] qualifier;

    private long timestamp;

    private long sum;

    @Override
    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
      boolean moreRows = scanner.next(srcResult, scannerContext);
      if (srcResult.isEmpty()) {
        if (!moreRows && row != null) {
          result.add(createCell(row, family, qualifier, timestamp, sum));
        }
        return moreRows;
      }
      Cell firstCell = srcResult.get(0);
      // Check if there is a row change first. All the cells will come from the same row, so
      // checking the first one is enough.
      if (row == null) {
        row = CellUtil.cloneRow(firstCell);
        qualifier = CellUtil.cloneQualifier(firstCell);
      } else if (!CellUtil.matchingRows(firstCell, row)) {
        result.add(createCell(row, family, qualifier, timestamp, sum));
        row = CellUtil.cloneRow(firstCell);
        qualifier = CellUtil.cloneQualifier(firstCell);
        sum = 0;
      }
      srcResult.forEach(c -> {
        if (CellUtil.matchingQualifier(c, qualifier)) {
          sum += Bytes.toLong(c.getValueArray(), c.getValueOffset());
        } else {
          result.add(createCell(row, family, qualifier, timestamp, sum));
          qualifier = CellUtil.cloneQualifier(c);
          sum = Bytes.toLong(c.getValueArray(), c.getValueOffset());
        }
        timestamp = c.getTimestamp();
      });
      if (!moreRows) {
        result.add(createCell(row, family, qualifier, timestamp, sum));
      }
      srcResult.clear();
      return moreRows;
    }

    @Override
    public void close() throws IOException {
      scanner.close();
    }
  };
}
 
Example 17
Source File: TestCompoundBloomFilter.java    From hbase with Apache License 2.0
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
    Path sfPath) throws IOException {
  HStoreFile sf = new HStoreFile(fs, sfPath, conf, cacheConf, bt, true);
  sf.initReader();
  StoreFileReader r = sf.getReader();
  final boolean pread = true; // does not really matter
  StoreFileScanner scanner = r.getStoreFileScanner(true, pread, false, 0, 0, false);

  {
    // Test for false negatives (not allowed).
    int numChecked = 0;
    for (KeyValue kv : kvs) {
      byte[] row = CellUtil.cloneRow(kv);
      boolean present = isInBloom(scanner, row, CellUtil.cloneQualifier(kv));
      assertTrue(testIdMsg + " Bloom filter false negative on row "
          + Bytes.toStringBinary(row) + " after " + numChecked
          + " successful checks", present);
      ++numChecked;
    }
  }

  // Test for false positives (some percentage allowed). We test in two modes:
  // "fake lookup" which ignores the key distribution, and production mode.
  for (boolean fakeLookupEnabled : new boolean[] { true, false }) {
    if (fakeLookupEnabled) {
      BloomFilterUtil.setRandomGeneratorForTest(new Random(283742987L));
    }
    try {
      String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ?
          "enabled" : "disabled");
      CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter();
      cbf.enableTestingStats();
      int numFalsePos = 0;
      Random rand = new Random(EVALUATION_SEED);
      int nTrials = NUM_KV[t] * 10;
      for (int i = 0; i < nTrials; ++i) {
        byte[] query = RandomKeyValueUtil.randomRowOrQualifier(rand);
        if (isInBloom(scanner, query, bt, rand)) {
          numFalsePos += 1;
        }
      }
      double falsePosRate = numFalsePos * 1.0 / nTrials;
      LOG.debug(String.format(testIdMsg
          + " False positives: %d out of %d (%f)",
          numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr);

      // Check for obvious Bloom filter crashes.
      assertTrue("False positive is too high: " + falsePosRate + " (greater "
          + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr,
          falsePosRate < TOO_HIGH_ERROR_RATE);

      // Now a more precise check to see if the false positive rate is not
      // too high. The reason we use a relaxed restriction for the real-world
      // case as opposed to the "fake lookup" case is that our hash functions
      // are not completely independent.

      double maxZValue = fakeLookupEnabled ? 1.96 : 2.5;
      validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf,
          fakeLookupModeStr);

      // For checking the lower bound we need to eliminate the last chunk,
      // because it is frequently smaller and the false positive rate in it
      // is too low. This does not help if there is only one under-sized
      // chunk, though.
      int nChunks = cbf.getNumChunks();
      if (nChunks > 1) {
        numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1);
        nTrials -= cbf.getNumQueriesForTesting(nChunks - 1);
        falsePosRate = numFalsePos * 1.0 / nTrials;
        LOG.info(testIdMsg + " False positive rate without last chunk is " +
            falsePosRate + fakeLookupModeStr);
      }

      validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf,
          fakeLookupModeStr);
    } finally {
      BloomFilterUtil.setRandomGeneratorForTest(null);
    }
  }

  r.close(true); // end of test so evictOnClose
}
 
Example 18
Source File: TestHFileBlock.java    From hbase with Apache License 2.0
static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS,
    boolean useTag) throws IOException {
  List<KeyValue> keyValues = new ArrayList<>();
  Random randomizer = new Random(42L + seed); // just any fixed number

  // generate keyValues
  for (int i = 0; i < NUM_KEYVALUES; ++i) {
    byte[] row;
    long timestamp;
    byte[] family;
    byte[] qualifier;
    byte[] value;

    // generate it or repeat, it should compress well
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      row = CellUtil.cloneRow(keyValues.get(randomizer.nextInt(keyValues.size())));
    } else {
      row = new byte[FIELD_LENGTH];
      randomizer.nextBytes(row);
    }
    if (0 == i) {
      family = new byte[FIELD_LENGTH];
      randomizer.nextBytes(family);
    } else {
      family = CellUtil.cloneFamily(keyValues.get(0));
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      qualifier = CellUtil.cloneQualifier(keyValues.get(randomizer.nextInt(keyValues.size())));
    } else {
      qualifier = new byte[FIELD_LENGTH];
      randomizer.nextBytes(qualifier);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      value = CellUtil.cloneValue(keyValues.get(randomizer.nextInt(keyValues.size())));
    } else {
      value = new byte[FIELD_LENGTH];
      randomizer.nextBytes(value);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      timestamp = keyValues.get(
          randomizer.nextInt(keyValues.size())).getTimestamp();
    } else {
      timestamp = randomizer.nextLong();
    }
    if (!useTag) {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
    } else {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value,
          new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("myTagVal")) }));
    }
  }

  // sort it and write to stream
  int totalSize = 0;
  Collections.sort(keyValues, CellComparatorImpl.COMPARATOR);

  for (KeyValue kv : keyValues) {
    totalSize += kv.getLength();
    if (includesMemstoreTS) {
      long memstoreTS = randomizer.nextLong();
      kv.setSequenceId(memstoreTS);
      totalSize += WritableUtils.getVIntSize(memstoreTS);
    }
    hbw.write(kv);
  }
  return totalSize;
}
 
Example 19
Source File: TestMutationGetCellBuilder.java    From hbase with Apache License 2.0
@Test
public void testMutationGetCellBuilder() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final byte[] rowKey = Bytes.toBytes("12345678");
  final byte[] uselessRowKey = Bytes.toBytes("123");
  final byte[] family = Bytes.toBytes("cf");
  final byte[] qualifier = Bytes.toBytes("foo");
  final long now = System.currentTimeMillis();
  try (Table table = TEST_UTIL.createTable(tableName, family)) {
    TEST_UTIL.waitTableAvailable(tableName.getName(), 5000);
    // put one row
    Put put = new Put(rowKey);
    CellBuilder cellBuilder = put.getCellBuilder().setQualifier(qualifier)
            .setFamily(family).setValue(Bytes.toBytes("bar")).setTimestamp(now);
    // setRow has no effect: the cell keeps the Put's row key
    cellBuilder.setRow(uselessRowKey);
    put.add(cellBuilder.build());
    byte[] cloneRow = CellUtil.cloneRow(cellBuilder.build());
    assertTrue("setRow must be useless", !Arrays.equals(cloneRow, uselessRowKey));
    table.put(put);

    // get the row back and assert the values
    Get get = new Get(rowKey);
    get.setTimestamp(now);
    Result result = table.get(get);
    assertTrue("row key must be same", Arrays.equals(result.getRow(), rowKey));
    assertTrue("Column foo value should be bar",
        Bytes.toString(result.getValue(family, qualifier)).equals("bar"));

    //Delete that row
    Delete delete = new Delete(rowKey);
    cellBuilder = delete.getCellBuilder().setQualifier(qualifier)
            .setFamily(family);
    // if this row still gets deleted, it shows that setType has no effect
    cellBuilder.setType(Cell.Type.Put);
    delete.add(cellBuilder.build());
    table.delete(delete);

    // check whether the row still exists
    get = new Get(rowKey);
    get.setTimestamp(now);
    result = table.get(get);
    assertTrue("Column foo should not exist",
            result.getValue(family, qualifier) == null);
  }
}