Java Code Examples for org.apache.hadoop.hbase.util.Pair#getFirst()

The following examples show how to use org.apache.hadoop.hbase.util.Pair#getFirst(). Each snippet is taken from an open source project; the source file, project, and license are noted above each example.
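Before the project examples, here is a minimal, self-contained sketch of the API itself (the class name and sample values are purely illustrative and not taken from any project). It shows the pattern every example on this page follows: a method hands back a Pair, and getFirst()/getSecond() unwrap its two components.

import org.apache.hadoop.hbase.util.Pair;

public class PairGetFirstSketch {
  public static void main(String[] args) {
    // A Pair simply couples two related values of possibly different types.
    Pair<String, Integer> regionAndReplicaCount = new Pair<>("region-a", 3);
    String regionName = regionAndReplicaCount.getFirst();   // first component: "region-a"
    int replicaCount = regionAndReplicaCount.getSecond();   // second component: 3
    System.out.println(regionName + " -> " + replicaCount);
  }
}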
Example 1
Source File: DefaultVisibilityLabelServiceImpl.java    From hbase with Apache License 2.0
@Override
public List<String> listLabels(String regex) throws IOException {
  assert (labelsRegion != null);
  Pair<Map<String, Integer>, Map<String, List<Integer>>> labelsAndUserAuths =
      extractLabelsAndAuths(getExistingLabelsWithAuths());
  Map<String, Integer> labels = labelsAndUserAuths.getFirst();
  labels.remove(SYSTEM_LABEL);
  if (regex != null) {
    Pattern pattern = Pattern.compile(regex);
    ArrayList<String> matchedLabels = new ArrayList<>();
    for (String label : labels.keySet()) {
      if (pattern.matcher(label).matches()) {
        matchedLabels.add(label);
      }
    }
    return matchedLabels;
  }
  return new ArrayList<>(labels.keySet());
}
 
Example 2
Source File: TestHbckChore.java    From hbase with Apache License 2.0
@Test
public void testForMeta() {
  byte[] metaRegionNameAsBytes = RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName();
  String metaRegionName = RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString();
  List<ServerName> serverNames = master.getServerManager().getOnlineServersList();
  assertEquals(NSERVERS, serverNames.size());

  hbckChore.choreForTesting();
  Map<String, Pair<ServerName, List<ServerName>>> inconsistentRegions =
      hbckChore.getInconsistentRegions();

  // Test for case1: Master thought this region opened, but no regionserver reported it.
  assertTrue(inconsistentRegions.containsKey(metaRegionName));
  Pair<ServerName, List<ServerName>> pair = inconsistentRegions.get(metaRegionName);
  ServerName locationInMeta = pair.getFirst();
  List<ServerName> reportedRegionServers = pair.getSecond();
  assertTrue(serverNames.contains(locationInMeta));
  assertEquals(0, reportedRegionServers.size());

  // Reported right region location. Then not in problematic regions.
  am.reportOnlineRegions(locationInMeta, Collections.singleton(metaRegionNameAsBytes));
  hbckChore.choreForTesting();
  inconsistentRegions = hbckChore.getInconsistentRegions();
  assertFalse(inconsistentRegions.containsKey(metaRegionName));
}
 
Example 3
Source File: IndexScrutinyMapper.java    From phoenix with Apache License 2.0
protected void checkIfInvalidRowsExpired(Context context,
        Map<String, Pair<Long,
        List<Object>>> targetPkToSourceValues) {
    Set<Map.Entry<String, Pair<Long, List<Object>>>>
            entrySet = targetPkToSourceValues.entrySet();

    Iterator<Map.Entry<String, Pair<Long, List<Object>>>> itr = entrySet.iterator();

    // iterate and remove items simultaneously
    while(itr.hasNext()) {
        Map.Entry<String, Pair<Long, List<Object>>> entry = itr.next();
        Pair<Long, List<Object>> sourceValues = entry.getValue();
        Long sourceTS = sourceValues.getFirst();
        if (hasRowExpiredOnSource(sourceTS, ttl)) {
            context.getCounter(PhoenixScrutinyJobCounters.EXPIRED_ROW_COUNT).increment(1);
            itr.remove();
        }
    }
}
 
Example 4
Source File: ExpressionUtil.java    From phoenix with Apache License 2.0
/**
 * Infer OrderBys from the rowkey columns of {@link PTable}.
 * The second part of the returned pair is the rowkey column offset we must skip when we create OrderBys, because for a salted/multiTenant/viewIndexId table
 * some leading rowkey columns should be skipped.
 * @param table
 * @param phoenixConnection
 * @param orderByReverse
 * @return
 */
public static Pair<OrderBy,Integer> getOrderByFromTableByRowKeyColumn(
        PTable table,
        PhoenixConnection phoenixConnection,
        boolean orderByReverse) {
    Pair<List<RowKeyColumnExpression>,Integer> rowKeyColumnExpressionsAndRowKeyColumnOffset =
            ExpressionUtil.getRowKeyColumnExpressionsFromTable(table, phoenixConnection);
    List<RowKeyColumnExpression> rowKeyColumnExpressions = rowKeyColumnExpressionsAndRowKeyColumnOffset.getFirst();
    int rowKeyColumnOffset = rowKeyColumnExpressionsAndRowKeyColumnOffset.getSecond();
    if(rowKeyColumnExpressions.isEmpty()) {
        return new Pair<OrderBy,Integer>(OrderBy.EMPTY_ORDER_BY,0);
    }
    return new Pair<OrderBy,Integer>(
            convertRowKeyColumnExpressionsToOrderBy(rowKeyColumnExpressions, orderByReverse),
            rowKeyColumnOffset);
}
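For context, a caller consuming this return value would use the same getFirst()/getSecond() unwrapping. The snippet below is a hypothetical illustration (the local variable names and the false value for orderByReverse are made up, not taken from the Phoenix sources):

// Hypothetical caller: unwrap the inferred OrderBy and the rowkey column offset.
Pair<OrderBy, Integer> orderByAndOffset =
        ExpressionUtil.getOrderByFromTableByRowKeyColumn(table, phoenixConnection, false);
OrderBy orderBy = orderByAndOffset.getFirst();          // the inferred OrderBy
int rowKeyColumnOffset = orderByAndOffset.getSecond();  // leading rowkey columns to skip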
 
Example 5
Source File: ZKReplicationQueueStorage.java    From hbase with Apache License 2.0
private void addLastSeqIdsToOps(String queueId, Map<String, Long> lastSeqIds,
    List<ZKUtilOp> listOfOps) throws KeeperException, ReplicationException {
  String peerId = new ReplicationQueueInfo(queueId).getPeerId();
  for (Entry<String, Long> lastSeqEntry : lastSeqIds.entrySet()) {
    String path = getSerialReplicationRegionPeerNode(lastSeqEntry.getKey(), peerId);
    Pair<Long, Integer> p = getLastSequenceIdWithVersion(lastSeqEntry.getKey(), peerId);
    byte[] data = ZKUtil.positionToByteArray(lastSeqEntry.getValue());
    if (p.getSecond() < 0) { // ZNode does not exist.
      ZKUtil.createWithParents(zookeeper,
        path.substring(0, path.lastIndexOf(ZNodePaths.ZNODE_PATH_SEPARATOR)));
      listOfOps.add(ZKUtilOp.createAndFailSilent(path, data));
      continue;
    }
    // Perform CAS in a specific version v0 (HBASE-20138)
    int v0 = p.getSecond();
    long lastPushedSeqId = p.getFirst();
    if (lastSeqEntry.getValue() <= lastPushedSeqId) {
      continue;
    }
    listOfOps.add(ZKUtilOp.setData(path, data, v0));
  }
}
 
Example 6
Source File: HRegionServer.java    From hbase with Apache License 2.0
protected void initializeMemStoreChunkCreator() {
  if (MemStoreLAB.isEnabled(conf)) {
    // MSLAB is enabled. So initialize MemStoreChunkPool
    // By this time, the MemstoreFlusher is already initialized. We can get the global limits from
    // it.
    Pair<Long, MemoryType> pair = MemorySizeUtil.getGlobalMemStoreSize(conf);
    long globalMemStoreSize = pair.getFirst();
    boolean offheap = this.regionServerAccounting.isOffheap();
    // When off heap memstore in use, take full area for chunk pool.
    float poolSizePercentage = offheap? 1.0F:
        conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT);
    float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY,
        MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
    int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
    // init the chunkCreator
    ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
      initialCountPercentage, this.hMemManager);
  }
}
 
Example 7
Source File: PhoenixIndexCodec.java    From phoenix with Apache License 2.0
@Override
public Iterable<IndexUpdate> getIndexUpserts(TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey) throws IOException {
    PhoenixIndexMetaData metaData = (PhoenixIndexMetaData)context;
    List<IndexMaintainer> indexMaintainers = metaData.getIndexMaintainers();
    if (indexMaintainers.get(0).isRowDeleted(state.getPendingUpdate())) {
        return Collections.emptyList();
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ptr.set(state.getCurrentRowKey());
    List<IndexUpdate> indexUpdates = Lists.newArrayList();
    for (IndexMaintainer maintainer : indexMaintainers) {
        Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.getReplayWrite() != null, false, context);
        ValueGetter valueGetter = statePair.getFirst();
        IndexUpdate indexUpdate = statePair.getSecond();
        indexUpdate.setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName());
        Put put = maintainer.buildUpdateMutation(KV_BUILDER, valueGetter, ptr, state.getCurrentTimestamp(),
                regionStartKey, regionEndKey);
        indexUpdate.setUpdate(put);
        indexUpdates.add(indexUpdate);
    }
    return indexUpdates;
}
 
Example 8
Source File: CoveredColumnIndexCodec.java    From phoenix with Apache License 2.0
/**
 * Get all the deletes necessary for a group of columns - logically, the cleanup of the index table for a given index.
 * 
 * @param group
 *            index information
 * @return the cleanup for the given index, or <tt>null</tt> if no cleanup is necessary
 */
private IndexUpdate getDeleteForGroup(ColumnGroup group, TableState state, IndexMetaData indexMetaData) {
    List<CoveredColumn> refs = group.getColumns();
    try {
        Pair<CoveredDeleteScanner, IndexUpdate> kvs = ((LocalTableState)state).getIndexedColumnsTableState(refs, false, false, indexMetaData);
        Pair<Integer, List<ColumnEntry>> columns = getNextEntries(refs, kvs.getFirst(), state.getCurrentRowKey());
        // make sure we close the scanner reference
        kvs.getFirst().close();
        // no change, just return the passed update
        if (columns.getFirst() == 0) { return kvs.getSecond(); }
        // have all the column entries, so just turn it into a Delete for the row
        // convert the entries to the needed values
        byte[] rowKey = composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond());
        Delete d = new Delete(rowKey);
        d.setTimestamp(state.getCurrentTimestamp());
        IndexUpdate update = kvs.getSecond();
        update.setUpdate(d);
        update.setTable(Bytes.toBytes(group.getTable()));
        return update;
    } catch (IOException e) {
        throw new RuntimeException("Unexpected exception when getting state for columns: " + refs);
    }
}
 
Example 9
Source File: PhoenixIndexCodec.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public Iterable<IndexUpdate> getIndexUpserts(TableState state) throws IOException {
    List<IndexMaintainer> indexMaintainers = getIndexMaintainers(state.getUpdateAttributes());
    if (indexMaintainers.isEmpty()) {
        return Collections.emptyList();
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    List<IndexUpdate> indexUpdates = Lists.newArrayList();
    // TODO: state.getCurrentRowKey() should take an ImmutableBytesWritable arg to prevent byte copy
    byte[] dataRowKey = state.getCurrentRowKey();
    for (IndexMaintainer maintainer : indexMaintainers) {
        // Short-circuit building state when we know it's a row deletion
        if (maintainer.isRowDeleted(state.getPendingUpdate())) {
            continue;
        }

        // Get a scanner over the columns this maintainer would like to look at
        // Any updates that we would make for those columns are then added to the index update
        Pair<Scanner,IndexUpdate> statePair = state.getIndexedColumnsTableState(maintainer.getAllColumns());
        IndexUpdate indexUpdate = statePair.getSecond();
        Scanner scanner = statePair.getFirst();

        // get the values from the scanner so we can actually use them
        ValueGetter valueGetter = IndexManagementUtil.createGetterFromScanner(scanner, dataRowKey);
        ptr.set(dataRowKey);
        Put put = maintainer.buildUpdateMutation(valueGetter, ptr, state.getCurrentTimestamp());
        indexUpdate.setTable(maintainer.getIndexTableName());
        indexUpdate.setUpdate(put);
        //make sure we close the scanner when we are done
        scanner.close();
        indexUpdates.add(indexUpdate);
    }
    return indexUpdates;
}
 
Example 10
Source File: AsyncFSWALProvider.java    From hbase with Apache License 2.0
@Override
protected void doInit(Configuration conf) throws IOException {
  Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
    NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
  eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
  channelClass = eventLoopGroupAndChannelClass.getSecond();
}
 
Example 11
Source File: SyncReplicationWALProvider.java    From hbase with Apache License 2.0
@Override
public void init(WALFactory factory, Configuration conf, String providerId) throws IOException {
  if (!initialized.compareAndSet(false, true)) {
    throw new IllegalStateException("WALProvider.init should only be called once.");
  }
  provider.init(factory, conf, providerId);
  this.conf = conf;
  this.factory = factory;
  Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
    NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
  eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
  channelClass = eventLoopGroupAndChannelClass.getSecond();
}
 
Example 12
Source File: SpliceReplicationSinkChore.java    From spliceengine with GNU Affero General Public License v3.0
/**
 * Try to update replication progress
 * @throws IOException
 * @throws KeeperException
 * @throws InterruptedException
 */
private void updateProgress() throws IOException, KeeperException, InterruptedException {
    Table snapshotTable = connection.getTable(masterSnapshotTable);
    Scan scan = new Scan();
    try (ResultScanner scanner = snapshotTable.getScanner(scan)) {
        for (Result r : scanner) {
            byte[] rowKey = r.getRow();
            long timestamp = new Long(new String(rowKey));
            //if (LOG.isDebugEnabled()) {
                SpliceLogUtils.info(LOG, "Checking snapshot taken at %d", timestamp);
            //}
            CellScanner s = r.cellScanner();
            long ts = -1;
            while (s.advance()) {
                Cell cell = s.current();
                byte[] colName = CellUtil.cloneQualifier(cell);
                if(Arrays.equals(colName, HBaseConfiguration.REPLICATION_SNAPSHOT_TSCOL_BYTES)){
                    ts = Bytes.toLong(CellUtil.cloneValue(cell));
                    //if (LOG.isDebugEnabled()) {
                    SpliceLogUtils.info(LOG, "Process snapshot take at %s", new DateTime(ts).toString());
                    //}
                }
                else {
                    String walName = Bytes.toString(colName);
                    int index = walName.lastIndexOf(".");
                    String walGroup = walName.substring(0, index);
                    long logNum = new Long(walName.substring(index + 1));
                    long position = Bytes.toLong(CellUtil.cloneValue(cell));
                    if (replicationProgress.containsKey(walGroup)) {
                        Pair<Long, Long> pair = replicationProgress.get(walGroup);
                        long appliedLogNum = pair.getFirst();
                        long appliedPosition = pair.getSecond();
                        //if (LOG.isDebugEnabled()) {
                        SpliceLogUtils.info(LOG,
                                "WAL=%s, snapshot=%d, logNum=%d, progress=%d", walName, position,
                                appliedLogNum, appliedPosition);
                        //}
                        if (appliedLogNum < logNum){
                            // it is still replicating older wals, cannot move timestamp forward
                            return;
                        }
                        else if (logNum == appliedLogNum) {
                            if (appliedPosition < position) {
                                // applied wal position is behind snapshot wal position,cannot move timestamp forward
                                return;
                            }
                        }
                    }
                }
            }
            Delete d = new Delete(rowKey);
            // We have replicated beyond this snapshot, delete it and bump up timestamp
            snapshotTable.delete(d);
            //if (LOG.isDebugEnabled()) {
                SpliceLogUtils.info(LOG, "Deleted snapshot %d.", timestamp);
            //}
            ReplicationUtils.setTimestamp(timestamp);

            updateZkProgress(ts);
        }
    }finally {
        replicationProgress.clear();
    }
}
 
Example 13
Source File: EquiDepthStreamHistogram.java    From phoenix with Apache License 2.0
@VisibleForTesting
boolean mergeBars() {
    Preconditions.checkState(bars.size() > 1, "Need at least two bars to merge");
    // pairwise search for the two bars with the smallest summed count
    int currIdx = 0;
    Bar currBar = bars.get(currIdx);
    Bar nextBar = bars.get(currIdx + 1);
    long currMinSum = Long.MAX_VALUE;
    int currMinIdx = currIdx; // keep this for fast removal from ArrayList later
    Pair<Bar, Bar> minBars = new Pair<>(currBar, nextBar);
    while (nextBar != null) {
        long sum = currBar.getSize() + nextBar.getSize();
        if (sum < currMinSum) {
            currMinSum = sum;
            minBars = new Pair<>(currBar, nextBar);
            currMinIdx = currIdx;
        }
        currBar = nextBar;
        nextBar = ++currIdx < bars.size() - 1 ? bars.get(currIdx+1) : null;
    }
    // don't want to merge bars into one that will just need an immediate split again
    if (currMinSum >= getMaxBarSize()) {
        return false;
    }
    // do the merge
    Bar leftBar = minBars.getFirst();
    Bar rightBar = minBars.getSecond();
    Bar newBar = new Bar(leftBar.getLeftBoundInclusive(), rightBar.getRightBoundExclusive());
    if (leftBar.getSize() >= rightBar.getSize()) {
        newBar.incrementCount(rightBar.getCount()); // count of rightBar without its blocked bars
        // this just adds the leftBar without its blocked bars, as we don't want nested blocked bars
        // the leftBar's blocked bars are added later below
        newBar.addBlockedBar(new Bar(leftBar));
    } else {
        newBar.incrementCount(leftBar.getCount());
        newBar.addBlockedBar(new Bar(rightBar));
    }
    newBar.addBlockedBars(leftBar.getBlockedBars());
    newBar.addBlockedBars(rightBar.getBlockedBars());
    bars.subList(currMinIdx, currMinIdx + 2).clear(); // remove minBars
    bars.add(newBar);
    Collections.sort(bars);
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar));
    }
    return true;
}
 
Example 14
Source File: TestWALSplitToHFile.java    From hbase with Apache License 2.0
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 */
@Test
public void testWrittenViaHRegion()
    throws IOException, SecurityException, IllegalArgumentException, InterruptedException {
  Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
  TableDescriptor td = pair.getFirst();
  RegionInfo ri = pair.getSecond();

  // Write countPerFamily edits into the three families.  Do a flush on one
  // of the families during the load of edits so its seqid is not same as
  // others to test we do right thing when different seqids.
  WAL wal = createWAL(this.conf, rootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
  long seqid = region.getOpenSeqNum();
  boolean first = true;
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    addRegionEdits(ROW, cfd.getName(), countPerFamily, this.ee, region, "x");
    if (first) {
      // If first, so we have at least one family w/ different seqid to rest.
      region.flush(true);
      first = false;
    }
  }
  // Now assert edits made it in.
  final Get g = new Get(ROW);
  Result result = region.get(g);
  assertEquals(countPerFamily * td.getColumnFamilies().length, result.size());
  // Now close the region (without flush), split the log, reopen the region and assert that
  // replay of log has the correct effect, that our seqids are calculated correctly so
  // all edits in logs are seen as 'stale'/old.
  region.close(true);
  wal.shutdown();
  try {
    WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
  } catch (Exception e) {
    LOG.debug("Got exception", e);
  }

  WAL wal2 = createWAL(this.conf, rootDir, logName);
  HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
  long seqid2 = region2.getOpenSeqNum();
  assertTrue(seqid + result.size() < seqid2);
  final Result result1b = region2.get(g);
  assertEquals(result.size(), result1b.size());

  // Next test.  Add more edits, then 'crash' this region by stealing its wal
  // out from under it and assert that replay of the log adds the edits back
  // correctly when region is opened again.
  for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
    addRegionEdits(ROW, hcd.getName(), countPerFamily, this.ee, region2, "y");
  }
  // Get count of edits.
  final Result result2 = region2.get(g);
  assertEquals(2 * result.size(), result2.size());
  wal2.sync();
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  User user = HBaseTestingUtility.getDifferentUser(newConf, td.getTableName().getNameAsString());
  user.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(conf), conf, wals);
      FileSystem newFS = FileSystem.get(newConf);
      // Make a new wal for new region open.
      WAL wal3 = createWAL(newConf, rootDir, logName);
      Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
      HRegion region3 = new HRegion(tableDir, wal3, newFS, newConf, ri, td, null);
      long seqid3 = region3.initialize();
      Result result3 = region3.get(g);
      // Assert that count of cells is same as before crash.
      assertEquals(result2.size(), result3.size());

      // I can't close wal1.  Its been appropriated when we split.
      region3.close();
      wal3.close();
      return null;
    }
  });
}
 
Example 15
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
@Test
public void testReversibleKeyValueHeap() throws IOException {
  // write data to one memstore and two store files
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(
      TEST_UTIL.getDataTestDir("testReversibleKeyValueHeap"), "regionname"),
      "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  HFileContextBuilder hcBuilder = new HFileContextBuilder();
  hcBuilder.withBlockSize(2 * 1024);
  HFileContext hFileContext = hcBuilder.build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();
  StoreFileWriter writer2 = new StoreFileWriter.Builder(
      TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
      hfilePath).withFileContext(hFileContext).build();

  MemStore memstore = new DefaultMemStore();
  writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1,
      writer2 });

  HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);
  /**
   * Test without MVCC
   */
  int startRowNum = ROWSIZE / 2;
  ReversedKeyValueHeap kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2,
      ROWS[startRowNum], MAXMVCC);
  internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap, startRowNum);

  startRowNum = ROWSIZE - 1;
  kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2,
      HConstants.EMPTY_START_ROW, MAXMVCC);
  internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap, startRowNum);

  /**
   * Test with MVCC
   */
  for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
    LOG.info("Setting read point to " + readPoint);
    startRowNum = ROWSIZE - 1;
    kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2,
        HConstants.EMPTY_START_ROW, readPoint);
    for (int i = startRowNum; i >= 0; i--) {
      if (i - 2 < 0) break;
      i = i - 2;
      kvHeap.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[i + 1]));
      Pair<Integer, Integer> nextReadableNum = getNextReadableNumWithBackwardScan(
          i, 0, readPoint);
      if (nextReadableNum == null) break;
      KeyValue expecedKey = makeKV(nextReadableNum.getFirst(),
          nextReadableNum.getSecond());
      assertEquals(expecedKey, kvHeap.peek());
      i = nextReadableNum.getFirst();
      int qualNum = nextReadableNum.getSecond();
      if (qualNum + 1 < QUALSIZE) {
        kvHeap.backwardSeek(makeKV(i, qualNum + 1));
        nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1,
            readPoint);
        if (nextReadableNum == null) break;
        expecedKey = makeKV(nextReadableNum.getFirst(),
            nextReadableNum.getSecond());
        assertEquals(expecedKey, kvHeap.peek());
        i = nextReadableNum.getFirst();
        qualNum = nextReadableNum.getSecond();
      }

      kvHeap.next();

      if (qualNum + 1 >= QUALSIZE) {
        nextReadableNum = getNextReadableNumWithBackwardScan(i - 1, 0,
            readPoint);
      } else {
        nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1,
            readPoint);
      }
      if (nextReadableNum == null) break;
      expecedKey = makeKV(nextReadableNum.getFirst(),
          nextReadableNum.getSecond());
      assertEquals(expecedKey, kvHeap.peek());
      i = nextReadableNum.getFirst();
    }
  }
}
 
Example 16
Source File: FavoredNodeLoadBalancer.java    From hbase with Apache License 2.0
@Override
public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
    List<ServerName> servers) throws HBaseIOException {
  Map<ServerName, List<RegionInfo>> assignmentMap;
  try {
    FavoredNodeAssignmentHelper assignmentHelper =
        new FavoredNodeAssignmentHelper(servers, rackManager);
    assignmentHelper.initialize();
    if (!assignmentHelper.canPlaceFavoredNodes()) {
      return super.roundRobinAssignment(regions, servers);
    }
    // Segregate the regions into two types:
    // 1. The regions that have favored node assignment, and where at least
    //    one of the favored node is still alive. In this case, try to adhere
    //    to the current favored nodes assignment as much as possible - i.e.,
    //    if the current primary is gone, then make the secondary or tertiary
    //    as the new host for the region (based on their current load).
    //    Note that we don't change the favored
    //    node assignments here (even though one or more favored node is currently
    //    down). It is up to the balanceCluster to do this hard work. The HDFS
    //    can handle the fact that some nodes in the favored nodes hint is down
    //    It'd allocate some other DNs. In combination with stale settings for HDFS,
    //    we should be just fine.
    // 2. The regions that currently don't have favored node assignment. We will
    //    need to come up with favored nodes assignments for them. The corner case
    //    in (1) above is that all the nodes are unavailable and in that case, we
    //    will note that this region doesn't have favored nodes.
    Pair<Map<ServerName,List<RegionInfo>>, List<RegionInfo>> segregatedRegions =
        segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers);
    Map<ServerName,List<RegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
    List<RegionInfo> regionsWithNoFavoredNodes = segregatedRegions.getSecond();
    assignmentMap = new HashMap<>();
    roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes,
        servers);
    // merge the assignment maps
    assignmentMap.putAll(regionsWithFavoredNodesMap);
  } catch (Exception ex) {
    LOG.warn("Encountered exception while doing favored-nodes assignment " + ex +
        " Falling back to regular assignment");
    assignmentMap = super.roundRobinAssignment(regions, servers);
  }
  return assignmentMap;
}
 
Example 17
Source File: PermissionStorage.java    From hbase with Apache License 2.0
/**
 * Loads all of the permission grants stored in a region of the {@code _acl_}
 * table.
 *
 * @param aclRegion the acl region
 * @return a map of the permissions for this table.
 * @throws IOException if an error occurs
 */
static Map<byte[], ListMultimap<String, UserPermission>> loadAll(Region aclRegion)
    throws IOException {
  if (!isAclRegion(aclRegion)) {
    throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
  }

  Map<byte[], ListMultimap<String, UserPermission>> allPerms =
    new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

  // do a full scan of _acl_ table

  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  InternalScanner iScanner = null;
  try {
    iScanner = aclRegion.getScanner(scan);

    while (true) {
      List<Cell> row = new ArrayList<>();

      boolean hasNext = iScanner.next(row);
      ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
      byte[] entry = null;
      for (Cell kv : row) {
        if (entry == null) {
          entry = CellUtil.cloneRow(kv);
        }
        Pair<String, Permission> permissionsOfUserOnTable =
            parsePermissionRecord(entry, kv, null, null, false, null);
        if (permissionsOfUserOnTable != null) {
          String username = permissionsOfUserOnTable.getFirst();
          Permission permission = permissionsOfUserOnTable.getSecond();
          perms.put(username, new UserPermission(username, permission));
        }
      }
      if (entry != null) {
        allPerms.put(entry, perms);
      }
      if (!hasNext) {
        break;
      }
    }
  } finally {
    if (iScanner != null) {
      iScanner.close();
    }
  }

  return allPerms;
}
 
Example 18
Source File: TestLocalTableState.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@SuppressWarnings("unchecked")
@Test
public void testOnlyLoadsRequestedColumns() throws Exception {
  // setup mocks
  RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);

  HRegion region = Mockito.mock(HRegion.class);
  Mockito.when(env.getRegion()).thenReturn(region);
  RegionScanner scanner = Mockito.mock(RegionScanner.class);
  Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
  final KeyValue storedKv =
      new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
  storedKv.setMemstoreTS(2);
  Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];

      list.add(storedKv);
      return false;
    }
  });
  LocalHBaseState state = new LocalTable(env);
  Put pendingUpdate = new Put(row);
  pendingUpdate.add(fam, qual, ts, val);
  LocalTableState table = new LocalTableState(env, state, pendingUpdate);

  // do the lookup for the given column
  ColumnReference col = new ColumnReference(fam, qual);
  table.setCurrentTimestamp(ts);
  // check that the value is there
  Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col));
  Scanner s = p.getFirst();
  // make sure it read the table the one time
  assertEquals("Didn't get the stored keyvalue!", storedKv, s.next());

  // on the second lookup it shouldn't access the underlying table again - the cached columns
  // should know they are done
  p = table.getIndexedColumnsTableState(Arrays.asList(col));
  s = p.getFirst();
  assertEquals("Lost already loaded update!", storedKv, s.next());
  Mockito.verify(env, Mockito.times(1)).getRegion();
  Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
}
 
Example 19
Source File: TestPerColumnFamilyFlush.java    From hbase with Apache License 2.0
private void doTestLogReplay() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 10000);
  // Carefully chosen limits so that the memstore just flushes when we're done
  conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName());
  conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 2500);
  final int numRegionServers = 4;
  try {
    TEST_UTIL.startMiniCluster(numRegionServers);
    TEST_UTIL.getAdmin().createNamespace(
      NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build());
    Table table = TEST_UTIL.createTable(TABLENAME, FAMILIES);

    // Add 100 edits for CF1, 20 for CF2, 20 for CF3.
    // These will all be interleaved in the log.
    for (int i = 1; i <= 80; i++) {
      table.put(createPut(1, i));
      if (i <= 10) {
        table.put(createPut(2, i));
        table.put(createPut(3, i));
      }
    }
    Thread.sleep(1000);

    Pair<HRegion, HRegionServer> desiredRegionAndServer = getRegionWithName(TABLENAME);
    HRegion desiredRegion = desiredRegionAndServer.getFirst();
    assertTrue("Could not find a region which hosts the new region.", desiredRegion != null);

    // Flush the region selectively.
    desiredRegion.flush(false);

    long totalMemstoreSize;
    long cf1MemstoreSize, cf2MemstoreSize, cf3MemstoreSize;
    totalMemstoreSize = desiredRegion.getMemStoreDataSize();

    // Find the sizes of the memstores of each CF.
    cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getMemStoreSize().getDataSize();
    cf2MemstoreSize = desiredRegion.getStore(FAMILY2).getMemStoreSize().getDataSize();
    cf3MemstoreSize = desiredRegion.getStore(FAMILY3).getMemStoreSize().getDataSize();

    // CF1 Should have been flushed
    assertEquals(0, cf1MemstoreSize);
    // CF2 and CF3 shouldn't have been flushed.
    // TODO: This test doesn't allow for this case:
    // " Since none of the CFs were above the size, flushing all."
    // i.e. a flush happens before we get to here and its a flush-all.
    assertTrue(cf2MemstoreSize >= 0);
    assertTrue(cf3MemstoreSize >= 0);
    assertEquals(totalMemstoreSize, cf2MemstoreSize + cf3MemstoreSize);

    // Wait for the RS report to go across to the master, so that the master
    // is aware of which sequence ids have been flushed, before we kill the RS.
    // If in production, the RS dies before the report goes across, we will
    // safely replay all the edits.
    Thread.sleep(2000);

    // Abort the region server where we have the region hosted.
    HRegionServer rs = desiredRegionAndServer.getSecond();
    rs.abort("testing");

    // The aborted region server's regions will be eventually assigned to some
    // other region server, and the get RPC call (inside verifyEdit()) will
    // retry for some time till the regions come back up.

    // Verify that all the edits are safe.
    for (int i = 1; i <= 80; i++) {
      verifyEdit(1, i, table);
      if (i <= 10) {
        verifyEdit(2, i, table);
        verifyEdit(3, i, table);
      }
    }
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Example 20
Source File: GroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
/**
 * Used for an aggregate query in which the key order matches the group by key order. In this
 * case, we can do the aggregation as we scan, by detecting when the group by key changes.
 * @param limit TODO
 * @throws IOException
 */
private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
        final ServerAggregators aggregators, final long limit) throws IOException {

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(LogUtil.addCustomAnnotations(
                "Grouped aggregation over ordered rows with scan " + scan + ", group by "
                + expressions + ", aggregators " + aggregators,
                ScanUtil.getCustomAnnotations(scan)));
    }
    final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    final boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers);
    final PTable.QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);
    return new BaseRegionScanner(scanner) {
        private long rowCount = 0;
        private ImmutableBytesPtr currentKey = null;

        @Override
        public boolean next(List<Cell> results) throws IOException {
            boolean hasMore;
            boolean atLimit;
            boolean aggBoundary = false;
            Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
            ImmutableBytesPtr key = null;
            Aggregator[] rowAggregators = aggregators.getAggregators();
            // If we're calculating no aggregate functions, we can exit at the
            // start of a new row. Otherwise, we have to wait until an agg
            int countOffset = rowAggregators.length == 0 ? 1 : 0;
            Region region = c.getEnvironment().getRegion();
            boolean acquiredLock = false;
            try {
                region.startRegionOperation();
                acquiredLock = true;
                synchronized (scanner) {
                    do {
                        List<Cell> kvs = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                        // Results are potentially returned even when the return
                        // value of s.next is false
                        // since this is an indication of whether or not there
                        // are more values after the
                        // ones returned
                        hasMore = scanner.nextRaw(kvs);
                        if (!kvs.isEmpty()) {
                            result.setKeyValues(kvs);
                            key = TupleUtil.getConcatenatedValue(result, expressions);
                            aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
                            if (!aggBoundary) {
                                aggregators.aggregate(rowAggregators, result);
                                if (LOGGER.isDebugEnabled()) {
                                    LOGGER.debug(LogUtil.addCustomAnnotations(
                                        "Row passed filters: " + kvs
                                        + ", aggregated values: "
                                        + Arrays.asList(rowAggregators),
                                        ScanUtil.getCustomAnnotations(scan)));
                                }
                                currentKey = key;
                            }
                        }
                        atLimit = rowCount + countOffset >= limit;
                        // Do rowCount + 1 b/c we don't have to wait for a complete
                        // row in the case of a DISTINCT with a LIMIT
                    } while (hasMore && !aggBoundary && !atLimit);
                }
            } finally {
                if (acquiredLock) region.closeRegionOperation();
            }

            if (currentKey != null) {
                byte[] value = aggregators.toBytes(rowAggregators);
                Cell keyValue =
                        PhoenixKeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(),
                            currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN,
                            AGG_TIMESTAMP, value, 0, value.length);
                results.add(keyValue);
                // If we're at an aggregation boundary, reset the
                // aggregators and
                // aggregate with the current result (which is not a part of
                // the returned result).
                if (aggBoundary) {
                    aggregators.reset(rowAggregators);
                    aggregators.aggregate(rowAggregators, result);
                    currentKey = key;
                    rowCount++;
                    atLimit |= rowCount >= limit;
                }
            }
            // Continue if there are more
            if (!atLimit && (hasMore || aggBoundary)) {
                return true;
            }
            currentKey = null;
            return false;
        }
    };
}