org.apache.hadoop.hbase.HConstants Java Examples

The following examples show how to use org.apache.hadoop.hbase.HConstants. They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
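
Before the examples, here is a minimal self-contained sketch of the most common HConstants pattern: the class supplies well-known configuration keys, their defaults, and sentinel byte arrays such as the empty start and stop rows. The constant and method names below are real HBase 2.x API; the class name HConstantsQuickStart and the printed messages are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;

public class HConstantsQuickStart {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Read a timeout using the key and default defined in HConstants.
    int zkTimeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
        HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
    System.out.println("ZooKeeper session timeout: " + zkTimeout + " ms");

    // Use the sentinel byte arrays for an unbounded scan over a table.
    Scan scan = new Scan()
        .withStartRow(HConstants.EMPTY_START_ROW)
        .withStopRow(HConstants.EMPTY_END_ROW);
    System.out.println("Scan bounds: " + scan);
  }
}
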
Example #1
Source File: WhereOptimizerTest.java    From phoenix with Apache License 2.0
/**
 * With only a subset of the row key columns present (including the leading column),
 * Phoenix should optimize the scan's start row to include the row key columns
 * that occur contiguously at the front of the RVC.
 *
 * Table entity_history has its row key defined as (organization_id, parent_id, created_date, entity_history_id).
 * This test uses (organization_id, parent_id, entity_history_id) in the RVC, so the start row should consist of
 * organization_id and parent_id only.
 * @throws SQLException
 */
@Test
public void testRVCExpressionWithSubsetOfPKCols() throws SQLException {
    String tenantId = "000000000000001";
    String parentId = "000000000000002";
    String entityHistId = "000000000000003";
    
    String query = "select * from entity_history where (organization_id, parent_id, entity_history_id) >= (?,?,?)";
    List<Object> binds = Arrays.<Object>asList(tenantId, parentId, entityHistId);
    StatementContext context = compileStatement(query, binds);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();
    assertNotNull(filter);
    assertTrue(filter instanceof RowKeyComparisonFilter);
    byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(parentId));
    assertArrayEquals(expectedStartRow, scan.getStartRow());
    assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow());
}
 
Example #2
Source File: TestGetClosestAtOrBefore.java    From hbase with Apache License 2.0
/**
 * @param mr Region to search.
 * @param table Single-character table name.
 * @param rowToFind Row number to look for.
 * @param answer Expected row number; pass -1 if we're not to find anything.
 * @return Row found, or null if answer is -1.
 */
private byte[] findRow(final Region mr, final char table, final int rowToFind, final int answer)
  throws IOException {
  TableName tableb = TableName.valueOf("" + table);
  // Find the row.
  byte[] tofindBytes = Bytes.toBytes((short) rowToFind);
  byte[] metaKey = RegionInfo.createRegionName(tableb, tofindBytes, HConstants.NINES, false);
  LOG.info("find=" + new String(metaKey, StandardCharsets.UTF_8));
  Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY);
  if (answer == -1) {
    assertNull(r);
    return null;
  }
  assertTrue(
    Bytes.compareTo(Bytes.toBytes((short) answer), extractRowFromMetaRow(r.getRow())) == 0);
  return r.getRow();
}
 
Example #3
Source File: MetricsRegionServerWrapperImpl.java    From hbase with Apache License 2.0
public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
  this.regionServer = regionServer;
  initBlockCache();
  initMobFileCache();

  this.period = regionServer.getConfiguration().getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
    HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);

  this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
  this.runnable = new RegionServerMetricsWrapperRunnable();
  this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
    TimeUnit.MILLISECONDS);
  this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  this.allocator = regionServer.getRpcServer().getByteBuffAllocator();

  try {
    this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
  } catch (IOException e) {
    LOG.warn("Failed to get hedged metrics", e);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Computing regionserver metrics every " + this.period + " milliseconds");
  }
}
 
Example #4
Source File: TestHFileEncryption.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Disable block cache in this test.
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
  conf.setInt("hfile.format.version", 3);

  fs = FileSystem.get(conf);

  cryptoContext = Encryption.newContext(conf);
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Cipher aes = Encryption.getCipher(conf, algorithm);
  assertNotNull(aes);
  cryptoContext.setCipher(aes);
  byte[] key = new byte[aes.getKeyLength()];
  RNG.nextBytes(key);
  cryptoContext.setKey(key);
}
 
Example #5
Source File: TestThriftConnection.java    From hbase with Apache License 2.0
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
      ThriftConnection.class.getName());
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
  }
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
 
Example #6
Source File: PerformanceEvaluation.java    From hbase with Apache License 2.0
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, this.totalRows);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new ArrayBackedTag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
 
Example #7
Source File: TestFavoredNodeAssignmentHelper.java    From hbase with Apache License 2.0
@Test
public void testGetFavoredNodes() throws IOException {
  Map<String,Integer> rackToServerCount = new HashMap<>();
  Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
  for (String rack : rackList) {
    rackToServerCount.put(rack, 4);
  }
  List<ServerName> servers = getServersFromRack(rackToServerCount);

  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  helper.initialize();
  assertTrue(helper.canPlaceFavoredNodes());

  RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey(HConstants.EMPTY_START_ROW)
      .setEndKey(HConstants.EMPTY_END_ROW)
      .build();

  for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
    List<ServerName> fn = helper.generateFavoredNodes(region);
    checkDuplicateFN(fn);
    checkFNRacks(fn);
  }
}
 
Example #8
Source File: TestRecoverableZooKeeper.java    From hbase with Apache License 2.0
@Test
public void testSetDataVersionMismatchInLoop() throws Exception {
  String znode = "/hbase/splitWAL/9af7cfc9b15910a0b3d714bf40a3248f";
  Configuration conf = TEST_UTIL.getConfiguration();
  ZKWatcher zkw = new ZKWatcher(conf, "testSetDataVersionMismatchInLoop",
      abortable, true);
  String ensemble = ZKConfig.getZKQuorumServersString(conf);
  RecoverableZooKeeper rzk = ZKUtil.connect(conf, ensemble, zkw);
  rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  rzk.setData(znode, Bytes.toBytes("OPENING"), 0);
  Field zkField = RecoverableZooKeeper.class.getDeclaredField("zk");
  zkField.setAccessible(true);
  int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
  ZookeeperStub zkStub = new ZookeeperStub(ensemble, timeout, zkw);
  zkStub.setThrowExceptionInNumOperations(1);
  zkField.set(rzk, zkStub);
  byte[] opened = Bytes.toBytes("OPENED");
  rzk.setData(znode, opened, 1);
  byte[] data = rzk.getData(znode, false, new Stat());
  assertTrue(Bytes.equals(opened, data));
}
 
Example #9
Source File: TestUtil.java    From phoenix with Apache License 2.0
public static void clearMetaDataCache(Connection conn) throws Throwable {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    Table htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearCacheResponse>() {
            @Override
            public ClearCacheResponse call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<ClearCacheResponse> rpcCallback =
                        new BlockingRpcCallback<ClearCacheResponse>();
                ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
                instance.clearCache(controller, builder.build(), rpcCallback);
                if(controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get(); 
            }
          });
}
 
Example #10
Source File: TestVisibilityLabelsWithACL.java    From hbase with Apache License 2.0
private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  Table table = null;
  try {
    table = TEST_UTIL.createTable(tableName, fam);
    int i = 1;
    List<Put> puts = new ArrayList<>(labelExps.length);
    for (String labelExp : labelExps) {
      Put put = new Put(Bytes.toBytes("row" + i));
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      put.setCellVisibility(new CellVisibility(labelExp));
      puts.add(put);
      i++;
    }
    table.put(puts);
  } finally {
    if (table != null) {
      table.close();
    }
  }
  return table;
}
 
Example #11
Source File: WhereOptimizerTest.java    From phoenix with Apache License 2.0
@Test
public void testOrDiffColExpression() throws SQLException {
    String tenantId1 = "000000000000001";
    String entityId1 = "002333333333331";
    String query = "select * from atable where organization_id = ? or entity_id  = ?";
    List<Object> binds = Arrays.<Object>asList(tenantId1,entityId1);
    StatementContext context = compileStatement(query, binds);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();

    assertNotNull(filter);
    assertTrue(filter instanceof RowKeyComparisonFilter);
    ScanRanges scanRanges = context.getScanRanges();
    assertEquals(ScanRanges.EVERYTHING,scanRanges);
    assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow());
    assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow());
}
 
Example #12
Source File: CellCounter.java    From cloud-bigtable-examples with Apache License 2.0
private static long[] getTimeRange(String[] args) throws IOException {
  final String startTimeArgKey = "--starttime=";
  final String endTimeArgKey = "--endtime=";
  long startTime = 0L;
  long endTime = 0L;

  for (int i = 1; i < args.length; i++) {
    System.out.println("i:" + i + "arg[i]" + args[i]);
    if (args[i].startsWith(startTimeArgKey)) {
      startTime = Long.parseLong(args[i].substring(startTimeArgKey.length()));
    }
    if (args[i].startsWith(endTimeArgKey)) {
      endTime = Long.parseLong(args[i].substring(endTimeArgKey.length()));
    }
  }

  if (startTime == 0 && endTime == 0) {
    return null;
  }

  endTime = endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime;
  LOG.warn("Got the timerange : " + startTime + " - " + endTime);
  return new long [] {startTime, endTime};
}
 
Example #13
Source File: BaseLoadBalancer.java    From hbase with Apache License 2.0
@Override
public List<RegionPlan>
    balanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
  if (isByTable) {
    List<RegionPlan> result = new ArrayList<>();
    loadOfAllTable.forEach((tableName, loadOfOneTable) -> {
      LOG.info("Start Generate Balance plan for table: " + tableName);
      List<RegionPlan> partialPlans = balanceTable(tableName, loadOfOneTable);
      if (partialPlans != null) {
        result.addAll(partialPlans);
      }
    });
    return result;
  } else {
    LOG.info("Start Generate Balance plan for cluster.");
    return balanceTable(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable));
  }
}
 
Example #14
Source File: WhereOptimizerTest.java    From phoenix with Apache License 2.0
@Test
public void testUseOfFunctionOnLHSInMiddleOfRVCForLTE() throws SQLException {
    String tenantId = "000000000000001";
    String parentId = "000000000000002";
    String subStringParentId = parentId.substring(0, 3);
    Date createdDate = new Date(System.currentTimeMillis());
    
    String query = "select * from entity_history where (organization_id, substr(parent_id, 1, 3), created_date) <= (?,?,?)";
    List<Object> binds = Arrays.<Object>asList(tenantId, subStringParentId, createdDate);
    StatementContext context = compileStatement(query, binds);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();
    assertNotNull(filter);
    assertTrue(filter instanceof RowKeyComparisonFilter);
    byte[] expectedStopRow = ByteUtil.concat(
        PVarchar.INSTANCE.toBytes(tenantId), ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(subStringParentId)));
    assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow());
    assertArrayEquals(expectedStopRow, scan.getStopRow());
}
 
Example #15
Source File: WALKeyImpl.java    From hbase with Apache License 2.0
@Override
public long estimatedSerializedSizeOf() {
  long size = encodedRegionName != null ? encodedRegionName.length : 0;
  size += tablename != null ? tablename.toBytes().length : 0;
  if (clusterIds != null) {
    size += 16 * clusterIds.size();
  }
  if (nonceGroup != HConstants.NO_NONCE) {
    size += Bytes.SIZEOF_LONG; // nonce group
  }
  if (nonce != HConstants.NO_NONCE) {
    size += Bytes.SIZEOF_LONG; // nonce
  }
  if (replicationScope != null) {
    for (Map.Entry<byte[], Integer> scope: replicationScope.entrySet()) {
      size += scope.getKey().length;
      size += Bytes.SIZEOF_INT;
    }
  }
  size += Bytes.SIZEOF_LONG; // sequence number
  size += Bytes.SIZEOF_LONG; // write time
  if (origLogSeqNum > 0) {
    size += Bytes.SIZEOF_LONG; // original sequence number
  }
  return size;
}
 
Example #16
Source File: TestCompactionScanQueryMatcher.java    From hbase with Apache License 2.0
private void testDropDeletes(byte[] from, byte[] to, byte[][] rows, MatchCode... expected)
    throws IOException {
  long now = EnvironmentEdgeManager.currentTime();
  // Set time to purge deletes to negative value to avoid it ever happening.
  ScanInfo scanInfo = new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
      HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false);

  CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
    ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
    HConstants.OLDEST_TIMESTAMP, now, from, to, null);
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(rows.length);
  byte[] prevRow = null;
  for (byte[] row : rows) {
    if (prevRow == null || !Bytes.equals(prevRow, row)) {
      qm.setToNewRow(KeyValueUtil.createFirstOnRow(row));
      prevRow = row;
    }
    actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
  }

  assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
    assertEquals(expected[i], actual.get(i));
  }
}
 
Example #17
Source File: PhoenixConfigurationUtilTest.java    From phoenix with Apache License 2.0
@Test
public void testUpsertStatement() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
    final String tableName = "TEST_TABLE";
    try {
        String ddl = "CREATE TABLE "+ tableName + 
                "  (a_string varchar not null, a_binary varbinary not null, col1 integer" +
                "  CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n";
        conn.createStatement().execute(ddl);
        final Configuration configuration = new Configuration();
        configuration.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
        PhoenixConfigurationUtil.setOutputTableName(configuration, tableName);
        PhoenixConfigurationUtil.setPhysicalTableName(configuration, tableName);
        final String upsertStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration);
        final String expectedUpsertStatement = "UPSERT INTO " + tableName + " VALUES (?, ?, ?)";
        assertEquals(expectedUpsertStatement, upsertStatement);
    } finally {
        conn.close();
    }
 }
 
Example #18
Source File: TestFsRegionsMetaRecoverer.java    From hbase-operator-tools with Apache License 2.0
@Test
public void testPutRegionInfoFromHdfsInMeta() throws Exception {
  RegionInfo info = this.createRegionInfo("test-tbl");
  Path regionPath = new Path("/hbase/data/default/test-tbl/" + info.getEncodedName());
  FSDataInputStream fis = new FSDataInputStream(new TestInputStreamSeekable(info));
  when(this.mockedFileSystem.open(new Path(regionPath, ".regioninfo")))
    .thenReturn(fis);
  fixer.putRegionInfoFromHdfsInMeta(regionPath);
  Mockito.verify(this.mockedConnection).getTable(TableName.META_TABLE_NAME);
  ArgumentCaptor<Put> captor = ArgumentCaptor.forClass(Put.class);
  Mockito.verify(this.mockedTable).put(captor.capture());
  Put capturedPut = captor.getValue();
  List<Cell> cells = capturedPut.get(HConstants.CATALOG_FAMILY,
    HConstants.STATE_QUALIFIER);
  assertEquals(1, cells.size());
  String state = Bytes.toString(cells.get(0).getValueArray(),
    cells.get(0).getValueOffset(), cells.get(0).getValueLength());
  assertEquals(RegionState.State.valueOf(state), RegionState.State.CLOSED);
  cells = capturedPut.get(HConstants.CATALOG_FAMILY,
    HConstants.REGIONINFO_QUALIFIER);
  byte[] returnedInfo = Bytes.copy(cells.get(0).getValueArray(),
    cells.get(0).getValueOffset(), cells.get(0).getValueLength());
  assertEquals(info, RegionInfo.parseFrom(returnedInfo));
}
 
Example #19
Source File: ByteUtil.java    From phoenix with Apache License 2.0
public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
    // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
    // Search for the place where the trailing 0xFFs start
    int offset = rowKeyPrefix.length;
    while (offset > 0) {
        if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
            break;
        }
        offset--;
    }
    if (offset == 0) {
        // We got an 0xFFFF... (only FFs) stopRow value which is
        // the last possible prefix before the end of the table.
        // So set it to stop at the 'end of the table'
        return HConstants.EMPTY_END_ROW;
    }
    // Copy the right length of the original
    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
    // And increment the last one
    newStopRow[newStopRow.length - 1]++;
    return newStopRow;
}
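
The key returned above is typically used as an exclusive stop row when scanning every row that shares a given prefix. A short usage sketch (the prefix value is a placeholder; Bytes and Scan are the standard HBase classes, assumed imported):

byte[] prefix = Bytes.toBytes("org1");
Scan scan = new Scan()
    .withStartRow(prefix)
    .withStopRow(ByteUtil.calculateTheClosestNextRowKeyForPrefix(prefix));
// If the prefix is all 0xFF bytes, the stop row is HConstants.EMPTY_END_ROW,
// i.e. the scan runs to the end of the table.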
 
Example #20
Source File: TestMinorCompaction.java    From hbase with Apache License 2.0
/** constructor */
public TestMinorCompaction() {
  super();

  // Set cache flush size to 1MB
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
  compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

  firstRowBytes = START_KEY_BYTES;
  secondRowBytes = START_KEY_BYTES.clone();
  // Increment the least significant character so we get to next row.
  secondRowBytes[START_KEY_BYTES.length - 1]++;
  thirdRowBytes = START_KEY_BYTES.clone();
  thirdRowBytes[START_KEY_BYTES.length - 1] =
      (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2);
  col1 = Bytes.toBytes("column1");
  col2 = Bytes.toBytes("column2");
}
 
Example #21
Source File: HFileReplicator.java    From hbase with Apache License 2.0
public Void replicate() throws IOException {
  // Copy all the hfiles to the local file system
  Map<String, Path> tableStagingDirsMap = copyHFilesToStagingDir();

  int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);

  for (Entry<String, Path> tableStagingDir : tableStagingDirsMap.entrySet()) {
    String tableNameString = tableStagingDir.getKey();
    Path stagingDir = tableStagingDir.getValue();
    TableName tableName = TableName.valueOf(tableNameString);

    // Prepare the queue of hfiles to be loaded (replicated)
    Deque<LoadQueueItem> queue = new LinkedList<>();
    BulkLoadHFilesTool.prepareHFileQueue(conf, connection, tableName, stagingDir, queue, false,
      false);

    if (queue.isEmpty()) {
      LOG.warn("Did not find any files to replicate in directory {}", stagingDir.toUri());
      return null;
    }
    fsDelegationToken.acquireDelegationToken(sinkFs);
    try {
      doBulkLoad(conf, tableName, stagingDir, queue, maxRetries);
    } finally {
      cleanup(stagingDir);
    }
  }
  return null;
}
 
Example #22
Source File: TestGetLastFlushedSequenceId.java    From hbase with Apache License 2.0
@Test
public void test() throws IOException, InterruptedException {
  testUtil.getAdmin().createNamespace(
    NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
  Table table = testUtil.createTable(tableName, families);
  table.put(new Put(Bytes.toBytes("k"))
          .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
  MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  Region region = null;
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region r : hrs.getRegions(tableName)) {
      region = r;
      break;
    }
  }
  assertNotNull(region);
  Thread.sleep(2000);
  RegionStoreSequenceIds ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
  // This will be the sequenceid just before that of the earliest edit in memstore.
  long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
  assertTrue(storeSequenceId > 0);
  testUtil.getAdmin().flush(tableName);
  Thread.sleep(2000);
  ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
    ids.getLastFlushedSequenceId() > storeSequenceId);
  assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
  table.close();
}
 
Example #23
Source File: AsyncServerRequestRpcRetryingCaller.java    From hbase with Apache License 2.0
public AsyncServerRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn,
    long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs,
    long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable<T> callable) {
  super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseForCQTBENs, maxAttempts,
    operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
  this.serverName = serverName;
  this.callable = callable;
}
 
Example #24
Source File: HStore.java    From hbase with Apache License 2.0
/**
 * Returns the configured checksum algorithm.
 * @param conf The configuration
 * @return The checksum algorithm that is set in the configuration
 */
public static ChecksumType getChecksumType(Configuration conf) {
  String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME);
  if (checksumName == null) {
    return ChecksumType.getDefaultChecksumType();
  } else {
    return ChecksumType.nameToType(checksumName);
  }
}
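
A brief usage sketch: HConstants.CHECKSUM_TYPE_NAME is the real configuration key read above, and "CRC32C" is one of the ChecksumType enum names; the rest is illustrative.

Configuration conf = HBaseConfiguration.create();
// With the key unset, the default checksum algorithm is returned.
ChecksumType defaultType = HStore.getChecksumType(conf);
// An explicit setting overrides the default.
conf.set(HConstants.CHECKSUM_TYPE_NAME, "CRC32C");
ChecksumType configured = HStore.getChecksumType(conf); // ChecksumType.CRC32C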
 
Example #25
Source File: ConnectionUtil.java    From phoenix with Apache License 2.0
/**
 * Returns a Phoenix {@link Connection} built from the given Configuration.
 * @param configuration the configuration supplying the HConstants.ZOOKEEPER_QUORUM value
 * @return a new Phoenix JDBC connection
 * @throws SQLException if the connection cannot be established
 */
public static Connection getConnection(final Configuration configuration) throws SQLException {
    Preconditions.checkNotNull(configuration);
    final Properties props = new Properties();
    final Connection conn = DriverManager.getConnection(QueryUtil.getUrl(configuration.get(HConstants.ZOOKEEPER_QUORUM)), props);
    return conn;
}
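
A short usage sketch (the quorum string is a placeholder; QueryUtil.getUrl builds the Phoenix JDBC URL from it, as in the method above, and the SYSTEM.CATALOG query is just a sanity check):

Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.ZOOKEEPER_QUORUM, "zk-host-1,zk-host-2"); // placeholder quorum
try (Connection conn = ConnectionUtil.getConnection(conf)) {
    conn.createStatement().executeQuery("SELECT * FROM SYSTEM.CATALOG LIMIT 1");
}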
 
Example #26
Source File: RSRpcServices.java    From hbase with Apache License 2.0
/**
 * Get some information about the region server.
 *
 * @param controller the RPC controller
 * @param request the request
 * @return the server info response, containing the server name and info port
 * @throws ServiceException if the region server is not yet open
 */
@Override
@QosPriority(priority=HConstants.ADMIN_QOS)
public GetServerInfoResponse getServerInfo(final RpcController controller,
    final GetServerInfoRequest request) throws ServiceException {
  try {
    checkOpen();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
  requestCount.increment();
  int infoPort = regionServer.infoServer != null ? regionServer.infoServer.getPort() : -1;
  return ResponseConverter.buildGetServerInfoResponse(regionServer.serverName, infoPort);
}
 
Example #27
Source File: HRegionServer.java    From hbase with Apache License 2.0
/**
 * @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
 */
public static void main(String[] args) {
  LOG.info("STARTING executorService " + HRegionServer.class.getSimpleName());
  VersionInfo.logVersion();
  Configuration conf = HBaseConfiguration.create();
  @SuppressWarnings("unchecked")
  Class<? extends HRegionServer> regionServerClass = (Class<? extends HRegionServer>) conf
      .getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);

  new HRegionServerCommandLine(regionServerClass).doMain(args);
}
 
Example #28
Source File: TestAsyncRegionAdminApi2.java    From hbase with Apache License 2.0
@Test
public void testGetRegionLocation() throws Exception {
  RawAsyncHBaseAdmin rawAdmin = (RawAsyncHBaseAdmin) ASYNC_CONN.getAdmin();
  TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
  AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(tableName);
  HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm")).get();
  RegionInfo region = regionLocation.getRegion();
  byte[] regionName = regionLocation.getRegion().getRegionName();
  HRegionLocation location = rawAdmin.getRegionLocation(regionName).get();
  assertTrue(Bytes.equals(regionName, location.getRegion().getRegionName()));
  location = rawAdmin.getRegionLocation(region.getEncodedNameAsBytes()).get();
  assertTrue(Bytes.equals(regionName, location.getRegion().getRegionName()));
}
 
Example #29
Source File: WALKeyImpl.java    From hbase with Apache License 2.0
@VisibleForTesting
public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
    final long now, UUID clusterId, MultiVersionConcurrencyControl mvcc) {
  List<UUID> clusterIds = new ArrayList<>(1);
  clusterIds.add(clusterId);
  init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE,
    HConstants.NO_NONCE, mvcc, null, null);
}
 
Example #30
Source File: ServerCommandLine.java    From hbase with Apache License 2.0
/**
 * Print into log some of the important hbase attributes.
 */
private static void logHBaseConfigs(Configuration conf) {
  final String[] keys = new String[] {
    // Expand this list as you see fit.
    "hbase.tmp.dir",
    HConstants.HBASE_DIR,
    HConstants.CLUSTER_DISTRIBUTED,
    HConstants.ZOOKEEPER_QUORUM,
  };
  for (String key: keys) {
    LOG.info(key + ": " + conf.get(key));
  }
}