Java Code Examples for org.apache.hadoop.hbase.HConstants

The following are top voted examples showing how to use org.apache.hadoop.hbase.HConstants. They are extracted from open source projects.
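As a quick orientation before the examples, here is a minimal, self-contained sketch of the pattern most of them share: HConstants supplies well-known configuration keys and default values that are read and written through a Hadoop Configuration. The class and key names below appear in the examples that follow; the concrete values are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class HConstantsDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Use HConstants keys instead of hard-coded strings and magic defaults.
    conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost");
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    int retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    System.out.println("client retries: " + retries);
  }
}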
Example 1
Project: ditb   File: TestQuotaThrottle.java   (7 votes)
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.setTEST_FORCE_REFRESH(true);

  tables = new HTable[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }
}
 
Example 2
Project: ditb   File: DefaultWALProvider.java   (7 votes)
/**
 * This function returns the region server name from a WAL file path in one of the following
 * formats:
 * <ul>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;-splitting/...</li>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;/...</li>
 * </ul>
 * @param logFile
 * @return null if the passed in logFile isn't a valid WAL file path
 */
public static ServerName getServerNameFromWALDirectoryName(Path logFile) {
  String logDirName = logFile.getParent().getName();
  // We were passed the directory and not a file in it.
  if (logDirName.equals(HConstants.HREGION_LOGDIR_NAME)) {
    logDirName = logFile.getName();
  }
  ServerName serverName = null;
  if (logDirName.endsWith(SPLITTING_EXT)) {
    logDirName = logDirName.substring(0, logDirName.length() - SPLITTING_EXT.length());
  }
  try {
    serverName = ServerName.parseServerName(logDirName);
  } catch (IllegalArgumentException ex) {
    serverName = null;
    LOG.warn("Cannot parse a server name from path=" + logFile + "; " + ex.getMessage());
  }
  if (serverName != null && serverName.getStartcode() < 0) {
    LOG.warn("Invalid log file path=" + logFile);
    serverName = null;
  }
  return serverName;
}
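For illustration, here is a hypothetical call to the method above, assuming the HBase 1.x class layout used by this project (org.apache.hadoop.hbase.wal.DefaultWALProvider). The path, host, port and startcode are made-up values that match the formats listed in the javadoc.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

public class WalServerNameDemo {
  public static void main(String[] args) {
    // A WAL under a "-splitting" directory; the directory name encodes host,port,startcode.
    Path wal = new Path("hdfs://namenode/hbase/.logs/"
        + "host1.example.org,16020,1473439387843-splitting/host1.example.org%2C16020%2C1473439387843.default.1");
    ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(wal);
    System.out.println(sn);  // parsed server name, or null if the path is not a valid WAL path
  }
}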
 
Example 3
Project: ditb   File: TestAssignmentManagerOnCluster.java   (6 votes)
static void setupOnce() throws Exception {
  // Use our load balancer to control region plans
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    MyLoadBalancer.class, LoadBalancer.class);
  conf.setClass(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    MyRegionObserver.class, RegionObserver.class);
  // Reduce the maximum attempts to speed up the test
  conf.setInt("hbase.assignment.maximum.attempts", 3);
  // Put meta on master to avoid meta server shutdown handling
  conf.set("hbase.balancer.tablesOnMaster", "hbase:meta");
  conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
  conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);

  TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class);
  admin = TEST_UTIL.getHBaseAdmin();
}
 
Example 4
Project: ditb   File: TestMetaWithReplicas.java   (6 votes)
@Test
public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
  ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
      TEST_UTIL.getConfiguration());
  RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
      false, false);
  HBaseFsckRepair.closeRegionSilentlyAndWait(c,
      rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo());
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(2));
  // check that problem exists
  HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION});
  // fix the problem
  hbck = doFsck(TEST_UTIL.getConfiguration(), true);
  // run hbck again to make sure we don't see any errors
  hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{});
}
 
Example 5
Project: ditb   File: TestCoprocessorInterface.java   (6 votes)
Configuration initSplit() {
  // Always compact if there is more than one store file.
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 2);
  // Make lease timeout longer, lease checks less frequent
  TEST_UTIL.getConfiguration().setInt(
      "hbase.master.lease.thread.wakefrequency", 5 * 1000);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);
  // Increase the amount of time between client retries
  TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
  // This size should make it so we always split using the addContent
  // below.  After adding all data, the first region is 1.3M
  TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE,
      1024 * 128);
  TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster",
      true);
  TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);

  return TEST_UTIL.getConfiguration();
}
 
Example 6
Project: ditb   File: TestFSHLog.java   (6 votes)
protected void addEdits(WAL log,
                        HRegionInfo hri,
                        HTableDescriptor htd,
                        int times,
                        MultiVersionConcurrencyControl mvcc)
    throws IOException {
  final byte[] row = Bytes.toBytes("row");
  for (int i = 0; i < times; i++) {
    long timestamp = System.currentTimeMillis();
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, row, row, timestamp, row));
    WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(),
        WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
        HConstants.NO_NONCE, mvcc);
    log.append(htd, hri, key, cols, true);
  }
  log.sync();
}
 
Example 7
Project: ditb   File: HRegionServerCommandLine.java   (6 votes)
private int start() throws Exception {
  Configuration conf = getConf();
  HRegionServer.loadWinterConf(conf, null);
  CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
  try {
    // If 'local', don't start a region server here. Defer to
    // LocalHBaseCluster. It manages 'local' clusters.
    if (LocalHBaseCluster.isLocal(conf)) {
      LOG.warn("Not starting a distinct region server because " + HConstants.CLUSTER_DISTRIBUTED
          + " is false");
    } else {
      logProcessInfo(getConf());
      HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf, cp);
      hrs.start();
      hrs.join();
      if (hrs.isAborted()) {
        throw new RuntimeException("HRegionServer Aborted");
      }
    }
  } catch (Throwable t) {
    LOG.error("Region server exiting", t);
    return 1;
  }
  return 0;
}
 
Example 8
Project: ditb   File: TestRSKilledWhenInitializing.java   (6 votes)
@Override
protected void handleReportForDutyResponse(RegionServerStartupResponse c) throws IOException {
  if (firstRS.getAndSet(false)) {
    InetSocketAddress address = super.getRpcServer().getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    for (NameStringPair e : c.getMapEntriesList()) {
      String key = e.getName();
      // The hostname the master sees us as.
      if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) {
        String hostnameFromMasterPOV = e.getValue();
        assertEquals(address.getHostName(), hostnameFromMasterPOV);
      }
    }
    while (!masterActive) {
      Threads.sleep(100);
    }
    super.kill();
  } else {
    super.handleReportForDutyResponse(c);
  }
}
 
Example 9
Project: ditb   File: SecureTestUtil.java   (6 votes)
/**
 * Revoke permissions globally from the given user. Will wait until all active
 * AccessController instances have updated their permissions caches or will
 * throw an exception upon timeout (10 seconds).
 */
public static void revokeGlobal(final HBaseTestingUtility util, final String user,
    final Permission.Action... actions) throws Exception {
  SecureTestUtil.updateACLs(util, new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        try (Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
          BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW);
          AccessControlService.BlockingInterface protocol =
              AccessControlService.newBlockingStub(service);
          ProtobufUtil.revoke(null, protocol, user, actions);
        }
      }
      return null;
    }
  });
}
 
Example 10
Project: ditb   File: ScanQueryMatcher.java   (6 votes)
public boolean moreRowsMayExistAfter(Cell kv) {
  if (this.isReversed) {
    if (rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
        kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) {
      return false;
    } else {
      return true;
    }
  }
  if (!Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) &&
      rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
          kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) {
    // KV >= STOPROW
    // then NO there is nothing left.
    return false;
  } else {
    return true;
  }
}
 
Example 11
Project: ditb   File: HFileBlock.java   (6 votes)
/**
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);

  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }

  baosInMemory = new ByteArrayOutputStream();

  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;

  this.fileContext = fileContext;
}
 
Example 12
Project: ditb   File: HFileBlock.java   (6 votes)
/**
 * Starts writing into the block. The previous block's data is discarded.
 *
 * @return the stream the user can write their data into
 * @throws IOException
 */
public DataOutputStream startWriting(BlockType newBlockType)
    throws IOException {
  if (state == State.BLOCK_READY && startOffset != -1) {
    // We had a previous block that was written to a stream at a specific
    // offset. Save that offset as the last offset of a block of that type.
    prevOffsetByType[blockType.getId()] = startOffset;
  }

  startOffset = -1;
  blockType = newBlockType;

  baosInMemory.reset();
  baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);

  state = State.WRITING;

  // We will compress it later in finishBlock()
  userDataStream = new DataOutputStream(baosInMemory);
  if (newBlockType == BlockType.DATA) {
    this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, userDataStream);
  }
  this.unencodedDataSizeWritten = 0;
  return userDataStream;
}
 
Example 13
Project: ditb   File: TestZKBasedOpenCloseRegion.java   (6 votes)
/**
 * If region open fails with an IOException in openRegion() while doing tableDescriptors.get(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
      regionServer.getServerName(), REGIONINFO);
    fail("It should throw IOException ");
  } catch (IOException e) {
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
 
Example 14
Project: ditb   File: BaseRowProcessorEndpoint.java   (6 votes)
/**
 * Pass a processor to region to process multiple rows atomically.
 * 
 * The RowProcessor implementations should be the inner classes of your
 * RowProcessorEndpoint. This way the RowProcessor can be class-loaded with
 * the Coprocessor endpoint together.
 *
 * See {@code TestRowProcessorEndpoint} for example.
 *
 * The request contains information for constructing the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object defines
 * the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    Region region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
 
Example 15
Project: ditb   File: DefaultVisibilityLabelServiceImpl.java   (6 votes)
/**
 * Adds the mutations to the labels region and sets the results in finalOpStatus. finalOpStatus
 * might already contain entries whose OpStatus is FAILURE. We leave those as-is and fill in
 * the others in order.
 * @param mutations
 * @param finalOpStatus
 * @return whether we need a ZK update or not.
 */
private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus)
    throws IOException {
  OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations
    .toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
  int i = 0;
  boolean updateZk = false;
  for (OperationStatus status : opStatus) {
    // Update ZK when at least one of the mutations was added successfully.
    updateZk = updateZk || (status.getOperationStatusCode() == OperationStatusCode.SUCCESS);
    for (; i < finalOpStatus.length; i++) {
      if (finalOpStatus[i] == null) {
        finalOpStatus[i] = status;
        break;
      }
    }
  }
  return updateZk;
}
 
Example 16
Project: ditb   File: ProcessBasedLocalHBaseCluster.java   (6 votes)
/**
 * Constructor. Modifies the passed configuration.
 * @param hbaseHome the top directory of the HBase source tree
 */
public ProcessBasedLocalHBaseCluster(Configuration conf,
    int numDataNodes, int numRegionServers) {
  this.conf = conf;
  this.hbaseHome = HBaseHomePath.getHomePath();
  this.numMasters = 1;
  this.numRegionServers = numRegionServers;
  this.workDir = hbaseHome + "/target/local_cluster";
  this.numDataNodes = numDataNodes;

  hbaseDaemonScript = hbaseHome + "/bin/hbase-daemon.sh";
  zkClientPort = HBaseTestingUtility.randomFreePort();

  this.rsPorts = sortedPorts(numRegionServers);
  this.masterPorts = sortedPorts(numMasters);

  conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
  conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
}
 
Example 17
Project: ditb   File: TokenUtil.java   (6 votes)
/**
 * Obtain and return an authentication token for the current user.
 * @param conn The HBase cluster connection
 * @return the authentication token instance
 */
public static Token<AuthenticationTokenIdentifier> obtainToken(
    Connection conn) throws IOException {
  Table meta = null;
  try {
    meta = conn.getTable(TableName.META_TABLE_NAME);
    CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
        AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());

    return ProtobufUtil.toToken(response.getToken());
  } catch (ServiceException se) {
    ProtobufUtil.toIOException(se);
  } finally {
    if (meta != null) {
      meta.close();
    }
  }
  // dummy return for ServiceException block
  return null;
}
 
Example 18
Project: ditb   File: TestCoprocessorScanPolicy.java   (6 votes)
@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
 
Example 19
Project: ditb   File: HBaseAdmin.java   (6 votes)
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 * <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
@Override
public synchronized void stopRegionServer(final String hostnamePort)
throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  AdminService.BlockingInterface admin =
    this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  StopServerRequest request = RequestConverter.buildStopServerRequest(
    "Called by admin client " + this.connection.toString());
  PayloadCarryingRpcController controller = rpcControllerFactory.newController();

  controller.setPriority(HConstants.HIGH_QOS);
  try {
    // TODO: this does not do retries, it should. Set priority and timeout in controller
    admin.stopServer(controller, request);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Example 20
Project: ditb   File: TestZKBasedOpenCloseRegion.java   (6 votes)
private static void waitUntilAllRegionsAssigned()
throws IOException {
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  while (true) {
    int rows = 0;
    Scan scan = new Scan();
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
    ResultScanner s = meta.getScanner(scan);
    for (Result r = null; (r = s.next()) != null;) {
      byte [] b =
        r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      if (b == null || b.length <= 0) {
        break;
      }
      rows++;
    }
    s.close();
    // If I get to here and all rows have a Server, then all have been assigned.
    if (rows >= countOfRegions) {
      break;
    }
    LOG.info("Found=" + rows);
    Threads.sleep(1000);
  }
  meta.close();
}
 
Example 21
Project: ditb   File: ServerNonceManager.java   (6 votes)
/**
 * Starts the operation if operation with such nonce has not already succeeded. If the
 * operation is in progress, waits for it to end and checks whether it has succeeded.
 * @param group Nonce group.
 * @param nonce Nonce.
 * @param stoppable Stoppable that terminates waiting (if any) when the server is stopped.
 * @return true if the operation has not already succeeded and can proceed; false otherwise.
 */
public boolean startOperation(long group, long nonce, Stoppable stoppable)
    throws InterruptedException {
  if (nonce == HConstants.NO_NONCE) return true;
  NonceKey nk = new NonceKey(group, nonce);
  OperationContext ctx = new OperationContext();
  while (true) {
    OperationContext oldResult = nonces.putIfAbsent(nk, ctx);
    if (oldResult == null) return true;

    // Collision with some operation - should be extremely rare.
    synchronized (oldResult) {
      int oldState = oldResult.getState();
      LOG.debug("Conflict detected by nonce: " + nk + ", " + oldResult);
      if (oldState != OperationContext.WAIT) {
        return oldState == OperationContext.PROCEED; // operation ended
      }
      oldResult.setHasWait();
      oldResult.wait(this.conflictWaitIterationMs); // operation is still active... wait and loop
      if (stoppable.isStopped()) {
        throw new InterruptedException("Server stopped");
      }
    }
  }
}
 
Example 22
Project: ditb   File: SequenceIdAccounting.java   (6 votes)
/**
 * Iterates over the given Map and compares sequence ids with corresponding
 * entries in {@link #oldestUnflushedRegionSequenceIds}. If a region in
 * {@link #oldestUnflushedRegionSequenceIds} has a sequence id less than that passed
 * in <code>sequenceids</code> then return it.
 * @param sequenceids Sequenceids keyed by encoded region name.
 * @return regions found in this instance with sequence ids less than those passed in.
 */
byte[][] findLower(Map<byte[], Long> sequenceids) {
  List<byte[]> toFlush = null;
  // Keeping the old behavior of iterating unflushedSeqNums under oldestSeqNumsLock.
  synchronized (tieLock) {
    for (Map.Entry<byte[], Long> e: sequenceids.entrySet()) {
      Map<byte[], Long> m = this.lowestUnflushedSequenceIds.get(e.getKey());
      if (m == null) continue;
      // The lowest sequence id outstanding for this region.
      long lowest = getLowestSequenceId(m);
      if (lowest != HConstants.NO_SEQNUM && lowest <= e.getValue()) {
        if (toFlush == null) toFlush = new ArrayList<byte[]>();
        toFlush.add(e.getKey());
      }
    }
  }
  return toFlush == null? null: toFlush.toArray(new byte[][] { HConstants.EMPTY_BYTE_ARRAY });
}
 
Example 23
Project: ditb   File: ZKServerTool.java   (6 votes)
public static ServerName[] readZKNodes(Configuration conf) {
  List<ServerName> hosts = new LinkedList<ServerName>();

  // Note that we do not simply grab the property
  // HConstants.ZOOKEEPER_QUORUM from the HBaseConfiguration because the
  // user may be using a zoo.cfg file.
  Properties zkProps = ZKConfig.makeZKProps(conf);
  for (Entry<Object, Object> entry : zkProps.entrySet()) {
    String key = entry.getKey().toString().trim();
    String value = entry.getValue().toString().trim();
    if (key.startsWith("server.")) {
      String[] parts = value.split(":");
      String host = parts[0];

      int port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;
      if (parts.length > 1) {
        port = Integer.parseInt(parts[1]);
      }
      hosts.add(ServerName.valueOf(host, port, -1));
    }
  }
  return hosts.toArray(new ServerName[hosts.size()]);
}
 
Example 24
Project: ditb   File: TestFSUtils.java   (6 votes)
@Test
public void testVersion() throws DeserializationException, IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final FileSystem fs = htu.getTestFileSystem();
  final Path rootdir = htu.getDataTestDir();
  assertNull(FSUtils.getVersion(fs, rootdir));
  // Write out old format version file.  See if we can read it in and convert.
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FSDataOutputStream s = fs.create(versionFile);
  final String version = HConstants.FILE_SYSTEM_VERSION;
  s.writeUTF(version);
  s.close();
  assertTrue(fs.exists(versionFile));
  FileStatus [] status = fs.listStatus(versionFile);
  assertNotNull(status);
  assertTrue(status.length > 0);
  String newVersion = FSUtils.getVersion(fs, rootdir);
  assertEquals(version.length(), newVersion.length());
  assertEquals(version, newVersion);
  // File will have been converted. Exercise the pb format
  assertEquals(version, FSUtils.getVersion(fs, rootdir));
  FSUtils.checkVersion(fs, rootdir, true);
}
 
Example 25
Project: ditb   File: TestHRegion.java   (6 votes)
private Configuration initSplit() {
  // Always compact if there is more than one store file.
  CONF.setInt("hbase.hstore.compactionThreshold", 2);

  // Make lease timeout longer, lease checks less frequent
  CONF.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000);

  CONF.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);

  // Increase the amount of time between client retries
  CONF.setLong("hbase.client.pause", 15 * 1000);

  // This size should make it so we always split using the addContent
  // below. After adding all data, the first region is 1.3M
  CONF.setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128);
  return CONF;
}
 
Example 26
Project: ditb   File: TestVisibilityLabelsWithCustomVisLabService.java   (6 votes)
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
  VisibilityTestUtil.enableVisiblityLabels(conf);
  conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class,
      ScanLabelGenerator.class);
  conf.setClass(VisibilityLabelServiceManager.VISIBILITY_LABEL_SERVICE_CLASS,
      ExpAsStringVisibilityLabelServiceImpl.class, VisibilityLabelService.class);
  conf.set("hbase.superuser", "admin");
  TEST_UTIL.startMiniCluster(2);
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });

  // Wait for the labels table to become available
  TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
  addLabels();
}
 
Example 27
Project: ditb   File: TestLeaseRenewal.java   (5 votes)
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
    leaseTimeout);
  TEST_UTIL.startMiniCluster();
}
 
Example 28
Project: ditb   File: StripeStoreFileManager.java   (5 votes)
/**
 * Finds the stripe index for the stripe containing a row provided externally for get/scan.
 */
private final int findStripeForRow(byte[] row, boolean isStart) {
  if (isStart && row == HConstants.EMPTY_START_ROW) return 0;
  if (!isStart && row == HConstants.EMPTY_END_ROW) return state.stripeFiles.size() - 1;
  // If there's an exact match below, a stripe ends at "row". Stripe right boundary is
  // exclusive, so that means the row is in the next stripe; thus, we need to add one to index.
  // If there's no match, the return value of binarySearch is (-(insertion point) - 1), where
  // insertion point is the index of the next greater element, or list size if none. The
  // insertion point happens to be exactly what we need, so we need to add one to the result.
  return Math.abs(Arrays.binarySearch(state.stripeEndRows, row, Bytes.BYTES_COMPARATOR) + 1);
}
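To make the insertion-point arithmetic above concrete, here is a small standalone illustration of the same Arrays.binarySearch computation. The stripe boundary values are assumptions for the sake of the example, not values taken from StripeStoreFileManager.

import java.util.Arrays;
import org.apache.hadoop.hbase.util.Bytes;

public class StripeIndexArithmeticDemo {
  public static void main(String[] args) {
    // Three boundaries split the key space into four stripes:
    // 0: (-inf, "b")   1: ["b", "d")   2: ["d", "f")   3: ["f", +inf)
    byte[][] stripeEndRows = { Bytes.toBytes("b"), Bytes.toBytes("d"), Bytes.toBytes("f") };

    // "c" is not a boundary: binarySearch returns -(insertionPoint) - 1 = -2,
    // and Math.abs(-2 + 1) = 1, i.e. stripe ["b", "d").
    System.out.println(Math.abs(
        Arrays.binarySearch(stripeEndRows, Bytes.toBytes("c"), Bytes.BYTES_COMPARATOR) + 1));

    // "d" matches a boundary exactly: right boundaries are exclusive, so the row
    // belongs to the next stripe: Math.abs(1 + 1) = 2, i.e. stripe ["d", "f").
    System.out.println(Math.abs(
        Arrays.binarySearch(stripeEndRows, Bytes.toBytes("d"), Bytes.BYTES_COMPARATOR) + 1));
  }
}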
 
Example 29
Project: ditb   File: TestReplicationAdmin.java   (5 votes)
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  admin = new ReplicationAdmin(conf);
}
 
Example 30
Project: ditb   File: HMaster.java   (5 votes)
public int getRegionServerInfoPort(final ServerName sn) {
  RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
  if (info == null || info.getInfoPort() == 0) {
    return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
      HConstants.DEFAULT_REGIONSERVER_INFOPORT);
  }
  return info.getInfoPort();
}
 
Example 31
Project: ditb   File: RpcRetryingCallerFactory.java   (5 votes)
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
}
 
Example 32
Project: ditb   File: ScanQueryMatcher.java   (5 votes)
/**
 * @param nextIndexed the key of the next entry in the block index (if any)
 * @param kv The Cell we're using to calculate the seek key
 * @return result of the compare between the indexed key and the key portion of the passed cell
 */
public int compareKeyForNextRow(Cell nextIndexed, Cell kv) {
  return rowComparator.compareKey(nextIndexed,
    kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
    null, 0, 0,
    null, 0, 0,
    HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
}
 
Example 33
Project: ditb   File: NoOpScanPolicyObserver.java   (5 votes)
/**
 * Reimplement the default behavior
 */
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  ScanInfo oldSI = store.getScanInfo();
  ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getFamily(), oldSI.getTtl(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(oldSI.getMaxVersions());
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
 
Example 34
Project: ditb   File: TestLogRollAbort.java   (5 votes)
/**
 * Tests the case where a RegionServer enters a GC pause,
 * comes back online after the master declared it dead and started to split.
 * Want log rolling after a master split to fail. See HBASE-2312.
 */
@Test (timeout=300000)
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = "testLogRollAfterSplitStart";
  Path thisTestsDir = new Path(HBASEDIR, DefaultWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, null, logName);

  try {
    // put some entries in an WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    final int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc), kvs, true);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((FSHLog) log).replaceWriter(((FSHLog)log).getOldPath(), null, null, null);

    /* Code taken from MasterFileSystem.getLogDirs(), which is called from MasterFileSystem.splitLog();
     * it handles RS shutdowns (as observed by the splitting process).
     */
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(DefaultWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
 
Example 35
Project: ditb   File: TestLoadIncrementalHFiles.java   (5 votes)
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,"");
  util.getConfiguration().setInt(
    LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());
  util.startMiniCluster();

  setupNamespace();
}
 
Example 36
Project: ditb   File: TestLRUDictionary.java   (5 votes)
/**
 * Assert can't add empty array.
 */
@Test
public void testPassingEmptyArrayToFindEntry() {
  assertEquals(Dictionary.NOT_IN_DICTIONARY,
    testee.findEntry(HConstants.EMPTY_BYTE_ARRAY, 0, 0));
  assertEquals(Dictionary.NOT_IN_DICTIONARY,
    testee.addEntry(HConstants.EMPTY_BYTE_ARRAY, 0, 0));
}
 
Example 37
Project: ditb   File: FSTableDescriptors.java   (5 votes)
/**
 * Get the current table descriptor for the given table, or null if none exists.
 *
 * Uses a local cache of the descriptor but still checks the filesystem on each call
 * to see if a newer file has been created since the cached one was read.
 */
@Override
public HTableDescriptor get(final TableName tablename)
throws IOException {
  invocations++;
  if (TableName.META_TABLE_NAME.equals(tablename)) {
    cachehits++;
    return metaTableDescriptor;
  }
  // hbase:meta is already handled. If someone tries to get the descriptor for
  // .logs, .oldlogs or .corrupt, throw an exception.
  if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
     throw new IOException("No descriptor found for non table = " + tablename);
  }

  if (usecache) {
    // Look in cache of descriptors.
    HTableDescriptor cachedtdm = this.cache.get(tablename);
    if (cachedtdm != null) {
      cachehits++;
      return cachedtdm;
    }
  }
  HTableDescriptor tdmt = null;
  try {
    tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
  } catch (NullPointerException e) {
    LOG.debug("Exception during readTableDecriptor. Current table name = "
        + tablename, e);
  } catch (IOException ioe) {
    LOG.debug("Exception during readTableDecriptor. Current table name = "
        + tablename, ioe);
  }
  // last HTD written wins
  if (usecache && tdmt != null) {
    this.cache.put(tablename, tdmt);
  }

  return tdmt;
}
 
Example 38
Project: ditb   File: ZKConfig.java   (5 votes)
/**
 * Separate the given key into the three configurations it should contain:
 * hbase.zookeeper.quorum, hbase.zookeeper.client.port
 * and zookeeper.znode.parent
 * @param key
 * @return the three configurations in the described order
 * @throws IOException
 */
public static ZKClusterKey transformClusterKey(String key) throws IOException {
  String[] parts = key.split(":");

  if (parts.length == 3) {
    return new ZKClusterKey(parts [0], Integer.parseInt(parts [1]), parts [2]);
  }

  if (parts.length > 3) {
    // The quorum could contain client port in server:clientport format, try to transform more.
    String zNodeParent = parts [parts.length - 1];
    String clientPort = parts [parts.length - 2];

    // The first part length is the total length minus the lengths of other parts and minus 2 ":"
    int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2;
    String quorumStringInput = key.substring(0, endQuorumIndex);
    String[] serverHosts = quorumStringInput.split(",");

    // The common case is that every server has its own client port specified - this means
    // that (total parts - the ZNodeParent part - the ClientPort part) is equal to
    // (the number of "," + 1) - "+ 1" because the last server has no ",".
    if ((parts.length - 2) == (serverHosts.length + 1)) {
      return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent);
    }

    // For the uncommon case that some servers has no port specified, we need to build the
    // server:clientport list using default client port for servers without specified port.
    return new ZKClusterKey(
        buildZKQuorumServerString(serverHosts, clientPort),
        Integer.parseInt(clientPort),
        zNodeParent);
  }

  throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" +
      HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
      + HConstants.ZOOKEEPER_ZNODE_PARENT);
}
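As a usage sketch for the parser above (host names, port and znode parent are made-up values, and the org.apache.hadoop.hbase.zookeeper.ZKConfig location is assumed), both the simple three-part form and the per-server-port form handled by the second branch parse successfully; anything else raises the IOException thrown at the end of the method.

import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ClusterKeyDemo {
  public static void main(String[] args) throws Exception {
    // Simple form: quorum:clientPort:znodeParent (three colon-separated parts).
    ZKConfig.transformClusterKey("zk1.example.org,zk2.example.org:2181:/hbase");
    // Quorum with per-server client ports (more than three parts).
    ZKConfig.transformClusterKey("zk1.example.org:2181,zk2.example.org:2181:2181:/hbase");
  }
}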
 
Example 39
Project: ditb   File: RequestConverter.java   (5 votes)
/**
 * Create a protocol buffer MutateRequest for an append
 *
 * @param regionName
 * @param append
 * @return a mutate request
 * @throws IOException
 */
public static MutateRequest buildMutateRequest(final byte[] regionName,
    final Append append, long nonceGroup, long nonce) throws IOException {
  MutateRequest.Builder builder = MutateRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
    RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) {
    builder.setNonceGroup(nonceGroup);
  }
  builder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, append,
    MutationProto.newBuilder(), nonce));
  return builder.build();
}
 
Example 40
Project: ditb   File: TestWithDisabledAuthorization.java   (5 votes)
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  // Set up superuser
  SecureTestUtil.configureSuperuser(conf);

  // Install the VisibilityController as a system processor
  VisibilityTestUtil.enableVisiblityLabels(conf);

  // Now, DISABLE active authorization
  conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, false);

  TEST_UTIL.startMiniCluster();

  // Wait for the labels table to become available
  TEST_UTIL.waitUntilAllRegionsAssigned(LABELS_TABLE_NAME);

  // create a set of test users
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
  USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);

  // Define test labels
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        VisibilityClient.addLabels(conn,
          new String[] { SECRET, CONFIDENTIAL, PRIVATE });
        VisibilityClient.setAuths(conn,
          new String[] { SECRET, CONFIDENTIAL },
          USER_RW.getShortName());
      } catch (Throwable t) {
        fail("Should not have failed");          
      }
      return null;
    }
  });
}
 
Example 41
Project: ditb   File: CacheConfig.java   (5 votes)
static long getLruCacheSize(final Configuration conf, final MemoryUsage mu) {
  float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
    HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
  if (cachePercentage <= 0.0001f) {
    blockCacheDisabled = true;
    return -1;
  }
  if (cachePercentage > 1.0) {
    throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY +
      " must be between 0.0 and 1.0, and not > 1.0");
  }

  // Calculate the amount of heap to give the block cache.
  return (long) (mu.getMax() * cachePercentage);
}
 
Example 42
Project: ditb   File: MetaUtils.java   (5 votes)
/**
 * @param conf Configuration
 * @throws IOException e
 */
public MetaUtils(Configuration conf) throws IOException {
  this.conf = conf;
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  this.metaRegion = null;
  this.descriptors = new FSTableDescriptors(conf);
  initialize();
}
 
Example 43
Project: ditb   File: TestHFileOutputFormat.java   (5 votes)
/**
 * Run small MR job.
 */
@Test
public void testWritingPEData() throws Exception {
  Configuration conf = util.getConfiguration();
  Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
  FileSystem fs = testDir.getFileSystem(conf);

  // Set down this value or we OOME in eclipse.
  conf.setInt("mapreduce.task.io.sort.mb", 20);
  // Write a few files.
  conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64 * 1024);

  Job job = new Job(conf, "testWritingPEData");
  setupRandomGeneratorMapper(job);
  // This partitioner doesn't work well for number keys but using it anyways
  // just to demonstrate how to configure it.
  byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];
  byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];

  Arrays.fill(startKey, (byte)0);
  Arrays.fill(endKey, (byte)0xff);

  job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
  // Set start and end rows for partitioner.
  SimpleTotalOrderPartitioner.setStartKey(job.getConfiguration(), startKey);
  SimpleTotalOrderPartitioner.setEndKey(job.getConfiguration(), endKey);
  job.setReducerClass(KeyValueSortReducer.class);
  job.setOutputFormatClass(HFileOutputFormat.class);
  job.setNumReduceTasks(4);
  job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());

  FileOutputFormat.setOutputPath(job, testDir);
  assertTrue(job.waitForCompletion(false));
  FileStatus [] files = fs.listStatus(testDir);
  assertTrue(files.length > 0);
}
 
Example 44
Project: ditb   File: HMaster.java   (5 votes)
private int putUpJettyServer() throws IOException {
  if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
    return -1;
  }
  int infoPort = conf.getInt("hbase.master.info.port.orig",
    HConstants.DEFAULT_MASTER_INFOPORT);
  // -1 is for disabling info server, so no redirecting
  if (infoPort < 0 || infoServer == null) {
    return -1;
  }
  String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
  if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
    String msg =
        "Failed to start redirecting jetty server. Address " + addr
            + " does not belong to this host. Correct configuration parameter: "
            + "hbase.master.info.bindAddress";
    LOG.error(msg);
    throw new IOException(msg);
  }

  RedirectServlet.regionServerInfoPort = infoServer.getPort();
  if(RedirectServlet.regionServerInfoPort == infoPort) {
    return infoPort;
  }
  masterJettyServer = new org.mortbay.jetty.Server();
  Connector connector = new SelectChannelConnector();
  connector.setHost(addr);
  connector.setPort(infoPort);
  masterJettyServer.addConnector(connector);
  masterJettyServer.setStopAtShutdown(true);
  Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
  context.addServlet(RedirectServlet.class, "/*");
  try {
    masterJettyServer.start();
  } catch (Exception e) {
    throw new IOException("Failed to start redirecting jetty server", e);
  }
  return connector.getLocalPort();
}
 
Example 45
Project: ditb   File: HStore.java   (5 votes)
/**
 * @param family
 * @return TTL in milliseconds of the specified family
 */
private static long determineTTLFromFamily(final HColumnDescriptor family) {
  // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds.
  long ttl = family.getTimeToLive();
  if (ttl == HConstants.FOREVER) {
    // Default is unlimited ttl.
    ttl = Long.MAX_VALUE;
  } else if (ttl == -1) {
    ttl = Long.MAX_VALUE;
  } else {
    // Second -> ms adjust for user data
    ttl *= 1000;
  }
  return ttl;
}
 
Example 46
Project: ditb   File: TableMapReduceUtil.java   (5 votes)
/**
 * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on
 * direct memory will likely cause the map tasks to OOM when opening the region. This
 * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user
 * wants to override this behavior in their job.
 */
public static void resetCacheConfig(Configuration conf) {
  conf.setFloat(
    HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
  conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
  conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
}
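A minimal sketch of where a job driver might call the helper above before wiring up a snapshot-based MapReduce job; the driver class name is hypothetical and the rest of the job setup is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;

public class SnapshotJobDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Fall back to the plain on-heap LRU block cache so map tasks that open
    // snapshot regions do not try to allocate a direct-memory bucket cache.
    TableMapReduceUtil.resetCacheConfig(conf);
    // ... continue with normal job setup using this conf.
  }
}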
 
Example 47
Project: ditb   File: Compressor.java   (5 votes)
private static void transformFile(Path input, Path output)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    boolean compress = ((ReaderBase)in).hasCompression();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e = null;
    while ((e = in.next()) != null) out.append(e);
  } finally {
    in.close();
    if (out != null) {
      out.close();
      out = null;
    }
  }
}
 
Example 48
Project: ditb   File: TestRegionPlacement.java   (5 votes)
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Enable the favored nodes based load balancer
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      FavoredNodeLoadBalancer.class, LoadBalancer.class);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  TEST_UTIL.startMiniCluster(SLAVES);
  CONNECTION = TEST_UTIL.getConnection();
  admin = CONNECTION.getAdmin();
  rp = new RegionPlacementMaintainer(conf);
}
 
Example 49
Project: ditb   File: ServerStatisticTracker.java   (5 votes)
public static ServerStatisticTracker create(Configuration conf) {
  if (!conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE)) {
    return null;
  }
  return new ServerStatisticTracker();
}
 
Example 50
Project: ditb   File: ThriftServerRunner.java   (5 votes)
@Override
public long atomicIncrement(
    ByteBuffer tableName, ByteBuffer row, ByteBuffer column, long amount)
        throws IOError, IllegalArgument, TException {
  byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
  if(famAndQf.length == 1) {
    return atomicIncrement(tableName, row, famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, amount);
  }
  return atomicIncrement(tableName, row, famAndQf[0], famAndQf[1], amount);
}
 
Example 51
Project: ditb   File: FSHLog.java   (5 votes)
private static void split(final Configuration conf, final Path p)
throws IOException {
  FileSystem fs = FileSystem.get(conf);
  if (!fs.exists(p)) {
    throw new FileNotFoundException(p.toString());
  }
  if (!fs.getFileStatus(p).isDirectory()) {
    throw new IOException(p + " is not a directory");
  }

  final Path baseDir = FSUtils.getRootDir(conf);
  final Path archiveDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
  WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf));
}
 
Example 52
Project: ditb   File: AbstractHFileWriter.java   (5 votes)
/** A helper method to create HFile output streams in constructors */
protected static FSDataOutputStream createOutputStream(Configuration conf,
    FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException {
  FsPermission perms = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  return FSUtils.create(conf, fs, path, perms, favoredNodes);
}
 
Example 53
Project: ditb   File: RegionMergeTransactionImpl.java   (5 votes)
public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
  p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
      .toBytes(sn.getHostAndPort()));
  p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn
      .getStartcode()));
  p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
  return p;
}
 
Example 54
Project: ditb   File: FSUtils.java   (5 votes)
/**
 * Verifies current version of file system
 *
 * @param fs filesystem object
 * @param rootdir root hbase directory
 * @return null if no version file exists, version string otherwise.
 * @throws IOException e
 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 */
public static String getVersion(FileSystem fs, Path rootdir)
throws IOException, DeserializationException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FileStatus[] status = null;
  try {
    // hadoop 2.0 throws FNFE if directory does not exist.
    // hadoop 1.0 returns null if directory does not exist.
    status = fs.listStatus(versionFile);
  } catch (FileNotFoundException fnfe) {
    return null;
  }
  if (status == null || status.length == 0) return null;
  String version = null;
  byte [] content = new byte [(int)status[0].getLen()];
  FSDataInputStream s = fs.open(versionFile);
  try {
    IOUtils.readFully(s, content, 0, content.length);
    if (ProtobufUtil.isPBMagicPrefix(content)) {
      version = parseVersionFrom(content);
    } else {
      // Presume it is pre-pb format.
      InputStream is = new ByteArrayInputStream(content);
      DataInputStream dis = new DataInputStream(is);
      try {
        version = dis.readUTF();
      } finally {
        dis.close();
      }
    }
  } catch (EOFException eof) {
    LOG.warn("Version file was empty, odd, will try to set it.");
  } finally {
    s.close();
  }
  return version;
}
 
Example 55
Project: ditb   File: ReplicationSink.java   (5 votes)
/**
 * decorate the Configuration object to make replication more receptive to delays:
 * lessen the timeout and numTries.
 */
private void decorateConf() {
  this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      this.conf.getInt("replication.sink.client.retries.number", 4));
  this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
      this.conf.getInt("replication.sink.client.ops.timeout", 10000));
  String replicationCodec = this.conf.get(HConstants.REPLICATION_CODEC_CONF_KEY);
  if (StringUtils.isNotEmpty(replicationCodec)) {
    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec);
  }
 }
 
Example 56
Project: ditb   File: TestAggregateProtocol.java   (5 votes)
@Test (timeout=300000)
public void testMinWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(HConstants.EMPTY_START_ROW);
  scan.setStopRow(HConstants.EMPTY_END_ROW);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long min = aClient.min(TEST_TABLE, ci,
      scan);
  assertEquals(0, min);
}
 
Example 57
Project: ditb   File: TestFSUtils.java   (5 votes)
@Test
public void testDeleteAndExists() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  FileSystem fs = FileSystem.get(conf);
  FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  // then that the correct file is created
  String file = UUID.randomUUID().toString();
  Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
  Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));
    // delete the file with recursion as false. Only the file will be deleted.
    FSUtils.delete(fs, p, false);
    // Create another file
    FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null);
    out1.close();
    // delete the file with recursion as true; since p1 is a plain file, only the file is deleted
    FSUtils.delete(fs, p1, true);
    assertFalse("The created file should have been deleted", FSUtils.isExists(fs, p1));
    // and then cleanup
  } finally {
    FSUtils.delete(fs, p, true);
    FSUtils.delete(fs, p1, true);
  }
}
 
Example 58
Project: ditb   File: TestMajorCompaction.java   (5 votes)
/** constructor */
public TestMajorCompaction() {
  super();

  // Set cache flush size to 1MB
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
  compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

  secondRowBytes = START_KEY_BYTES.clone();
  // Increment the least significant character so we get to next row.
  secondRowBytes[START_KEY_BYTES.length - 1]++;
  thirdRowBytes = START_KEY_BYTES.clone();
  thirdRowBytes[START_KEY_BYTES.length - 1] += 2;
}
 
Example 59
Project: ditb   File: VisibilityClient.java   (5 votes)
/**
 * @param connection the Connection instance to use.
 * @param user
 * @return the labels the given user is globally authorized for.
 * @throws Throwable
 */
public static GetAuthsResponse getAuths(Connection connection, final String user)
    throws Throwable {
    try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
      Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable =
          new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<GetAuthsResponse> rpcCallback =
            new BlockingRpcCallback<GetAuthsResponse>();

        public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
          GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
          getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
          service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback);
          GetAuthsResponse response = rpcCallback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return response;
        }
      };
      Map<byte[], GetAuthsResponse> result =
        table.coprocessorService(VisibilityLabelsService.class,
          HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
      return result.values().iterator().next(); // There will be exactly one region for labels
      // table and so one entry in result Map.
    }
}
 
Example 60
Project: ditb   File: TestReplicationSourceManager.java   (5 votes)
@Test
public void testFailoverDeadServerCversionChange() throws Exception {
  LOG.debug("testFailoverDeadServerCversionChange");

  conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
  final Server s0 = new DummyServer("cversion-change0.example.org");
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(s0.getZooKeeper(), conf, s0);
  repQueues.init(s0.getServerName().toString());
  // populate some znodes in the peer znode
  files.add("log1");
  files.add("log2");
  for (String file : files) {
    repQueues.addLog("1", file);
  }
  // simulate queue transfer
  Server s1 = new DummyServer("cversion-change1.example.org");
  ReplicationQueues rq1 =
      ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1);
  rq1.init(s1.getServerName().toString());

  ReplicationQueuesClient client =
      ReplicationFactory.getReplicationQueuesClient(s1.getZooKeeper(), s1.getConfiguration(), s1);

  int v0 = client.getQueuesZNodeCversion();
  rq1.claimQueues(s0.getServerName().getServerName());
  int v1 = client.getQueuesZNodeCversion();
  // cversion should increase by 1 since a child node is deleted
  assertEquals(v0 + 1, v1);

  s0.abort("", null);
}