org.apache.hadoop.hbase.client.ConnectionUtils Java Examples

The following examples show how to use org.apache.hadoop.hbase.client.ConnectionUtils. Each example notes its source file, the project it comes from, and that project's license.
Example #1
Source File: TestThriftConnection.java    From hbase with Apache License 2.0
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
      ThriftConnection.class.getName());
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
  }
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
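
For context, a Connection created this way is used through the ordinary HBase client API; the Thrift transport stays hidden behind it. Below is a minimal usage sketch, not part of the original test; the table and row names are made up.

static void readOneRow(Connection connection) throws IOException {
  // Works the same whether the Connection is Thrift-backed or not.
  try (Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
    Result result = table.get(new Get(Bytes.toBytes("row1")));
    byte[] value = result.value(); // null if the row does not exist
    System.out.println("value: " + (value == null ? "<none>" : Bytes.toStringBinary(value)));
  }
}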
 
Example #2
Source File: TestTableInputFormatBase.java    From hbase with Apache License 2.0
@Test
public void testNonSuccessiveSplitsAreNotMerged() throws IOException {
  JobContext context = mock(JobContext.class);
  Configuration conf = HBaseConfiguration.create();
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
      ConnectionForMergeTesting.class.getName());
  conf.set(TableInputFormat.INPUT_TABLE, "testTable");
  conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true);
  when(context.getConfiguration()).thenReturn(conf);

  TableInputFormat tifExclude = new TableInputFormatForMergeTesting();
  tifExclude.setConf(conf);
  // split["b", "c"] is excluded, split["o", "p"] and split["p", "q"] are merged,
  // but split["a", "b"] and split["c", "d"] are not merged.
  assertEquals(ConnectionForMergeTesting.START_KEYS.length - 1 - 1,
      tifExclude.getSplits(context).size());
}
 
Example #3
Source File: ThriftConnection.java    From hbase with Apache License 2.0
@Override
public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
  // Don't sleep before the first retry
  if (executionCount > 1 && pause > 0) {
    try {
      long sleepTime = ConnectionUtils.getPauseTime(pause, executionCount - 1);
      Thread.sleep(sleepTime);
    } catch (InterruptedException ie) {
      // reset the interrupt flag
      Thread.currentThread().interrupt();
    }
  }
  return super.retryRequest(exception, executionCount, context);
}
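
For orientation, ConnectionUtils.getPauseTime scales the base pause by HBase's retry backoff table and adds a small random jitter, so later retries wait progressively longer. A quick sketch of how the values grow, assuming a base pause of 100 ms; the exact numbers vary slightly because of the jitter.

// Prints roughly 100, 200, 300, 500 and 1000 ms for the first five retry indexes.
for (int tries = 0; tries < 5; tries++) {
  System.out.println("retry " + tries + ": " + ConnectionUtils.getPauseTime(100, tries) + " ms");
}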
 
Example #4
Source File: DualAsyncFSWAL.java    From hbase with Apache License 2.0
@Override
protected AsyncWriter createWriterInstance(Path path) throws IOException {
  AsyncWriter localWriter = super.createWriterInstance(path);
  // Retry forever if we cannot create the remote writer, to prevent aborting the RS due to a
  // log rolling error, unless skipRemoteWAL is set to true.
  // TODO: since for now we only have one thread doing log rolling, this may block the rolling of
  // other WALs
  Path remoteWAL = new Path(remoteWALDir, path.getName());
  for (int retry = 0;; retry++) {
    if (skipRemoteWAL) {
      return localWriter;
    }
    AsyncWriter remoteWriter;
    try {
      remoteWriter = createAsyncWriter(remoteFs, remoteWAL);
    } catch (IOException e) {
      LOG.warn("create remote writer {} failed, retry = {}", remoteWAL, retry, e);
      try {
        Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
      } catch (InterruptedException ie) {
        // restore the interrupt state
        Thread.currentThread().interrupt();
        Closeables.close(localWriter, true);
        throw (IOException) new InterruptedIOException().initCause(ie);
      }
      continue;
    }
    return createCombinedAsyncWriter(localWriter, remoteWriter);
  }
}
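
Stripped of the WAL specifics, the loop above is a retry-forever-with-backoff pattern. A minimal, generic sketch of that idea follows; the Callable factory is a stand-in for createAsyncWriter and not HBase API.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

import org.apache.hadoop.hbase.client.ConnectionUtils;

final class RetryForever {
  // Keep calling the factory until it succeeds, pausing longer after each failure.
  static <T> T create(Callable<T> factory) throws IOException {
    for (int retry = 0;; retry++) {
      try {
        return factory.call();
      } catch (Exception e) {
        try {
          Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
        } catch (InterruptedException ie) {
          // Give up and surface the interruption to the caller.
          Thread.currentThread().interrupt();
          throw (IOException) new InterruptedIOException().initCause(ie);
        }
      }
    }
  }
}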
 
Example #5
Source File: TestMultiTableInputFormatBase.java    From hbase with Apache License 2.0
/**
 * Test that getSplits only puts up one Connection.
 * In the past it has put up many Connections. Each Connection setup comes with a fresh new cache,
 * so we had to do a fresh hit on hbase:meta for every one. getSplits should only create one
 * Connection, even for a MultiTableInputFormat.
 * @throws IOException if computing the splits fails
 */
@Test
public void testMRSplitsConnectionCount() throws IOException {
  // Make instance of MTIFB.
  MultiTableInputFormatBase mtif = new MultiTableInputFormatBase() {
    @Override
    public RecordReader<ImmutableBytesWritable, Result> createRecordReader(InputSplit split,
        TaskAttemptContext context)
    throws IOException, InterruptedException {
      return super.createRecordReader(split, context);
    }
  };
  // Pass it a mocked JobContext. Make the JC return our Configuration.
  // Load the Configuration so it returns our special Connection so we can interpolate
  // canned responses.
  JobContext mockedJobContext = Mockito.mock(JobContext.class);
  Configuration c = HBaseConfiguration.create();
  c.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName());
  Mockito.when(mockedJobContext.getConfiguration()).thenReturn(c);
  // Invent a bunch of scans. Have each Scan go against a different table so a good spread.
  List<Scan> scans = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    Scan scan = new Scan();
    String tableName = this.name.getMethodName() + i;
    scan.setAttribute(SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName));
    scans.add(scan);
  }
  mtif.setScans(scans);
  // Get splits. Assert that there is at least one.
  List<InputSplit> splits = mtif.getSplits(mockedJobContext);
  Assert.assertTrue(splits.size() > 0);
  // Assert only one Connection was made (see the static counter incremented in the mocked
  // Connection MRSplitsConnection constructor).
  Assert.assertEquals(1, MRSplitsConnection.creations.get());
}
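
The final assertion relies on MRSplitsConnection bumping a static counter each time it is constructed. A minimal sketch of that counting idiom is shown below; this stub is illustrative and is not the actual test class.

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative stub: counts its own instantiations, mirroring the static counter the
// mocked MRSplitsConnection increments in its constructor.
class CountingConnectionStub {
  static final AtomicInteger creations = new AtomicInteger();

  CountingConnectionStub() {
    creations.incrementAndGet();
  }
}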
 
Example #6
Source File: MemstoreAwareObserver.java    From spliceengine with GNU Affero General Public License v3.0
public static void main(String...args) throws Exception {
    long timeWaited = 0L;
    // getPauseTime returns the backoff in the same unit as the base pause (milliseconds here).
    for (int i = 1; i <= 40; i++) {
        timeWaited += ConnectionUtils.getPauseTime(90, i);
    }
    System.out.printf("timeWaited: %d ms%n", timeWaited);
}
 
Example #7
Source File: HRegionServer.java    From hbase with Apache License 2.0
@Override
public boolean reportRegionStateTransition(final RegionStateTransitionContext context) {
  if (TEST_SKIP_REPORTING_TRANSITION) {
    return skipReportingTransition(context);
  }
  final ReportRegionStateTransitionRequest request =
      createReportRegionStateTransitionRequest(context);

  // Time to pause if master says 'please hold'. Make configurable if needed.
  final long initPauseTime = 1000;
  int tries = 0;
  long pauseTime;
  // Keep looping till we get an error. We want to send reports even though the server is going
  // down. Only stop if asyncClusterConnection is null; it is set to null almost as the last
  // thing the HRegionServer does as it goes down.
  while (this.asyncClusterConnection != null && !this.asyncClusterConnection.isClosed()) {
    RegionServerStatusService.BlockingInterface rss = rssStub;
    try {
      if (rss == null) {
        createRegionServerStatusStub();
        continue;
      }
      ReportRegionStateTransitionResponse response =
        rss.reportRegionStateTransition(null, request);
      if (response.hasErrorMessage()) {
        LOG.info("TRANSITION FAILED " + request + ": " + response.getErrorMessage());
        break;
      }
      // Log if we had to retry; otherwise don't log unless TRACE. We want to
      // know if we were successful after an attempt showed up in the logs as failed.
      if (tries > 0 || LOG.isTraceEnabled()) {
        LOG.info("TRANSITION REPORTED " + request);
      }
      // NOTE: Return mid-method!!!
      return true;
    } catch (ServiceException se) {
      IOException ioe = ProtobufUtil.getRemoteException(se);
      boolean pause =
          ioe instanceof ServerNotRunningYetException || ioe instanceof PleaseHoldException
              || ioe instanceof CallQueueTooBigException;
      if (pause) {
        // Do backoff else we flood the Master with requests.
        pauseTime = ConnectionUtils.getPauseTime(initPauseTime, tries);
      } else {
        pauseTime = initPauseTime; // Reset.
      }
      LOG.info("Failed report transition " +
        TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" +
          (pause?
              " after " + pauseTime + "ms delay (Master is coming online...).":
              " immediately."),
          ioe);
      if (pause) Threads.sleep(pauseTime);
      tries++;
      if (rssStub == rss) {
        rssStub = null;
      }
    }
  }
  return false;
}
 
Example #8
Source File: FanOutOneBlockAsyncDFSOutputHelper.java    From hbase with Apache License 2.0
static void sleepIgnoreInterrupt(int retry) {
  try {
    Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
  } catch (InterruptedException e) {
    // As the method name says, the interrupt is deliberately swallowed; callers simply retry.
  }
}
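
A hypothetical caller of this helper might retry a transient failure a bounded number of times, backing off between attempts; openSomething below is a made-up operation, not HBase API.

static void openWithRetries(int maxRetries) throws IOException {
  IOException last = null;
  for (int retry = 0; retry < maxRetries; retry++) {
    try {
      openSomething(); // hypothetical operation that can fail transiently
      return;
    } catch (IOException e) {
      last = e;
      sleepIgnoreInterrupt(retry); // back off before the next attempt
    }
  }
  throw new IOException("failed after " + maxRetries + " attempts", last);
}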