org.apache.hadoop.hbase.DoNotRetryIOException Java Examples

The following examples show how to use org.apache.hadoop.hbase.DoNotRetryIOException. They are drawn from HBase itself and from projects built on it, such as Apache Phoenix, Splice Machine (spliceengine), and Tephra (phoenix-tephra); the source file and project are noted above each example.
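Before the project examples, here is a minimal, self-contained sketch of the pattern most of these snippets share: server-side code wraps a permanent failure in DoNotRetryIOException, and a caller unwraps any RemoteException and gives up immediately instead of retrying. The DoNotRetryExample class, its checkFeatureEnabled helper, and the featureEnabled flag are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.ipc.RemoteException;

public class DoNotRetryExample {

  // Hypothetical server-side validation: reject the request permanently.
  static void checkFeatureEnabled(boolean featureEnabled) throws IOException {
    if (!featureEnabled) {
      // Wrapping the root cause in DoNotRetryIOException signals to HBase
      // clients that retrying the same call cannot succeed.
      throw new DoNotRetryIOException(
          new UnsupportedOperationException("feature disabled"));
    }
  }

  // Hypothetical client-side handling: stop immediately on non-retriable errors.
  static void callWithRetries(int maxAttempts) throws IOException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        checkFeatureEnabled(false);
        return;
      } catch (IOException e) {
        // Server-side exceptions may arrive wrapped in a RemoteException.
        IOException unwrapped = (e instanceof RemoteException)
            ? ((RemoteException) e).unwrapRemoteException() : e;
        if (unwrapped instanceof DoNotRetryIOException) {
          throw unwrapped; // permanent failure, do not retry
        }
        // otherwise fall through and retry
      }
    }
    throw new IOException("gave up after " + maxAttempts + " attempts");
  }
}
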
Example #1
Source File: TestThriftHBaseServiceHandlerWithReadOnly.java    From hbase with Apache License 2.0
@Test
public void testIncrementWithReadOnly() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = Bytes.toBytes("testIncrement");
  ByteBuffer table = wrap(tableAname);

  List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
  TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);

  boolean exceptionCaught = false;
  try {
    handler.increment(table, increment);
  } catch (TIOError e) {
    exceptionCaught = true;
    assertTrue(e.getCause() instanceof DoNotRetryIOException);
    assertEquals("Thrift Server is in Read-only mode.", e.getMessage());
  } finally {
    assertTrue(exceptionCaught);
  }
}
 
Example #2
Source File: MasterQuotaManager.java    From hbase with Apache License 2.0
private void checkQuotaSupport() throws IOException {
  if (!QuotaUtil.isQuotaEnabled(masterServices.getConfiguration())) {
    throw new DoNotRetryIOException(
      new UnsupportedOperationException("quota support disabled"));
  }
  if (!initialized) {
    long maxWaitTime = masterServices.getConfiguration().getLong(
      "hbase.master.wait.for.quota.manager.init", 30000); // default is 30 seconds.
    long startTime = EnvironmentEdgeManager.currentTime();
    do {
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        LOG.warn("Interrupted while waiting for Quota Manager to be initialized.");
        break;
      }
    } while (!initialized && (EnvironmentEdgeManager.currentTime() - startTime) < maxWaitTime);
    if (!initialized) {
      throw new IOException("Quota manager is uninitialized, please retry later.");
    }
  }
}
 
Example #3
Source File: BaseScannerRegionObserver.java    From phoenix with Apache License 2.0
private static void throwIfScanOutOfRegion(Scan scan, HRegion region) throws DoNotRetryIOException {
    boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
    byte[] lowerInclusiveScanKey = scan.getStartRow();
    byte[] upperExclusiveScanKey = scan.getStopRow();
    byte[] lowerInclusiveRegionKey = region.getStartKey();
    byte[] upperExclusiveRegionKey = region.getEndKey();
    boolean isStaleRegionBoundaries;
    if (isLocalIndex) {
        byte[] expectedUpperRegionKey = scan.getAttribute(EXPECTED_UPPER_REGION_KEY);
        isStaleRegionBoundaries = expectedUpperRegionKey != null &&
                Bytes.compareTo(upperExclusiveRegionKey, expectedUpperRegionKey) != 0;
    } else {
        isStaleRegionBoundaries = Bytes.compareTo(lowerInclusiveScanKey, lowerInclusiveRegionKey) < 0 ||
                ( Bytes.compareTo(upperExclusiveScanKey, upperExclusiveRegionKey) > 0 && upperExclusiveRegionKey.length != 0);
    }
    if (isStaleRegionBoundaries) {
        Exception cause = new StaleRegionBoundaryCacheException(region.getRegionInfo().getTable().getNameAsString());
        throw new DoNotRetryIOException(cause.getMessage(), cause);
    }
}
 
Example #4
Source File: ReplicationPeerManager.java    From hbase with Apache License 2.0
private void checkQueuesDeleted(String peerId)
    throws ReplicationException, DoNotRetryIOException {
  for (ServerName replicator : queueStorage.getListOfReplicators()) {
    List<String> queueIds = queueStorage.getAllQueues(replicator);
    for (String queueId : queueIds) {
      ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
      if (queueInfo.getPeerId().equals(peerId)) {
        throw new DoNotRetryIOException("undeleted queue for peerId: " + peerId +
          ", replicator: " + replicator + ", queueId: " + queueId);
      }
    }
  }
  if (queueStorage.getAllPeersFromHFileRefsQueue().contains(peerId)) {
    throw new DoNotRetryIOException("Undeleted queue for peer " + peerId + " in hfile-refs");
  }
}
 
Example #5
Source File: SpliceFailFastInterceptor.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void handleFailure(RetryingCallerInterceptorContext context, Throwable t) throws IOException {

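    // Unwrap reflection and RPC wrappers so the real cause drives the rethrow decision.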
    if (t instanceof UndeclaredThrowableException) {
        t = t.getCause();
    }
    if (t instanceof RemoteException) {
        RemoteException re = (RemoteException)t;
        t = re.unwrapRemoteException();
    }
    if (t instanceof DoNotRetryIOException) {
        throw (DoNotRetryIOException)t;
    }
    if (t instanceof IOException) {
        throw (IOException) t;
    }
    throw new IOException(t);
}
 
Example #6
Source File: ViewIT.java    From phoenix with Apache License 2.0
private void processTable(String tableName) throws DoNotRetryIOException {
    if (tableName.equals(FAILED_VIEWNAME)) {
        // throwing anything other than an instance of IOException results
        // in this coprocessor being unloaded;
        // DoNotRetryIOException tells HBase not to retry this mutation
        // multiple times
        throw new DoNotRetryIOException();
    } else if (tableName.startsWith(SLOW_VIEWNAME_PREFIX) || slowDownAddingChildLink) {
        // simulate a slow write to SYSTEM.CATALOG or SYSTEM.CHILD_LINK
        if (latch1 != null) {
            latch1.countDown();
        }
        if (latch2 != null) {
            try {
                // wait till the second task is complete before completing the first task
                boolean result = latch2.await(2, TimeUnit.MINUTES);
                if (!result) {
                    throw new RuntimeException("Second task took took long to complete");
                }
            } catch (InterruptedException e) {
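                // InterruptedException is intentionally swallowed; the wait simply ends early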
            }
        }
    }
}
 
Example #7
Source File: MemstoreAwareObserverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void preStoreScannerOpenPartitionMiss() throws Exception {
    MemstoreAwareObserver mao = new MemstoreAwareObserver();

    // env and scan do not share same start and end keys (partition miss)
    ObserverContext<RegionCoprocessorEnvironment> fakeCtx = mockRegionEnv(createByteArray(13), createByteArray(24));
    RegionScanner preScanner = mock(RegionScanner.class);

    try {
        mao.postScannerOpen(fakeCtx, mockScan(createByteArray(14), createByteArray(25)), preScanner);
        fail("Expected DoNotRetryIOException");
    } catch (IOException e) {
        // expected
        assertTrue(e instanceof DoNotRetryIOException);
    }
}
 
Example #8
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testSanityCheckBlockingStoreFiles() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
  error.expectMessage("is below recommended minimum of 1000 for column family");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
 
Example #9
Source File: ServerCall.java    From hbase with Apache License 2.0
static void setExceptionResponse(Throwable t, String errorMsg,
    ResponseHeader.Builder headerBuilder) {
  ExceptionResponse.Builder exceptionBuilder = ExceptionResponse.newBuilder();
  exceptionBuilder.setExceptionClassName(t.getClass().getName());
  exceptionBuilder.setStackTrace(errorMsg);
  exceptionBuilder.setDoNotRetry(t instanceof DoNotRetryIOException);
  if (t instanceof RegionMovedException) {
    // Special casing for this exception. This is the only exception type carrying a payload.
    // Do this instead of building a generic system for allowing exceptions to carry
    // any kind of payload.
    RegionMovedException rme = (RegionMovedException)t;
    exceptionBuilder.setHostname(rme.getHostname());
    exceptionBuilder.setPort(rme.getPort());
  }
  // Set the exception as the result of the method invocation.
  headerBuilder.setException(exceptionBuilder.build());
}
 
Example #10
Source File: RSRpcServices.java    From hbase with Apache License 2.0
private void checkShouldRejectReplicationRequest(List<WALEntry> entries) throws IOException {
  ReplicationSourceService replicationSource = regionServer.getReplicationSourceService();
  if (replicationSource == null || entries.isEmpty()) {
    return;
  }
  // We can ensure that all entries are for one peer, so we only need to check one entry's
  // table name. If the table uses sync replication on the peer side and the peer cluster
  // is (or is transitioning to) state ACTIVE or DOWNGRADE_ACTIVE, we should refuse to apply
  // those entries, per the design doc.
  TableName table = TableName.valueOf(entries.get(0).getKey().getTableName().toByteArray());
  if (replicationSource.getSyncReplicationPeerInfoProvider().checkState(table,
    RejectReplicationRequestStateChecker.get())) {
    throw new DoNotRetryIOException(
        "Reject to apply to sink cluster because sync replication state of sink cluster "
            + "is ACTIVE or DOWNGRADE_ACTIVE, table: " + table);
  }
}
 
Example #11
Source File: RetriesExhaustedWithDetailsException.java    From hbase with Apache License 2.0
public static Map<String, Integer> classifyExs(List<Throwable> ths) {
  Map<String, Integer> cls = new HashMap<>();
  for (Throwable t : ths) {
    if (t == null) continue;
    String name = "";
    if (t instanceof DoNotRetryIOException ||
        t instanceof RegionTooBusyException) {
      // If RegionTooBusyException, use the message since it includes the region name.
      // The RegionTooBusyException message was edited to remove variance: it still carries the
      // region name, server, and reason for the exception, but no longer the lock wait duration
      // or current memsize.
      name = t.getMessage();
    } else {
      name = t.getClass().getSimpleName();
    }
    Integer i = cls.get(name);
    if (i == null) {
      i = 0;
    }
    i += 1;
    cls.put(name, i);
  }
  return cls;
}
 
Example #12
Source File: TestScannerWithCorruptHFile.java    From hbase with Apache License 2.0
@Test(expected = DoNotRetryIOException.class)
public void testScanOnCorruptHFile() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  tableDescriptor.setCoprocessor(CorruptHFileCoprocessor.class.getName());
  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_NAME));
  Table table = TEST_UTIL.createTable(tableDescriptor, null);
  try {
    loadTable(table, 1);
    scan(table);
  } finally {
    table.close();
  }
}
 
Example #13
Source File: SystemCatalogIT.java    From phoenix with Apache License 2.0
/**
 * Make sure that SYSTEM.CATALOG cannot be split if QueryServices.SYSTEM_CATALOG_SPLITTABLE is false
 */
@Test
public void testSystemTableSplit() throws Exception {
    testUtil = getUtility();
    for (int i=0; i<10; i++) {
        createTable("schema"+i+".table_"+i);
    }
    TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG");
    RegionLocator rl = testUtil.getConnection().getRegionLocator(systemCatalog);
    assertEquals(1, rl.getAllRegionLocations().size());
    try {
        // now attempt to split SYSTEM.CATALOG
        testUtil.getAdmin().split(systemCatalog);
        // make sure the split finishes (there's no synchronous splitting before HBase 2.x)
        testUtil.getAdmin().disableTable(systemCatalog);
        testUtil.getAdmin().enableTable(systemCatalog);
    } catch (DoNotRetryIOException e) {
        // table is not splittable
        assert (e.getMessage().contains("NOT splittable"));
    }

    // test again... Must still be exactly one region.
    rl = testUtil.getConnection().getRegionLocator(systemCatalog);
    assertEquals(1, rl.getAllRegionLocations().size());
}
 
Example #14
Source File: TestThriftHBaseServiceHandlerWithReadOnly.java    From hbase with Apache License 2.0
@Test
public void testDeleteWithReadOnly() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = Bytes.toBytes("testDelete");
  ByteBuffer table = wrap(tableAname);

  TDelete delete = new TDelete(wrap(rowName));

  boolean exceptionCaught = false;
  try {
    handler.deleteSingle(table, delete);
  } catch (TIOError e) {
    exceptionCaught = true;
    assertTrue(e.getCause() instanceof DoNotRetryIOException);
    assertEquals("Thrift Server is in Read-only mode.", e.getMessage());
  } finally {
    assertTrue(exceptionCaught);
  }
}
 
Example #15
Source File: ProtobufUtil.java    From hbase with Apache License 2.0
/**
 * Convert a protocol buffer Filter to a client Filter
 *
 * @param proto the protocol buffer Filter to convert
 * @return the converted Filter
 */
@SuppressWarnings("unchecked")
public static Filter toFilter(FilterProtos.Filter proto) throws IOException {
  String type = proto.getName();
  final byte [] value = proto.getSerializedFilter().toByteArray();
  String funcName = "parseFrom";
  try {
    Class<?> c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER);
    Method parseFrom = c.getMethod(funcName, byte[].class);
    if (parseFrom == null) {
      throw new IOException("Unable to locate function: " + funcName + " in type: " + type);
    }
    return (Filter)parseFrom.invoke(c, value);
  } catch (Exception e) {
    // Either we couldn't instantiate the method object, or "parseFrom" failed.
    // In either case, let's not retry.
    throw new DoNotRetryIOException(e);
  }
}
 
Example #16
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
/**
 * Make sure that the transaction is within the max valid transaction lifetime.
 * @param env {@link RegionCoprocessorEnvironment} of the Region to which the coprocessor is
 *          associated
 * @param op {@link OperationWithAttributes} HBase operation to access its attributes if required
 * @param tx {@link Transaction} supplied by the
 * @throws DoNotRetryIOException thrown if the transaction is older than the max lifetime of a
 *           transaction
 * @throws IOException thrown if the value of the max transaction lifetime is unavailable
 */
protected void ensureValidTxLifetime(RegionCoprocessorEnvironment env,
    @SuppressWarnings("unused") OperationWithAttributes op, @Nullable Transaction tx)
    throws IOException {
  if (tx == null) {
    return;
  }

  boolean validLifetime =
      (TxUtils.getTimestamp(tx.getTransactionId()) + txMaxLifetimeMillis) > System
          .currentTimeMillis();
  if (!validLifetime) {
    throw new DoNotRetryIOException(
        String.format("Transaction %s has exceeded max lifetime %s ms", tx.getTransactionId(),
          txMaxLifetimeMillis));
  }
}
 
Example #17
Source File: IndexRebuildRegionScanner.java    From phoenix with Apache License 2.0
private static void applyDeleteOnPut(Delete del, Put put) throws IOException {
    for (List<Cell> cells : del.getFamilyCellMap().values()) {
        for (Cell cell : cells) {
            switch ((KeyValue.Type.codeToType(cell.getTypeByte()))) {
                case DeleteFamily:
                    put.getFamilyCellMap().remove(CellUtil.cloneFamily(cell));
                    break;
                case DeleteColumn:
                    removeColumn(put, cell);
                    break;
                default:
                    // We do not expect this to happen
                    throw new DoNotRetryIOException("Single version delete marker in data mutation " +
                            del);
            }
        }
    }
}
 
Example #18
Source File: SplitWALRemoteProcedure.java    From hbase with Apache License 2.0
@Override
protected void complete(MasterProcedureEnv env, Throwable error) {
  if (error == null) {
    try {
      env.getMasterServices().getSplitWALManager().deleteSplitWAL(walPath);
    } catch (IOException e) {
      LOG.warn("Failed split of {}; ignore...", walPath, e);
    }
    succ = true;
  } else {
    if (error instanceof DoNotRetryIOException) {
      LOG.warn("Sent {} to wrong server {}, try another", walPath, targetServer, error);
      succ = true;
    } else {
      LOG.warn("Failed split of {}, retry...", walPath, error);
      succ = false;
    }
  }
}
 
Example #19
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testSanityCheckMinVersion() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("MIN_VERSION > 0 is not supported for FIFO compaction");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1)
          .setMinVersions(1).build())
      .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
 
Example #20
Source File: RequestConverter.java    From hbase with Apache License 2.0
/**
 * Create a protocol buffer MultiRequest for conditional row mutations
 *
 * @return a mutate request
 * @throws IOException
 */
public static ClientProtos.MultiRequest buildMutateRequest(final byte[] regionName,
  final byte[] row, final byte[] family, final byte[] qualifier,
  final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange,
  final RowMutations rowMutations) throws IOException {
  RegionAction.Builder builder =
      getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
  builder.setAtomic(true);
  ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
  MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
  for (Mutation mutation: rowMutations.getMutations()) {
    MutationType mutateType;
    if (mutation instanceof Put) {
      mutateType = MutationType.PUT;
    } else if (mutation instanceof Delete) {
      mutateType = MutationType.DELETE;
    } else {
      throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
          mutation.getClass().getName());
    }
    mutationBuilder.clear();
    MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder);
    actionBuilder.clear();
    actionBuilder.setMutation(mp);
    builder.addAction(actionBuilder.build());
  }
  return ClientProtos.MultiRequest.newBuilder().addRegionAction(builder.setCondition(
    buildCondition(row, family, qualifier, op, value, filter, timeRange)).build()).build();
}
 
Example #21
Source File: RestoreSnapshotProcedure.java    From hbase with Apache License 2.0
/**
 * Action before any real action of restoring from snapshot.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
  final TableName tableName = getTableName();
  // Checks whether the table exists
  if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    throw new TableNotFoundException(tableName);
  }

  // Check whether table is disabled.
  env.getMasterServices().checkTableModifiable(tableName);

  // Check that we have at least 1 CF
  if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
    throw new DoNotRetryIOException("Table " + getTableName().toString() +
      " should have at least one column family.");
  }

  if (!getTableName().isSystemTable()) {
    // Table already exist. Check and update the region quota for this table namespace.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(
      env.getMasterConfiguration(),
      mfs.getFileSystem(),
      SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
      snapshot);
    int snapshotRegionCount = manifest.getRegionManifestsMap().size();
    int tableRegionCount =
        ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);

    if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
      ProcedureSyncWait.getMasterQuotaManager(env).checkAndUpdateNamespaceRegionQuota(
        tableName, snapshotRegionCount);
    }
  }
}
 
Example #22
Source File: HMaster.java    From hbase with Apache License 2.0
@Override
public long mergeRegions(
    final RegionInfo[] regionsToMerge,
    final boolean forcible,
    final long nonceGroup,
    final long nonce) throws IOException {
  checkInitialized();

  if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
    String regionsStr = Arrays.deepToString(regionsToMerge);
    LOG.warn("Merge switch is off! skip merge of " + regionsStr);
    throw new DoNotRetryIOException("Merge of " + regionsStr +
        " failed because merge switch is off");
  }

  final String mergeRegionsStr = Arrays.stream(regionsToMerge).map(RegionInfo::getEncodedName)
    .collect(Collectors.joining(", "));
  return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, nonceGroup, nonce) {
    @Override
    protected void run() throws IOException {
      getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);
      String aid = getClientIdAuditPrefix();
      LOG.info("{} merge regions {}", aid, mergeRegionsStr);
      submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
          regionsToMerge, forcible));
      getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
    }

    @Override
    protected String getDescription() {
      return "MergeTableProcedure";
    }
  });
}
 
Example #23
Source File: RegionCoprocessorRpcChannelImpl.java    From hbase with Apache License 2.0
private CompletableFuture<Message> rpcCall(MethodDescriptor method, Message request,
    Message responsePrototype, HBaseRpcController controller, HRegionLocation loc,
    ClientService.Interface stub) {
  CompletableFuture<Message> future = new CompletableFuture<>();
  if (region != null && !Bytes.equals(loc.getRegion().getRegionName(), region.getRegionName())) {
    future.completeExceptionally(new DoNotRetryIOException("Region name is changed, expected " +
      region.getRegionNameAsString() + ", actual " + loc.getRegion().getRegionNameAsString()));
    return future;
  }
  CoprocessorServiceRequest csr = CoprocessorRpcUtils.getCoprocessorServiceRequest(method,
    request, row, loc.getRegion().getRegionName());
  stub.execService(controller, csr,
    new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<CoprocessorServiceResponse>() {

      @Override
      public void run(CoprocessorServiceResponse resp) {
        if (controller.failed()) {
          future.completeExceptionally(controller.getFailed());
        } else {
          lastRegion = resp.getRegion().getValue().toByteArray();
          try {
            future.complete(CoprocessorRpcUtils.getResponse(resp, responsePrototype));
          } catch (IOException e) {
            future.completeExceptionally(e);
          }
        }
      }
    });
  return future;
}
 
Example #24
Source File: HRegionServer.java    From hbase with Apache License 2.0
/**
 * Reports the given map of Regions and their size on the filesystem to the active Master.
 *
 * @param regionSizeStore The store containing region sizes
 * @return false if FileSystemUtilizationChore should pause reporting to master. true otherwise
 */
public boolean reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) {
  RegionServerStatusService.BlockingInterface rss = rssStub;
  if (rss == null) {
    // the current server could be stopping.
    LOG.trace("Skipping Region size report to HMaster as stub is null");
    return true;
  }
  try {
    buildReportAndSend(rss, regionSizeStore);
  } catch (ServiceException se) {
    IOException ioe = ProtobufUtil.getRemoteException(se);
    if (ioe instanceof PleaseHoldException) {
      LOG.trace("Failed to report region sizes to Master because it is initializing."
          + " This will be retried.", ioe);
      // The Master is coming up. Will retry the report later. Avoid re-creating the stub.
      return true;
    }
    if (rssStub == rss) {
      rssStub = null;
    }
    createRegionServerStatusStub(true);
    if (ioe instanceof DoNotRetryIOException) {
      DoNotRetryIOException doNotRetryEx = (DoNotRetryIOException) ioe;
      if (doNotRetryEx.getCause() != null) {
        Throwable t = doNotRetryEx.getCause();
        if (t instanceof UnsupportedOperationException) {
          LOG.debug("master doesn't support ReportRegionSpaceUse, pause before retrying");
          return false;
        }
      }
    }
    LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe);
  }
  return true;
}
 
Example #25
Source File: ReplicationPeerManager.java    From hbase with Apache License 2.0
private ReplicationPeerDescription checkPeerExists(String peerId) throws DoNotRetryIOException {
  ReplicationPeerDescription desc = peers.get(peerId);
  if (desc == null) {
    throw new DoNotRetryIOException("Replication peer " + peerId + " does not exist");
  }
  return desc;
}
 
Example #26
Source File: FlappingLocalIndexIT.java    From phoenix with Apache License 2.0
@Override
public void preClose(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
        boolean abortRequested) throws IOException {
    if(DELAY_OPEN) {
        try {
            latch.await();
        } catch (InterruptedException e1) {
            throw new DoNotRetryIOException(e1);
        }
    }
}
 
Example #27
Source File: AccessController.java    From hbase with Apache License 2.0
@Override
public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
    throws IOException {
  User user = getActiveUser(c);
  checkForReservedTagPresence(user, append);

  // Require WRITE permission to the table, CF, and the KV to be appended
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<Cell>> families = append.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.APPEND, user,
      env, families, Action.WRITE);
  AccessChecker.logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      append.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled)  {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }

  byte[] bytes = append.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, append.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }

  return null;
}
 
Example #28
Source File: PhoenixIndexFailurePolicy.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public void handleFailure(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws IOException {
    Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
    StringBuilder buf = new StringBuilder("Disabled index" + (refs.size() > 1 ? "es " : " "));
    try {
        for (HTableInterfaceReference ref : refs) {
            // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
            String indexTableName = ref.getTableName();
            byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
            HTableInterface systemTable = env.getTable(PhoenixDatabaseMetaData.TYPE_TABLE_NAME_BYTES);
            MetaDataProtocol mdProxy = systemTable.coprocessorProxy(MetaDataProtocol.class, indexTableKey);
            // Mimic the Put that gets generated by the client on an update of the index state
            Put put = new Put(indexTableKey);
            put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, PIndexState.DISABLE.getSerializedBytes());
            List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
            MetaDataMutationResult result = mdProxy.updateIndexState(tableMetadata);
            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = " + result.getMutationCode() + ". Will use default failure policy instead.");
                super.handleFailure(attempted, cause);
            }
            LOG.info("Successfully disabled index " + indexTableName);
            buf.append(indexTableName);
            buf.append(',');
        }
        buf.setLength(buf.length()-1);
        buf.append(" due to an exception while writing updates");
    } catch (Throwable t) {
        super.handleFailure(attempted, cause);
    }
    throw new DoNotRetryIOException(buf.toString(), cause);
}
 
Example #29
Source File: MasterRpcServices.java    From hbase with Apache License 2.0
@Override
public GrantResponse grant(RpcController controller, GrantRequest request)
    throws ServiceException {
  try {
    master.checkInitialized();
    if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
      final UserPermission perm =
          ShadedAccessControlUtil.toUserPermission(request.getUserPermission());
      boolean mergeExistingPermissions = request.getMergeExistingPermissions();
      master.cpHost.preGrant(perm, mergeExistingPermissions);
      try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
        PermissionStorage.addUserPermission(getConfiguration(), perm, table,
          mergeExistingPermissions);
      }
      master.cpHost.postGrant(perm, mergeExistingPermissions);
      User caller = RpcServer.getRequestUser().orElse(null);
      if (AUDITLOG.isTraceEnabled()) {
        // audit log should store permission changes in addition to auth results
        String remoteAddress = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
        AUDITLOG.trace("User {} (remote address: {}) granted permission {}", caller,
          remoteAddress, perm);
      }
      return GrantResponse.getDefaultInstance();
    } else {
      throw new DoNotRetryIOException(
          new UnsupportedOperationException(AccessController.class.getName() + " is not loaded"));
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
 
Example #30
Source File: TestLockProcedure.java    From hbase with Apache License 2.0
private void validateLockRequestException(LockRequest lockRequest, String message)
    throws Exception {
  exception.expect(ServiceException.class);
  exception.expectCause(IsInstanceOf.instanceOf(DoNotRetryIOException.class));
  exception.expectMessage(
      StringStartsWith.startsWith("org.apache.hadoop.hbase.DoNotRetryIOException: "
          + "java.lang.IllegalArgumentException: " + message));
  masterRpcService.requestLock(null, lockRequest);
}