org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment Java Examples

The following examples show how to use org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment. Each example is taken from an open-source project; the source file and license are noted above each snippet.
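For orientation, here is a minimal sketch of the pattern most of the examples below share: a region observer receives an ObserverContext in each hook, calls getEnvironment() to obtain the RegionCoprocessorEnvironment, and uses it to reach the region's table name and the server configuration. The class name ExampleRegionObserver and the configuration key are hypothetical, and the sketch assumes the HBase 2.x coprocessor API (RegionCoprocessor plus RegionObserver); several examples on this page target the older 1.x API instead.

import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class ExampleRegionObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    // Expose this class as the RegionObserver so its hooks are invoked.
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> results) throws IOException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    TableName table = env.getRegionInfo().getTable();  // table that owns this region
    Configuration conf = env.getConfiguration();        // region server configuration
    // "example.skip.system.tables" is a made-up key, shown only to illustrate getConfiguration()
    boolean skipSystemTables = conf.getBoolean("example.skip.system.tables", true);
    if (skipSystemTables && table.isSystemTable()) {
      return;
    }
    // ... application-specific pre-Get logic would go here ...
  }
}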
Example #1
Source File: ServerSideOperationsObserver.java    From geowave with Apache License 2.0
@Override
public InternalScanner preFlush(
    final ObserverContext<RegionCoprocessorEnvironment> e,
    final Store store,
    final InternalScanner scanner) throws IOException {
  if (opStore == null) {
    return super.preFlush(e, store, scanner);
  }
  return super.preFlush(
      e,
      store,
      wrapScannerWithOps(
          e.getEnvironment().getRegionInfo().getTable(),
          scanner,
          null,
          ServerOpScope.MINOR_COMPACTION,
          INTERNAL_SCANNER_FACTORY));
}
 
Example #2
Source File: MemstoreAwareObserverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void preStoreScannerOpen() throws Exception {
    MemstoreAwareObserver mao = new MemstoreAwareObserver();

    // create scan, call postScannerOpen

    // env and scan share same start and end keys (partition hit)
    byte[] startKey = createByteArray(13);
    byte[] endKey = createByteArray(24);

    ObserverContext<RegionCoprocessorEnvironment> fakeCtx = mockRegionEnv(startKey, endKey);
    RegionScanner preScanner = mock(RegionScanner.class);

    RegionScanner postScanner = mao.postScannerOpen(fakeCtx, mockScan(startKey, endKey), preScanner);

    assertNotNull(postScanner);
    assertNotEquals(preScanner, postScanner);
    postScanner.close();
}
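The helpers mockRegionEnv, mockScan, and createByteArray are defined elsewhere in MemstoreAwareObserverTest and are not shown on this page. As a rough, assumed stand-in (not the spliceengine implementation, and assuming the HBase 2.x API), a Mockito-based mockRegionEnv might wire the context, environment, and region together like this:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;

final class RegionEnvMocks {

  @SuppressWarnings("unchecked")
  static ObserverContext<RegionCoprocessorEnvironment> mockRegionEnv(byte[] startKey, byte[] endKey) {
    // Region metadata carrying the requested key range ("testTable" is a placeholder name)
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf("testTable"))
        .setStartKey(startKey)
        .setEndKey(endKey)
        .build();

    Region region = mock(Region.class);
    when(region.getRegionInfo()).thenReturn(info);

    RegionCoprocessorEnvironment env = mock(RegionCoprocessorEnvironment.class);
    when(env.getRegion()).thenReturn(region);
    when(env.getRegionInfo()).thenReturn(info);

    ObserverContext<RegionCoprocessorEnvironment> ctx = mock(ObserverContext.class);
    when(ctx.getEnvironment()).thenReturn(env);
    return ctx;
  }
}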
 
Example #3
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
/**
 * Refresh the properties related to transaction pruning. This method needs to be invoked if there is a change in
 * the prune-related properties after clearing the state by calling {@link #resetPruneState}.
 *
 * @param env {@link RegionCoprocessorEnvironment} of this region
 */
protected void initializePruneState(RegionCoprocessorEnvironment env) {
  Configuration conf = getConfiguration(env);
  if (conf != null) {
    pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE,
                                  TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);

    if (Boolean.TRUE.equals(pruneEnable)) {
      TableName pruneTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                                        TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
      long pruneFlushInterval = TimeUnit.SECONDS.toMillis(conf.getLong(
        TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL,
        TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));

      compactionState = new CompactionState(env, pruneTable, pruneFlushInterval);
      if (LOG.isDebugEnabled()) {
        TableName name = env.getRegion().getRegionInfo().getTable();
        LOG.debug(String.format("Automatic invalid list pruning is enabled for table %s:%s. Compaction state will " +
                                  "be recorded in table %s:%s", name.getNamespaceAsString(), name.getNameAsString(),
                                pruneTable.getNamespaceAsString(), pruneTable.getNameAsString()));
      }
    }
  }
}
 
Example #4
Source File: AccessController.java    From hbase with Apache License 2.0
@Override
public void preOpen(ObserverContext<RegionCoprocessorEnvironment> c)
    throws IOException {
  RegionCoprocessorEnvironment env = c.getEnvironment();
  final Region region = env.getRegion();
  if (region == null) {
    LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
  } else {
    RegionInfo regionInfo = region.getRegionInfo();
    if (regionInfo.getTable().isSystemTable()) {
      checkSystemOrSuperUser(getActiveUser(c));
    } else {
      requirePermission(c, "preOpen", Action.ADMIN);
    }
  }
}
 
Example #5
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
/**
 * Refresh the properties related to transaction pruning. This method needs to be invoked if there is a change in
 * the prune-related properties after clearing the state by calling {@link #resetPruneState}.
 *
 * @param env {@link RegionCoprocessorEnvironment} of this region
 */
protected void initializePruneState(RegionCoprocessorEnvironment env) {
  Configuration conf = getConfiguration(env);
  if (conf != null) {
    pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE,
                                  TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);

    if (Boolean.TRUE.equals(pruneEnable)) {
      TableName pruneTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                                        TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
      long pruneFlushInterval = TimeUnit.SECONDS.toMillis(conf.getLong(
        TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL,
        TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));

      compactionState = new CompactionState(env, pruneTable, pruneFlushInterval);
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("Automatic invalid list pruning is enabled for table %s. Compaction state " +
                                  "will be recorded in table %s",
                                env.getRegionInfo().getTable().getNameWithNamespaceInclAsString(),
                                pruneTable.getNameWithNamespaceInclAsString()));
      }
    }
  }
}
 
Example #6
Source File: ServerSideOperationsObserver.java    From geowave with Apache License 2.0
@Override
public RegionScanner preScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> e,
    final Scan scan,
    final RegionScanner s) throws IOException {
  if (opStore != null) {
    final TableName tableName = e.getEnvironment().getRegionInfo().getTable();
    if (!tableName.isSystemTable()) {
      final String namespace = tableName.getNamespaceAsString();
      final String qualifier = tableName.getQualifierAsString();
      final Collection<HBaseServerOp> serverOps =
          opStore.getOperations(namespace, qualifier, ServerOpScope.SCAN);
      for (final HBaseServerOp op : serverOps) {
        op.preScannerOpen(scan);
      }
    }
  }
  return super.preScannerOpen(e, scan, s);
}
 
Example #7
Source File: SpliceIndexObserver.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public void start(final CoprocessorEnvironment e) throws IOException{
    try {
        RegionCoprocessorEnvironment rce = ((RegionCoprocessorEnvironment) e);

        String tableName = rce.getRegion().getTableDescriptor().getTableName().getQualifierAsString();
        TableType table = EnvUtils.getTableType(HConfiguration.getConfiguration(), rce);
        switch (table) {
            case DERBY_SYS_TABLE:
                conglomId = -1; //bypass index management on derby system tables
                break;
            case USER_TABLE:
                conglomId = Long.parseLong(tableName);
                break;
            default:
                return; //disregard table environments which are not user or system tables
        }
    } catch (Throwable t) {
        throw CoprocessorUtils.getIOException(t);
    }
}
 
Example #8
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public InternalScanner preCompact(
    org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, InternalScanner scanner, ScanType scanType,
    org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  // Get the latest tx snapshot state for the compaction
  TransactionVisibilityState snapshot = cache.getLatestState();
  // Record tx state before the compaction
  if (compactionState != null) {
    compactionState.record(request, snapshot);
  }
  // Also make sure to use the same snapshot for the compaction
  InternalScanner s =
      createStoreScanner(c.getEnvironment(), "compaction", snapshot, scanner, scanType);
  if (s != null) {
    return s;
  }
  return scanner;
}
 
Example #9
Source File: HashJoinCacheIT.java    From phoenix with Apache License 2.0
@Override
public void preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
      final Scan scan) {
    final HashJoinInfo joinInfo = HashJoinInfo.deserializeHashJoinFromScan(scan);
    if (joinInfo != null) {
        TenantCache cache = GlobalCache.getTenantCache(c.getEnvironment(), null);
        int count = joinInfo.getJoinIds().length;
        for (int i = 0; i < count; i++) {
            ImmutableBytesPtr joinId = joinInfo.getJoinIds()[i];
            if (!ByteUtil.contains(lastRemovedJoinIds,joinId)) {
                lastRemovedJoinIds.add(joinId);
                cache.removeServerCache(joinId);
            }
        }
    }
}
 
Example #10
Source File: TestReplicaWithCluster.java    From hbase with Apache License 2.0
@Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Get get, final List<Cell> results) throws IOException {

  int replicaId = e.getEnvironment().getRegion().getRegionInfo().getReplicaId();

  // Fail for the primary replica, but not for meta
  if (throwException) {
    if (!e.getEnvironment().getRegion().getRegionInfo().isMetaRegion() && (replicaId == 0)) {
      LOG.info("Get, throw Region Server Stopped Exceptoin for region " + e.getEnvironment()
          .getRegion().getRegionInfo());
      throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
              + " not running");
    }
  } else {
    LOG.info("Get, We're replica region " + replicaId);
  }
}
 
Example #11
Source File: MetaDataEndpointImpl.java    From phoenix with Apache License 2.0
private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key,
    ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp)
    throws IOException, SQLException {
    HRegion region = env.getRegion();
    Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    PTable table = metaDataCache.getIfPresent(cacheKey);
    // We always cache the latest version - fault in if not in cache
    if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp)) != null) {
        return table;
    }
    // if not found then check if newer table already exists and add delete marker for timestamp
    // found
    if (table == null
            && (table = buildDeletedTable(key, cacheKey, region, clientTimeStamp)) != null) {
        return table;
    }
    return null;
}
 
Example #12
Source File: IndexRegionObserver.java    From phoenix with Apache License 2.0
@Override
public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success) throws IOException {
    if (this.disabled) {
        return;
    }
    BatchMutateContext context = getBatchMutateContext(c);
    if (context == null) {
        return;
    }
    try {
        for (RowLock rowLock : context.rowLocks) {
            rowLock.release();
        }
        this.builder.batchCompleted(miniBatchOp);

        if (success) { // The pre-index and data table updates are successful, and now, do post index updates
            doPost(c, context);
        }
    } finally {
        removeBatchMutateContext(c);
    }
}
 
Example #13
Source File: Indexer.java    From phoenix with Apache License 2.0
@Override
public void postOpen(final ObserverContext<RegionCoprocessorEnvironment> c) {
  Multimap<HTableInterfaceReference, Mutation> updates = failedIndexEdits.getEdits(c.getEnvironment().getRegion());

  if (this.disabled) {
      super.postOpen(c);
      return;
  }
  //if we have no pending edits to complete, then we are done
  if (updates == null || updates.size() == 0) {
    return;
  }
  LOG.info("Found some outstanding index updates that didn't succeed during"
      + " WAL replay - attempting to replay now.");

  // do the usual writer stuff, killing the server again, if we can't manage to make the index
  // writes succeed again
  try {
      writer.writeAndKillYourselfOnFailure(updates);
  } catch (IOException e) {
      LOG.error("Exception thrown instead of killing server during index writing", e);
  }
}
 
Example #14
Source File: StatisticsScannerTest.java    From phoenix with Apache License 2.0
@Before
public void setupMocks() throws Exception {
    this.config = new Configuration(false);

    // Create all of the mocks
    this.region = mock(Region.class);
    this.rsServices = mock(RegionServerServices.class);
    this.statsWriter = mock(StatisticsWriter.class);
    this.callable = mock(StatisticsScannerCallable.class);
    this.runTracker = mock(StatisticsCollectionRunTracker.class);
    this.mockScanner = mock(StatisticsScanner.class);
    this.tracker = mock(StatisticsCollector.class);
    this.delegate = mock(InternalScanner.class);
    this.regionInfo = mock(RegionInfo.class);
    this.env = mock(RegionCoprocessorEnvironment.class);
    this.conn = mock(Connection.class);

    // Wire up the mocks to the mock StatisticsScanner
    when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter);
    when(mockScanner.createCallable()).thenReturn(callable);
    when(mockScanner.getStatsCollectionRunTracker(any(Configuration.class))).thenReturn(runTracker);
    when(mockScanner.getRegion()).thenReturn(region);
    when(mockScanner.getConfig()).thenReturn(config);
    when(mockScanner.getTracker()).thenReturn(tracker);
    when(mockScanner.getDelegate()).thenReturn(delegate);
    when(env.getConnection()).thenReturn(conn);
    when(mockScanner.getConnection()).thenReturn(conn);

    // Wire up the HRegionInfo mock to the Region mock
    when(region.getRegionInfo()).thenReturn(regionInfo);

    // Always call close() on the mock StatisticsScanner
    doCallRealMethod().when(mockScanner).close();
}
 
Example #15
Source File: GroupedAggregateRegionObserver.java    From phoenix with BSD 3-Clause "New" or "Revised" License
GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, ServerAggregators aggregators, int estDistVals) {
    Configuration conf = env.getConfiguration();
    boolean spillableEnabled =
            conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    if (spillableEnabled) {
        return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals);
    } 
    
    return new InMemoryGroupByCache(env, tenantId, aggregators, estDistVals);
}
 
Example #16
Source File: TransactionAwareHTableTest.java    From phoenix-tephra with Apache License 2.0
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
                   final Put put, final WALEdit edit,
                   final Durability durability) throws IOException {
  if (put.getAttribute(TEST_ATTRIBUTE) == null) {
    throw new DoNotRetryIOException("Put should preserve attributes");
  }
  if (put.getDurability() != Durability.USE_DEFAULT) {
    throw new DoNotRetryIOException("Durability is not propagated correctly");
  }
}
 
Example #17
Source File: AccessController.java    From hbase with Apache License 2.0
@Override
public List<Pair<Cell, Cell>> postAppendBeforeWAL(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
    List<Pair<Cell, Cell>> cellPairs) throws IOException {
  // If the HFile version is insufficient to persist tags, we won't have any
  // work to do here
  if (!cellFeaturesEnabled) {
    return cellPairs;
  }
  return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(),
      createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond())))
      .collect(Collectors.toList());
}
 
Example #18
Source File: InvalidListPruneTest.java    From phoenix-tephra with Apache License 2.0
@Override
protected CacheSupplier<TransactionStateCache> getTransactionStateCacheSupplier(RegionCoprocessorEnvironment env) {
  return new CacheSupplier<TransactionStateCache>() {
    @Override
    public TransactionStateCache get() {
      return new InMemoryTransactionStateCache();
    }

    @Override
    public void release() {
      // no-op
    }
  };
}
 
Example #19
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
private boolean getAllowEmptyValues(RegionCoprocessorEnvironment env, HTableDescriptor htd) {
  String allowEmptyValuesFromTableDesc = htd.getValue(TxConstants.ALLOW_EMPTY_VALUES_KEY);
  Configuration conf = getConfiguration(env);
  boolean allowEmptyValuesFromConfig = (conf != null) ?
    conf.getBoolean(TxConstants.ALLOW_EMPTY_VALUES_KEY, TxConstants.ALLOW_EMPTY_VALUES_DEFAULT) :
    TxConstants.ALLOW_EMPTY_VALUES_DEFAULT;

  // If the property is not present in the tableDescriptor, get it from the Configuration
  return  (allowEmptyValuesFromTableDesc != null) ?
    Boolean.valueOf(allowEmptyValuesFromTableDesc) : allowEmptyValuesFromConfig;
}
 
Example #20
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> results)
  throws IOException {
  Transaction tx = getFromOperation(get);
  if (tx != null) {
    projectFamilyDeletes(get);
    get.setMaxVersions();
    get.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttlByFamily, tx, readNonTxnData),
                     TxUtils.getMaxVisibleTimestamp(tx));
    Filter newFilter = getTransactionFilter(tx, ScanType.USER_SCAN, get.getFilter());
    get.setFilter(newFilter);
  }
}
 
Example #21
Source File: AggregateProtocolEndPoint.java    From Eagle with Apache License 2.0
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
        this.env = (RegionCoprocessorEnvironment)env;
    } else {
        throw new CoprocessorException("Must be loaded on a table region!");
    }
}
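The instanceof check above only succeeds when the coprocessor is deployed at the region level, for example by attaching it to a table descriptor. A minimal sketch of that step, assuming the HBase 2.x client API and hypothetical table, column family, and coprocessor class names:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AttachCoprocessor {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The coprocessor class must be available on the region server classpath;
      // "com.example.ExampleRegionObserver", "demo", and "cf" are placeholder names.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setCoprocessor("com.example.ExampleRegionObserver")
          .build();
      admin.createTable(td);
    }
  }
}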
 
Example #22
Source File: Indexer.java    From phoenix with Apache License 2.0
private void doPostWithExceptions(ObserverContext<RegionCoprocessorEnvironment> c, BatchMutateContext context)
        throws IOException {
    //short circuit, if we don't need to do any work
    if (context == null || context.indexUpdates.isEmpty()) {
        return;
    }

    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Completing index writes")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        long start = EnvironmentEdgeManager.currentTimeMillis();
        
        current.addTimelineAnnotation("Actually doing index update for first time");
        writer.writeAndHandleFailure(context.indexUpdates, false, context.clientVersion);

        long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
        if (duration >= slowIndexWriteThreshold) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(getCallTooSlowMessage("indexWrite",
                        duration, slowIndexWriteThreshold));
            }
            metricSource.incrementNumSlowIndexWriteCalls();
        }
        metricSource.updateIndexWriteTime(duration);
    }
}
 
Example #23
Source File: AccessController.java    From hbase with Apache License 2.0
private void initialize(RegionCoprocessorEnvironment e) throws IOException {
  final Region region = e.getRegion();
  Configuration conf = e.getConfiguration();
  Map<byte[], ListMultimap<String, UserPermission>> tables = PermissionStorage.loadAll(region);
  // For each table, write out the table's permissions to the respective
  // znode for that table.
  for (Map.Entry<byte[], ListMultimap<String, UserPermission>> t:
    tables.entrySet()) {
    byte[] entry = t.getKey();
    ListMultimap<String, UserPermission> perms = t.getValue();
    byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf);
    zkPermissionWatcher.writeToZookeeper(entry, serialized);
  }
  initialized = true;
}
 
Example #24
Source File: CompactionState.java    From phoenix-tephra with Apache License 2.0
public CompactionState(final RegionCoprocessorEnvironment env, final TableName stateTable, long pruneFlushInterval) {
  this.regionName = env.getRegionInfo().getRegionName();
  this.regionNameAsString = env.getRegionInfo().getRegionNameAsString();
  DataJanitorState dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public Table get() throws IOException {
      return env.getTable(stateTable);
    }
  });
  this.pruneUpperBoundWriterSupplier = new PruneUpperBoundWriterSupplier(stateTable, dataJanitorState,
                                                                         pruneFlushInterval);
  this.pruneUpperBoundWriter = pruneUpperBoundWriterSupplier.get();
}
 
Example #25
Source File: TestIndexWriter.java    From phoenix with Apache License 2.0
@Test
public void getDefaultFailurePolicy() throws Exception {
  Configuration conf = new Configuration(false);
  RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
  Mockito.when(env.getConfiguration()).thenReturn(conf);
  assertNotNull(IndexWriter.getFailurePolicy(env));
}
 
Example #26
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
/**
 * Make sure that the transaction is within the max valid transaction lifetime.
 *
 * @param env {@link RegionCoprocessorEnvironment} of the Region to which the coprocessor is associated
 * @param op {@link OperationWithAttributes} HBase operation to access its attributes if required
 * @param tx the {@link Transaction} associated with the operation, if any
 * @throws DoNotRetryIOException thrown if the transaction is older than the max lifetime of a transaction;
 *         IOException thrown if the value of the max lifetime of a transaction is unavailable
 */
protected void ensureValidTxLifetime(RegionCoprocessorEnvironment env,
                                     @SuppressWarnings("unused") OperationWithAttributes op,
                                     @Nullable Transaction tx) throws IOException {
  if (tx == null) {
    return;
  }

  boolean validLifetime =
    (TxUtils.getTimestamp(tx.getTransactionId()) + txMaxLifetimeMillis) > System.currentTimeMillis();
  if (!validLifetime) {
    throw new DoNotRetryIOException(String.format("Transaction %s has exceeded max lifetime %s ms",
                                                  tx.getTransactionId(), txMaxLifetimeMillis));
  }
}
 
Example #27
Source File: AbstractTestCITimeout.java    From hbase with Apache License 2.0
@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
  Threads.sleep(sleepTime.get());
  if (ct.incrementAndGet() == 1) {
    throw new IOException("first call I fail");
  }
}
 
Example #28
Source File: MemstoreAwareObserverTest.java    From spliceengine with GNU Affero General Public License v3.0
private ScanRequestThread(String name,
                          MemstoreAwareObserver mao,
                          ObserverContext<RegionCoprocessorEnvironment> fakeCtx,
                          Scan internalScanner, boolean shouldFail, StateOrdering ordering) {
    super(ordering, name, mao, shouldFail);
    this.fakeCtx = fakeCtx;
    this.internalScanner = internalScanner;
}
 
Example #29
Source File: PhoenixIndexCodec.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public void initialize(RegionCoprocessorEnvironment env) {
    this.env = env;
    Configuration conf = env.getConfiguration();
    // Install handler that will attempt to disable the index first before killing the region
    // server
    conf.setIfUnset(IndexWriter.INDEX_FAILURE_POLICY_CONF_KEY,
        PhoenixIndexFailurePolicy.class.getName());
    this.builder = KeyValueBuilder.get(env.getHBaseVersion());
}
 
Example #30
Source File: TestRegionServerAbort.java    From hbase with Apache License 2.0
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
                   Durability durability) throws IOException {
  if (put.getAttribute(DO_ABORT) != null) {
    // TODO: Change this so it throws a CP Abort Exception instead.
    RegionServerServices rss =
        ((HasRegionServerServices)c.getEnvironment()).getRegionServerServices();
    String str = "Aborting for test";
    LOG.info(str  + " " + rss.getServerName());
    rss.abort(str, new Throwable(str));
  }
}