Java Code Examples for org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest#isMajor()

The following examples show how to use org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest#isMajor(). Each example is drawn from an open-source project; the project, source file, and license are noted above each snippet.
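All of the examples call isMajor() from inside an HBase coprocessor hook, usually to restrict expensive work to major compactions. For orientation, here is a minimal sketch of such an observer; the class name MajorCompactionLoggingObserver is hypothetical, and the sketch assumes the HBase 2.x coprocessor API with an slf4j logger on the classpath.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical observer: logs whether a compaction is major or minor and
// returns the scanner unchanged.
public class MajorCompactionLoggingObserver implements RegionCoprocessor, RegionObserver {

    private static final Logger LOG = LoggerFactory.getLogger(MajorCompactionLoggingObserver.class);

    @Override
    public Optional<RegionObserver> getRegionObserver() {
        // Expose this class as the RegionObserver so preCompact() is invoked.
        return Optional.of(this);
    }

    @Override
    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
            InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
            CompactionRequest request) throws IOException {
        // Gate work on the compaction type: major compactions rewrite every store
        // file, so this is the usual place to hang once-per-rewrite logic.
        if (request.isMajor()) {
            LOG.info("Major compaction starting for store {}", store.getColumnFamilyName());
        } else {
            LOG.debug("Minor compaction starting for store {}", store.getColumnFamilyName());
        }
        // Return the scanner unchanged; real observers may wrap or replace it.
        return scanner;
    }
}
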
Example 1
Source File: CompactionState.java    From phoenix-tephra with Apache License 2.0
/**
 * Records the transaction state used for a compaction. This method is called when the compaction starts.
 *
 * @param request {@link CompactionRequest} for the compaction
 * @param snapshot transaction state that will be used for the compaction
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  if (request.isMajor() && snapshot != null) {
    Transaction tx = TxUtils.createDummyTransaction(snapshot);
    pruneUpperBound = TxUtils.getPruneUpperBound(tx);
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                      pruneUpperBound, request, snapshot.getTimestamp()));
    }
  } else {
    pruneUpperBound = -1;
  }
}
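The guard on request.isMajor() restricts the computation to major compactions, which rewrite every store file for the column family; for minor compactions the prune upper bound is simply reset to -1.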
 
Example 2
Source File: IndexHalfStoreFileReaderGenerator.java    From phoenix with Apache License 2.0
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
        InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker,
        CompactionRequest request) throws IOException {

    if (!IndexUtil.isLocalIndexStore(store)) { return s; }
    if (!store.hasReferences()) {
        InternalScanner repairScanner = null;
        if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) {
            LOGGER.info("we have found inconsistent data for local index for region:"
                    + c.getEnvironment().getRegion().getRegionInfo());
            if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
                LOGGER.info("Starting automatic repair of local Index for region:"
                        + c.getEnvironment().getRegion().getRegionInfo());
                repairScanner = getRepairScanner(c.getEnvironment(), store);
            }
        }
        if (repairScanner != null) {
            if (s != null) {
                s.close();
            }
            return repairScanner;
        } else {
            return s;
        }
    }
    return s;
}
 
Example 3
Source File: SIObserver.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request) throws IOException {
    try {
        // We can't return null, there's a check in org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preCompact
        // return a dummy implementation instead
        if (scanner == null || scanner == DummyScanner.INSTANCE)
            return DummyScanner.INSTANCE;

        if(tableEnvMatch){
            SIDriver driver=SIDriver.driver();
            SimpleCompactionContext context = new SimpleCompactionContext();
            SICompactionState state = new SICompactionState(driver.getTxnSupplier(),
                    driver.getConfiguration().getActiveTransactionMaxCacheSize(), context, driver.getRejectingExecutorService());
            SConfiguration conf = driver.getConfiguration();
            PurgeConfig purgeConfig;
            if (conf.getOlapCompactionAutomaticallyPurgeDeletedRows()) {
                if (request.isMajor())
                    purgeConfig = PurgeConfig.purgeDuringMajorCompactionConfig();
                else
                    purgeConfig = PurgeConfig.purgeDuringMinorCompactionConfig();
            } else {
                purgeConfig = PurgeConfig.noPurgeConfig();
            }
            SICompactionScanner siScanner = new SICompactionScanner(
                    state, scanner, purgeConfig,
                    conf.getOlapCompactionResolutionShare(), conf.getLocalCompactionResolutionBufferSize(), context);
            siScanner.start();
            return siScanner;
        }
        return scanner;
    } catch (Throwable t) {
        throw CoprocessorUtils.getIOException(t);
    }
}
 
Example 4
Source File: UngroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
@Override
public void preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
        ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
        final CompactionRequest request) throws IOException {
    // Compaction and split upcalls run with the effective user context of the requesting user.
    // This will lead to failure of cross cluster RPC if the effective user is not
    // the login user. Switch to the login user context to ensure we have the expected
    // security context.
    final String fullTableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
    // since we will make a call to syscat, do nothing if we are compacting syscat itself
    if (request.isMajor() && !PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName)) {
        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // If the index is disabled, keep the deleted cells so the rebuild doesn't corrupt the index
                try (PhoenixConnection conn =
                        QueryUtil.getConnectionOnServer(compactionConfig).unwrap(PhoenixConnection.class)) {
                    PTable table = PhoenixRuntime.getTableNoCache(conn, fullTableName);
                    List<PTable> indexes = PTableType.INDEX.equals(table.getType()) ? Lists.newArrayList(table) : table.getIndexes();
                    // FIXME need to handle views and indexes on views as well
                    for (PTable index : indexes) {
                        if (index.getIndexDisableTimestamp() != 0) {
                            LOGGER.info(
                                "Modifying major compaction scanner to retain deleted cells for a table with disabled index: "
                                        + fullTableName);
                            options.setKeepDeletedCells(KeepDeletedCells.TRUE);
                            options.readAllVersions();
                            options.setTTL(Long.MAX_VALUE);
                        }
                    }
                } catch (Exception e) {
                    if (e instanceof TableNotFoundException) {
                        LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
                        // non-Phoenix HBase tables won't be found, do nothing
                    } else {
                        LOGGER.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
                                + fullTableName,
                                e);
                    }
                }
                return null;
            }
        });
    }
}
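 
Observers like the ones above only run once they are registered with HBase. Below is a minimal sketch of attaching one to a table at creation time, assuming the HBase 2.x client API; the table name demo_table and the observer class name are hypothetical placeholders, and the coprocessor jar must be available on the region servers' classpath.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AttachCompactionObserver {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo_table"))                   // hypothetical table
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                    // Hypothetical observer class from the sketch near the top of this page.
                    .setCoprocessor("com.example.MajorCompactionLoggingObserver")
                    .build();
            admin.createTable(table);
        }
    }
}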