Java Code Examples for org.apache.hadoop.hbase.client.TableDescriptor#getValue()

The following examples show how to use org.apache.hadoop.hbase.client.TableDescriptor#getValue(). Each example notes the project and license of the source file it comes from.
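As a quick orientation before the project examples, here is a minimal sketch of setting a table-level attribute with TableDescriptorBuilder and reading it back with TableDescriptor#getValue(). The table name, family name, attribute key, and class name are illustrative, not taken from any of the projects below.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TableDescriptorValueSketch {
  public static void main(String[] args) {
    // Build a descriptor carrying a custom table-level attribute (illustrative key/value).
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setValue("example.custom.key", "true")
        .build();

    // getValue(String) returns the stored value, or null if the key is not present.
    String value = td.getValue("example.custom.key");
    boolean flag = (value != null) && Boolean.parseBoolean(value);
    System.out.println("example.custom.key = " + value + ", parsed = " + flag);
  }
}

The examples below all follow the same pattern: read an attribute from the TableDescriptor and, if getValue returns null, fall back to a Configuration setting or a hard-coded default.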
Example 1
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
private boolean getAllowEmptyValues(RegionCoprocessorEnvironment env, TableDescriptor htd) {
  String allowEmptyValuesFromTableDesc = htd.getValue(TxConstants.ALLOW_EMPTY_VALUES_KEY);
  Configuration conf = getConfiguration(env);
  boolean allowEmptyValuesFromConfig =
      (conf != null) ? conf.getBoolean(TxConstants.ALLOW_EMPTY_VALUES_KEY,
        TxConstants.ALLOW_EMPTY_VALUES_DEFAULT) : TxConstants.ALLOW_EMPTY_VALUES_DEFAULT;

  // If the property is not present in the tableDescriptor, get it from the Configuration
  return (allowEmptyValuesFromTableDesc != null) ? Boolean.valueOf(allowEmptyValuesFromTableDesc)
      : allowEmptyValuesFromConfig;
}
 
Example 2
Source File: Constraints.java    From hbase with Apache License 2.0
private static long getNextPriority(TableDescriptor desc) {
  String value = desc.getValue(COUNTER_KEY);

  long priority;
  // get the current priority
  if (value == null) {
    priority = MIN_PRIORITY;
  } else {
    priority = Long.parseLong(value) + 1;
  }

  return priority;
}
 
Example 3
Source File: PhoenixIndexFailurePolicy.java    From phoenix with Apache License 2.0
/**
 * Check the table descriptor, falling back to the configuration, for whether to disable the
 * index on index write failures.
 * @param env region coprocessor environment supplying the table descriptor and configuration
 * @return the effective value of {@link PhoenixIndexFailurePolicy#DISABLE_INDEX_ON_WRITE_FAILURE}
 */
public static boolean getDisableIndexOnFailure(RegionCoprocessorEnvironment env) {
    TableDescriptor htd = env.getRegion().getTableDescriptor();
    Configuration config = env.getConfiguration();
    String value = htd.getValue(PhoenixIndexFailurePolicy.DISABLE_INDEX_ON_WRITE_FAILURE);
    boolean disableIndexOnFailure;
    if (value == null) {
        disableIndexOnFailure =
                config.getBoolean(QueryServices.INDEX_FAILURE_DISABLE_INDEX,
                    QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX);
    } else {
        disableIndexOnFailure = Boolean.parseBoolean(value);
    }
    return disableIndexOnFailure;
}
 
Example 4
Source File: TableDescriptorChecker.java    From hbase with Apache License 2.0
private static void checkCompactionPolicy(Configuration conf, TableDescriptor td)
    throws IOException {
  // FIFO compaction has some requirements.
  // Note that FIFO compaction ignores periodic major compactions.
  String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
  if (className == null) {
    className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        ExploringCompactionPolicy.class.getName());
  }

  int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
  String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
  if (sv != null) {
    blockingFileCount = Integer.parseInt(sv);
  } else {
    blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
  }

  for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
    String compactionPolicy =
        hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (compactionPolicy == null) {
      compactionPolicy = className;
    }
    if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
      continue;
    }
    // FIFOCompaction
    String message = null;

    // 1. Check TTL
    if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
      message = "Default TTL is not supported for FIFO compaction";
      throw new IOException(message);
    }

    // 2. Check min versions
    if (hcd.getMinVersions() > 0) {
      message = "MIN_VERSION > 0 is not supported for FIFO compaction";
      throw new IOException(message);
    }

    // 3. blocking file count
    sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
      blockingFileCount = Integer.parseInt(sv);
    }
    if (blockingFileCount < 1000) {
      message =
          "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount +
              " is below recommended minimum of 1000 for column family " + hcd.getNameAsString();
      throw new IOException(message);
    }
  }
}
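The check above only passes when a column family that opts into FIFO compaction also has a non-default TTL, MIN_VERSIONS of 0, and a blocking store file count of at least 1000. Below is a minimal sketch of building such a descriptor, assuming the server-side classes (DefaultStoreEngine, HStore, FIFOCompactionPolicy) are on the classpath; the table name, family name, TTL value, and class name are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.hadoop.hbase.util.Bytes;

public class FifoCompactionDescriptorSketch {
  public static TableDescriptor buildFifoDescriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("fifo_demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            // FIFO compaction requires an explicit, non-default TTL.
            .setTimeToLive(24 * 60 * 60) // one day, illustrative
            // MIN_VERSIONS must remain 0.
            .setMinVersions(0)
            // Blocking store file count must be at least 1000 for the check to pass.
            .setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000")
            // Opt this family into FIFO compaction; same key the checker reads.
            .setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
                FIFOCompactionPolicy.class.getName())
            .build())
        .build();
  }
}

A descriptor built this way would satisfy checkCompactionPolicy, since the per-family configuration values are exactly what the checker reads via getConfigurationValue and getValue.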