Java Code Examples for org.apache.hadoop.hbase.client.TableDescriptor#getColumnFamily()

The following examples show how to use org.apache.hadoop.hbase.client.TableDescriptor#getColumnFamily(). Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
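
A minimal usage sketch first (illustrative only): getColumnFamily() takes the family name as a byte[] and returns the matching ColumnFamilyDescriptor, or null if the table has no such family. The configuration object, the table name "my_table", and the family name "cf" below are placeholders, not taken from any of the projects listed here.

// Minimal sketch, assuming an existing Configuration named conf.
// "my_table" and "cf" are placeholder names used only for illustration.
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  TableDescriptor tableDescriptor = admin.getDescriptor(TableName.valueOf("my_table"));
  ColumnFamilyDescriptor family = tableDescriptor.getColumnFamily(Bytes.toBytes("cf"));
  if (family == null) {
    throw new IOException("Column family 'cf' does not exist in table 'my_table'");
  }
  System.out.println("Max versions for 'cf': " + family.getMaxVersions());
}
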
Example 1
Source File: TestDeleteMobTable.java    From hbase with Apache License 2.0
@Test
public void testDeleteMobTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptor tableDescriptor = createTableDescriptor(tableName, true);
  ColumnFamilyDescriptor familyDescriptor = tableDescriptor.getColumnFamily(FAMILY);

  String fileName = null;
  Table table = createTableWithOneFile(tableDescriptor);
  try {
    // the mob file exists
    Assert.assertEquals(1, countMobFiles(tableName, familyDescriptor.getNameAsString()));
    Assert.assertEquals(0, countArchiveMobFiles(tableName, familyDescriptor.getNameAsString()));
    fileName = assertHasOneMobRow(table, tableName, familyDescriptor.getNameAsString());
    Assert.assertFalse(mobArchiveExist(tableName, familyDescriptor.getNameAsString(), fileName));
    Assert.assertTrue(mobTableDirExist(tableName));
  } finally {
    table.close();
    TEST_UTIL.deleteTable(tableName);
  }

  Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName));
  Assert.assertEquals(0, countMobFiles(tableName, familyDescriptor.getNameAsString()));
  Assert.assertEquals(1, countArchiveMobFiles(tableName, familyDescriptor.getNameAsString()));
  Assert.assertTrue(mobArchiveExist(tableName, familyDescriptor.getNameAsString(), fileName));
  Assert.assertFalse(mobTableDirExist(tableName));
}
 
Example 2
Source File: TestDeleteMobTable.java    From hbase with Apache License 2.0
@Test
public void testDeleteNonMobTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptor htd = createTableDescriptor(tableName, false);
  ColumnFamilyDescriptor hcd = htd.getColumnFamily(FAMILY);

  Table table = createTableWithOneFile(htd);
  try {
    // the mob file doesn't exist
    Assert.assertEquals(0, countMobFiles(tableName, hcd.getNameAsString()));
    Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd.getNameAsString()));
    Assert.assertFalse(mobTableDirExist(tableName));
  } finally {
    table.close();
    TEST_UTIL.deleteTable(tableName);
  }

  Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName));
  Assert.assertEquals(0, countMobFiles(tableName, hcd.getNameAsString()));
  Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd.getNameAsString()));
  Assert.assertFalse(mobTableDirExist(tableName));
}
 
Example 3
Source File: AddColumnAction.java    From hbase with Apache License 2.0
@Override
public void perform() throws Exception {
  TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor columnDescriptor = null;

  while (columnDescriptor == null
      || tableDescriptor.getColumnFamily(columnDescriptor.getName()) != null) {
    columnDescriptor = ColumnFamilyDescriptorBuilder.of(RandomStringUtils.randomAlphabetic(5));
  }

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }

  getLogger().debug("Performing action: Adding " + columnDescriptor + " to " + tableName);

  TableDescriptor modifiedTable = TableDescriptorBuilder.newBuilder(tableDescriptor)
      .setColumnFamily(columnDescriptor).build();
  admin.modifyTable(modifiedTable);
}
 
Example 4
Source File: HBaseShims.java    From phoenix-omid with Apache License 2.0
public static boolean OmidCompactionEnabled(ObserverContext<RegionCoprocessorEnvironment> env,
                              Store store,
                              String cfFlagValue) {
    TableDescriptor desc = env.getEnvironment().getRegion().getTableDescriptor();
    ColumnFamilyDescriptor famDesc = desc.getColumnFamily(Bytes.toBytes(store.getColumnFamilyName()));
    return Boolean.valueOf(Bytes.toString(famDesc.getValue(Bytes.toBytes(cfFlagValue))));
}
 
Example 5
Source File: HBaseShims.java    From phoenix-omid with Apache License 2.0
public static void setCompaction(Connection conn, TableName table, byte[] columnFamily, String key, String value)
        throws IOException {
    try(Admin admin = conn.getAdmin()) {
        TableDescriptor desc = admin.getDescriptor(table);
        ColumnFamilyDescriptor cfDesc = desc.getColumnFamily(columnFamily);
        ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfDesc);
        cfBuilder.setValue(Bytes.toBytes(key),Bytes.toBytes(value));
        admin.modifyColumnFamily(table, cfBuilder.build());
    }
}
 
Example 6
Source File: ExpiredMobFileCleaner.java    From hbase with Apache License 2.0
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
    justification="Intentional")
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    printUsage();
    return 1;
  }
  String tableName = args[0];
  String familyName = args[1];
  TableName tn = TableName.valueOf(tableName);
  Connection connection = ConnectionFactory.createConnection(getConf());
  Admin admin = connection.getAdmin();
  try {
    TableDescriptor htd = admin.getDescriptor(tn);
    ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes(familyName));
    if (family == null || !family.isMobEnabled()) {
      throw new IOException("Column family " + familyName + " is not a MOB column family");
    }
    if (family.getMinVersions() > 0) {
      throw new IOException(
          "The minVersions of the column family is not 0, could not be handled by this cleaner");
    }
    cleanExpiredMobFiles(tableName, family);
    return 0;
  } finally {
    admin.close();
    try {
      connection.close();
    } catch (IOException e) {
      LOG.error("Failed to close the connection.", e);
    }
  }
}
 
Example 7
Source File: MasterProcedureTestingUtility.java    From hbase with Apache License 2.0
public static void validateColumnFamilyModification(final HMaster master,
    final TableName tableName, final String family, ColumnFamilyDescriptor columnDescriptor)
    throws IOException {
  TableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue(htd != null);

  ColumnFamilyDescriptor hcfd = htd.getColumnFamily(Bytes.toBytes(family));
  assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
}
 
Example 8
Source File: TestMajorCompactorTTL.java    From hbase with Apache License 2.0
protected void modifyTTL(TableName tableName) throws IOException, InterruptedException {
  // Set the TTL to 5 secs, so all the files just written above will get cleaned up on compact.
  admin.disableTable(tableName);
  utility.waitTableDisabled(tableName.getName());
  TableDescriptor descriptor = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor colDesc = descriptor.getColumnFamily(FAMILY);
  ColumnFamilyDescriptorBuilder cFDB = ColumnFamilyDescriptorBuilder.newBuilder(colDesc);
  cFDB.setTimeToLive(5);
  admin.modifyColumnFamily(tableName, cFDB.build());
  admin.enableTable(tableName);
  utility.waitTableEnabled(tableName);
}
 
Example 9
Source File: TestSecureBulkLoadManager.java    From hbase with Apache License 2.0
private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception {
  TableDescriptor desc = testUtil.getAdmin().getDescriptor(TABLE);
  ColumnFamilyDescriptor family = desc.getColumnFamily(FAMILY);
  Compression.Algorithm compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;

  CacheConfig writerCacheConf = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP);
  writerCacheConf.setCacheDataOnWrite(false);
  HFileContext hFileContext = new HFileContextBuilder()
      .withIncludesMvcc(false)
      .withIncludesTags(true)
      .withCompression(compression)
      .withCompressTags(family.isCompressTags())
      .withChecksumType(HStore.getChecksumType(conf))
      .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
      .withBlockSize(family.getBlocksize())
      .withHBaseCheckSum(true)
      .withDataBlockEncoding(family.getDataBlockEncoding())
      .withEncryptionContext(Encryption.Context.NONE)
      .withCreateTime(EnvironmentEdgeManager.currentTime())
      .build();
  StoreFileWriter.Builder builder =
      new StoreFileWriter.Builder(conf, writerCacheConf, dir.getFileSystem(conf))
      .withOutputDir(new Path(dir, family.getNameAsString()))
      .withBloomType(family.getBloomFilterType())
      .withMaxKeyCount(Integer.MAX_VALUE)
      .withFileContext(hFileContext);
  StoreFileWriter writer = builder.build();

  Put put = new Put(key);
  put.addColumn(FAMILY, COLUMN, value);
  for (Cell c : put.get(FAMILY, COLUMN)) {
    writer.append(c);
  }

  writer.close();
}
 
Example 10
Source File: CompactionTool.java    From hbase with Apache License 2.0
private static HStore getStore(final Configuration conf, final FileSystem fs,
    final Path tableDir, final TableDescriptor htd, final RegionInfo hri,
    final String familyName) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri);
  HRegion region = new HRegion(regionFs, null, conf, htd, null);
  return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf, false);
}
 
Example 11
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
/**
 * Make sure that all tables have necessary column family properties in sync
 * with each other and also in sync with all the table's indexes
 * See PHOENIX-3955
 * @param conn Phoenix connection
 * @param admin HBase admin used for getting existing tables and their descriptors
 * @throws SQLException
 * @throws IOException
 */
public static void syncTableAndIndexProperties(PhoenixConnection conn, Admin admin)
throws SQLException, IOException {
    Set<TableDescriptor> tableDescriptorsToSynchronize = new HashSet<>();
    for (TableDescriptor origTableDesc : admin.listTableDescriptors()) {
        if (MetaDataUtil.isViewIndex(origTableDesc.getTableName().getNameWithNamespaceInclAsString())) {
            // Ignore physical view index tables since we handle them for each base table already
            continue;
        }
        PTable table;
        String fullTableName = SchemaUtil.getPhysicalTableName(
                origTableDesc.getTableName().getName(),
                SchemaUtil.isNamespaceMappingEnabled(
                        null, conn.getQueryServices().getProps())).getNameAsString();
        try {
            // Use this getTable API to get the latest PTable
            table = PhoenixRuntime.getTable(conn, null, fullTableName);
        } catch (TableNotFoundException e) {
            // Ignore tables not mapped to a Phoenix Table
            LOGGER.warn("Error getting PTable for HBase table: " + fullTableName);
            continue;
        }
        if (table.getType() == PTableType.INDEX) {
            // Ignore global index tables since we handle them for each base table already
            continue;
        }
        syncUpdateCacheFreqAllIndexes(conn, table);
        ColumnFamilyDescriptor defaultColFam = origTableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table));
        Map<String, Object> syncedProps = MetaDataUtil.getSyncedProps(defaultColFam);

        addTableDescIfPropsChanged(origTableDesc, defaultColFam, syncedProps, tableDescriptorsToSynchronize);
        syncGlobalIndexesForTable(conn.getQueryServices(), table, defaultColFam, syncedProps, tableDescriptorsToSynchronize);
        syncViewIndexTable(conn.getQueryServices(), table, defaultColFam, syncedProps, tableDescriptorsToSynchronize);
    }
    for (TableDescriptor t: tableDescriptorsToSynchronize) {
        admin.modifyTable(t);
    }
}
 
Example 12
Source File: UngroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
private void checkForLocalIndexColumnFamilies(Region region,
        List<IndexMaintainer> indexMaintainers) throws IOException {
    TableDescriptor tableDesc = region.getTableDescriptor();
    String schemaName =
            tableDesc.getTableName().getNamespaceAsString()
                    .equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR) ? SchemaUtil
                    .getSchemaNameFromFullName(tableDesc.getTableName().getNameAsString())
                    : tableDesc.getTableName().getNamespaceAsString();
    String tableName = SchemaUtil.getTableNameFromFullName(tableDesc.getTableName().getNameAsString());
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        Set<ColumnReference> coveredColumns = indexMaintainer.getCoveredColumns();
        if(coveredColumns.isEmpty()) {
            byte[] localIndexCf = indexMaintainer.getEmptyKeyValueFamily().get();
            // When covered columns empty we store index data in default column family so check for it.
            if (tableDesc.getColumnFamily(localIndexCf) == null) {
                ServerUtil.throwIOException("Column Family Not Found",
                    new ColumnFamilyNotFoundException(schemaName, tableName, Bytes
                            .toString(localIndexCf)));
            }
        }
        for (ColumnReference reference : coveredColumns) {
            byte[] cf = IndexUtil.getLocalIndexColumnFamily(reference.getFamily());
            ColumnFamilyDescriptor family = region.getTableDescriptor().getColumnFamily(cf);
            if (family == null) {
                ServerUtil.throwIOException("Column Family Not Found",
                    new ColumnFamilyNotFoundException(schemaName, tableName, Bytes.toString(cf)));
            }
        }
    }
}
 
Example 13
Source File: TestUtil.java    From phoenix with Apache License 2.0
public static ColumnFamilyDescriptor getColumnDescriptor(Connection conn, TableName tableName)
    throws SQLException, IOException {
    Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    TableDescriptor td = admin.getDescriptor(tableName);
    return td.getColumnFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
}
 
Example 14
Source File: HBaseStoreManager.java    From atlas with Apache License 2.0
private TableDescriptor ensureTableExists(String tableName, String initialCFName, int ttlInSeconds) throws BackendException {
    AdminMask adm = null;

    TableDescriptor desc;

    try { // Create our table, if necessary
        adm = getAdminInterface();
        /*
         * Some HBase versions/impls respond badly to attempts to create a
         * table without at least one CF. See #661. Creating a CF along with
         * the table avoids HBase carping.
         */
        if (adm.tableExists(tableName)) {
            desc = adm.getTableDescriptor(tableName);
            // Check and warn if long and short cf names are mixedly used for the same table.
            if (shortCfNames && initialCFName.equals(shortCfNameMap.get(SYSTEM_PROPERTIES_STORE_NAME))) {
                String longCFName = shortCfNameMap.inverse().get(initialCFName);
                if (desc.getColumnFamily(Bytes.toBytes(longCFName)) != null) {
                    logger.warn("Configuration {}=true, but the table \"{}\" already has column family with long name \"{}\".",
                        SHORT_CF_NAMES.getName(), tableName, longCFName);
                    logger.warn("Check {} configuration.", SHORT_CF_NAMES.getName());
                }
            }
            else if (!shortCfNames && initialCFName.equals(SYSTEM_PROPERTIES_STORE_NAME)) {
                String shortCFName = shortCfNameMap.get(initialCFName);
                if (desc.getColumnFamily(Bytes.toBytes(shortCFName)) != null) {
                    logger.warn("Configuration {}=false, but the table \"{}\" already has column family with short name \"{}\".",
                        SHORT_CF_NAMES.getName(), tableName, shortCFName);
                    logger.warn("Check {} configuration.", SHORT_CF_NAMES.getName());
                }
            }
        } else {
            desc = createTable(tableName, initialCFName, ttlInSeconds, adm);
        }
    } catch (IOException e) {
        throw new TemporaryBackendException(e);
    } finally {
        IOUtils.closeQuietly(adm);
    }

    return desc;
}
 
Example 15
Source File: QueryDatabaseMetaDataIT.java    From phoenix with Apache License 2.0
@Test
public void testCreateOnExistingTable() throws Exception {
    try (PhoenixConnection pconn =
            DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        String tableName = generateUniqueName();// MDTEST_NAME;
        String schemaName = "";// MDTEST_SCHEMA_NAME;
        byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
        byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
        byte[] cfC = Bytes.toBytes("c");
        byte[][] familyNames = new byte[][] { cfB, cfC };
        byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
        Admin admin = pconn.getQueryServices().getAdmin();
        try {
            admin.disableTable(TableName.valueOf(htableName));
            admin.deleteTable(TableName.valueOf(htableName));
            admin.enableTable(TableName.valueOf(htableName));
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
        }

        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
        for (byte[] familyName : familyNames) {
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
        }
        admin.createTable(builder.build());
        createMDTestTable(pconn, tableName,
            "a." + ColumnFamilyDescriptorBuilder.BLOCKSIZE+ "=" + 50000);

        TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(htableName));
        assertEquals(3, descriptor.getColumnFamilies().length);
        ColumnFamilyDescriptor cdA = descriptor.getColumnFamily(cfA);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
        assertNotEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdA.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
        assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
        ColumnFamilyDescriptor cdB = descriptor.getColumnFamily(cfB);
        // Allow KEEP_DELETED_CELLS to be false for VIEW
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdB.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the
                                                                          // original value.
        // CF c should stay the same since it's not a Phoenix cf.
        ColumnFamilyDescriptor cdC = descriptor.getColumnFamily(cfC);
        assertNotNull("Column family not found", cdC);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdC.getBlocksize());
        assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
        assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
        admin.close();

        int rowCount = 5;
        String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
        PreparedStatement ps = pconn.prepareStatement(upsert);
        for (int i = 0; i < rowCount; i++) {
            ps.setString(1, Integer.toString(i));
            ps.setInt(2, i + 1);
            ps.setInt(3, i + 2);
            ps.execute();
        }
        pconn.commit();
        String query = "SELECT count(1) FROM " + tableName;
        ResultSet rs = pconn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals(rowCount, rs.getLong(1));

        query = "SELECT id, col1,col2 FROM " + tableName;
        rs = pconn.createStatement().executeQuery(query);
        for (int i = 0; i < rowCount; i++) {
            assertTrue(rs.next());
            assertEquals(Integer.toString(i), rs.getString(1));
            assertEquals(i + 1, rs.getInt(2));
            assertEquals(i + 2, rs.getInt(3));
        }
        assertFalse(rs.next());
    }
}
 
Example 16
Source File: IntegrationTestDDLMasterFailover.java    From hbase with Apache License 2.0
@Override
void perform() throws IOException {
  TableDescriptor selected = selectTable(disabledTables);
  if (selected == null) {
    return;
  }
  ColumnFamilyDescriptor columnDesc = selectFamily(selected);
  if (columnDesc == null){
    return;
  }

  Admin admin = connection.getAdmin();
  try {
    TableName tableName = selected.getTableName();
    // possible DataBlockEncoding ids
    DataBlockEncoding[] possibleIds = {DataBlockEncoding.NONE, DataBlockEncoding.PREFIX,
            DataBlockEncoding.DIFF, DataBlockEncoding.FAST_DIFF, DataBlockEncoding.ROW_INDEX_V1};
    short id = possibleIds[RandomUtils.nextInt(0, possibleIds.length)].getId();
    LOG.info("Altering encoding of column family: " + columnDesc + " to: " + id +
        " in table: " + tableName);

    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(columnDesc)
        .setDataBlockEncoding(DataBlockEncoding.getEncodingById(id))
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(selected)
        .modifyColumnFamily(cfd)
        .build();
    admin.modifyTable(td);

    // assertion
    TableDescriptor freshTableDesc = admin.getDescriptor(tableName);
    ColumnFamilyDescriptor freshColumnDesc = freshTableDesc.getColumnFamily(columnDesc.getName());
    Assert.assertEquals("Encoding of column family: " + columnDesc + " was not altered",
        freshColumnDesc.getDataBlockEncoding().getId(), id);
    Assert.assertTrue(
      "After alter encoding of column family, Table: " + tableName + " is not disabled",
      admin.isTableDisabled(tableName));
    disabledTables.put(tableName, freshTableDesc);
    LOG.info("Altered encoding of column family: " + freshColumnDesc + " to: " + id +
      " in table: " + tableName);
  } catch (Exception e) {
    LOG.warn("Caught exception in action: " + this.getClass());
    throw e;
  } finally {
    admin.close();
  }
}
 
Example 17
Source File: IntegrationTestDDLMasterFailover.java    From hbase with Apache License 2.0
@Override
void perform() throws IOException {
  TableDescriptor selected = selectTable(disabledTables);
  if (selected == null) {
    return;
  }
  ColumnFamilyDescriptor columnDesc = selectFamily(selected);
  if (columnDesc == null){
    return;
  }

  Admin admin = connection.getAdmin();
  int versions = RandomUtils.nextInt(0, 10) + 3;
  try {
    TableName tableName = selected.getTableName();
    LOG.info("Altering versions of column family: " + columnDesc + " to: " + versions +
        " in table: " + tableName);

    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(columnDesc)
        .setMinVersions(versions)
        .setMaxVersions(versions)
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(selected)
        .modifyColumnFamily(cfd)
        .build();
    admin.modifyTable(td);

    // assertion
    TableDescriptor freshTableDesc = admin.getDescriptor(tableName);
    ColumnFamilyDescriptor freshColumnDesc = freshTableDesc.getColumnFamily(columnDesc.getName());
    Assert.assertEquals("Column family: " + columnDesc + " was not altered",
        freshColumnDesc.getMaxVersions(), versions);
    Assert.assertEquals("Column family: " + freshColumnDesc + " was not altered",
        freshColumnDesc.getMinVersions(), versions);
    Assert.assertTrue(
      "After alter versions of column family, Table: " + tableName + " is not disabled",
      admin.isTableDisabled(tableName));
    disabledTables.put(tableName, freshTableDesc);
    LOG.info("Altered versions of column family: " + columnDesc + " to: " + versions +
      " in table: " + tableName);
  } catch (Exception e) {
    LOG.warn("Caught exception in action: " + this.getClass());
    throw e;
  } finally {
    admin.close();
  }
}
 
Example 18
Source File: LoadTestTool.java    From hbase with Apache License 2.0
/**
 * Apply column family options such as Bloom filters, compression, and data
 * block encoding.
 */
protected void applyColumnFamilyOptions(TableName tableName,
    byte[][] columnFamilies) throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    TableDescriptor tableDesc = admin.getDescriptor(tableName);
    LOG.info("Disabling table " + tableName);
    admin.disableTable(tableName);
    for (byte[] cf : columnFamilies) {
      ColumnFamilyDescriptor columnDesc = tableDesc.getColumnFamily(cf);
      boolean isNewCf = columnDesc == null;
      ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf ?
          ColumnFamilyDescriptorBuilder.newBuilder(cf) :
          ColumnFamilyDescriptorBuilder.newBuilder(columnDesc);
      if (bloomType != null) {
        columnDescBuilder.setBloomFilterType(bloomType);
      }
      if (compressAlgo != null) {
        columnDescBuilder.setCompressionType(compressAlgo);
      }
      if (dataBlockEncodingAlgo != null) {
        columnDescBuilder.setDataBlockEncoding(dataBlockEncodingAlgo);
      }
      if (inMemoryCF) {
        columnDescBuilder.setInMemory(inMemoryCF);
      }
      if (cipher != null) {
        byte[] keyBytes = new byte[cipher.getKeyLength()];
        new SecureRandom().nextBytes(keyBytes);
        columnDescBuilder.setEncryptionType(cipher.getName());
        columnDescBuilder.setEncryptionKey(
            EncryptionUtil.wrapKey(conf,
                User.getCurrent().getShortName(),
                new SecretKeySpec(keyBytes,
                    cipher.getName())));
      }
      if (mobThreshold >= 0) {
        columnDescBuilder.setMobEnabled(true);
        columnDescBuilder.setMobThreshold(mobThreshold);
      }

      if (isNewCf) {
        admin.addColumnFamily(tableName, columnDescBuilder.build());
      } else {
        admin.modifyColumnFamily(tableName, columnDescBuilder.build());
      }
    }
    LOG.info("Enabling table " + tableName);
    admin.enableTable(tableName);
  }
}
 
Example 19
Source File: MobRefReporter.java    From hbase with Apache License 2.0
/**
 * Main method for the tool.
 * @return 0 if success, 1 for bad args. 2 if job aborted with an exception,
 *   3 if mr job was unsuccessful
 */
public int run(String[] args) throws IOException, InterruptedException {
  // TODO make family and table optional
  if (args.length != 3) {
    printUsage();
    return 1;
  }
  final String output = args[0];
  final String tableName = args[1];
  final String familyName = args[2];
  final long reportStartTime = EnvironmentEdgeManager.currentTime();
  Configuration conf = getConf();
  try {
    FileSystem fs = FileSystem.get(conf);
    // check whether the current user is the same one with the owner of hbase root
    String currentUserName = UserGroupInformation.getCurrentUser().getShortUserName();
    FileStatus[] hbaseRootFileStat = fs.listStatus(new Path(conf.get(HConstants.HBASE_DIR)));
    if (hbaseRootFileStat.length > 0) {
      String owner = hbaseRootFileStat[0].getOwner();
      if (!owner.equals(currentUserName)) {
        String errorMsg = "The current user[" + currentUserName
            + "] does not have hbase root credentials."
            + " If this job fails due to an inability to read HBase's internal directories, "
            + "you will need to rerun as a user with sufficient permissions. The HBase superuser "
            + "is a safe choice.";
        LOG.warn(errorMsg);
      }
    } else {
      LOG.error("The passed configs point to an HBase dir does not exist: {}",
          conf.get(HConstants.HBASE_DIR));
      throw new IOException("The target HBase does not exist");
    }

    byte[] family;
    int maxVersions;
    TableName tn = TableName.valueOf(tableName);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor htd = admin.getDescriptor(tn);
      ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName));
      if (hcd == null || !hcd.isMobEnabled()) {
        throw new IOException("Column family " + familyName + " is not a MOB column family");
      }
      family = hcd.getName();
      maxVersions = hcd.getMaxVersions();
    }


    String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", "");
    Job job = null;
    Scan scan = new Scan();
    scan.addFamily(family);
    // Do not retrieve the mob data when scanning
    scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
    scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
    // If a scanner caching value isn't set, pick a smaller default since we know we're doing
    // a full table scan and don't want to impact other clients badly.
    scan.setCaching(conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, 10000));
    scan.setCacheBlocks(false);
    scan.readVersions(maxVersions);
    conf.set(REPORT_JOB_ID, id);

    job = Job.getInstance(conf);
    job.setJarByClass(getClass());
    TableMapReduceUtil.initTableMapperJob(tn, scan,
        MobRefMapper.class, Text.class, ImmutableBytesWritable.class, job);

    job.setReducerClass(MobRefReducer.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    TextOutputFormat.setOutputPath(job, new Path(output));

    job.setJobName(getClass().getSimpleName() + "-" + tn + "-" + familyName);
    // for use in the reducer. easier than re-parsing it out of the scan string.
    job.getConfiguration().set(TableInputFormat.SCAN_COLUMN_FAMILY, familyName);

    // Use when we start this job as the base point for file "recency".
    job.getConfiguration().setLong(REPORT_START_DATETIME, reportStartTime);

    if (job.waitForCompletion(true)) {
      LOG.info("Finished creating report for '{}', family='{}'", tn, familyName);
    } else {
      System.err.println("Job was not successful");
      return 3;
    }
    return 0;

  } catch (ClassNotFoundException | RuntimeException | IOException | InterruptedException e) {
    System.err.println("Job aborted due to exception " + e);
    return 2; // job failed
  }
}
 
Example 20
Source File: TestHBaseMetaEdit.java    From hbase with Apache License 2.0
/**
 * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
 * Delete the column family. Then try to delete a core hbase:meta family (should fail).
 * Verify they are all in place by looking at TableDescriptor AND by checking
 * what the RegionServer sees after opening Region.
 */
@Test
public void testEditMeta() throws IOException {
  Admin admin = UTIL.getAdmin();
  admin.tableExists(TableName.META_TABLE_NAME);
  TableDescriptor originalDescriptor = getMetaDescriptor();
  ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
  int oldVersions = cfd.getMaxVersions();
  // Add '1' to current versions count. Set encoding too.
  cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).
      setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
          DataBlockEncoding.ROW_INDEX_V1.toString()).build();
  admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
  byte [] extraColumnFamilyName = Bytes.toBytes("xtra");
  ColumnFamilyDescriptor newCfd =
    ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
  admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
  TableDescriptor descriptor = getMetaDescriptor();
  // Assert new max versions is == old versions plus 1.
  assertEquals(oldVersions + 1,
      descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
  descriptor = getMetaDescriptor();
  // Assert new max versions is == old versions plus 1.
  assertEquals(oldVersions + 1,
      descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
  assertTrue(descriptor.getColumnFamily(newCfd.getName()) != null);
  String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
      get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
  assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
  Region r = UTIL.getHBaseCluster().getRegionServer(0).
      getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
  assertEquals(oldVersions + 1,
      r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
  encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
      getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
  assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
  assertTrue(r.getStore(extraColumnFamilyName) != null);
  // Assert we can't drop critical hbase:meta column family but we can drop any other.
  admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName());
  descriptor = getMetaDescriptor();
  assertTrue(descriptor.getColumnFamily(newCfd.getName()) == null);
  try {
    admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY);
    fail("Should not reach here");
  } catch (HBaseIOException hioe) {
    assertTrue(hioe.getMessage().contains("Delete of hbase:meta"));
  }
}