Java Code Examples for org.apache.hadoop.hbase.client.Admin#snapshot()

The following examples show how to use org.apache.hadoop.hbase.client.Admin#snapshot(). Each example is drawn from an open-source project; the source file, project, and license are noted above its code.
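Before the project-specific examples, here is a minimal, self-contained sketch of the basic call pattern: open a Connection, obtain an Admin, and take an online snapshot with Admin#snapshot(String, TableName). This is only an illustrative sketch; the table name "my_table" and the snapshot name "my_table-snapshot" are placeholders, and it assumes a reachable cluster configured through an hbase-site.xml on the classpath.

// Minimal usage sketch; "my_table" and "my_table-snapshot" are placeholder names.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminSnapshotSketch {
    public static void main(String[] args) throws Exception {
        // Loads hbase-site.xml / hbase-default.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Take an online snapshot; the table stays enabled while the snapshot is taken.
            admin.snapshot("my_table-snapshot", TableName.valueOf("my_table"));
        }
    }
}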
Example 1
Source File: HBaseMetadata.java    From presto-hbase-connector with Apache License 2.0
/**
 * Creates a snapshot of the given table.
 *
 * @param snapshotName snapshot name
 * @param admin        HBase admin client
 * @param schemaName   schema (namespace) name
 * @param tableName    table name
 * @throws IOException if the snapshot cannot be created
 */
public static void createSnapshot(String snapshotName,
                                  Admin admin,
                                  String schemaName,
                                  String tableName) throws IOException {
    long start = System.currentTimeMillis();
    String fullTableName;
    if (Constant.HBASE_NAMESPACE_DEFAULT.equals(schemaName)
            || "".equals(schemaName)) {
        fullTableName = tableName;
    } else {
        fullTableName = schemaName + ":" + tableName;
    }
    HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
            .setName(snapshotName)
            .setTable(fullTableName)
            .setType(HBaseProtos.SnapshotDescription.Type.FLUSH)
            // .setType(HBaseProtos.SnapshotDescription.Type.DISABLED)
            .build();
    admin.snapshot(snapshot);
    log.info("createSnapshot: create snapshot " + snapshotName
            + " used " + (System.currentTimeMillis() - start) + " mill seconds.");
}
 
Example 2
Source File: TestSystemTableSnapshot.java    From hbase with Apache License 2.0
/**
 * Verify backup system table snapshot.
 *
 * @throws Exception if an operation on the table fails
 */
// @Test
public void _testBackupRestoreSystemTable() throws Exception {
  LOG.info("test snapshot system table");

  TableName backupSystem = BackupSystemTable.getTableName(conf1);

  Admin hba = TEST_UTIL.getAdmin();
  String snapshotName = "sysTable";
  hba.snapshot(snapshotName, backupSystem);

  hba.disableTable(backupSystem);
  hba.restoreSnapshot(snapshotName);
  hba.enableTable(backupSystem);
  hba.close();
}
 
Example 3
Source File: CreateSnapshot.java    From hbase with Apache License 2.0
@Override
protected int doWork() throws Exception {
    Connection connection = null;
    Admin admin = null;
    try {
        connection = ConnectionFactory.createConnection(getConf());
        admin = connection.getAdmin();
        admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType));
    } catch (Exception e) {
        System.err.println("failed to take the snapshot: " + e.getMessage());
        return -1;
    } finally {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    return 0;
}
 
Example 4
Source File: TestCloneSnapshotProcedure.java    From hbase with Apache License 2.0
private SnapshotProtos.SnapshotDescription getSnapshot() throws Exception {
  if (snapshot == null) {
    final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
    long tid = System.currentTimeMillis();
    final String snapshotName = "snapshot-" + tid;

    Admin admin = UTIL.getAdmin();
    // create Table
    SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF);
    // Load data
    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, 500, CF);
    admin.disableTable(snapshotTableName);
    // take a snapshot
    admin.snapshot(snapshotName, snapshotTableName);
    admin.enableTable(snapshotTableName);

    List<SnapshotDescription> snapshotList = admin.listSnapshots();
    snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));
  }
  return snapshot;
}
 
Example 5
Source File: SnapshotTableAction.java    From hbase with Apache License 2.0
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  String snapshotName = tableName + "-it-" + System.currentTimeMillis();
  Admin admin = util.getAdmin();

  // Don't try the snapshot if we're stopping
  if (context.isStopping()) {
    return;
  }

  getLogger().info("Performing action: Snapshot table {}", tableName);
  admin.snapshot(snapshotName, tableName);
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
Example 6
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
public static void mapTableToNamespace(Admin admin, String srcTableName, String destTableName, PTableType pTableType) throws IOException {
    TableName srcTable = TableName.valueOf(SchemaUtil.normalizeIdentifier(srcTableName));
    TableName dstTable = TableName.valueOf(destTableName);
    boolean srcTableExists=admin.tableExists(srcTable);
    // we need to move physical table in actual namespace for TABLE and Index
    if (srcTableExists && (PTableType.TABLE.equals(pTableType)
            || PTableType.INDEX.equals(pTableType) || PTableType.SYSTEM.equals(pTableType))) {
        boolean destTableExists=admin.tableExists(dstTable);
        if (!destTableExists) {
            String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
            LOGGER.info("Disabling table " + srcTableName + " ..");
            admin.disableTable(srcTable);
            LOGGER.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
            admin.snapshot(snapshotName, srcTable);
            LOGGER.info(
                    String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
            admin.cloneSnapshot(snapshotName, dstTable);
            LOGGER.info(String.format("deleting old table %s..", srcTableName));
            admin.deleteTable(srcTable);
            LOGGER.info(String.format("deleting snapshot %s..", snapshotName));
            admin.deleteSnapshot(snapshotName);
        } else {
            LOGGER.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
        }
    }
}
 
Example 7
Source File: UpdateStatisticsTool.java    From phoenix with Apache License 2.0
/**
 * Runs any tasks before the MR job is launched.
 * Currently used for snapshot creation.
 */
private void preJobTask() throws Exception {
    if (!manageSnapshot) {
        return;
    }

    try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) {
        Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
        boolean namespaceMapping = getConf().getBoolean(IS_NAMESPACE_MAPPING_ENABLED,
                DEFAULT_IS_NAMESPACE_MAPPING_ENABLED);
        String physicalTableName =  SchemaUtil.getPhysicalTableName(tableName.getBytes(),
                namespaceMapping).getNameAsString();
        admin.snapshot(snapshotName, TableName.valueOf(physicalTableName));
        LOGGER.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName);
    }
}
 
Example 8
Source File: TableSnapshotReadsMapReduceIT.java    From phoenix with Apache License 2.0
private void upsertAndSnapshot(String tableName, boolean shouldSplit) throws Exception {
  upsertData(tableName);

  TableName hbaseTableName = TableName.valueOf(tableName);
  Connection conn = DriverManager.getConnection(getUrl());
  Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();

  if (shouldSplit) {
    splitTableSync(admin, hbaseTableName, "BBBB".getBytes(), 2);
  }

  admin.snapshot(SNAPSHOT_NAME, hbaseTableName);

  List<SnapshotDescription> snapshots = admin.listSnapshots();
  Assert.assertEquals(tableName, snapshots.get(0).getTable());

  // Capture the snapshot timestamp to use as SCN while reading the table later
  // Assigning the timestamp value here will make tests less flaky
  timestamp = System.currentTimeMillis();

  // upsert data after snapshot
  PreparedStatement stmt = conn.prepareStatement(String.format(UPSERT, tableName));
  upsertData(stmt, "DDDD", "SNFB", 45);
  conn.commit();
}
 
Example 9
Source File: FullTableBackupClient.java    From hbase with Apache License 2.0
protected void snapshotTable(Admin admin, TableName tableName, String snapshotName)
    throws IOException {
  int maxAttempts =
      conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
  int pause =
      conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
  int attempts = 0;

  while (attempts++ < maxAttempts) {
    try {
      admin.snapshot(snapshotName, tableName);
      return;
    } catch (IOException ee) {
      LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName
          + ", sleeping for " + pause + "ms", ee);
      if (attempts < maxAttempts) {
        try {
          Thread.sleep(pause);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          break;
        }
      }
    }
  }
  throw new IOException("Failed to snapshot table "+ tableName);
}
 
Example 10
Source File: SnapshotTestingUtils.java    From hbase with Apache License 2.0
public static void snapshot(Admin admin, final String snapshotName, final TableName tableName,
    final SnapshotType type, final int numTries) throws IOException {
  int tries = 0;
  CorruptedSnapshotException lastEx = null;
  while (tries++ < numTries) {
    try {
      admin.snapshot(snapshotName, tableName, type);
      return;
    } catch (CorruptedSnapshotException cse) {
      LOG.warn("Got CorruptedSnapshotException", cse);
      lastEx = cse;
    }
  }
  throw lastEx;
}
 
Example 11
Source File: SnapshotTestingUtils.java    From hbase with Apache License 2.0
/**
 * Take a snapshot of the specified table and verify the given families.
 * Note that this will leave the table disabled in the case of an offline snapshot.
 */
public static void createSnapshotAndValidate(Admin admin,
    TableName tableName, List<byte[]> nonEmptyFamilyNames, List<byte[]> emptyFamilyNames,
    String snapshotNameString, Path rootDir, FileSystem fs, boolean onlineSnapshot)
      throws Exception {
  if (!onlineSnapshot) {
    try {
      LOG.info("prepping for offline snapshot.");
      admin.disableTable(tableName);
    } catch (TableNotEnabledException tne) {
      LOG.info("In attempting to disable " + tableName + " it turns out that the this table is " +
          "already disabled.");
    }
  }
  LOG.info("taking snapshot.");
  admin.snapshot(snapshotNameString, tableName);

  LOG.info("Confirming snapshot exists.");
  List<SnapshotDescription> snapshots =
      SnapshotTestingUtils.assertExistsMatchingSnapshot(admin, snapshotNameString, tableName);
  if (snapshots == null || snapshots.size() != 1) {
    Assert.fail("Incorrect number of snapshots for table " + tableName);
  }

  LOG.info("validating snapshot.");
  SnapshotTestingUtils.confirmSnapshotValid(
    ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), tableName, nonEmptyFamilyNames,
    emptyFamilyNames, rootDir, admin, fs);
}
 
Example 12
Source File: TestFileArchiverNotifierImpl.java    From hbase with Apache License 2.0
@Test
public void testIncrementalFileArchiving() throws Exception {
  final Admin admin = TEST_UTIL.getAdmin();
  final TableName tn = TableName.valueOf(testName.getMethodName());
  if (admin.tableExists(tn)) {
    admin.disableTable(tn);
    admin.deleteTable(tn);
  }
  final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
  final TableName tn1 = helper.createTableWithRegions(1);
  admin.setQuota(QuotaSettingsFactory.limitTableSpace(
      tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

  // Write some data and flush it
  helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
  admin.flush(tn1);

  // Create a snapshot on the table
  final String snapshotName1 = tn1 + "snapshot1";
  admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));

  FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn);
  long t1 = notifier.getLastFullCompute();
  long snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1));
  assertEquals("The size of the snapshots should be zero", 0, snapshotSize);
  assertTrue("Last compute time was not less than current compute time",
      t1 < notifier.getLastFullCompute());

  // No recently archived files and the snapshot should have no size
  assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));

  // Invoke the addArchivedFiles method with no files
  notifier.addArchivedFiles(Collections.emptySet());

  // The size should not have changed
  assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));

  notifier.addArchivedFiles(ImmutableSet.of(entry("a", 1024L), entry("b", 1024L)));

  // The size should not have changed
  assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));

  // Pull one file referenced by the snapshot out of the manifest
  Set<String> referencedFiles = getFilesReferencedBySnapshot(snapshotName1);
  assertTrue("Found snapshot referenced files: " + referencedFiles, referencedFiles.size() >= 1);
  String referencedFile = Iterables.getFirst(referencedFiles, null);
  assertNotNull(referencedFile);

  // Report that a file this snapshot referenced was moved to the archive. This is a sign
  // that the snapshot should now "own" the size of this file
  final long fakeFileSize = 2048L;
  notifier.addArchivedFiles(ImmutableSet.of(entry(referencedFile, fakeFileSize)));

  // Verify that the snapshot owns this file.
  assertEquals(fakeFileSize, extractSnapshotSize(quotaTable, tn, snapshotName1));

  // In reality, we did not actually move the file, so a "full" computation should re-set the
  // size of the snapshot back to 0.
  long t2 = notifier.getLastFullCompute();
  snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1));
  assertEquals(0, snapshotSize);
  assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));
  // We should also have no recently archived files after a re-computation
  assertTrue("Last compute time was not less than current compute time",
      t2 < notifier.getLastFullCompute());
}
 
Example 13
Source File: TestRestoreSnapshotProcedure.java    From hbase with Apache License 2.0
private void setupSnapshotAndUpdateTable() throws Exception {
  long tid = System.currentTimeMillis();
  final String snapshotName = "snapshot-" + tid;
  Admin admin = UTIL.getAdmin();
  // create Table
  SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF1, CF2);
  // Load data
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF1, CF1);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF2, CF2);
  SnapshotTestingUtils.verifyRowCount(UTIL, snapshotTableName, rowCountCF1 + rowCountCF2);

  snapshotHTD = admin.getDescriptor(snapshotTableName);

  admin.disableTable(snapshotTableName);
  // take a snapshot
  admin.snapshot(snapshotName, snapshotTableName);

  List<SnapshotDescription> snapshotList = admin.listSnapshots();
  snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));

  // modify the table
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor columnFamilyDescriptor3 =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(CF3);
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor columnFamilyDescriptor4 =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(CF4);
  admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor3);
  admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor4);
  admin.deleteColumnFamily(snapshotTableName, CF2);
  // enable table and insert data
  admin.enableTable(snapshotTableName);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF3, CF3);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF4, CF4);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF1addition, CF1);
  HTableDescriptor currentHTD = new HTableDescriptor(admin.getDescriptor(snapshotTableName));
  assertTrue(currentHTD.hasFamily(CF1));
  assertFalse(currentHTD.hasFamily(CF2));
  assertTrue(currentHTD.hasFamily(CF3));
  assertTrue(currentHTD.hasFamily(CF4));
  assertNotEquals(currentHTD.getFamiliesKeys().size(), snapshotHTD.getColumnFamilies().length);
  SnapshotTestingUtils.verifyRowCount(
    UTIL, snapshotTableName, rowCountCF1 + rowCountCF3 + rowCountCF4 + rowCountCF1addition);
  admin.disableTable(snapshotTableName);
}
 
Example 14
Source File: TestMasterObserver.java    From hbase with Apache License 2.0
@Test
public void testSnapshotOperations() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
  cp.resetStates();

  // create a table
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);

  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(TEST_FAMILY));
  Admin admin = UTIL.getAdmin();

  tableCreationLatch = new CountDownLatch(1);
  admin.createTable(tableDescriptor);
  tableCreationLatch.await();
  tableCreationLatch = new CountDownLatch(1);

  admin.disableTable(tableName);
  assertTrue(admin.isTableDisabled(tableName));

  try {
    // Test snapshot operation
    assertFalse("Coprocessor should not have been called yet",
      cp.wasSnapshotCalled());
    admin.snapshot(TEST_SNAPSHOT, tableName);
    assertTrue("Coprocessor should have been called on snapshot",
      cp.wasSnapshotCalled());

    //Test list operation
    admin.listSnapshots();
    assertTrue("Coprocessor should have been called on snapshot list",
      cp.wasListSnapshotCalled());

    // Test clone operation
    admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
    assertTrue("Coprocessor should have been called on snapshot clone",
      cp.wasCloneSnapshotCalled());
    assertFalse("Coprocessor restore should not have been called on snapshot clone",
      cp.wasRestoreSnapshotCalled());
    admin.disableTable(TEST_CLONE);
    assertTrue(admin.isTableDisabled(TEST_CLONE));
    deleteTable(admin, TEST_CLONE);

    // Test restore operation
    cp.resetStates();
    admin.restoreSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot restore",
      cp.wasRestoreSnapshotCalled());
    assertFalse("Coprocessor clone should not have been called on snapshot restore",
      cp.wasCloneSnapshotCalled());

    admin.deleteSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot delete",
      cp.wasDeleteSnapshotCalled());
  } finally {
    deleteTable(admin, tableName);
  }
}
 
Example 15
Source File: IndexScrutinyTool.java    From phoenix with Apache License 2.0
public Job createSubmittableJob(String schemaName, String indexTable, String dataTable,
        SourceTable sourceTable, Class<IndexScrutinyMapperForTest> mapperClass) throws Exception {
    Preconditions.checkArgument(SourceTable.DATA_TABLE_SOURCE.equals(sourceTable)
            || SourceTable.INDEX_TABLE_SOURCE.equals(sourceTable));

    final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable);
    final String qIndexTable;
    if (schemaName != null && !schemaName.isEmpty()) {
        qIndexTable = SchemaUtil.getQualifiedTableName(schemaName, indexTable);
    } else {
        qIndexTable = indexTable;
    }
    PhoenixConfigurationUtil.setScrutinyDataTable(configuration, qDataTable);
    PhoenixConfigurationUtil.setScrutinyIndexTable(configuration, qIndexTable);
    PhoenixConfigurationUtil.setScrutinySourceTable(configuration, sourceTable);
    PhoenixConfigurationUtil.setScrutinyOutputInvalidRows(configuration, outputInvalidRows);
    PhoenixConfigurationUtil.setScrutinyOutputMax(configuration, outputMaxRows);

    final PTable pdataTable = PhoenixRuntime.getTable(connection, qDataTable);
    final PTable pindexTable = PhoenixRuntime.getTable(connection, qIndexTable);

    // set CURRENT_SCN for our scan so that incoming writes don't throw off scrutiny
    configuration.set(PhoenixConfigurationUtil.CURRENT_SCN_VALUE, Long.toString(ts));

    // set the source table to either data or index table
    SourceTargetColumnNames columnNames =
            SourceTable.DATA_TABLE_SOURCE.equals(sourceTable)
                    ? new SourceTargetColumnNames.DataSourceColNames(pdataTable,
                            pindexTable)
                    : new SourceTargetColumnNames.IndexSourceColNames(pdataTable,
                            pindexTable);
    String qSourceTable = columnNames.getQualifiedSourceTableName();
    List<String> sourceColumnNames = columnNames.getSourceColNames();
    List<String> sourceDynamicCols = columnNames.getSourceDynamicCols();
    List<String> targetDynamicCols = columnNames.getTargetDynamicCols();

    // Setup the select query against source - we either select the index columns from the
    // index table,
    // or select the data table equivalents of the index columns from the data table
    final String selectQuery =
            QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames, null,
                Hint.NO_INDEX, true);
    LOGGER.info("Query used on source table to feed the mapper: " + selectQuery);

    PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat);
    // if outputting to table, setup the upsert to the output table
    if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) {
        String upsertStmt =
                IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols,
                    targetDynamicCols, connection);
        PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt);
        LOGGER.info("Upsert statement used for output table: " + upsertStmt);
    }

    final String jobName =
            String.format(INDEX_JOB_NAME_TEMPLATE, qSourceTable,
                columnNames.getQualifiedTargetTableName());
    final Job job = Job.getInstance(configuration, jobName);

    if (!useSnapshot) {
        PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable,
            selectQuery);
    } else { // TODO check if using a snapshot works
        Admin admin = null;
        String snapshotName;
        try {
            final PhoenixConnection pConnection =
                    connection.unwrap(PhoenixConnection.class);
            admin = pConnection.getQueryServices().getAdmin();
            String pdataTableName = pdataTable.getName().getString();
            snapshotName = new StringBuilder(pdataTableName).append("-Snapshot").toString();
            admin.snapshot(snapshotName, TableName.valueOf(pdataTableName));
        } finally {
            if (admin != null) {
                admin.close();
            }
        }
        // root dir not a subdirectory of hbase dir
        Path rootDir = new Path("hdfs:///index-snapshot-dir");
        FSUtils.setRootDir(configuration, rootDir);

        // set input for map reduce job using hbase snapshots
        //PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, snapshotName,
        //    qDataTable, restoreDir, selectQuery);
    }
    TableMapReduceUtil.initCredentials(job);
    Path outputPath =
            getOutputPath(configuration, basePath,
                SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) ? pdataTable
                        : pindexTable);

    return configureSubmittableJob(job, outputPath, mapperClass);
}
 
Example 16
Source File: IndexTool.java    From phoenix with Apache License 2.0
private Job configureJobForAsyncIndex() throws Exception {
    String physicalIndexTable = pIndexTable.getPhysicalName().getString();
    final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class);
    final PostIndexDDLCompiler ddlCompiler =
            new PostIndexDDLCompiler(pConnection, new TableRef(pDataTable));
    ddlCompiler.compile(pIndexTable);
    final List<String> indexColumns = ddlCompiler.getIndexColumnNames();
    final String selectQuery = ddlCompiler.getSelectQuery();
    final String upsertQuery =
            QueryUtil.constructUpsertStatement(qIndexTable, indexColumns, Hint.NO_INDEX);

    configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery);
    PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable);
    PhoenixConfigurationUtil.setDisableIndexes(configuration, indexTable);

    PhoenixConfigurationUtil.setUpsertColumnNames(configuration,
        indexColumns.toArray(new String[indexColumns.size()]));
    if (tenantId != null) {
        PhoenixConfigurationUtil.setTenantId(configuration, tenantId);
    }
    final List<ColumnInfo> columnMetadataList =
            PhoenixRuntime.generateColumnInfo(connection, qIndexTable, indexColumns);
    ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList);

    if (outputPath != null) {
        fs = outputPath.getFileSystem(configuration);
        fs.delete(outputPath, true);
    }
    final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable);
    final Job job = Job.getInstance(configuration, jobName);
    job.setJarByClass(IndexTool.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    if (outputPath != null) {
        FileOutputFormat.setOutputPath(job, outputPath);
    }

    if (!useSnapshot) {
        PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable, selectQuery);
    } else {
        Admin admin = null;
        String snapshotName;
        try {
            admin = pConnection.getQueryServices().getAdmin();
            String pdataTableName = pDataTable.getName().getString();
            snapshotName = new StringBuilder(pdataTableName).append("-Snapshot").toString();
            admin.snapshot(snapshotName, TableName.valueOf(pdataTableName));
        } finally {
            if (admin != null) {
                admin.close();
            }
        }
        // root dir not a subdirectory of hbase dir
        Path rootDir = new Path("hdfs:///index-snapshot-dir");
        FSUtils.setRootDir(configuration, rootDir);
        Path restoreDir = new Path(FSUtils.getRootDir(configuration), "restore-dir");

        // set input for map reduce job using hbase snapshots
        PhoenixMapReduceUtil
                    .setInput(job, PhoenixIndexDBWritable.class, snapshotName, qDataTable, restoreDir, selectQuery);
    }
    TableMapReduceUtil.initCredentials(job);
    
    job.setMapperClass(PhoenixIndexImportDirectMapper.class);
    return configureSubmittableJobUsingDirectApi(job);
}