Java Code Examples for org.apache.hadoop.hbase.client.RegionLocator#close()

The following examples show how to use org.apache.hadoop.hbase.client.RegionLocator#close(). Each example is taken from an open source project; the originating project and source file are noted above the code.
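Before the examples, here is a minimal sketch of the API being demonstrated (not taken from any of the projects below; the table name "example_table" is a placeholder used only for illustration). RegionLocator implements Closeable, so close() can be called explicitly or handled automatically by try-with-resources.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocatorCloseSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // "example_table" is a placeholder table name for illustration only.
        TableName tableName = TableName.valueOf("example_table");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(tableName)) {
            // try-with-resources calls locator.close() (and connection.close())
            // automatically, even if this lookup throws.
            System.out.println("regions: " + locator.getAllRegionLocations().size());
        }
        // The examples below instead call regionLocator.close() explicitly,
        // typically in a finally block.
    }
}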
Example 1
Source File: HBaseDataFragmenter.java    From pxf with Apache License 2.0
private void addTableFragments(byte[] userData) throws IOException {
    RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(context.getDataSource()));
    List<HRegionLocation> locations = regionLocator.getAllRegionLocations();

    for (HRegionLocation location : locations) {
        addFragment(location, userData);
    }

    regionLocator.close();
}
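
In Example 1, regionLocator.close() is only reached if addFragment() does not throw. A possible variant (a sketch, not part of the pxf source, reusing the same connection, context and addFragment members) uses try-with-resources so the locator is closed on every path:

private void addTableFragments(byte[] userData) throws IOException {
    // try-with-resources guarantees regionLocator.close() runs even if
    // addFragment() throws part-way through the loop.
    try (RegionLocator regionLocator =
             connection.getRegionLocator(TableName.valueOf(context.getDataSource()))) {
        for (HRegionLocation location : regionLocator.getAllRegionLocations()) {
            addFragment(location, userData);
        }
    }
}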
 
Example 2
Source File: TestRegionReplicaReplicationEndpoint.java    From hbase with Apache License 2.0
private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disableReplication)
    throws Exception {
  // tests having edits from a disabled or dropped table is handled correctly by skipping those
  // entries and further edits after the edits from dropped/disabled table can be replicated
  // without problems.
  final TableName tableName = TableName.valueOf(
    name.getMethodName() + "_drop_" + dropTable + "_disabledReplication_" + disableReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);

  HTU.getAdmin().createTable(htd);
  TableName toBeDisabledTable = TableName.valueOf(
    dropTable ? "droppedTable" : (disableReplication ? "disableReplication" : "disabledTable"));
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(TableName.valueOf(toBeDisabledTable.toString()),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);

  // both tables are created, now pause replication
  HTU.getAdmin().disableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());

  // now that the replication is disabled, write to the table to be dropped, then drop the table.

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);

  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegion().getEncodedNameAsBytes();

  Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("A"))
      .setFamily(HTU.fam1).setValue(Bytes.toBytes("VAL")).setType(Type.Put).build();
  Entry entry = new Entry(
    new WALKeyImpl(encodedRegionName, toBeDisabledTable, 1),
      new WALEdit()
          .add(cell));
  HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
  if (dropTable) {
    HTU.getAdmin().deleteTable(toBeDisabledTable);
  } else if (disableReplication) {
    htd.setRegionReplication(regionReplication - 2);
    HTU.getAdmin().modifyTable(htd);
    HTU.getAdmin().enableTable(toBeDisabledTable);
  }

  HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(0);
  MetricsSource metrics = mock(MetricsSource.class);
  ReplicationEndpoint.Context ctx =
    new ReplicationEndpoint.Context(rs, HTU.getConfiguration(), HTU.getConfiguration(),
      HTU.getTestFileSystem(), ServerRegionReplicaUtil.getReplicationPeerId(),
      UUID.fromString(rs.getClusterId()), rs.getReplicationSourceService().getReplicationPeers()
        .getPeer(ServerRegionReplicaUtil.getReplicationPeerId()),
      metrics, rs.getTableDescriptors(), rs);
  RegionReplicaReplicationEndpoint rrpe = new RegionReplicaReplicationEndpoint();
  rrpe.init(ctx);
  rrpe.start();
  ReplicationEndpoint.ReplicateContext repCtx = new ReplicationEndpoint.ReplicateContext();
  repCtx.setEntries(Lists.newArrayList(entry, entry));
  assertTrue(rrpe.replicate(repCtx));
  verify(metrics, times(1)).incrLogEditsFiltered(eq(2L));
  rrpe.stop();
  if (disableReplication) {
    // enable replication again so that we can verify replication
    HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
    htd.setRegionReplication(regionReplication);
    HTU.getAdmin().modifyTable(htd);
    HTU.getAdmin().enableTable(toBeDisabledTable);
  }

  try {
    // load some data to the to-be-dropped table
    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    // now enable the replication
    HTU.getAdmin().enableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}
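
A note on resource handling in Example 2: rl.close() sits in the finally block next to table.close(), tableToBeDisabled.close() and connection.close(), so each handle obtained from the Connection is released even when verification fails. Closing the RegionLocator does not close the shared Connection; the Connection itself is closed last.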
 
Example 3
Source File: TestTableResource.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  client = new Client(new Cluster().add("localhost",
    REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(
      TableModel.class,
      TableInfoModel.class,
      TableListModel.class,
      TableRegionModel.class);
  TEST_UTIL.createMultiRegionTable(TABLE, Bytes.toBytes(COLUMN_FAMILY), NUM_REGIONS);
  byte[] k = new byte[3];
  byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN));
  List<Put> puts = new ArrayList<>();
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(famAndQf[0], famAndQf[1], k);
        puts.add(put);
      }
    }
  }

  Connection connection = TEST_UTIL.getConnection();

  Table table = connection.getTable(TABLE);
  table.put(puts);
  table.close();

  RegionLocator regionLocator = connection.getRegionLocator(TABLE);
  List<HRegionLocation> m = regionLocator.getAllRegionLocations();

  // should have NUM_REGIONS regions now
  assertEquals(NUM_REGIONS, m.size());
  regionMap = m;
  LOG.error("regions: " + regionMap);
  regionLocator.close();
}
 
Example 4
Source File: AbstractBulkLoadTool.java    From phoenix with Apache License 2.0
/**
 * Submits the jobs to the cluster.
 * Loads the HFiles onto the respective tables.
 * @throws Exception 
 */
public int submitJob(final Configuration conf, final String qualifiedTableName,
    final String inputPaths, final Path outputPath, List<TargetTableRef> tablesToBeLoaded, boolean hasLocalIndexes) throws Exception {
   
    Job job = Job.getInstance(conf, "Phoenix MapReduce import for " + qualifiedTableName);
    FileInputFormat.addInputPaths(job, inputPaths);
    FileOutputFormat.setOutputPath(job, outputPath);

    job.setInputFormatClass(PhoenixTextInputFormat.class);
    job.setMapOutputKeyClass(TableRowkeyPair.class);
    job.setMapOutputValueClass(ImmutableBytesWritable.class);
    job.setOutputKeyClass(TableRowkeyPair.class);
    job.setOutputValueClass(KeyValue.class);
    job.setReducerClass(FormatToKeyValueReducer.class);
    byte[][] splitKeysBeforeJob = null;
    try(org.apache.hadoop.hbase.client.Connection hbaseConn =
            ConnectionFactory.createConnection(job.getConfiguration())) {
        RegionLocator regionLocator = null;
        if(hasLocalIndexes) {
            try{
                regionLocator = hbaseConn.getRegionLocator(
                        TableName.valueOf(qualifiedTableName));
                splitKeysBeforeJob = regionLocator.getStartKeys();
            } finally {
                if (regionLocator != null) regionLocator.close();
            }
        }
        MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded);

        final String tableNamesAsJson = TargetTableRefFunctions.NAMES_TO_JSON
                .apply(tablesToBeLoaded);
        final String logicalNamesAsJson = TargetTableRefFunctions.LOGICAL_NAMES_TO_JSON
                .apply(tablesToBeLoaded);

        job.getConfiguration().set(FormatToBytesWritableMapper.TABLE_NAMES_CONFKEY,
                tableNamesAsJson);
        job.getConfiguration().set(FormatToBytesWritableMapper.LOGICAL_NAMES_CONFKEY,
                logicalNamesAsJson);

        // give subclasses their hook
        setupJob(job);

        LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath);
        boolean success = job.waitForCompletion(true);

        if (success) {
            if (hasLocalIndexes) {
                try {
                    regionLocator = hbaseConn.getRegionLocator(
                            TableName.valueOf(qualifiedTableName));
                    if(!IndexUtil.matchingSplitKeys(splitKeysBeforeJob,
                            regionLocator.getStartKeys())) {
                        LOGGER.error("The table " + qualifiedTableName + " has local indexes and"
                                + " there is split key mismatch before and after running"
                                + " bulkload job. Please rerun the job otherwise there may be"
                                + " inconsistencies between actual data and index data.");
                        return -1;
                    }
                } finally {
                    if (regionLocator != null) regionLocator.close();
                }
            }
            LOGGER.info("Loading HFiles from {}", outputPath);
            completebulkload(conf,outputPath,tablesToBeLoaded);
            LOGGER.info("Removing output directory {}", outputPath);
            if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
                LOGGER.error("Failed to delete the output directory {}", outputPath);
            }
            return 0;
        } else {
            return -1;
        }
    }
}
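
A note on Example 4: the RegionLocator is acquired, used, and closed twice rather than held open across the MapReduce job. The start keys captured before the job are compared against a freshly obtained locator after the job completes, because region boundaries can change while the job runs, and each acquisition is paired with a null-guarded regionLocator.close() in a finally block so the locator is released even if getStartKeys() throws.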