Java Code Examples for org.apache.hadoop.hbase.client.ConnectionFactory#createConnection()

The following examples show how to use org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(). Each example is taken from the open source project named in its source line.
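Before the project-specific examples, here is a minimal sketch of the basic pattern: create one heavyweight, thread-safe Connection per process, borrow lightweight Table instances from it, and close both when done. The table name "demo_table", column family "cf", qualifier "q", and row key "row1" are placeholders, not taken from any example below.

Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("demo_table"))) {
    // Connection is heavyweight and thread-safe; Table is lightweight,
    // not thread-safe, and should be obtained and closed per use.
    Result result = table.get(new Get(Bytes.toBytes("row1")));
    byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
}
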
Example 1
Source File: HBaseTransactionPruningPlugin.java    From phoenix-tephra with Apache License 2.0
@Override
public void initialize(Configuration conf) throws IOException {
  this.conf = conf;
  this.connection = ConnectionFactory.createConnection(conf);

  final TableName stateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                                          TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  LOG.info("Initializing plugin with state table {}", stateTable.getNameWithNamespaceInclAsString());
  createPruneTable(stateTable);
  this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public Table get() throws IOException {
      return connection.getTable(stateTable);
    }
  });
}
 
Example 2
Source File: HBaseReadSplittableDoFn.java    From beam with Apache License 2.0
@ProcessElement
public void processElement(
    @Element Read read,
    OutputReceiver<Result> out,
    RestrictionTracker<ByteKeyRange, ByteKey> tracker)
    throws Exception {
  // Use try-with-resources so the connection and table are closed when the scan finishes.
  try (Connection connection = ConnectionFactory.createConnection(read.getConfiguration());
      Table table = connection.getTable(TableName.valueOf(read.getTableId()))) {
    final ByteKeyRange range = tracker.currentRestriction();
    try (ResultScanner scanner =
        table.getScanner(HBaseUtils.newScanInRange(read.getScan(), range))) {
      for (Result result : scanner) {
        ByteKey key = ByteKey.copyFrom(result.getRow());
        if (!tracker.tryClaim(key)) {
          return;
        }
        out.output(result);
      }
      tracker.tryClaim(ByteKey.EMPTY);
    }
  }
}
 
Example 3
Source File: CompactorUtil.java    From phoenix-omid with Apache License 2.0
public static void main(String[] args) throws IOException {
    Config cmdline = new Config();
    JCommander jcommander = new JCommander(cmdline, args);
    if (cmdline.help) {
        jcommander.usage("CompactorUtil");
        System.exit(1);
    }

    HBaseLogin.loginIfNeeded(cmdline.loginFlags);

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
        if (cmdline.enable) {
            enableOmidCompaction(conn, TableName.valueOf(cmdline.table),
                    Bytes.toBytes(cmdline.columnFamily));
        } else if (cmdline.disable) {
            disableOmidCompaction(conn, TableName.valueOf(cmdline.table),
                    Bytes.toBytes(cmdline.columnFamily));
        } else {
            System.err.println("Must specify enable or disable");
        }
    }
}
 
Example 4
Source File: HBaseIOIT.java    From beam with Apache License 2.0
@BeforeClass
public static void setup() throws IOException {
  PipelineOptionsFactory.register(HBasePipelineOptions.class);
  options = TestPipeline.testingPipelineOptions().as(HBasePipelineOptions.class);

  numberOfRows = options.getNumberOfRecords();

  conf.setStrings("hbase.zookeeper.quorum", options.getHbaseServerName());
  conf.setStrings("hbase.cluster.distributed", "true");
  conf.setStrings("hbase.client.retries.number", "1");

  Connection connection = ConnectionFactory.createConnection(conf);

  admin = connection.getAdmin();
  HTableDescriptor testTable =
      new HTableDescriptor(TableName.valueOf(TABLE_NAME))
          .addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(testTable);
}
 
Example 5
Source File: HalyardTableUtils.java    From Halyard with Apache License 2.0
/**
 * Helper method which locates or creates and returns the specified HTable used for triple/quad storage
 * @param config Hadoop Configuration of the cluster running HBase
 * @param tableName String table name
 * @param create boolean option to create the table if it does not exist
 * @param splits array of keys used to pre-split the new table, may be null
 * @return HTable
 * @throws IOException in case of any HBase IO problems
 */
public static HTable getTable(Configuration config, String tableName, boolean create, byte[][] splits) throws IOException {
    Configuration cfg = HBaseConfiguration.create(config);
    cfg.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 3600000L);
    if (create) {
        try (Connection con = ConnectionFactory.createConnection(config)) {
            try (Admin admin = con.getAdmin()) {
                // check if the table exists and if it doesn't, make it
                if (!admin.tableExists(TableName.valueOf(tableName))) {
                    HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName));
                    td.addFamily(createColumnFamily());
                    admin.createTable(td, splits);
                }
            }
        }
    }

    // new HTable(...) is deprecated; the current recommendation is connection.getTable() (see the sketch after this example)
    HTable table = new HTable(cfg, tableName);
    table.setAutoFlushTo(false);
    return table;
}
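
As the comment above notes, the HTable constructor and setAutoFlushTo(false) are deprecated. Below is a minimal sketch of the modern equivalent, assuming the same cfg and tableName; the row key, family, qualifier, and value are placeholders. A BufferedMutator provides the client-side write buffering that autoFlush=false used to give HTable:

try (Connection con = ConnectionFactory.createConnection(cfg);
     // BufferedMutator buffers Puts client-side and flushes them in batches,
     // replacing an HTable with setAutoFlushTo(false)
     BufferedMutator mutator = con.getBufferedMutator(TableName.valueOf(tableName))) {
    mutator.mutate(new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")));
    mutator.flush(); // optional; close() also flushes
}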
 
Example 6
Source File: HBaseLoader.java    From flinkDemo with Apache License 2.0
@Override
public void loader() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Use try-with-resources so both the connection and the table are closed.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("dfdq_rhm_aly:f_aly_point_data_test"))) {
        Put put = new Put(Bytes.toBytes("kkk"));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("t"), Bytes.toBytes(System.currentTimeMillis()));
        table.put(put);
    } catch (Exception e) {
        throw new Exception("Batch data storage failed!", e);
    }
}
 
Example 7
Source File: HBaseClientManager.java    From presto-hbase-connector with Apache License 2.0
public Connection createConnection() {
    Configuration conf;
    Connection connection;
    try {
        conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", config.getHbaseZookeeperQuorum());
        conf.set("hbase.zookeeper.property.clientPort", config.getZookeeperClientPort());
        conf.set("hbase.client.ipc.pool.size", "1");
        // number of retries on RPC failure
        conf.set("hbase.client.retries.number", "3");

        conf.set("zookeeper.znode.parent", config.getZookeeperZnodeParent());

        // increase this value to avoid SocketTimeoutException when invoking scanner.next()
        conf.set("hbase.client.scanner.timeout.period", "90000");

        if (config.getHbaseIsDistributed() != null) {
            conf.set("hbase.cluster.distributed", config.getHbaseIsDistributed());
        }
        long startTime = System.currentTimeMillis();
        connection = ConnectionFactory.createConnection(conf);

        if (System.currentTimeMillis() % SYSTEMOUT_INTERVAL == 0) {
            log.info("Create HBase connection " + (connection == null ? "succeed." : "failed.")
                    + ", used " + (System.currentTimeMillis() - startTime) + " mill sec");
        }

        return connection;
    } catch (Exception ex) {
        log.error(ex, ex.getMessage());
        return null;
    }
}
 
Example 8
Source File: InvalidListPruningDebugTool.java    From phoenix-tephra with Apache License 2.0
/**
 * Initialize the Invalid List Debug Tool.
 * @param conf {@link Configuration}
 * @throws IOException when not able to create an HBase connection
 */
@Override
@SuppressWarnings("WeakerAccess")
public void initialize(final Configuration conf) throws IOException {
  LOG.debug("InvalidListPruningDebugMain : initialize method called");
  connection = ConnectionFactory.createConnection(conf);
  tableName = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                         TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public Table get() throws IOException {
      return connection.getTable(tableName);
    }
  });
}
 
Example 9
Source File: HBaseConnection.java    From kylin-on-parquet-v2 with Apache License 2.0
@SuppressWarnings("resource")
public static Connection get(StorageURL url) {
    // find configuration
    Configuration conf = configCache.get(url);
    if (conf == null) {
        conf = newHBaseConfiguration(url);
        configCache.put(url, conf);
    }

    Connection connection = connPool.get(url);
    try {
        while (true) {
            // No double-checked locking here, since recreating a connection is not a big issue.
            if (connection == null || connection.isClosed()) {
                logger.info("connection is null or closed, creating a new one");
                connection = ConnectionFactory.createConnection(conf);
                connPool.put(url, connection);
            }

            if (connection == null || connection.isClosed()) {
                Thread.sleep(10000); // wait a while and retry
            } else {
                break;
            }
        }

    } catch (Throwable t) {
        logger.error("Error when open connection " + url, t);
        throw new RuntimeException("Error when open connection " + url, t);
    }

    return connection;
}
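
The loop above tolerates a race between threads because an extra connection is cheap to discard. As a hedged alternative, the illustrative helper below (getAtomic is not part of the Kylin code) makes the get-or-create step atomic, assuming connPool is a ConcurrentHashMap<StorageURL, Connection>. Note that the connection is then created while holding the map's per-bin lock, which is acceptable when creation is rare:

public static Connection getAtomic(StorageURL url) {
    // compute() re-creates the connection only when the cached one is missing or closed
    return connPool.compute(url, (u, existing) -> {
        try {
            if (existing != null && !existing.isClosed()) {
                return existing;
            }
            return ConnectionFactory.createConnection(newHBaseConfiguration(u));
        } catch (IOException e) {
            throw new RuntimeException("Error when opening connection " + u, e);
        }
    });
}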
 
Example 10
Source File: HBaseAtlasHookIT.java    From atlas with Apache License 2.0
@Test (enabled = false)
public void testCreateNamesapce() throws Exception {
    final Configuration conf = HBaseConfiguration.create();

    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", String.valueOf(port));
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    Connection          conn  = ConnectionFactory.createConnection(conf);
    Admin               admin = conn.getAdmin();
    NamespaceDescriptor ns    = NamespaceDescriptor.create("test_namespace").build();

    admin.createNamespace(ns);

    //assert on qualified name
    String        nameSpace   = assertNameSpaceIsRegistered(ns.getName());
    AtlasClientV2 atlasClient = getAtlasClient();

    if (atlasClient != null) {
        AtlasEntityWithExtInfo nameSpaceRef           = atlasClient.getEntityByGuid(nameSpace);
        String                 nameSpaceQualifiedName = HBaseAtlasHook.getNameSpaceQualifiedName(CLUSTER_NAME, ns.getName());

        Assert.assertEquals(nameSpaceRef.getEntity().getAttribute(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME), nameSpaceQualifiedName);
    } else {
        Assert.fail("Unable to create AtlasClient for Testing");
    }
}
 
Example 11
Source File: HBaseCompat2_0.java    From atlas with Apache License 2.0
@Override
public ConnectionMask createConnection(Configuration conf) throws IOException
{
    return new HConnection2_0(ConnectionFactory.createConnection(conf));
}
 
Example 12
Source File: HBaseAccessor.java    From pxf with Apache License 2.0
/**
 * Loads the HBase table object using ConnectionFactory.
 */
private void openTable() throws IOException {
    connection = ConnectionFactory.createConnection(HBaseConfiguration.create(configuration));
    table = connection.getTable(TableName.valueOf(context.getDataSource()));
}
 
Example 13
Source File: HBaseStoreClient.java    From warp10-platform with Apache License 2.0
public HBaseStoreClient(KeyStore keystore, Properties properties) throws IOException {
  
  this.keystore = keystore;
  this.hbaseKey = keystore.getKey(KeyStore.AES_HBASE_DATA);
  this.properties = properties;
  
  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_BLOCKCACHE_GTS_THRESHOLD)) {
    this.blockcacheThreshold = Long.parseLong(properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_BLOCKCACHE_GTS_THRESHOLD));
  } else {
    this.blockcacheThreshold = 0L;
  }
  
  this.useHBaseFilter = "true".equals(properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_FILTER));
  
  this.hbaseFilterThreshold = Integer.parseInt(properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_FILTER_THRESHOLD, Integer.toString(HBASE_FILTER_THRESHOLD_DEFAULT)));

  Configuration conf = new Configuration();
  conf.set(HConstants.ZOOKEEPER_QUORUM, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_ZKCONNECT));
  if (!"".equals(properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_ZNODE))) {
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_ZNODE));
  }

  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT)) {
    conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
  }

  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_IPC_POOL_SIZE)) {
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_IPC_POOL_SIZE));
  }
  
  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)) {      
    conf.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
  }
  
  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_RPC_TIMEOUT)) {
    conf.set(HConstants.HBASE_RPC_TIMEOUT_KEY, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_RPC_TIMEOUT));
  }
  
  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_MAX_PERSERVER_TASKS)) {
    conf.set(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_MAX_PERSERVER_TASKS));
  }

  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_MAX_PERREGION_TASKS)) {
    conf.set(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_MAX_PERREGION_TASKS));
  }
  
  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_MAX_TOTAL_TASKS)) {
    conf.set(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_CLIENT_MAX_TOTAL_TASKS));
  }

  //
  // Handle additional HBase configurations
  //
  
  if (properties.containsKey(io.warp10.continuum.Configuration.EGRESS_HBASE_CONFIG)) {
    String[] keys = properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_CONFIG).split(",");
    for (String key: keys) {
      if (!properties.containsKey("egress." + key.trim())) {
        throw new RuntimeException("Missing declared property 'egress." + key.trim() + "'.");
      }
      conf.set(key, properties.getProperty("egress." + key.trim()));
    }
  }

  this.conn = ConnectionFactory.createConnection(conf);
  this.tableName = TableName.valueOf(properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_TABLE));
  
  //
  // Initialize HBaseRegionKeys
  //
  
  HBaseRegionKeys.getRegionKeys(conn, tableName);
  
  this.colfam = properties.getProperty(io.warp10.continuum.Configuration.EGRESS_HBASE_DATA_COLFAM).getBytes(StandardCharsets.UTF_8);
}
 
Example 14
Source File: RowCounterCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
public static void main(String[] args) throws IOException {

    if (args == null || args.length != 3) {
        logger.info(
                "Usage: hbase org.apache.hadoop.util.RunJar kylin-job-latest.jar org.apache.kylin.job.tools.RowCounterCLI [HTABLE_NAME] [STARTKEY] [ENDKEY]");
        return; // if not enough arguments are provided, return with the above message
    }

    logger.info(args[0]);
    String htableName = args[0];
    logger.info(args[1]);
    byte[] startKey = BytesUtil.fromReadableText(args[1]);
    logger.info(args[2]);
    byte[] endKey = BytesUtil.fromReadableText(args[2]);

    if (startKey == null) {
        logger.info("startkey is null");
    } else {
        logger.info("startkey length: {}", startKey.length);
    }
    if (logger.isInfoEnabled()) {
        logger.info("start key in binary: {}", Bytes.toStringBinary(startKey));
        logger.info("end key in binary: {}", Bytes.toStringBinary(endKey));
    }

    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();

    Scan scan = new Scan();
    scan.setCaching(512);
    scan.setCacheBlocks(true);
    scan.setStartRow(startKey);
    scan.setStopRow(endKey);

    logger.info("My Scan {}", scan);
    // close the scanner along with the table and connection
    try (Connection conn = ConnectionFactory.createConnection(conf);
            Table tableInterface = conn.getTable(TableName.valueOf(htableName));
            ResultScanner scanner = tableInterface.getScanner(scan)) {
        int counter = 0;
        for (Result row : scanner) {
            counter++;
            if (counter % 1000 == 1) {
                logger.info("number of rows: {}", counter);
            }
        }
        logger.info("number of rows: {}", counter);
    }
}
 
Example 15
Source File: PingHBaseCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
public static void main(String[] args) throws IOException {
    String hbaseTable = args[0];

    System.out.println("Hello friend.");

    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            System.out.println("--------------Error while getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    Connection conn = null;
    Table table = null;
    ResultScanner scanner = null;
    try {
        conn = ConnectionFactory.createConnection(hconf);
        table = conn.getTable(TableName.valueOf(hbaseTable));
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        IOUtils.closeQuietly(scanner);
        IOUtils.closeQuietly(table);
        IOUtils.closeQuietly(conn);
    }

}
 
Example 16
Source File: HBaseBridge.java    From atlas with Apache License 2.0
public HBaseBridge(Configuration atlasConf, AtlasClientV2 atlasClientV2) throws Exception {
    this.atlasClientV2     = atlasClientV2;
    this.metadataNamespace = getMetadataNamespace(atlasConf);

    org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();

    LOG.info("checking HBase availability..");

    HBaseAdmin.available(conf);

    LOG.info("HBase is available");

    Connection conn = ConnectionFactory.createConnection(conf);

    hbaseAdmin = conn.getAdmin();
}
 
Example 17
Source File: SimpleHfileToRmdbExporter.java    From super-cloudops with Apache License 2.0
/**
 * Do HFile bulk exporting.
 * 
 * @param line parsed command line options
 * @throws Exception
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public static void doRmdbExporting(CommandLine line) throws Exception {
	// Configuration.
	String tabname = line.getOptionValue("tabname");
	String user = line.getOptionValue("user");
	Configuration conf = new Configuration();
	conf.set("hbase.zookeeper.quorum", line.getOptionValue("zkaddr"));
	conf.set("hbase.fs.tmp.dir", line.getOptionValue("T", DEFAULT_HBASE_MR_TMPDIR));
	conf.set(TableInputFormat.INPUT_TABLE, tabname);
	conf.set(TableInputFormat.SCAN_BATCHSIZE, line.getOptionValue("batchSize", DEFAULT_SCAN_BATCH_SIZE));

	// Check directory.
	String outputDir = line.getOptionValue("output", DEFAULT_HFILE_OUTPUT_DIR) + "/" + tabname;
	FileSystem fs = FileSystem.get(new URI(outputDir), new Configuration(), user);
	if (fs.exists(new Path(outputDir))) {
		fs.delete(new Path(outputDir), true);
	}

	// Set scan condition.(if necessary)
	HfileBulkExporter.setScanIfNecessary(conf, line);

	// Job.
	Connection conn = ConnectionFactory.createConnection(conf);
	TableName tab = TableName.valueOf(tabname);
	Job job = Job.getInstance(conf);
	job.setJobName(HfileBulkExporter.class.getSimpleName() + "@" + tab.getNameAsString());
	job.setJarByClass(HfileBulkExporter.class);
	job.setMapperClass((Class<Mapper>) ClassUtils.getClass(line.getOptionValue("mapperClass", DEFAULT_MAPPER_CLASS)));
	job.setInputFormatClass(TableInputFormat.class);
	job.setMapOutputKeyClass(ImmutableBytesWritable.class);
	job.setMapOutputValueClass(Put.class);

	HFileOutputFormat2.configureIncrementalLoad(job, conn.getTable(tab), conn.getRegionLocator(tab));
	FileOutputFormat.setOutputPath(job, new Path(outputDir));
	if (job.waitForCompletion(true)) {
		long total = job.getCounters().findCounter(DEFUALT_COUNTER_GROUP, DEFUALT_COUNTER_TOTAL).getValue();
		long processed = job.getCounters().findCounter(DEFUALT_COUNTER_GROUP, DEFUALT_COUNTER_PROCESSED).getValue();
		log.info(String.format("Exported to successfully! with processed:(%d)/total:(%d)", processed, total));
	}

}
 
Example 18
Source File: HBaseCommitTable.java    From phoenix-omid with Apache License 2.0
/**
 * Create an HBase commit table.
 * Note that the connection created here is only used to construct the writer and client.
 * @throws IOException
 */
@Inject
public HBaseCommitTable(Configuration hbaseConfig, HBaseCommitTableConfig config) throws IOException {
    this(ConnectionFactory.createConnection(hbaseConfig), config, KeyGeneratorImplementations.defaultKeyGenerator());
}