Java Code Examples for org.apache.hadoop.hbase.client.Scan.setTimeRange()

The following are Java code examples showing how to use the setTimeRange() method of the org.apache.hadoop.hbase.client.Scan class.
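Before the project examples, a minimal, self-contained sketch of the call itself may be useful: setTimeRange(minStamp, maxStamp) limits a read to cells whose timestamps fall in the half-open interval [minStamp, maxStamp), so the upper bound is exclusive. The table name "t1" and column family "cf" below are placeholders, not taken from any of the projects that follow.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SetTimeRangeSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"))) {   // "t1" is a placeholder
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));                         // "cf" is a placeholder
      // Keep only cells written within the last hour; "now" itself is
      // excluded because the max bound of the range is exclusive.
      long now = System.currentTimeMillis();
      scan.setTimeRange(now - 3600_000L, now);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow()));
        }
      }
    }
  }
}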
Example 1
Project: ditb   File: CellCounter.java
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Set Scan Versions
  s.setMaxVersions(Integer.MAX_VALUE);
  s.setCacheBlocks(false);
  // Set Scan Column Family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter rowFilter = getRowFilter(args);
  if (rowFilter != null) {
    LOG.info("Setting Row Filter for counter.");
    s.setFilter(rowFilter);
  }
  // Set TimeRange if defined
  long[] timeRange = getTimeRange(args);
  if (timeRange != null) {
    LOG.info("Setting TimeRange for counter.");
    s.setTimeRange(timeRange[0], timeRange[1]);
  }
  return s;
}
 
Example 2
Project: ditb   File: HashTable.java
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
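  // a zero startTime/endTime means "not set"; an unset endTime is widened to LATEST_TIMESTAMP below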
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.setMaxVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.setStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.setStopRow(stopRow);
  }
  if(families != null) {
    for(String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
 
Example 3
Project: ditb   File: ThriftServerRunner.java
@Override
public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow,
    List<ByteBuffer> columns, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startRow));
    addAttributes(scan, attributes);
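    // return only cells written before the given timestamp; the max bound of a time range is exclusive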
    scan.setTimeRange(0, timestamp);
    if (columns != null && columns.size() != 0) {
      for (ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Example 4
Project: ditb   File: ThriftServerRunner.java
@Override
public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow,
    ByteBuffer stopRow, List<ByteBuffer> columns, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startRow), getBytes(stopRow));
    addAttributes(scan, attributes);
    scan.setTimeRange(0, timestamp);
    if (columns != null && columns.size() != 0) {
      for (ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Example 5
Project: ditb   File: VerifyReplication.java
/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @param args  The command line parameters.
 * @return The newly created job.
 * @throws java.io.IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException {
  if (!doCommandLine(args)) {
    return null;
  }
  if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
      HConstants.REPLICATION_ENABLE_DEFAULT)) {
    throw new IOException("Replication needs to be enabled to verify it.");
  }
  conf.set(NAME+".peerId", peerId);
  conf.set(NAME+".tableName", tableName);
  conf.setLong(NAME+".startTime", startTime);
  conf.setLong(NAME+".endTime", endTime);
  if (families != null) {
    conf.set(NAME+".families", families);
  }

  Pair<ReplicationPeerConfig, Configuration> peerConfigPair = getPeerQuorumConfig(conf);
  ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
  String peerQuorumAddress = peerConfig.getClusterKey();
  LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " +
      peerConfig.getConfiguration());
  conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
  HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX,
      peerConfig.getConfiguration().entrySet());

  conf.setInt(NAME + ".versions", versions);
  LOG.info("Number of version: " + versions);

  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(VerifyReplication.class);

  Scan scan = new Scan();
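  // compare only cells whose timestamps fall in [startTime, endTime)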
  scan.setTimeRange(startTime, endTime);
  if (versions >= 0) {
    scan.setMaxVersions(versions);
    LOG.info("Number of versions set to " + versions);
  }
  if(families != null) {
    String[] fams = families.split(",");
    for(String fam : fams) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
      Verifier.class, null, null, job);

  Configuration peerClusterConf = peerConfigPair.getSecond();
  // Obtain the auth token from peer cluster
  TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);

  job.setOutputFormatClass(NullOutputFormat.class);
  job.setNumReduceTasks(0);
  return job;
}
 
Example 6
Project: ditb   File: HBaseTestCase.java
public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
  byte [] firstRow, long ts)
throws IOException {
  Scan scan = new Scan(firstRow);
  if(qualifiers == null || qualifiers.length == 0) {
    scan.addFamily(family);
  } else {
    for (int i = 0; i < qualifiers.length; i++) {
      scan.addColumn(family, qualifiers[i]);
    }
  }
  scan.setTimeRange(0, ts);
  return new InternalScannerIncommon(region.getScanner(scan));
}
 
Example 7
Project: ditb   File: HBaseTestCase.java
public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
    byte [] firstRow, long ts)
  throws IOException {
  Scan scan = new Scan(firstRow);
  if(qualifiers == null || qualifiers.length == 0) {
    scan.addFamily(family);
  } else {
    for (int i = 0; i < qualifiers.length; i++) {
      scan.addColumn(family, qualifiers[i]);
    }
  }
  scan.setTimeRange(0, ts);
  return new ClientScannerIncommon(table.getScanner(scan));
}
 
Example 8
Project: ditb   File: GSScannerCaching.java
/**
 * scan all index tables, common rowkeys will be saved in rowkeySet
 * can be optimized in 2 ways:
 * 1. scan index tables by the order of #CandidateRowkeys, similar to CCIndex
 * 2. scan index tables in parallel
 *
 * @throws IOException
 */
public static Queue<byte[]> createRowkeyQueueBySecondaryIndex(Connection conn,
    IndexTableRelation relation, Map<byte[], NavigableSet<byte[]>> familyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  TreeSet<byte[]> rowkeySet = null;
  for (ScanRange range : rangeList.getRanges()) {
    Scan scan = new Scan();
    scan.setStartRow(range.getStart());
    scan.setStopRow(range.getStop());
    scan.setFamilyMap(familyMap);
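    // a timestamp of -1 marks an unset bound in ScanRange, so only narrow the time range when both are set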
    if (range.getStartTs() != -1 && range.getStopTs() != -1) {
      scan.setTimeRange(range.getStartTs(), range.getStopTs());
    }
    TableName tableName = relation.getIndexTableName(range.getFamily(), range.getQualifier());
    Table table = conn.getTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    Result res;
    TreeSet<byte[]> candidateSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    while ((res = scanner.next()) != null) {
      candidateSet.add(IndexPutParser.parseIndexRowKey(res.getRow())[0]);
    }
    System.out.println("get " + candidateSet.size() + " candidate rowkeys from " + range);
    if (rowkeySet == null) {
      rowkeySet = candidateSet;
    } else {
      rowkeySet = getCommonSet(rowkeySet, candidateSet);
    }
    System.out.println("common key set size " + rowkeySet.size() + " after " + range);
    if (rowkeySet.isEmpty()) { // no commons keys at all, can ignore the rest index tables
      break;
    }
  }
  if (rowkeySet != null && !rowkeySet.isEmpty()) {
    Queue<byte[]> rowkeyQueue = new LinkedList<>();
    for (byte[] rowkey : rowkeySet)
      rowkeyQueue.add(rowkey);
    return rowkeyQueue;
  }
  return null;
}
 
Example 9
Project: ditb   File: ThriftUtilities.java
public static Scan scanFromThrift(TScan in) throws IOException {
  Scan out = new Scan();

  if (in.isSetStartRow())
    out.setStartRow(in.getStartRow());
  if (in.isSetStopRow())
    out.setStopRow(in.getStopRow());
  if (in.isSetCaching())
    out.setCaching(in.getCaching());
  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }

  if (in.isSetColumns()) {
    for (TColumn column : in.getColumns()) {
      if (column.isSetQualifier()) {
        out.addColumn(column.getFamily(), column.getQualifier());
      } else {
        out.addFamily(column.getFamily());
      }
    }
  }

  TTimeRange timeRange = in.getTimeRange();
  if (timeRange != null &&
      timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
    out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
  }

  if (in.isSetBatchSize()) {
    out.setBatch(in.getBatchSize());
  }

  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }

  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }

  if (in.isSetReversed()) {
    out.setReversed(in.isReversed());
  }

  return out;
}
 
Example 10
Project: ditb   File: ThriftServerRunner.java
@Override
public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan();
    addAttributes(scan, attributes);
    if (tScan.isSetStartRow()) {
      scan.setStartRow(tScan.getStartRow());
    }
    if (tScan.isSetStopRow()) {
      scan.setStopRow(tScan.getStopRow());
    }
    if (tScan.isSetTimestamp()) {
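      // the Thrift timestamp becomes the exclusive upper bound of the scan's time range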
      scan.setTimeRange(0, tScan.getTimestamp());
    }
    if (tScan.isSetCaching()) {
      scan.setCaching(tScan.getCaching());
    }
    if (tScan.isSetBatchSize()) {
      scan.setBatch(tScan.getBatchSize());
    }
    if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
      for(ByteBuffer column : tScan.getColumns()) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    if (tScan.isSetFilterString()) {
      ParseFilter parseFilter = new ParseFilter();
      scan.setFilter(
          parseFilter.parseFilterString(tScan.getFilterString()));
    }
    if (tScan.isSetReversed()) {
      scan.setReversed(tScan.isReversed());
    }
    return addScanner(table.getScanner(scan), tScan.sortColumns);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Example 11
Project: ditb   File: VerifyReplication.java
/**
 * Map method that compares every scanned row with the equivalent from
 * a distant cluster.
 * @param row  The current table row key.
 * @param value  The columns.
 * @param context  The current context.
 * @throws IOException When something is broken with the data.
 */
@Override
public void map(ImmutableBytesWritable row, final Result value,
                Context context)
    throws IOException {
  if (replicatedScanner == null) {
    Configuration conf = context.getConfiguration();
    final Scan scan = new Scan();
    scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
    long startTime = conf.getLong(NAME + ".startTime", 0);
    long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE);
    String families = conf.get(NAME + ".families", null);
    if(families != null) {
      String[] fams = families.split(",");
      for(String fam : fams) {
        scan.addFamily(Bytes.toBytes(fam));
      }
    }
    scan.setTimeRange(startTime, endTime);
    int versions = conf.getInt(NAME+".versions", -1);
    LOG.info("Setting number of version inside map as: " + versions);
    if (versions >= 0) {
      scan.setMaxVersions(versions);
    }

    final TableSplit tableSplit = (TableSplit)(context.getInputSplit());
    HConnectionManager.execute(new HConnectable<Void>(conf) {
      @Override
      public Void connect(HConnection conn) throws IOException {
        String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
        Configuration peerConf = HBaseConfiguration.createClusterConf(conf,
            zkClusterKey, PEER_CONFIG_PREFIX);

        TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
        replicatedTable = new HTable(peerConf, tableName);
        scan.setStartRow(value.getRow());
        scan.setStopRow(tableSplit.getEndRow());
        replicatedScanner = replicatedTable.getScanner(scan);
        return null;
      }
    });
    currentCompareRowInPeerTable = replicatedScanner.next();
  }
  while (true) {
    if (currentCompareRowInPeerTable == null) {
      // reach the region end of peer table, row only in source table
      logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value);
      break;
    }
    int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow());
    if (rowCmpRet == 0) {
      // rowkey is same, need to compare the content of the row
      try {
        Result.compareResults(value, currentCompareRowInPeerTable);
        context.getCounter(Counters.GOODROWS).increment(1);
      } catch (Exception e) {
        logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value);
        LOG.error("Exception while comparing row : " + e);
      }
      currentCompareRowInPeerTable = replicatedScanner.next();
      break;
    } else if (rowCmpRet < 0) {
      // row only exists in source table
      logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value);
      break;
    } else {
      // row only exists in peer table
      logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS,
        currentCompareRowInPeerTable);
      currentCompareRowInPeerTable = replicatedScanner.next();
    }
  }
}
 
Example 12
Project: ditb   File: Export.java
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Optional arguments.
  // Set Scan Versions
  int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1;
  s.setMaxVersions(versions);
  // Set Scan Range
  long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L;
  long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE;
  s.setTimeRange(startTime, endTime);
  // Set cache blocks
  s.setCacheBlocks(false);
  // set Start and Stop row
  if (conf.get(TableInputFormat.SCAN_ROW_START) != null) {
    s.setStartRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_START)));
  }
  if (conf.get(TableInputFormat.SCAN_ROW_STOP) != null) {
    s.setStopRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_STOP)));
  }
  // Set raw scan (include delete markers and not-yet-collected deleted cells)
  boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
  if (raw) {
    s.setRaw(raw);
  }

  // Set Scan Column Family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter exportFilter = getExportFilter(args);
  if (exportFilter != null) {
    LOG.info("Setting Scan Filter for Export.");
    s.setFilter(exportFilter);
  }

  int batching = conf.getInt(EXPORT_BATCHING, -1);
  if (batching != -1) {
    try {
      s.setBatch(batching);
    } catch (IncompatibleFilterException e) {
      LOG.error("Batching could not be set", e);
    }
  }
  LOG.info("versions=" + versions + ", starttime=" + startTime +
    ", endtime=" + endTime + ", keepDeletedCells=" + raw);
  return s;
}
 
Example 13
Project: ditb   File: TestKeepDeletes.java
/**
 * basic verification of existing behavior
 */
@Test
public void testWithoutKeepingDeletes() throws Exception {
  // KEEP_DELETED_CELLS is NOT enabled
  HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.FALSE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  long ts = EnvironmentEdgeManager.currentTime();
  Put p = new Put(T1, ts);
  p.add(c0, c0, T1);
  region.put(p);

  Get gOne = new Get(T1);
  gOne.setMaxVersions();
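  // ts + 1 because the upper bound of a time range is exclusive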
  gOne.setTimeRange(0L, ts + 1);
  Result rOne = region.get(gOne);
  assertFalse(rOne.isEmpty());


  Delete d = new Delete(T1, ts+2);
  d.deleteColumn(c0, c0, ts);
  region.delete(d);

  // "past" get does not see rows behind delete marker
  Get g = new Get(T1);
  g.setMaxVersions();
  g.setTimeRange(0L, ts+1);
  Result r = region.get(g);
  assertTrue(r.isEmpty());

  // "past" scan does not see rows behind delete marker
  Scan s = new Scan();
  s.setMaxVersions();
  s.setTimeRange(0L, ts+1);
  InternalScanner scanner = region.getScanner(s);
  List<Cell> kvs = new ArrayList<Cell>();
  while (scanner.next(kvs))
    ;
  assertTrue(kvs.isEmpty());

  // flushing and minor compaction keep delete markers
  region.flush(true);
  region.compact(false);
  assertEquals(1, countDeleteMarkers(region));
  region.compact(true);
  // major compaction deleted it
  assertEquals(0, countDeleteMarkers(region));

  HRegion.closeHRegion(region);
}
 
Example 14
Project: ditb   File: GSScanner.java
/**
 * scan all index tables, common rowkeys will be saved in rowkeySet
 * can be optimized in 2 ways:
 * 1. scan index tables by the order of #CandidateRowkeys, similar to CCIndex
 * 2. scan index tables in parallel
 *
 * @throws IOException
 */
public static Queue<byte[]> createRowkeyQueueBySecondaryIndex(Connection conn,
    IndexTableRelation relation, Map<byte[], NavigableSet<byte[]>> familyMap,
    ScanRange.ScanRangeList rangeList, Scan rawScan) throws IOException {
  TreeSet<byte[]> rowkeySet = null;
  long timeToMerge = 0;
  for (ScanRange range : rangeList.getRanges()) {
    Scan scan = new Scan();
    scan.setStartRow(range.getStart());
    scan.setStopRow(range.getStop());
    scan.setFamilyMap(familyMap);
    scan.setCaching(rawScan.getCaching());
    scan.setCacheBlocks(rawScan.getCacheBlocks());
    scan.setId(rawScan.getId());
    if (range.getStartTs() != -1 && range.getStopTs() != -1) {
      scan.setTimeRange(range.getStartTs(), range.getStopTs());
    }
    TableName tableName = relation.getIndexTableName(range.getFamily(), range.getQualifier());
    Table table = conn.getTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    Result res;
    long timeStart = System.currentTimeMillis();
    TreeSet<byte[]> candidateSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    while ((res = scanner.next()) != null) {
      candidateSet.add(IndexPutParser.parseIndexRowKey(res.getRow())[0]);
    }
    System.out.println(String
        .format("get %d candidate rowkeys from %s in scan %s, cost %.2f seconds",
            candidateSet.size(), range.toString(), scan.getId(),
            (System.currentTimeMillis() - timeStart) / 1000.0));
    if (rowkeySet == null) {
      rowkeySet = candidateSet;
    } else {
      timeStart = System.currentTimeMillis();
      rowkeySet = getCommonSet(rowkeySet, candidateSet);
      timeToMerge += (System.currentTimeMillis() - timeStart);
    }
    System.out.println(
        "common key set size " + rowkeySet.size() + " after " + range + " in scan " + scan
            .getId());
    if (rowkeySet.isEmpty()) { // no commons keys at all, can ignore the rest index tables
      break;
    }
  }
  // guard: rowkeySet stays null when rangeList has no ranges
  if (rowkeySet != null) {
    System.out.println(String
        .format("get %d result rowkeys in scan %s, cost %.2f seconds", rowkeySet.size(),
            rawScan.getId(), timeToMerge / 1000.0));
  }
  if (rowkeySet != null && !rowkeySet.isEmpty()) {
    Queue<byte[]> rowkeyQueue = new LinkedList<>();
    for (byte[] rowkey : rowkeySet)
      rowkeyQueue.add(rowkey);
    return rowkeyQueue;
  }
  return null;
}