Java Code Examples for org.apache.hadoop.hbase.client.Scan#setTimeRange()

The following examples show how to use org.apache.hadoop.hbase.client.Scan#setTimeRange(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
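Before the project examples, a minimal self-contained sketch of the method itself may help. The table name, column family, and one-hour window below are hypothetical; the key point is that setTimeRange(minStamp, maxStamp) restricts a Scan to cells whose timestamps fall in the half-open interval [minStamp, maxStamp).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SetTimeRangeSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    long now = System.currentTimeMillis();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("events"))) { // hypothetical table
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("f")); // hypothetical column family
      // Only cells written in the last hour: [now - 1h, now); the upper bound is exclusive.
      scan.setTimeRange(now - 3600_000L, now);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}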
Example 1
Source File: PcapGetterHBaseImpl.java    From opensoc-streaming with Apache License 2.0
/**
 * Sets the time range on scan.
 * 
 * @param scan
 *          the scan
 * @param startTime
 *          the start time
 * @param endTime
 *          the end time
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
private void setTimeRangeOnScan(Scan scan, long startTime, long endTime)
    throws IOException {
  boolean setTimeRange = true;
  if (startTime < 0 && endTime < 0) {
    setTimeRange = false;
  }
  if (setTimeRange) {
    if (startTime < 0) {
      startTime = 0;
    } else {
      startTime = PcapHelper.convertToDataCreationTimeUnit(startTime);
    }
    if (endTime < 0) {
      endTime = Long.MAX_VALUE;
    } else {
      endTime = PcapHelper.convertToDataCreationTimeUnit(endTime);
    }
    Assert.isTrue(startTime < endTime,
        "startTime value must be less than endTime value");
    scan.setTimeRange(startTime, endTime);
  }
}
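A detail the helper above leans on: the assertion requires startTime to be strictly less than endTime because HBase's TimeRange is half-open, including minStamp and excluding maxStamp. A minimal sketch, assuming a Table with a cell already written at timestamp ts:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

public class TimeRangeBounds {
  // Returns whether the row has any cell in [0, maxStamp). A cell written at
  // exactly timestamp ts is found with maxStamp = ts + 1 but missed with
  // maxStamp = ts, because the upper bound is exclusive.
  static boolean anyCellBefore(Table table, byte[] row, long maxStamp) throws IOException {
    Get get = new Get(row);
    get.setTimeRange(0L, maxStamp);
    return !table.get(get).isEmpty();
  }
}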
 
Example 2
Source File: HashTable.java    From hbase with Apache License 2.0
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.readVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.withStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.withStopRow(stopRow);
  }
  if(families != null) {
    for(String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
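Note the convention here: a start or end time of 0 means "unbounded", so the scan is only time-constrained when the user supplied at least one bound, and HConstants.LATEST_TIMESTAMP stands in for a missing (exclusive) upper bound.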
 
Example 3
Source File: BaseScannerRegionObserver.java    From phoenix with Apache License 2.0
@Override
public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
        Scan scan) throws IOException {
    byte[] txnScn = scan.getAttribute(TX_SCN);
    if (txnScn!=null) {
        TimeRange timeRange = scan.getTimeRange();
        scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn));
    }
    if (isRegionObserverFor(scan)) {
        // For local indexes, we need to throw if out of region as we'll get inconsistent
        // results otherwise, while in other cases it may just mean our client-side data
        // on region boundaries is out of date and can safely be ignored.
        if (!skipRegionBoundaryCheck(scan) || ScanUtil.isLocalIndex(scan)) {
            throwIfScanOutOfRegion(scan, c.getEnvironment().getRegion());
        }
        // Muck with the start/stop row of the scan and set as reversed at the
        // last possible moment. You need to swap the start/stop and make the
        // start exclusive and the stop inclusive.
        ScanUtil.setupReverseScan(scan);
    }
}
 
Example 4
Source File: TestRegionCoprocessorHost.java    From hbase with Apache License 2.0
@Test
public void testPreStoreScannerOpen() throws IOException {

  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  Scan scan = new Scan();
  scan.setTimeRange(TimeRange.INITIAL_MIN_TIMESTAMP, TimeRange.INITIAL_MAX_TIMESTAMP);
  assertTrue("Scan is not for all time", scan.getTimeRange().isAllTime());
  // SimpleRegionObserver is set to update the ScanInfo parameters if the passed-in scan
  // is for all time. This lets us exercise both that the Scan is wired up properly in the
  // coprocessor and that we can customize the metadata.

  ScanInfo oldScanInfo = getScanInfo();

  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preStoreScannerOpen(store, scan);

  verifyScanInfo(newScanInfo);
}
 
Example 5
Source File: IndexTestingUtils.java    From phoenix with Apache License 2.0
/**
 * Verify the state of the index table between the given key and time ranges against the list of
 * expected keyvalues.
 * @throws IOException
 */
@SuppressWarnings({ "javadoc", "deprecation" })
public static void verifyIndexTableAtTimestamp(HTable index1, List<KeyValue> expected,
    long start, long end, byte[] startKey, byte[] endKey) throws IOException {
  LOG.debug("Scanning " + Bytes.toString(index1.getTableName()) + " between times (" + start
      + ", " + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey)
      + "].");
  Scan s = new Scan(startKey, endKey);
  // s.setRaw(true);
  s.setMaxVersions();
  s.setTimeRange(start, end);
  List<KeyValue> received = new ArrayList<KeyValue>();
  ResultScanner scanner = index1.getScanner(s);
  for (Result r : scanner) {
    received.addAll(r.list());
    LOG.debug("Received: " + r.list());
  }
  scanner.close();
  assertEquals("Didn't get the expected kvs from the index table!", expected, received);
}
 
Example 6
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public void preScannerOpen(
    org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
    Scan scan) throws IOException {
  Transaction tx = getFromOperation(scan);
  if (tx != null) {
    projectFamilyDeletes(scan);
    scan.setMaxVersions();
    scan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttlByFamily, tx, readNonTxnData),
      TxUtils.getMaxVisibleTimestamp(tx));
    Filter newFilter = getTransactionFilter(tx, ScanType.USER_SCAN, scan.getFilter());
    scan.setFilter(newFilter);
  }
}
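This hook, and the TransactionProcessor example below that implements the same logic against the older coprocessor API, follow one recipe: widen the scan's time range to everything the transaction could possibly see, from the oldest visible timestamp given the per-family TTLs to the transaction's max visible timestamp, lift the version limit, and leave the fine-grained visibility decisions to the transaction filter installed on the scan.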
 
Example 7
Source File: RowCounter.java    From hbase with Apache License 2.0
/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public Job createSubmittableJob(Configuration conf) throws IOException {
  Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
  job.setJarByClass(RowCounter.class);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  setScanFilter(scan, rowRangeList);

  for (String columnName : this.columns) {
    String family = StringUtils.substringBefore(columnName, ":");
    String qualifier = StringUtils.substringAfter(columnName, ":");
    if (StringUtils.isBlank(qualifier)) {
      scan.addFamily(Bytes.toBytes(family));
    } else {
      scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
    }
  }

  if(this.expectedCount >= 0) {
    conf.setLong(EXPECTED_COUNT_KEY, this.expectedCount);
  }

  scan.setTimeRange(startTime, endTime);
  job.setOutputFormatClass(NullOutputFormat.class);
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
    RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
  job.setNumReduceTasks(0);
  return job;
}
 
Example 8
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s)
  throws IOException {
  Transaction tx = getFromOperation(scan);
  if (tx != null) {
    projectFamilyDeletes(scan);
    scan.setMaxVersions();
    scan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttlByFamily, tx, readNonTxnData),
                      TxUtils.getMaxVisibleTimestamp(tx));
    Filter newFilter = getTransactionFilter(tx, ScanType.USER_SCAN, scan.getFilter());
    scan.setFilter(newFilter);
  }
  return s;
}
 
Example 9
Source File: HbaseServiceImpl.java    From searchanalytics-bigdata with MIT License
@Override
public int numberOfTimesAFacetFilterClickedInLastAnHour(final String columnName, final String columnValue) {
	Scan scan = new Scan();
	scan.addColumn(HbaseJsonEventSerializer.COLUMFAMILY_FILTERS_BYTES,
			Bytes.toBytes(columnName));
	Filter filter = new SingleColumnValueFilter(HbaseJsonEventSerializer.COLUMFAMILY_FILTERS_BYTES,
			Bytes.toBytes(columnName), CompareOp.EQUAL, Bytes.toBytes(columnValue));
	scan.setFilter(filter);
	DateTime dateTime = new DateTime();
	try {
		scan.setTimeRange(dateTime.minusHours(1).getMillis(), dateTime.getMillis());
	} catch (IOException e) {
		throw new RuntimeException(e);
	}
	int count = hbaseTemplate.find("searchclicks", scan, new RowMapper<String>() {
		@Override
		public String mapRow(Result result, int rowNum) throws Exception {
			byte[] value = result.getValue(
					HbaseJsonEventSerializer.COLUMFAMILY_FILTERS_BYTES,
					Bytes.toBytes(columnName));
			if (value != null) {
				String facetValue = new String(value);
				LOG.debug("Facet field: {} and Facet Value: {}",
						new Object[] { columnName, facetValue });
			}
			return null;
		}
	}).size();

	LOG.debug("Checking numberOfTimesAFacetFilterClickedInLastAnHour done with count:{}", count);
	return count;
}
 
Example 10
Source File: MetaDataUtil.java    From phoenix with Apache License 2.0
public static Scan newTableRowsScan(byte[] key, long startTimeStamp, long stopTimeStamp)
        throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(startTimeStamp, stopTimeStamp);
    scan.setStartRow(key);
    byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY);
    ByteUtil.nextKey(stopKey, stopKey.length);
    scan.setStopRow(stopKey);
    return scan;
}
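The stop row here is built by appending Phoenix's separator byte to the key and incrementing, so the scan covers exactly the rows prefixed by that key. A rough stand-in using only the stock HBase client API, with the caveat that setRowPrefixFilter increments the prefix itself rather than key-plus-separator:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;

public final class PrefixTimeScan {
  // Sketch of the same idea without Phoenix helpers: restrict by time range
  // and let setRowPrefixFilter derive the exclusive stop row from the prefix.
  static Scan newPrefixScan(byte[] prefix, long startTs, long stopTs) throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(startTs, stopTs);
    scan.setRowPrefixFilter(prefix);
    return scan;
  }
}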
 
Example 11
Source File: TimestampTestBase.java    From hbase with Apache License 2.0
public static int assertScanContentTimestamp(final Table in, final long ts)
  throws IOException {
    Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW);
    scan.addFamily(FAMILY_NAME);
    scan.setTimeRange(0, ts);
    ResultScanner scanner = in.getScanner(scan);
    int count = 0;
    try {
      // TODO FIX
//      HStoreKey key = new HStoreKey();
//      TreeMap<byte [], Cell>value =
//        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
//      while (scanner.next(key, value)) {
//        assertTrue(key.getTimestamp() <= ts);
//        // Content matches the key or HConstants.LATEST_TIMESTAMP.
//        // (Key does not match content if we 'put' with LATEST_TIMESTAMP).
//        long l = Bytes.toLong(value.get(COLUMN).getValue());
//        assertTrue(key.getTimestamp() == l ||
//          HConstants.LATEST_TIMESTAMP == l);
//        count++;
//        value.clear();
//      }
    } finally {
      scanner.close();
    }
    return count;
  }
 
Example 12
Source File: RowCounter.java    From hbase with Apache License 2.0
/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @param args  The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 * @deprecated as of release 2.3.0. Will be removed on 4.0.0. Please use main method instead.
 */
@Deprecated
public static Job createSubmittableJob(Configuration conf, String[] args)
  throws IOException {
  String tableName = args[0];
  List<MultiRowRangeFilter.RowRange> rowRangeList = null;
  long startTime = 0;
  long endTime = 0;

  StringBuilder sb = new StringBuilder();

  final String rangeSwitch = "--range=";
  final String startTimeArgKey = "--starttime=";
  final String endTimeArgKey = "--endtime=";
  final String expectedCountArg = "--expected-count=";

  // First argument is table name, starting from second
  for (int i = 1; i < args.length; i++) {
    if (args[i].startsWith(rangeSwitch)) {
      try {
        rowRangeList = parseRowRangeParameter(
          args[i].substring(rangeSwitch.length()));
      } catch (IllegalArgumentException e) {
        return null;
      }
      continue;
    }
    if (args[i].startsWith(startTimeArgKey)) {
      startTime = Long.parseLong(args[i].substring(startTimeArgKey.length()));
      continue;
    }
    if (args[i].startsWith(endTimeArgKey)) {
      endTime = Long.parseLong(args[i].substring(endTimeArgKey.length()));
      continue;
    }
    if (args[i].startsWith(expectedCountArg)) {
      conf.setLong(EXPECTED_COUNT_KEY,
        Long.parseLong(args[i].substring(expectedCountArg.length())));
      continue;
    }
    // if no switch, assume column names
    sb.append(args[i]);
    sb.append(" ");
  }
  if (endTime < startTime) {
    printUsage("--endtime=" + endTime + " needs to be greater than --starttime=" + startTime);
    return null;
  }

  Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
  job.setJarByClass(RowCounter.class);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  setScanFilter(scan, rowRangeList);
  if (sb.length() > 0) {
    for (String columnName : sb.toString().trim().split(" ")) {
      String family = StringUtils.substringBefore(columnName, ":");
      String qualifier = StringUtils.substringAfter(columnName, ":");

      if (StringUtils.isBlank(qualifier)) {
        scan.addFamily(Bytes.toBytes(family));
      }
      else {
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
      }
    }
  }
  scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  job.setOutputFormatClass(NullOutputFormat.class);
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
    RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
  job.setNumReduceTasks(0);
  return job;
}
 
Example 13
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx, Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx, Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(CellUtil.cloneRow(c)).addColumn(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c),
                        c.getTimestamp(), CellUtil.cloneValue(c)));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache(true, false, new FlushLifeCycleTracker() { });
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache(true, false, new FlushLifeCycleTracker() { });
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    // This time raw scan too should not return expired cells, as they would be dropped during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compact(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}
 
Example 14
Source File: OmidTransactionTable.java    From phoenix with Apache License 2.0
@Override
public ResultScanner getScanner(Scan scan) throws IOException {
    scan.setTimeRange(0, Long.MAX_VALUE);
    return tTable.getScanner(tx, scan);
}
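In contrast to the Tephra hooks above, Omid widens the range to [0, Long.MAX_VALUE) before delegating to the transactional table, so snapshot visibility is decided entirely by the transaction context passed to getScanner rather than by the scan's own time range.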
 
Example 15
Source File: ThriftUtilities.java    From hbase with Apache License 2.0
public static Scan scanFromThrift(TScan in) throws IOException {
  Scan out = new Scan();

  if (in.isSetStartRow()) {
    out.withStartRow(in.getStartRow());
  }
  if (in.isSetStopRow()) {
    out.withStopRow(in.getStopRow());
  }
  if (in.isSetCaching()) {
    out.setCaching(in.getCaching());
  }
  if (in.isSetMaxVersions()) {
    out.readVersions(in.getMaxVersions());
  }

  if (in.isSetColumns()) {
    for (TColumn column : in.getColumns()) {
      if (column.isSetQualifier()) {
        out.addColumn(column.getFamily(), column.getQualifier());
      } else {
        out.addFamily(column.getFamily());
      }
    }
  }

  TTimeRange timeRange = in.getTimeRange();
  if (timeRange != null &&
      timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
    out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
  }

  if (in.isSetBatchSize()) {
    out.setBatch(in.getBatchSize());
  }

  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }

  if (in.isSetAttributes()) {
    addAttributes(out,in.getAttributes());
  }

  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }

  if (in.isSetReversed()) {
    out.setReversed(in.isReversed());
  }

  if (in.isSetCacheBlocks()) {
    out.setCacheBlocks(in.isCacheBlocks());
  }

  if (in.isSetColFamTimeRangeMap()) {
    Map<ByteBuffer, TTimeRange> colFamTimeRangeMap = in.getColFamTimeRangeMap();
    if (MapUtils.isNotEmpty(colFamTimeRangeMap)) {
      for (Map.Entry<ByteBuffer, TTimeRange> entry : colFamTimeRangeMap.entrySet()) {
        out.setColumnFamilyTimeRange(Bytes.toBytes(entry.getKey()),
            entry.getValue().getMinStamp(), entry.getValue().getMaxStamp());
      }
    }
  }

  if (in.isSetReadType()) {
    out.setReadType(readTypeFromThrift(in.getReadType()));
  }

  if (in.isSetLimit()) {
    out.setLimit(in.getLimit());
  }

  if (in.isSetConsistency()) {
    out.setConsistency(consistencyFromThrift(in.getConsistency()));
  }

  if (in.isSetTargetReplicaId()) {
    out.setReplicaId(in.getTargetReplicaId());
  }

  if (in.isSetFilterBytes()) {
    out.setFilter(filterFromThrift(in.getFilterBytes()));
  }

  return out;
}
 
Example 16
Source File: QueryDatabaseMetaDataTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Test
public void testCreateDropTable() throws Exception {
    long ts = nextTimestamp();
    String tenantId = getOrganizationId();
    initATableValues(tenantId, getDefaultSplits(tenantId), null, ts);
    
    ensureTableCreated(getUrl(), BTABLE_NAME, null, ts-2);
    ensureTableCreated(getUrl(), PTSDB_NAME, null, ts-2);
    
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    Connection conn5 = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
    String query = "SELECT a_string FROM aTable";
    // Data should still be there b/c we only dropped the schema
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 8));
    assertTrue(conn5.prepareStatement(query).executeQuery().next());
    conn5.createStatement().executeUpdate("DROP TABLE " + ATABLE_NAME);
    
    // Confirm that data is no longer there because we dropped the table
    // This needs to be done natively b/c the metadata is gone
    HTableInterface htable = conn5.unwrap(PhoenixConnection.class).getQueryServices().getTable(SchemaUtil.getTableNameAsBytes(ATABLE_SCHEMA_NAME, ATABLE_NAME));
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setTimeRange(0, ts+9);
    assertNull(htable.getScanner(scan).next());
    conn5.close();

    // Still should work b/c we're at an earlier timestamp than when table was deleted
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
    Connection conn2 = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
    assertTrue(conn2.prepareStatement(query).executeQuery().next());
    conn2.close();
    
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
    Connection conn10 = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
    try {
        conn10.prepareStatement(query).executeQuery().next();
        fail();
    } catch (TableNotFoundException e) {
    }
}
 
Example 17
Source File: QueryDatabaseMetaDataIT.java    From phoenix with Apache License 2.0
@Test
public void testCreateDropTable() throws Exception {
    long ts = nextTimestamp();
    String tenantId = getOrganizationId();
    initATableValues(tenantId, getDefaultSplits(tenantId), null, ts);
    
    ensureTableCreated(getUrl(), BTABLE_NAME, null, ts-2);
    ensureTableCreated(getUrl(), PTSDB_NAME, null, ts-2);
    
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    Connection conn5 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT a_string FROM aTable";
    // Data should still be there b/c we only dropped the schema
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 8));
    assertTrue(conn5.prepareStatement(query).executeQuery().next());
    conn5.createStatement().executeUpdate("DROP TABLE " + ATABLE_NAME);
    
    // Confirm that data is no longer there because we dropped the table
    // This needs to be done natively b/c the metadata is gone
    HTableInterface htable = conn5.unwrap(PhoenixConnection.class).getQueryServices().getTable(SchemaUtil.getTableNameAsBytes(ATABLE_SCHEMA_NAME, ATABLE_NAME));
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setTimeRange(0, ts+9);
    assertNull(htable.getScanner(scan).next());
    conn5.close();

    // Still should work b/c we're at an earlier timestamp than when table was deleted
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    assertTrue(conn2.prepareStatement(query).executeQuery().next());
    conn2.close();
    
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
    Connection conn10 = DriverManager.getConnection(getUrl(), props);
    try {
        conn10.prepareStatement(query).executeQuery().next();
        fail();
    } catch (TableNotFoundException e) {
    }
}
 
Example 18
Source File: TestKeepDeletes.java    From hbase with Apache License 2.0
/**
 * basic verification of existing behavior
 */
@Test
public void testWithoutKeepingDeletes() throws Exception {
  // KEEP_DELETED_CELLS is NOT enabled
  HTableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.FALSE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  long ts = EnvironmentEdgeManager.currentTime();
  Put p = new Put(T1, ts);
  p.addColumn(c0, c0, T1);
  region.put(p);

  Get gOne = new Get(T1);
  gOne.readAllVersions();
  gOne.setTimeRange(0L, ts + 1);
  Result rOne = region.get(gOne);
  assertFalse(rOne.isEmpty());


  Delete d = new Delete(T1, ts+2);
  d.addColumn(c0, c0, ts);
  region.delete(d);

  // "past" get does not see rows behind delete marker
  Get g = new Get(T1);
  g.readAllVersions();
  g.setTimeRange(0L, ts+1);
  Result r = region.get(g);
  assertTrue(r.isEmpty());

  // "past" scan does not see rows behind delete marker
  Scan s = new Scan();
  s.readAllVersions();
  s.setTimeRange(0L, ts+1);
  InternalScanner scanner = region.getScanner(s);
  List<Cell> kvs = new ArrayList<>();
  while (scanner.next(kvs)) {
    continue;
  }
  assertTrue(kvs.isEmpty());

  // flushing and minor compaction keep delete markers
  region.flush(true);
  region.compact(false);
  assertEquals(1, countDeleteMarkers(region));
  region.compact(true);
  // major compaction deleted it
  assertEquals(0, countDeleteMarkers(region));

  HBaseTestingUtility.closeRegionAndWAL(region);
}