org.apache.hadoop.hbase.filter.SubstringComparator Java Examples
The following examples show how to use
org.apache.hadoop.hbase.filter.SubstringComparator.
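Before the project examples, here is a minimal sketch of the pattern most of them share: wrapping a SubstringComparator in a filter (here a RowFilter) and attaching it to a Scan. The table name "users", the substring "admin", and the method name are hypothetical placeholders, and the HBase 2.x client API (CompareOperator) is assumed.

import java.io.IOException;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class SubstringComparatorSketch {

    // Scans a hypothetical "users" table and keeps only rows whose key contains "admin".
    static void scanRowsContaining(Connection connection) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf("users"))) {
            Scan scan = new Scan();
            // SubstringComparator matches when the compared value contains the given
            // substring; here it is applied to the row key via RowFilter.
            scan.setFilter(new RowFilter(CompareOperator.EQUAL, new SubstringComparator("admin")));
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    System.out.println(Bytes.toString(result.getRow()));
                }
            }
        }
    }
}

The same comparator can be paired with other filters, such as SingleColumnValueFilter or ValueFilter, as the examples below show.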
Example #1
Source File: HBaseTest.java From xxhadoop with Apache License 2.0
@Test
public void testScan() throws IOException {
    Connection connection = admin.getConnection();
    Table table = connection.getTable(TableName.valueOf("tbl_girls"));
    Scan scan = new Scan(Bytes.toBytes("0001"), Bytes.toBytes("0004"));

    // RowKeyFilter
    Filter filter = new PrefixFilter(Bytes.toBytes("000"));
    scan.setFilter(filter);

    Filter filter2 = new RowFilter(CompareOp.EQUAL, new SubstringComparator("000"));
    scan.setFilter(filter2);

    //BinaryComparator binaryComparator = new BinaryComparator(Bytes.toBytes(29));
    Filter filter3 = new SingleColumnValueFilter(Bytes.toBytes("base_info"), Bytes.toBytes("age"),
        CompareOp.GREATER, Bytes.toBytes(29));
    // Note: each setFilter call replaces the previous filter, so only filter3 is applied here.
    scan.setFilter(filter3);

    ResultScanner resultScanner = table.getScanner(scan);
    for (Result result : resultScanner) {
        LOGGER.info(result.toString());
        int value = Bytes.toInt(result.getValue(Bytes.toBytes("base_info"), Bytes.toBytes("age")));
        LOGGER.info(String.valueOf(value));
    }
}
Example #2
Source File: RegionsMerger.java From hbase-operator-tools with Apache License 2.0
private List<RegionInfo> getOpenRegions(Connection connection, TableName table) throws Exception {
    List<RegionInfo> regions = new ArrayList<>();
    Table metaTbl = connection.getTable(META_TABLE_NAME);
    String tblName = table.getNameAsString();
    RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL,
        new SubstringComparator(tblName + ","));
    SingleColumnValueFilter colFilter = new SingleColumnValueFilter(CATALOG_FAMILY,
        STATE_QUALIFIER, CompareOperator.EQUAL, Bytes.toBytes("OPEN"));
    Scan scan = new Scan();
    FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    filter.addFilter(rowFilter);
    filter.addFilter(colFilter);
    scan.setFilter(filter);
    try (ResultScanner rs = metaTbl.getScanner(scan)) {
        Result r;
        while ((r = rs.next()) != null) {
            RegionInfo region = RegionInfo.parseFrom(r.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
            regions.add(region);
        }
    }
    return regions;
}
Example #3
Source File: ScannerModel.java From hbase with Apache License 2.0
public ByteArrayComparableModel(ByteArrayComparable comparator) {
    String typeName = comparator.getClass().getSimpleName();
    ComparatorType type = ComparatorType.valueOf(typeName);
    this.type = typeName;
    switch (type) {
        case BinaryComparator:
        case BinaryPrefixComparator:
            this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue()));
            break;
        case BitComparator:
            this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue()));
            this.op = ((BitComparator) comparator).getOperator().toString();
            break;
        case NullComparator:
            break;
        case RegexStringComparator:
        case SubstringComparator:
            this.value = Bytes.toString(comparator.getValue());
            break;
        default:
            throw new RuntimeException("unhandled filter type: " + type);
    }
}
Example #4
Source File: HBCK2.java From hbase-operator-tools with Apache License 2.0
int setRegionState(ClusterConnection connection, String region, RegionState.State newState)
        throws IOException {
    if (newState == null) {
        throw new IllegalArgumentException("State can't be null.");
    }
    RegionState.State currentState = null;
    Table table = connection.getTable(TableName.valueOf("hbase:meta"));
    RowFilter filter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(region));
    Scan scan = new Scan();
    scan.setFilter(filter);
    Result result = table.getScanner(scan).next();
    if (result != null) {
        byte[] currentStateValue = result.getValue(HConstants.CATALOG_FAMILY,
            HConstants.STATE_QUALIFIER);
        if (currentStateValue == null) {
            System.out.println("WARN: Region state info on meta was NULL");
        } else {
            currentState = RegionState.State.valueOf(
                org.apache.hadoop.hbase.util.Bytes.toString(currentStateValue));
        }
        Put put = new Put(result.getRow());
        put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
            org.apache.hadoop.hbase.util.Bytes.toBytes(newState.name()));
        table.put(put);
        System.out.println("Changed region " + region + " STATE from " + currentState
            + " to " + newState);
        return EXIT_SUCCESS;
    } else {
        System.out.println("ERROR: Could not find region " + region + " in meta.");
    }
    return EXIT_FAILURE;
}
Example #5
Source File: TestSnapshotFilter.java From phoenix-omid with Apache License 2.0
@Test(timeOut = 60_000)
public void testServerSideSnapshotFiltering() throws Throwable {
    byte[] rowName1 = Bytes.toBytes("row1");
    byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
    byte[] colName1 = Bytes.toBytes("col1");
    byte[] dataValue1 = Bytes.toBytes("testWrite-1");
    byte[] dataValue2 = Bytes.toBytes("testWrite-2");

    String TEST_TABLE = "testServerSideSnapshotFiltering";
    createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
    TTable tt = new TTable(connection, TEST_TABLE);

    Transaction tx1 = tm.begin();
    Put put1 = new Put(rowName1);
    put1.addColumn(famName1, colName1, dataValue1);
    tt.put(tx1, put1);
    tm.commit(tx1);

    Transaction tx2 = tm.begin();
    Put put2 = new Put(rowName1);
    put2.addColumn(famName1, colName1, dataValue2);
    tt.put(tx2, put2);

    Transaction tx3 = tm.begin();
    Get get = new Get(rowName1);

    // If snapshot filtering is not done in the server then the first value is
    // "testWrite-2" and the whole row will be filtered out.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        famName1, colName1,
        CompareFilter.CompareOp.EQUAL,
        new SubstringComparator("testWrite-1"));
    get.setFilter(filter);
    Result results = tt.get(tx3, get);

    assertTrue(results.size() == 1);
}
Example #6
Source File: MetaTableAccessor.java From hbase with Apache License 2.0
/**
 * Scans META table for a row whose key contains the specified <B>regionEncodedName</B>,
 * returning a single related <code>Result</code> instance if any row is found, null otherwise.
 * @param connection the connection to query META table.
 * @param regionEncodedName the region encoded name to look for at META.
 * @return <code>Result</code> instance with the row related info in META, null otherwise.
 * @throws IOException if any errors occur while querying META.
 */
public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName)
    throws IOException {
  RowFilter rowFilter =
      new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName));
  Scan scan = getMetaScan(connection, 1);
  scan.setFilter(rowFilter);
  ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan);
  return resultScanner.next();
}
Example #7
Source File: ViewTTLIT.java From phoenix with Apache License 2.0
private void assertViewHeaderRowsHaveViewTTLRelatedCells(String schemaName, long minTimestamp,
    boolean rawScan, int expectedRows) throws IOException, SQLException {

    FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    RowFilter schemaNameFilter = new RowFilter(
        CompareFilter.CompareOp.EQUAL,
        new SubstringComparator(schemaName)
    );
    QualifierFilter viewTTLQualifierFilter = new QualifierFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(PhoenixDatabaseMetaData.VIEW_TTL_BYTES));
    filterList.addFilter(schemaNameFilter);
    filterList.addFilter(viewTTLQualifierFilter);

    try (Table tbl = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES)
        .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {

        Scan allRows = new Scan();
        allRows.setRaw(rawScan);
        allRows.setTimeRange(minTimestamp, HConstants.LATEST_TIMESTAMP);
        allRows.setFilter(filterList);
        ResultScanner scanner = tbl.getScanner(allRows);
        int numMatchingRows = 0;
        for (Result result = scanner.next(); result != null; result = scanner.next()) {
            numMatchingRows +=
                result.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                    PhoenixDatabaseMetaData.VIEW_TTL_BYTES) ? 1 : 0;
        }
        assertEquals(String.format("Expected rows do not match for table = %s at timestamp %d",
            Bytes.toString(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES), minTimestamp),
            expectedRows, numMatchingRows);
    }
}
Example #8
Source File: TestSnapshotFilter.java From phoenix-omid with Apache License 2.0
@Test(timeOut = 60_000)
public void testServerSideSnapshotScannerFiltering() throws Throwable {
    byte[] rowName1 = Bytes.toBytes("row1");
    byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
    byte[] colName1 = Bytes.toBytes("col1");
    byte[] dataValue1 = Bytes.toBytes("testWrite-1");
    byte[] dataValue2 = Bytes.toBytes("testWrite-2");

    String TEST_TABLE = "testServerSideSnapshotFiltering";
    createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
    TTable tt = new TTable(connection, TEST_TABLE);

    Transaction tx1 = tm.begin();
    Put put1 = new Put(rowName1);
    put1.addColumn(famName1, colName1, dataValue1);
    tt.put(tx1, put1);
    tm.commit(tx1);

    Transaction tx2 = tm.begin();
    Put put2 = new Put(rowName1);
    put2.addColumn(famName1, colName1, dataValue2);
    // tt.put(tx2, put2);

    Transaction tx3 = tm.begin();

    // If snapshot filtering is not done in the server then the first value is
    // "testWrite-2" and the whole row will be filtered out.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        famName1, colName1,
        CompareFilter.CompareOp.EQUAL,
        new SubstringComparator("testWrite-1"));
    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner iterableRS = tt.getScanner(tx3, scan);
    Result result = iterableRS.next();

    assertTrue(result.size() == 1);
}
Example #9
Source File: TestFromClientSide5.java From hbase with Apache License 2.0
/**
 * Test for HBASE-17125
 */
@Test
public void testReadWithFilter() throws Exception {
    final TableName tableName = name.getTableName();
    try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 3)) {

        byte[] VALUEA = Bytes.toBytes("value-a");
        byte[] VALUEB = Bytes.toBytes("value-b");
        long[] ts = {1000, 2000, 3000, 4000};

        Put put = new Put(ROW);
        // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER
        for (int t = 0; t <= 3; t++) {
            if (t <= 1) {
                put.addColumn(FAMILY, QUALIFIER, ts[t], VALUEA);
            } else {
                put.addColumn(FAMILY, QUALIFIER, ts[t], VALUEB);
            }
        }
        table.put(put);

        Scan scan = new Scan()
            .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a")))
            .readVersions(3);
        ResultScanner scanner = table.getScanner(scan);
        Result result = scanner.next();
        // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3
        assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, 0);

        Get get = new Get(ROW)
            .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a")))
            .readVersions(3);
        result = table.get(get);
        // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3
        assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, 0);

        // Test with max versions 1, it should still read ts[1]
        scan = new Scan()
            .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a")))
            .readVersions(1);
        scanner = table.getScanner(scan);
        result = scanner.next();
        // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3
        assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, 0);

        // Test with max versions 1, it should still read ts[1]
        get = new Get(ROW)
            .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a")))
            .readVersions(1);
        result = table.get(get);
        // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3
        assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, 0);

        // Test with max versions 5, it should still read ts[1]
        scan = new Scan()
            .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a")))
            .readVersions(5);
        scanner = table.getScanner(scan);
        result = scanner.next();
        // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3
        assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, 0);

        // Test with max versions 5, it should still read ts[1]
        get = new Get(ROW)
            .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a")))
            .readVersions(5);
        result = table.get(get);
        // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3
        assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, 0);
    }
}
Example #10
Source File: ProcessRecordService.java From hraven with Apache License 2.0
/**
 * @param cluster for which to return the last ProcessRecord.
 * @param compareOp to apply to the processState argument. If
 *          {@link CompareOp#NO_OP} is passed, then no filter is used at all,
 *          and processState argument is ignored.
 * @param processState return rows where the compareOp applies.
 * @param maxCount the maximum number of results to return.
 * @param processFileSubstring return rows where the process file path
 *          contains this string. If <code>null</code> or empty string, then
 *          no filtering is applied.
 * @return the last process record that is not in {@link ProcessState#CREATED}
 *         state. Note that no records with a maxModificationTime of 0
 *         (beginning of time) will be returned
 * @throws IOException
 */
public List<ProcessRecord> getProcessRecords(String cluster, CompareOp compareOp,
    ProcessState processState, int maxCount, String processFileSubstring) throws IOException {
  Scan scan = new Scan();
  // Pull data only for our cluster
  scan.setStartRow(
      keyConv.toBytes(new ProcessRecordKey(cluster, Long.MAX_VALUE)));
  // Records are sorted in reverse order, so the last one for this cluster
  // would be the one with a modification time at the beginning of time.
  scan.setStopRow(keyConv.toBytes(new ProcessRecordKey(cluster, 0)));

  scan.addColumn(Constants.INFO_FAM_BYTES,
      Constants.MIN_MOD_TIME_MILLIS_COLUMN_BYTES);
  scan.addColumn(Constants.INFO_FAM_BYTES,
      Constants.PROCESSED_JOB_FILES_COLUMN_BYTES);
  scan.addColumn(Constants.INFO_FAM_BYTES,
      Constants.PROCESS_FILE_COLUMN_BYTES);
  scan.addColumn(Constants.INFO_FAM_BYTES,
      Constants.PROCESSING_STATE_COLUMN_BYTES);
  scan.addColumn(Constants.INFO_FAM_BYTES,
      Constants.MIN_JOB_ID_COLUMN_BYTES);
  scan.addColumn(Constants.INFO_FAM_BYTES,
      Constants.MAX_JOB_ID_COLUMN_BYTES);
  scan.setMaxVersions(1);

  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);

  // Filter on process state only when needed.
  if (!NO_OP.equals(compareOp)) {
    byte[] filterColumnValue = Bytes.toBytes(processState.getCode());
    Filter processingStatefilter = new SingleColumnValueFilter(
        Constants.INFO_FAM_BYTES, Constants.PROCESSING_STATE_COLUMN_BYTES,
        compareOp, filterColumnValue);
    filterList.addFilter(processingStatefilter);
  }

  // Filter on process file only when needed
  if (processFileSubstring != null && processFileSubstring.length() > 0) {
    SubstringComparator ssc = new SubstringComparator(processFileSubstring);
    Filter processFileFilter = new SingleColumnValueFilter(Constants.INFO_FAM_BYTES,
        Constants.PROCESS_FILE_COLUMN_BYTES, EQUAL, ssc);
    filterList.addFilter(processFileFilter);
  }

  // Add filters only if any filter was actually needed.
  if (filterList.getFilters().size() > 0) {
    scan.setFilter(filterList);
  }

  ResultScanner scanner = null;
  List<ProcessRecord> records = null;
  Table processRecordTable = null;
  try {
    processRecordTable = hbaseConnection
        .getTable(TableName.valueOf(Constants.JOB_FILE_PROCESS_TABLE));
    scanner = processRecordTable.getScanner(scan);
    records = createFromResults(scanner, maxCount);
  } finally {
    if (scanner != null) {
      scanner.close();
    }
    if (processRecordTable != null) {
      processRecordTable.close();
    }
  }

  return records;
}