Java Code Examples for org.apache.hadoop.hbase.client.Scan#setRaw()

The following examples show how to use org.apache.hadoop.hbase.client.Scan#setRaw(). Each example is taken from an open-source project; the originating source file, project, and license are noted above its code.
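Most of the examples share one pattern: enable raw mode on a Scan, read all versions, and walk the returned cells, which then include delete markers and deleted-but-not-yet-compacted cells. As a quick orientation, here is a minimal sketch of that pattern, assuming the HBase 2.x client API and a hypothetical table name ("my_table"); older clients call setMaxVersions() instead of readAllVersions().

public static void printRawCells(Configuration conf) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) {
        Scan scan = new Scan();
        scan.setRaw(true);      // include delete markers and deleted, not-yet-compacted cells
        scan.readAllVersions(); // raw scans are typically combined with reading all versions
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                for (Cell cell : result.rawCells()) {
                    System.out.println(cell);
                }
            }
        }
    }
}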
Example 1
Source File: TestUtil.java    From phoenix with Apache License 2.0
public static int getRowCount(Table table, boolean isRaw) throws IOException {
    Scan s = new Scan();
    s.setRaw(isRaw);
    s.setMaxVersions();
    int rows = 0;
    try (ResultScanner scanner = table.getScanner(s)) {
        Result result = null;
        while ((result = scanner.next()) != null) {
            rows++;
            CellScanner cellScanner = result.cellScanner();
            Cell current = null;
            while (cellScanner.advance()) {
                current = cellScanner.current();
            }
        }
    }
    return rows;
}
 
Example 2
Source File: TestUtil.java    From phoenix with Apache License 2.0
public static void dumpTable(Table table) throws IOException {
    System.out.println("************ dumping " + table + " **************");
    Scan s = new Scan();
    s.setRaw(true);
    s.setMaxVersions();
    try (ResultScanner scanner = table.getScanner(s)) {
        Result result = null;
        while ((result = scanner.next()) != null) {
            CellScanner cellScanner = result.cellScanner();
            Cell current = null;
            while (cellScanner.advance()) {
                current = cellScanner.current();
                System.out.println(current);
            }
        }
    }
    System.out.println("-----------------------------------------------");
}
 
Example 3
Source File: TestUtil.java    From phoenix with Apache License 2.0
public static void dumpIndexStatus(Connection conn, String indexName) throws IOException, SQLException {
    try (Table table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) { 
        System.out.println("************ dumping index status for " + indexName + " **************");
        Scan s = new Scan();
        s.setRaw(true);
        s.setMaxVersions();
        byte[] startRow = SchemaUtil.getTableKeyFromFullName(indexName);
        s.setStartRow(startRow);
        s.setStopRow(ByteUtil.nextKey(ByteUtil.concat(startRow, QueryConstants.SEPARATOR_BYTE_ARRAY)));
        try (ResultScanner scanner = table.getScanner(s)) {
            Result result = null;
            while ((result = scanner.next()) != null) {
                CellScanner cellScanner = result.cellScanner();
                Cell current = null;
                while (cellScanner.advance()) {
                    current = cellScanner.current();
                    if (Bytes.compareTo(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), PhoenixDatabaseMetaData.INDEX_STATE_BYTES, 0, PhoenixDatabaseMetaData.INDEX_STATE_BYTES.length) == 0) {
                        System.out.println(current.getTimestamp() + "/INDEX_STATE=" + PIndexState.fromSerializedValue(current.getValueArray()[current.getValueOffset()]));
                    }
                }
            }
        }
        System.out.println("-----------------------------------------------");
    }
}
 
Example 4
Source File: MetaDataEndpointImpl.java    From phoenix with Apache License 2.0
private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
    long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }

    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell> newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
      scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            Cache<ImmutableBytesPtr, PTable> metaDataCache =
                    GlobalCache.getInstance(this.env).getMetaDataCache();
            PTable table = newDeletedTableMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, table);
            return table;
        }
    }
    return null;
}
 
Example 5
Source File: TestKeepDeletes.java    From hbase with Apache License 2.0
/**
 * The ExplicitColumnTracker does not support "raw" scanning.
 */
@Test
public void testRawScanWithColumns() throws Exception {
  HTableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.TRUE);
  Region region = hbu.createLocalHRegion(htd, null, null);

  Scan s = new Scan();
  s.setRaw(true);
  s.readAllVersions();
  s.addColumn(c0, c0);

  try {
    region.getScanner(s);
    fail("raw scanner with columns should have failed");
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException dnre) {
    // ok!
  }

  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 6
Source File: TestKeepDeletes.java    From hbase with Apache License 2.0
private int countDeleteMarkers(HRegion region) throws IOException {
  Scan s = new Scan();
  s.setRaw(true);
  // use max versions from the store(s)
  s.readVersions(region.getStores().iterator().next().getScanInfo().getMaxVersions());
  InternalScanner scan = region.getScanner(s);
  List<Cell> kvs = new ArrayList<>();
  int res = 0;
  boolean hasMore;
  do {
    hasMore = scan.next(kvs);
    for (Cell kv : kvs) {
      if(CellUtil.isDelete(kv)) {
        res++;
      }
    }
    kvs.clear();
  } while (hasMore);
  scan.close();
  return res;
}
 
Example 7
Source File: TestUtil.java    From phoenix with Apache License 2.0
public static CellCount getCellCount(Table table, boolean isRaw) throws IOException {
    Scan s = new Scan();
    s.setRaw(isRaw);
    s.setMaxVersions();

    CellCount cellCount = new CellCount();
    try (ResultScanner scanner = table.getScanner(s)) {
        Result result = null;
        while ((result = scanner.next()) != null) {
            CellScanner cellScanner = result.cellScanner();
            Cell current = null;
            while (cellScanner.advance()) {
                current = cellScanner.current();
                cellCount.addCell(Bytes.toString(CellUtil.cloneRow(current)));
            }
        }
    }
    return cellCount;
}
 
Example 8
Source File: IndexToolForNonTxGlobalIndexIT.java    From phoenix with Apache License 2.0
private Pair<Integer, Integer> countPutsAndDeletes(String tableName) throws Exception {
    int numPuts = 0;
    int numDeletes = 0;
    try (org.apache.hadoop.hbase.client.Connection hcon =
            ConnectionFactory.createConnection(config)) {
        Table htable = hcon.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        scan.setRaw(true);
        ResultScanner scanner = htable.getScanner(scan);

        for (Result result = scanner.next(); result != null; result = scanner.next()) {
            for (Cell cell : result.rawCells()) {
                if (cell.getType() == Cell.Type.Put) {
                    numPuts++;
                } else if (cell.getType() == Cell.Type.DeleteFamily) {
                    numDeletes++;
                }
            }
        }
    }
    return new Pair<Integer, Integer>(numPuts, numDeletes);
}
 
Example 9
Source File: MetaDataEndpointImpl.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    
    Scan scan = newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    RegionScanner scanner = region.getScanner(scan);
    List<KeyValue> results = Lists.<KeyValue>newArrayList();
    scanner.next(results);
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        KeyValue kv = results.get(0);
        if (kv.isDelete()) {
            Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
            PTable table = newDeletedTableMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, table);
            return table;
        }
    }
    return null;
}
 
Example 10
Source File: BaseIndexIT.java    From phoenix with Apache License 2.0
private void assertNoIndexDeletes(Connection conn, long minTimestamp, String fullIndexName) throws IOException, SQLException {
    if (!this.mutable) {
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable index = pconn.getTable(new PTableKey(null, fullIndexName));
        byte[] physicalIndexTable = index.getPhysicalName().getBytes();
        try (Table hIndex = pconn.getQueryServices().getTable(physicalIndexTable)) {
            Scan scan = new Scan();
            scan.setRaw(true);
            if (this.transactional) {
                minTimestamp = TransactionUtil.convertToNanoseconds(minTimestamp);
            }
            scan.setTimeRange(minTimestamp, HConstants.LATEST_TIMESTAMP);
            ResultScanner scanner = hIndex.getScanner(scan);
            Result result;
            while ((result = scanner.next()) != null) {
                CellScanner cellScanner = result.cellScanner();
                while (cellScanner.advance()) {
                    Cell current = cellScanner.current();
                    assertTrue(CellUtil.isPut(current));
                }
            }
        }
    }
}
 
Example 11
Source File: UngroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
private RegionScanner rebuildIndices(final RegionScanner innerScanner, final Region region, final Scan scan,
                                     final RegionCoprocessorEnvironment env) throws IOException {
    boolean oldCoproc = region.getTableDescriptor().hasCoprocessor(Indexer.class.getCanonicalName());
    byte[] valueBytes = scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_VERIFY_TYPE);
    IndexTool.IndexVerifyType verifyType = (valueBytes != null) ?
            IndexTool.IndexVerifyType.fromValue(valueBytes) : IndexTool.IndexVerifyType.NONE;
    if (oldCoproc && verifyType == IndexTool.IndexVerifyType.ONLY) {
        return new IndexerRegionScanner(innerScanner, region, scan, env);
    }
    if (!scan.isRaw()) {
        Scan rawScan = new Scan(scan);
        rawScan.setRaw(true);
        rawScan.setMaxVersions();
        rawScan.getFamilyMap().clear();
        // For rebuilds we use count (*) as query for regular tables which ends up setting the FKOF on scan
        // This filter doesn't give us all columns and skips to the next row as soon as it finds 1 col
        // For rebuilds we need all columns and all versions
        if (scan.getFilter() instanceof FirstKeyOnlyFilter) {
            rawScan.setFilter(null);
        } else if (scan.getFilter() != null) {
            // Override the filter so that we get all versions
            rawScan.setFilter(new AllVersionsIndexRebuildFilter(scan.getFilter()));
        }
        rawScan.setCacheBlocks(false);
        for (byte[] family : scan.getFamilyMap().keySet()) {
            rawScan.addFamily(family);
        }
        innerScanner.close();
        RegionScanner scanner = region.getScanner(rawScan);
        return new IndexRebuildRegionScanner(scanner, region, scan, env, this);
    }
    return new IndexRebuildRegionScanner(innerScanner, region, scan, env, this);
}
 
Example 12
Source File: MutationStateIT.java    From phoenix with Apache License 2.0
@Test
public void testSplitMutationsIntoSameGroupForSingleRow() throws Exception {
    String tableName = "TBL_" + generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    Properties props = new Properties();
    props.put("phoenix.mutate.batchSize", "2");
    try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
        conn.setAutoCommit(false);
        conn.createStatement().executeUpdate(
                "CREATE TABLE "  + tableName + " ("
                        + "A VARCHAR NOT NULL PRIMARY KEY,"
                        + "B VARCHAR,"
                        + "C VARCHAR,"
                        + "D VARCHAR) COLUMN_ENCODED_BYTES = 0");
        conn.createStatement().executeUpdate("CREATE INDEX " + indexName + " on "  + tableName + " (C) INCLUDE(D)");

        conn.createStatement().executeUpdate("UPSERT INTO "  + tableName + "(A,B,C,D) VALUES ('A2','B2','C2','D2')");
        conn.createStatement().executeUpdate("UPSERT INTO "  + tableName + "(A,B,C,D) VALUES ('A3','B3', 'C3', null)");
        conn.commit();

        Table htable = conn.getQueryServices().getTable(Bytes.toBytes(tableName));
        Scan scan = new Scan();
        scan.setRaw(true);
        Iterator<Result> scannerIter = htable.getScanner(scan).iterator();
        while (scannerIter.hasNext()) {
            long ts = -1;
            Result r = scannerIter.next();
            for (Cell cell : r.listCells()) {
                if (ts == -1) {
                    ts = cell.getTimestamp();
                } else {
                    assertEquals("(" + cell.toString() + ") has different ts", ts, cell.getTimestamp());
                }
            }
        }
        htable.close();
    }
}
 
Example 13
Source File: MutableIndexReplicationIT.java    From phoenix with Apache License 2.0
private boolean ensureAnyRows(Table remoteTable) throws IOException {
    Scan scan = new Scan();
    scan.setRaw(true);
    ResultScanner scanner = remoteTable.getScanner(scan);
    boolean found = false;
    for (Result r : scanner) {
        LOGGER.info("got row: " + r);
        found = true;
    }
    scanner.close();
    return found;
}
 
Example 14
Source File: ViewTTLIT.java    From phoenix with Apache License 2.0
private void assertViewHeaderRowsHaveViewTTLRelatedCells(String schemaName, long minTimestamp,
        boolean rawScan, int expectedRows) throws IOException, SQLException {

    FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    RowFilter schemaNameFilter = new RowFilter(
            CompareFilter.CompareOp.EQUAL,
            new SubstringComparator(schemaName)
    );
    QualifierFilter viewTTLQualifierFilter = new QualifierFilter(CompareFilter.CompareOp.EQUAL,
            new BinaryComparator(PhoenixDatabaseMetaData.VIEW_TTL_BYTES));
    filterList.addFilter(schemaNameFilter);
    filterList.addFilter(viewTTLQualifierFilter);
    try (Table tbl = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES)
            .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {

        Scan allRows = new Scan();
        allRows.setRaw(rawScan);
        allRows.setTimeRange(minTimestamp, HConstants.LATEST_TIMESTAMP);
        allRows.setFilter(filterList);
        ResultScanner scanner = tbl.getScanner(allRows);
        int numMatchingRows = 0;
        for (Result result = scanner.next(); result != null; result = scanner.next()) {
            numMatchingRows +=
                    result.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                            PhoenixDatabaseMetaData.VIEW_TTL_BYTES) ? 1 : 0;
        }
        assertEquals(String.format("Expected rows do not match for table = %s at timestamp %d",
                Bytes.toString(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES), minTimestamp), expectedRows, numMatchingRows);
    }

}
 
Example 15
Source File: MutableIndexReplicationIT.java    From phoenix with Apache License 2.0
private boolean ensureAnyRows(HTable remoteTable) throws IOException {
    Scan scan = new Scan();
    scan.setRaw(true);
    ResultScanner scanner = remoteTable.getScanner(scan);
    boolean found = false;
    for (Result r : scanner) {
        LOG.info("got row: " + r);
        found = true;
    }
    scanner.close();
    return found;
}
 
Example 16
Source File: IndexManagementUtil.java    From phoenix with BSD 3-Clause "New" or "Revised" License
public static Scan newLocalStateScan(List<? extends Iterable<? extends ColumnReference>> refsArray) {
    Scan s = new Scan();
    s.setRaw(true);
    // add the necessary columns to the scan
    for (Iterable<? extends ColumnReference> refs : refsArray) {
        for (ColumnReference ref : refs) {
            s.addFamily(ref.getFamily());
        }
    }
    s.setMaxVersions();
    return s;
}
 
Example 17
Source File: IndexRebuildRegionScanner.java    From phoenix with Apache License 2.0
private RegionScanner getLocalScanner() throws IOException {
    // override the filter to skip scan and open new scanner
    // when lower bound of timerange is passed or newStartKey was populated
    // from previous call to next()
    if (minTimestamp != 0) {
        Scan incrScan = new Scan(scan);
        incrScan.setTimeRange(minTimestamp, scan.getTimeRange().getMax());
        incrScan.setRaw(true);
        incrScan.setMaxVersions();
        incrScan.getFamilyMap().clear();
        incrScan.setCacheBlocks(false);
        for (byte[] family : scan.getFamilyMap().keySet()) {
            incrScan.addFamily(family);
        }
        // For rebuilds we use count (*) as query for regular tables which ends up setting the FKOF on scan
        // This filter doesn't give us all columns and skips to the next row as soon as it finds 1 col
        // For rebuilds we need all columns and all versions
        if (scan.getFilter() instanceof FirstKeyOnlyFilter) {
            incrScan.setFilter(null);
        } else if (scan.getFilter() != null) {
            // Override the filter so that we get all versions
            incrScan.setFilter(new AllVersionsIndexRebuildFilter(scan.getFilter()));
        }
        if (nextStartKey != null) {
            incrScan.setStartRow(nextStartKey);
        }
        List<KeyRange> keys = new ArrayList<>();
        try (RegionScanner scanner = region.getScanner(incrScan)) {
            List<Cell> row = new ArrayList<>();
            int rowCount = 0;
            // collect row keys that have been modified in the given time-range
            // up to the size of page to build skip scan filter
            do {
                hasMoreIncr = scanner.nextRaw(row);
                if (!row.isEmpty()) {
                    keys.add(PVarbinary.INSTANCE.getKeyRange(CellUtil.cloneRow(row.get(0))));
                    rowCount++;
                }
                row.clear();
            } while (hasMoreIncr && rowCount < pageSizeInRows);
        }
        if (!hasMoreIncr && keys.isEmpty()) {
            return null;
        }
        if (keys.isEmpty()) {
            return innerScanner;
        }
        nextStartKey =
                ByteUtil.calculateTheClosestNextRowKeyForPrefix(keys.get(keys.size() - 1).getLowerRange());
        ScanRanges scanRanges = ScanRanges.createPointLookup(keys);
        scanRanges.initializeScan(incrScan);
        SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
        incrScan.setFilter(new SkipScanFilter(skipScanFilter, true));
        //putting back the min time to 0 for index and data reads
        incrScan.setTimeRange(0, scan.getTimeRange().getMax());
        scan.setTimeRange(0, scan.getTimeRange().getMax());
        return region.getScanner(incrScan);
    }
    return innerScanner;
}
 
Example 18
Source File: StoreNullsIT.java    From phoenix with Apache License 2.0
private void ensureNullsStoredCorrectly(Connection conn) throws Exception {
    ResultSet rs1 = conn.createStatement().executeQuery("SELECT NAME FROM "+dataTableName);
    rs1.next();
    assertEquals("v1", rs1.getString(1));
    rs1.next();
    assertNull(rs1.getString(1));
    rs1.next();
    Table htable =
            ConnectionFactory.createConnection(getUtility().getConfiguration()).getTable(
                TableName.valueOf(dataTableName));
    Scan s = new Scan();
    s.setRaw(true);
    ResultScanner scanner = htable.getScanner(s);
    // first row has a value for name
    Result rs = scanner.next();
    PTable table = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, dataTableName));
    PColumn nameColumn = table.getColumnForColumnName("NAME");
    byte[] qualifier = table.getImmutableStorageScheme()== ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES : nameColumn.getColumnQualifierBytes();
    assertTrue(rs.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, qualifier));
    assertTrue(rs.size() == 2); // 2 because it also includes the empty key value column
    KeyValueColumnExpression colExpression =
            table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS
                    ? new SingleCellColumnExpression(nameColumn, "NAME",
                            table.getEncodingScheme(), table.getImmutableStorageScheme())
                    : new KeyValueColumnExpression(nameColumn);
    ImmutableBytesPtr ptr = new ImmutableBytesPtr();
    colExpression.evaluate(new ResultTuple(rs), ptr);
    assertEquals(new ImmutableBytesPtr(PVarchar.INSTANCE.toBytes("v1")), ptr);
    rs = scanner.next();
    
    if ( !mutable && !columnEncoded // we don't issue a put with empty value for immutable tables with cols stored per key value
            || (mutable && !storeNulls)) { // for this case we use a delete to represent the null
        assertFalse(rs.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, qualifier));
        assertEquals(1, rs.size());
    }
    else { 
        assertTrue(rs.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, qualifier));
        assertEquals(2, rs.size()); 
    }
    // assert null stored correctly 
    ptr = new ImmutableBytesPtr();
    if (colExpression.evaluate(new ResultTuple(rs), ptr)) {
        assertEquals(new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY), ptr);
    }
    assertNull(scanner.next());
    scanner.close();
    htable.close();
}
 
Example 19
Source File: TestUtil.java    From phoenix with Apache License 2.0
/**
 * Runs a major compaction, and then waits until the compaction is complete before returning.
 *
 * @param tableName name of the table to be compacted
 */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {

    tableName = SchemaUtil.normalizeIdentifier(tableName);

    // We simply write a marker row, request a major compaction, and then wait until the marker
    // row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction(table.getTransactionProvider());
    }
    try (Table htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
       
        Put put = new Put(markerRowKey);
        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
    
        Admin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(TableName.valueOf(tableName));
        hbaseAdmin.majorCompact(TableName.valueOf(tableName));
        hbaseAdmin.close();
    
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
    
            try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOGGER.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOGGER.info("Compaction done: " + compactionDone);
            
            // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(TableName.valueOf(tableName));
                hbaseAdmin.majorCompact(TableName.valueOf(tableName));
                hbaseAdmin.close();
            }
        }
    }
}
 
Example 20
Source File: TestCompaction.java    From phoenix-omid with Apache License 2.0
@Test(timeOut = 60_000)
public void testACellDeletedNonTransactionallyIsPreservedWhenMinorCompactionOccurs() throws Throwable {
    String TEST_TABLE = "testACellDeletedNonTransactionallyIsPreservedWhenMinorCompactionOccurs";
    createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
    TTable txTable = new TTable(connection, TEST_TABLE);

    Table table = txTable.getHTable();

    // Configure the environment to create a minor compaction

    // Write first a value transactionally
    HBaseTransaction tx0 = (HBaseTransaction) tm.begin();
    byte[] rowId = Bytes.toBytes("row1");
    Put p0 = new Put(rowId);
    p0.addColumn(fam, qual, Bytes.toBytes("testValue-0"));
    txTable.put(tx0, p0);
    tm.commit(tx0);

    // create the first hfile
    manualFlush(TEST_TABLE);

    // Write another value transactionally
    HBaseTransaction tx1 = (HBaseTransaction) tm.begin();
    Put p1 = new Put(rowId);
    p1.addColumn(fam, qual, Bytes.toBytes("testValue-1"));
    txTable.put(tx1, p1);
    tm.commit(tx1);

    // create the second hfile
    manualFlush(TEST_TABLE);

    // Write yet another value transactionally
    HBaseTransaction tx2 = (HBaseTransaction) tm.begin();
    Put p2 = new Put(rowId);
    p2.addColumn(fam, qual, Bytes.toBytes("testValue-2"));
    txTable.put(tx2, p2);
    tm.commit(tx2);

    // create a third hfile
    manualFlush(TEST_TABLE);

    // Then perform a non-transactional Delete
    Delete d = new Delete(rowId);
    d.addColumn(fam, qual);
    table.delete(d);

    // create the fourth hfile
    manualFlush(TEST_TABLE);

    // Trigger the minor compaction
    HBaseTransaction lwmTx = (HBaseTransaction) tm.begin();
    setCompactorLWM(lwmTx.getStartTimestamp(), TEST_TABLE);
    admin.compact(TableName.valueOf(TEST_TABLE));
    Thread.sleep(5000);

    // Then perform a non-tx (raw) scan...
    Scan scan = new Scan();
    scan.setRaw(true);
    ResultScanner scannerResults = table.getScanner(scan);

    // ...and test the deleted cell is still there
    int count = 0;
    Result scanResult;
    List<Cell> listOfCellsScanned = new ArrayList<>();
    while ((scanResult = scannerResults.next()) != null) {
        listOfCellsScanned = scanResult.listCells(); // equivalent to rawCells()
        count++;
    }
    assertEquals(count, 1, "There should be only one result in scan results");
    assertEquals(listOfCellsScanned.size(), 3, "There should be 3 cell entries in scan results (2 puts, 1 del)");
    boolean wasDeletedCellFound = false;
    int numberOfDeletedCellsFound = 0;
    for (Cell cell : listOfCellsScanned) {
        if (CellUtil.isDelete(cell)) {
            wasDeletedCellFound = true;
            numberOfDeletedCellsFound++;
        }
    }
    assertTrue(wasDeletedCellFound, "We should have found a non-transactionally deleted cell");
    assertEquals(numberOfDeletedCellsFound, 1, "There should be only only one deleted cell");

    table.close();
}