Java Code Examples for org.apache.hadoop.hbase.client.Put#addColumn()
The following examples show how to use org.apache.hadoop.hbase.client.Put#addColumn(). They are drawn from open source projects; each example lists its source file, originating project, and license.
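Before the examples, here is a minimal sketch of the two most common overloads, addColumn(family, qualifier, value) and addColumn(family, qualifier, ts, value). The table name "demo", the column family "cf", and the already-open Connection conn are illustrative assumptions, not taken from any example below.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch; table "demo", family "cf", and the open Connection conn are assumed.
try (Table table = conn.getTable(TableName.valueOf("demo"))) {
  Put put = new Put(Bytes.toBytes("row1"));
  // addColumn(family, qualifier, value): the cell timestamp is assigned on the server
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
  // addColumn(family, qualifier, ts, value): the caller pins an explicit timestamp
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), System.currentTimeMillis(),
      Bytes.toBytes("v2"));
  table.put(put); // nothing is written until the Put is submitted to the Table
}

A Put only accumulates cells client-side; batching several Puts into a List<Put>, as Example 1 does, amortizes RPC round trips.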
Example 1
Source File: TestTimeRangeMapRed.java From hbase with Apache License 2.0
@Override
public void map(ImmutableBytesWritable key, Result result, Context context)
    throws IOException {
  List<Long> tsList = new ArrayList<>();
  for (Cell kv : result.listCells()) {
    tsList.add(kv.getTimestamp());
  }
  List<Put> puts = new ArrayList<>();
  for (Long ts : tsList) {
    Put put = new Put(key.get());
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
    puts.add(put);
  }
  table.put(puts);
}
Example 2
Source File: JobHistoryRawService.java From hraven with Apache License 2.0
/**
 * Flags a job's RAW record for reprocessing
 *
 * @param jobId
 */
public void markJobForReprocesssing(QualifiedJobId jobId) throws IOException {
  Put p = new Put(idConv.toBytes(jobId));
  p.addColumn(Constants.INFO_FAM_BYTES, Constants.RAW_COL_REPROCESS_BYTES,
      Bytes.toBytes(true));
  Table rawTable = null;
  try {
    rawTable = hbaseConnection
        .getTable(TableName.valueOf(Constants.HISTORY_RAW_TABLE));
    rawTable.put(p);
  } finally {
    if (rawTable != null) {
      rawTable.close();
    }
  }
}
Example 3
Source File: UseSchemaIT.java From phoenix with Apache License 2.0
@Test
public void testMappedView() throws Exception {
  Properties props = new Properties();
  String schema = generateUniqueName();
  String tableName = generateUniqueName();
  String fullTablename = schema + QueryConstants.NAME_SEPARATOR + tableName;
  props.setProperty(QueryServices.SCHEMA_ATTRIB, schema);
  Connection conn = DriverManager.getConnection(getUrl(), props);
  Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
  admin.createNamespace(NamespaceDescriptor.create(schema).build());
  admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(fullTablename))
      .addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES))
      .build());
  Put put = new Put(PVarchar.INSTANCE.toBytes(fullTablename));
  put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
      QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
  Table phoenixSchematable = admin.getConnection().getTable(TableName.valueOf(fullTablename));
  phoenixSchematable.put(put);
  phoenixSchematable.close();
  conn.createStatement().execute("CREATE VIEW " + tableName + " (tablename VARCHAR PRIMARY KEY)");
  ResultSet rs = conn.createStatement().executeQuery("select tablename from " + tableName);
  assertTrue(rs.next());
  assertEquals(fullTablename, rs.getString(1));
  admin.close();
  conn.close();
}
Example 4
Source File: CatalogJanitor.java From hbase with Apache License 2.0
/**
 * For testing against a cluster.
 * Doesn't have a MasterServices context so does not report on good vs bad servers.
 */
public static void main(String[] args) throws IOException {
  checkLog4jProperties();
  ReportMakingVisitor visitor = new ReportMakingVisitor(null);
  Configuration configuration = HBaseConfiguration.create();
  configuration.setBoolean("hbase.defaults.for.version.skip", true);
  try (Connection connection = ConnectionFactory.createConnection(configuration)) {
    /* Used to generate an overlap. */
    Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0."));
    g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    try (Table t = connection.getTable(TableName.META_TABLE_NAME)) {
      Result r = t.get(g);
      byte[] row = g.getRow();
      row[row.length - 2] <<= row[row.length - 2];
      Put p = new Put(g.getRow());
      p.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
      t.put(p);
    }
    MetaTableAccessor.scanMetaForTableRegions(connection, visitor, null);
    Report report = visitor.getReport();
    LOG.info(report != null ? report.toString() : "empty");
  }
}
Example 5
Source File: TestRegionServerNoMaster.java From hbase with Apache License 2.0
@BeforeClass
public static void before() throws Exception {
  HTU.startMiniCluster(NB_SERVERS);
  final TableName tableName =
      TableName.valueOf(TestRegionServerNoMaster.class.getSimpleName());

  // Create table then get the single region for our new table.
  table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY);
  Put p = new Put(row);
  p.addColumn(HConstants.CATALOG_FAMILY, row, row);
  table.put(p);

  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hri = locator.getRegionLocation(row, false).getRegion();
  }
  regionName = hri.getRegionName();

  stopMasterAndAssignMeta(HTU);
}
Example 6
Source File: HBaseMetadataWriter.java From geowave with Apache License 2.0
@Override
public void write(final GeoWaveMetadata metadata) {
  // we use a hashset of row IDs so that we can retain multiple versions
  // (otherwise timestamps will be applied on the server side in
  // batches and if the same row exists within a batch we will not
  // retain multiple versions)
  final Put put = new Put(metadata.getPrimaryId());
  final byte[] secondaryBytes =
      metadata.getSecondaryId() != null ? metadata.getSecondaryId() : new byte[0];
  put.addColumn(metadataTypeBytes, secondaryBytes, metadata.getValue());
  if ((metadata.getVisibility() != null) && (metadata.getVisibility().length > 0)) {
    put.setCellVisibility(
        new CellVisibility(StringUtils.stringFromBinary(metadata.getVisibility())));
  }
  try {
    synchronized (duplicateRowTracker) {
      final ByteArray primaryId = new ByteArray(metadata.getPrimaryId());
      if (!duplicateRowTracker.add(primaryId)) {
        writer.flush();
        duplicateRowTracker.clear();
        duplicateRowTracker.add(primaryId);
      }
    }
    writer.mutate(put);
  } catch (final IOException e) {
    LOGGER.error("Unable to write metadata", e);
  }
}
Example 7
Source File: TestMasterReplication.java From hbase with Apache License 2.0
private void putAndWait(byte[] row, byte[] fam, Table source, Table target)
    throws Exception {
  Put put = new Put(row);
  put.addColumn(fam, row, row);
  source.put(put);
  wait(row, target, false);
}
Example 8
Source File: TestConstraint.java From hbase with Apache License 2.0
/**
 * Test that we run a passing constraint
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test
public void testConstraintPasses() throws Exception {
  // create the table
  // it would be nice if this was also a method on the util
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    tableDescriptor.setColumnFamily(
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
  }
  // add a constraint
  Constraints.add(tableDescriptor, CheckWasRunConstraint.class);

  util.getAdmin().createTable(tableDescriptor);
  Table table = util.getConnection().getTable(tableName);
  try {
    // test that we don't fail on a valid put
    Put put = new Put(row1);
    byte[] value = Bytes.toBytes(Integer.toString(10));
    byte[] qualifier = new byte[0];
    put.addColumn(dummy, qualifier, value);
    table.put(put);
  } finally {
    table.close();
  }
  assertTrue(CheckWasRunConstraint.wasRun);
}
Example 9
Source File: HbaseAgentInfoDao.java From pinpoint with Apache License 2.0
@Override
public void insert(AgentInfoBo agentInfo) {
  Objects.requireNonNull(agentInfo, "agentInfo");
  if (logger.isDebugEnabled()) {
    logger.debug("insert agent info. {}", agentInfo);
  }
  // Assert agentId
  CollectorUtils.checkAgentId(agentInfo.getAgentId());
  // Assert applicationName
  CollectorUtils.checkApplicationName(agentInfo.getApplicationName());

  final byte[] agentId = Bytes.toBytes(agentInfo.getAgentId());
  final long reverseKey = TimeUtils.reverseTimeMillis(agentInfo.getStartTime());
  final byte[] rowKey = RowKeyUtils.concatFixedByteAndLong(agentId,
      HbaseTableConstatns.AGENT_NAME_MAX_LEN, reverseKey);
  final Put put = new Put(rowKey);

  // should add additional agent information. For now, only the start time for sqlMetaData is added.
  final byte[] agentInfoBoValue = agentInfo.writeValue();
  put.addColumn(descriptor.getColumnFamilyName(),
      descriptor.getColumnFamily().QUALIFIER_IDENTIFIER, agentInfoBoValue);

  if (agentInfo.getServerMetaData() != null) {
    final byte[] serverMetaDataBoValue = agentInfo.getServerMetaData().writeValue();
    put.addColumn(descriptor.getColumnFamilyName(),
        descriptor.getColumnFamily().QUALIFIER_SERVER_META_DATA, serverMetaDataBoValue);
  }
  if (agentInfo.getJvmInfo() != null) {
    final byte[] jvmInfoBoValue = agentInfo.getJvmInfo().writeValue();
    put.addColumn(descriptor.getColumnFamilyName(),
        descriptor.getColumnFamily().QUALIFIER_JVM, jvmInfoBoValue);
  }

  final TableName agentInfoTableName = descriptor.getTableName();
  hbaseTemplate.put(agentInfoTableName, put);
}
Example 10
Source File: JobFileRawLoaderMapper.java From hraven with Apache License 2.0
/**
 * Call {@link #readJobFile(FileStatus)} and add the raw bytes and the last
 * modified millis to {@code puts}
 *
 * @param puts to add puts to.
 * @param rowKey to identify the row in the raw table.
 * @param rawColumn where to add the raw data in
 * @param lastModificationColumn where to add the last modification time in
 * @param fileStatus Referring to the jobFile to load.
 * @throws IOException
 */
private void addRawPut(List<Put> puts, byte[] rowKey, byte[] rawColumn,
    byte[] lastModificationColumn, FileStatus fileStatus) throws IOException {
  byte[] rawBytes = readJobFile(fileStatus);
  Put raw = new Put(rowKey);
  byte[] rawLastModifiedMillis = Bytes.toBytes(fileStatus.getModificationTime());

  raw.addColumn(Constants.RAW_FAM_BYTES, rawColumn, rawBytes);
  raw.addColumn(Constants.INFO_FAM_BYTES, lastModificationColumn, rawLastModifiedMillis);

  puts.add(raw);
}
Example 11
Source File: CubeVisitServiceTest.java From kylin with Apache License 2.0
public static void prepareTestData() throws Exception {
  try {
    util.getHBaseAdmin().disableTable(TABLE);
    util.getHBaseAdmin().deleteTable(TABLE);
  } catch (Exception e) {
    // ignore table not found
  }
  Table table = util.createTable(TABLE, FAM);
  HRegionInfo hRegionInfo = new HRegionInfo(table.getName());
  region = util.createLocalHRegion(hRegionInfo, table.getTableDescriptor());

  gtInfo = newInfo();
  GridTable gridTable = newTable(gtInfo);
  IGTScanner scanner = gridTable.scan(new GTScanRequestBuilder().setInfo(gtInfo)
      .setRanges(null).setDimensions(null).setFilterPushDown(null).createGTScanRequest());
  for (GTRecord record : scanner) {
    byte[] value = record.exportColumns(gtInfo.getPrimaryKey()).toBytes();
    byte[] key = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN + value.length];
    System.arraycopy(Bytes.toBytes(baseCuboid), 0, key, RowConstants.ROWKEY_SHARDID_LEN,
        RowConstants.ROWKEY_CUBOIDID_LEN);
    System.arraycopy(value, 0, key, RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN, value.length);
    Put put = new Put(key);
    put.addColumn(FAM[0], COL_M, record.exportColumns(gtInfo.getColumnBlock(1)).toBytes());
    put.addColumn(FAM[1], COL_M, record.exportColumns(gtInfo.getColumnBlock(2)).toBytes());
    region.put(put);
  }
}
Example 12
Source File: TestReplicationStuckWithDroppedTable.java From hbase with Apache License 2.0
@Test
public void testEditsStuckBehindDroppedTable() throws Exception {
  // add peer
  ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
      .setClusterKey(utility2.getClusterKey())
      .setReplicateAllUserTables(true).build();
  admin1.addReplicationPeer(PEER_ID, rpc);

  // create table
  createTable(NORMAL_TABLE);
  createTable(DROPPED_TABLE);

  admin1.disableReplicationPeer(PEER_ID);

  try (Table droppedTable = utility1.getConnection().getTable(DROPPED_TABLE)) {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    droppedTable.put(put);
  }

  admin1.disableTable(DROPPED_TABLE);
  admin1.deleteTable(DROPPED_TABLE);
  admin2.disableTable(DROPPED_TABLE);
  admin2.deleteTable(DROPPED_TABLE);

  admin1.enableReplicationPeer(PEER_ID);

  verifyReplicationStuck();

  // Remove peer
  admin1.removeReplicationPeer(PEER_ID);
  // Drop table
  admin1.disableTable(NORMAL_TABLE);
  admin1.deleteTable(NORMAL_TABLE);
  admin2.disableTable(NORMAL_TABLE);
  admin2.deleteTable(NORMAL_TABLE);
}
Example 13
Source File: StatisticsWriter.java From phoenix with Apache License 2.0
private Put getLastStatsUpdatedTimePut(long timeStamp) {
  long currentTime = EnvironmentEdgeManager.currentTimeMillis();
  byte[] prefix = tableName;
  Put put = new Put(prefix);
  put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
      PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, timeStamp,
      PDate.INSTANCE.toBytes(new Date(currentTime)));
  return put;
}
Example 14
Source File: TestFiltersWithBinaryComponentComparator.java From hbase with Apache License 2.0
/**
 * Since we are trying to emulate
 * SQL: SELECT * from table where a = 1 and b > 10 and b < 20 and
 * c > 90 and c < 100 and d = 1
 * We are generating rows with:
 * a = 1, b >= 9 and b < 22, c >= 89 and c < 102, and d = 1
 * At the end the table will look something like this:
 * --------------------------
 *  a| b|  c|d|
 * --------------------------
 *  1| 9| 89|1|family:qf|xyz|
 * --------------------------
 *  1| 9| 90|1|family:qf|abc|
 * --------------------------
 *  1| 9| 91|1|family:qf|xyz|
 * --------------------------
 *  .
 * --------------------------
 *  .
 * --------------------------
 *  1|21|101|1|family:qf|xyz|
 */
private void generateRows(Table ht, byte[] family, byte[] qf) throws IOException {
  for (int a = 1; a < 2; ++a) {
    for (int b = 9; b < 22; ++b) {
      for (int c = 89; c < 102; ++c) {
        for (int d = 1; d < 2; ++d) {
          byte[] key = new byte[16];
          Bytes.putInt(key, 0, a);
          Bytes.putInt(key, 4, b);
          Bytes.putInt(key, 8, c);
          Bytes.putInt(key, 12, d);
          Put row = new Put(key);
          if (c % 2 == 0) {
            row.addColumn(family, qf, Bytes.toBytes("abc"));
            if (LOG.isInfoEnabled()) {
              LOG.info("added row: {} with value 'abc'", Arrays.toString(Hex.encodeHex(key)));
            }
          } else {
            row.addColumn(family, qf, Bytes.toBytes("xyz"));
            if (LOG.isInfoEnabled()) {
              LOG.info("added row: {} with value 'xyz'", Arrays.toString(Hex.encodeHex(key)));
            }
          }
        }
      }
    }
  }
  TEST_UTIL.flush();
}
Example 15
Source File: TestDefaultMobStoreFlusher.java From hbase with Apache License 2.0
private void testFlushFile(TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor)
    throws Exception {
  Table table = null;
  try {
    table = TEST_UTIL.createTable(tableDescriptor, null);

    // put data
    Put put0 = new Put(row1);
    put0.addColumn(family, qf1, 1, value1);
    table.put(put0);

    // put more data
    Put put1 = new Put(row2);
    put1.addColumn(family, qf2, 1, value2);
    table.put(put1);

    // flush
    TEST_UTIL.flush(tableDescriptor.getTableName());

    // Scan
    Scan scan = new Scan();
    scan.addColumn(family, qf1);
    scan.readVersions(4);
    ResultScanner scanner = table.getScanner(scan);

    // Compare
    int size = 0;
    for (Result result : scanner) {
      size++;
      List<Cell> cells = result.getColumnCells(family, qf1);
      // Verify the cell size
      Assert.assertEquals(1, cells.size());
      // Verify the value
      Assert.assertArrayEquals(value1, CellUtil.cloneValue(cells.get(0)));
    }
    scanner.close();
    Assert.assertEquals(1, size);
  } finally {
    table.close();
  }
}
Example 16
Source File: TestShadowCells.java From phoenix-omid with Apache License 2.0
@Test(timeOut = 60_000)
public void testCrashingAfterCommitDoesNotWriteShadowCells(ITestContext context) throws Exception {

  CommitTable.Client commitTableClient = spy(getCommitTable(context).getClient());

  HBaseOmidClientConfiguration hbaseOmidClientConf = new HBaseOmidClientConfiguration();
  hbaseOmidClientConf.setConnectionString(TSO_SERVER_HOST + ":" + TSO_SERVER_PORT);
  hbaseOmidClientConf.setHBaseConfiguration(hbaseConf);
  PostCommitActions syncPostCommitter = spy(
      new HBaseSyncPostCommitter(new NullMetricsProvider(), commitTableClient, connection));
  AbstractTransactionManager tm = spy(
      (AbstractTransactionManager) HBaseTransactionManager.builder(hbaseOmidClientConf)
          .postCommitter(syncPostCommitter)
          .commitTableClient(commitTableClient)
          .commitTableWriter(getCommitTable(context).getWriter())
          .build());

  // The following line emulates a crash after commit that is observed in (*) below
  doThrow(new RuntimeException()).when(syncPostCommitter).updateShadowCells(any(HBaseTransaction.class));

  TTable table = new TTable(connection, TEST_TABLE);

  HBaseTransaction t1 = (HBaseTransaction) tm.begin();

  // Test shadow cells are created properly
  Put put = new Put(row);
  put.addColumn(family, qualifier, data1);
  table.put(t1, put);
  try {
    tm.commit(t1);
  } catch (Exception e) { // (*) crash
    // Do nothing
  }

  // After commit with the emulated crash, test that only the cell is there
  assertTrue(hasCell(row, family, qualifier, t1.getStartTimestamp(), new TTableCellGetterAdapter(table)),
      "Cell should be there");
  assertFalse(hasShadowCell(row, family, qualifier, t1.getStartTimestamp(), new TTableCellGetterAdapter(table)),
      "Shadow cell should not be there");

  Transaction t2 = tm.begin();
  Get get = new Get(row);
  get.addColumn(family, qualifier);

  Result getResult = table.get(t2, get);
  assertTrue(Arrays.equals(data1, getResult.getValue(family, qualifier)),
      "Shadow cell should not be there");
  verify(commitTableClient, times(1)).getCommitTimestamp(anyLong());
}
Example 17
Source File: TestRowCounter.java From hbase with Apache License 2.0
/**
 * Test a case when the timerange is specified with --starttime and --endtime options
 *
 * @throws Exception in case of any unexpected error.
 */
@Test
public void testCreateSubmittableJobWithArgsTimeRange() throws Exception {
  final byte[] family = Bytes.toBytes(COL_FAM);
  final byte[] col1 = Bytes.toBytes(COL1);
  Put put1 = new Put(Bytes.toBytes("row_timerange_" + 1));
  Put put2 = new Put(Bytes.toBytes("row_timerange_" + 2));
  Put put3 = new Put(Bytes.toBytes("row_timerange_" + 3));
  long ts;

  String tableName = TABLE_NAME_TS_RANGE + "CreateSubmittableJobWithArgs";
  // clean up content of TABLE_NAME
  Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM));

  ts = System.currentTimeMillis();
  put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
  table.put(put1);
  Thread.sleep(100);

  ts = System.currentTimeMillis();
  put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
  put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
  table.put(put2);
  table.put(put3);
  table.close();

  String[] args = new String[] {
      tableName, COL_FAM + ":" + COL1,
      "--starttime=" + 0,
      "--endtime=" + ts
  };
  runCreateSubmittableJobWithArgs(args, 1);

  args = new String[] {
      tableName, COL_FAM + ":" + COL1,
      "--starttime=" + 0,
      "--endtime=" + (ts - 10)
  };
  runCreateSubmittableJobWithArgs(args, 1);

  args = new String[] {
      tableName, COL_FAM + ":" + COL1,
      "--starttime=" + ts,
      "--endtime=" + (ts + 1000)
  };
  runCreateSubmittableJobWithArgs(args, 2);

  args = new String[] {
      tableName, COL_FAM + ":" + COL1,
      "--starttime=" + (ts - 30 * 1000),
      "--endtime=" + (ts + 30 * 1000),
  };
  runCreateSubmittableJobWithArgs(args, 3);
}
Example 18
Source File: PerformanceEvaluation.java From hbase with Apache License 2.0
@Override @SuppressWarnings("ReturnValueIgnored") boolean testRow(final int i) throws IOException, InterruptedException { byte[] row = generateRow(i); Put put = new Put(row); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); Tag[] tags = new Tag[opts.noOfTags]; for (int n = 0; n < opts.noOfTags; n++) { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { put.addColumn(familyName, qualifier, value); updateValueSize(value.length); } } } put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL); try { table.put(put).get(); if (opts.multiPut > 0) { this.puts.add(put); if (this.puts.size() == opts.multiPut) { this.table.put(puts).stream().map(f -> AsyncRandomReadTest.propagate(f::get)); this.puts.clear(); } else { return false; } } else { table.put(put).get(); } } catch (ExecutionException e) { throw new IOException(e); } return true; }
Example 19
Source File: TestCompactionTool.java From hbase with Apache License 2.0
private void putAndFlush(int key) throws Exception {
  Put put = new Put(Bytes.toBytes(key));
  put.addColumn(HBaseTestingUtility.fam1, qualifier, Bytes.toBytes("val" + key));
  region.put(put);
  region.flush(true);
}
Example 20
Source File: TestAsynchronousPostCommitter.java From phoenix-omid with Apache License 2.0
@Test(timeOut = 30_000)
public void testOnlyShadowCellsUpdateIsExecuted(ITestContext context) throws Exception {

  CommitTable.Client commitTableClient = getCommitTable(context).getClient();

  PostCommitActions syncPostCommitter =
      spy(new HBaseSyncPostCommitter(new NullMetricsProvider(), commitTableClient, connection));
  ListeningExecutorService postCommitExecutor =
      MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor(
          new ThreadFactoryBuilder().setNameFormat("postCommit-%d").build()));
  PostCommitActions asyncPostCommitter =
      new HBaseAsyncPostCommitter(syncPostCommitter, postCommitExecutor);

  TransactionManager tm = newTransactionManager(context, asyncPostCommitter);

  final CountDownLatch removeCommitTableEntryCalledLatch = new CountDownLatch(1);

  doAnswer(new Answer<Void>() {
    public Void answer(InvocationOnMock invocation) {
      // Do not invoke real method simulating a fail of the async clean of commit table entry
      removeCommitTableEntryCalledLatch.countDown();
      return null;
    }
  }).when(syncPostCommitter).removeCommitTableEntry(any(AbstractTransaction.class));

  try (TTable txTable = new TTable(connection, TEST_TABLE)) {

    // Execute tx with async post commit actions
    Transaction tx1 = tm.begin();

    Put put1 = new Put(row1);
    put1.addColumn(family, qualifier, Bytes.toBytes("hey!"));
    txTable.put(tx1, put1);
    Put put2 = new Put(row2);
    put2.addColumn(family, qualifier, Bytes.toBytes("hou!"));
    txTable.put(tx1, put2);

    tm.commit(tx1);

    long tx1Id = tx1.getTransactionId();

    // We continue when the unsuccessful call of the method for cleaning commit table has been invoked
    removeCommitTableEntryCalledLatch.await();

    // We check that the shadow cells are there (because the update of the shadow cells should precede
    // the cleaning of the commit table entry) ...
    assertTrue(CellUtils.hasShadowCell(row1, family, qualifier, tx1Id, new TTableCellGetterAdapter(txTable)));
    assertTrue(CellUtils.hasShadowCell(row2, family, qualifier, tx1Id, new TTableCellGetterAdapter(txTable)));

    // ... and the commit table entry has NOT been cleaned
    Optional<CommitTable.CommitTimestamp> commitTimestamp =
        commitTableClient.getCommitTimestamp(tx1Id).get();
    assertTrue(commitTimestamp.isPresent());
    assertTrue(commitTimestamp.get().isValid());

    // Final checks
    verify(syncPostCommitter, times(1)).updateShadowCells(any(AbstractTransaction.class));
    verify(syncPostCommitter, times(1)).removeCommitTableEntry(any(AbstractTransaction.class));
  }
}