Java Code Examples for org.apache.hadoop.hbase.client.Put#add()

The following examples show how to use org.apache.hadoop.hbase.client.Put#add(). They are drawn from several open-source projects; the source file, project, and license are noted above each example.
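
Across the examples below, Put#add() is called in three shapes: add(family, qualifier, value), add(family, qualifier, timestamp, value), and add(KeyValue/Cell) to append a prebuilt cell. Here is a minimal sketch of all three, assuming the pre-1.0 HBase client API where these overloads all exist; the row, family, and qualifier names are placeholders, not taken from any example project.

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAddOverloads {
    static Put buildPut() throws IOException {
        byte[] row = Bytes.toBytes("row1");
        byte[] fam = Bytes.toBytes("cf");
        Put put = new Put(row);
        // 1. family + qualifier + value
        put.add(fam, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
        // 2. family + qualifier + explicit timestamp + value
        put.add(fam, Bytes.toBytes("q2"), 42L, Bytes.toBytes("v2"));
        // 3. append a prebuilt KeyValue; its row must match the Put's row
        put.add(new KeyValue(row, fam, Bytes.toBytes("q3"), Bytes.toBytes("v3")));
        return put;
    }
}
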
Example 1
Source File: TestHbaseClient.java    From kylin with Apache License 2.0
public static void main(String[] args) throws IOException {
    foo(6, 5);
    foo(5, 2);
    foo(3, 0);

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "hbase_host");
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    HTable table = new HTable(conf, "test1");
    Put put = new Put(Bytes.toBytes("row1"));

    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));

    table.put(put);
    table.close();
}
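
Note: the add(family, qualifier, value) overload used above (and HTable itself) was deprecated in the HBase 1.0 client and removed in 2.0. On 1.x+ clients the equivalent write goes through Put#addColumn() against a Table handle; a rough sketch (the Table is assumed to come from an open Connection, which is not part of the original example):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ModernPutExample {
    // Same two writes as Example 1, on the HBase 1.x+ API (sketch only).
    static void writeRow(Table table) throws IOException {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
        put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
        table.put(put);
    }
}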
 
Example 2
Source File: HBaseBackedTransactionLogger.java    From hbase-secondary-index with GNU General Public License v3.0
public void setStatusForTransaction(final long transactionId, final TransactionStatus status) {
    Put update = new Put(getRow(transactionId));
    update.add(INFO_FAMILY, STATUS_QUALIFIER, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(status.name()));

    HTableInterface table = getTable();
    try {
        table.put(update);
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        putTable(table);
    }
}
 
Example 3
Source File: IndexRegionObserver.java    From phoenix with Apache License 2.0
/**
 * Retrieve the last committed data row state. This method is called only for regular data
 * mutations, since rebuild (i.e., index replay) mutations already include all row versions.
 */
private void getCurrentRowStates(ObserverContext<RegionCoprocessorEnvironment> c,
                                 BatchMutateContext context) throws IOException {
    Set<KeyRange> keys = new HashSet<KeyRange>(context.rowsToLock.size());
    for (ImmutableBytesPtr rowKeyPtr : context.rowsToLock) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get()));
    }
    Scan scan = new Scan();
    ScanRanges scanRanges = ScanRanges.createPointLookup(new ArrayList<KeyRange>(keys));
    scanRanges.initializeScan(scan);
    SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
    scan.setFilter(skipScanFilter);
    context.dataRowStates = new HashMap<ImmutableBytesPtr, Pair<Put, Put>>(context.rowsToLock.size());
    try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
        boolean more = true;
        while(more) {
            List<Cell> cells = new ArrayList<Cell>();
            more = scanner.next(cells);
            if (cells.isEmpty()) {
                continue;
            }
            byte[] rowKey = CellUtil.cloneRow(cells.get(0));
            Put put = new Put(rowKey);
            for (Cell cell : cells) {
                put.add(cell);
            }
            context.dataRowStates.put(new ImmutableBytesPtr(rowKey), new Pair<Put, Put>(put, new Put(put)));
        }
    }
}
 
Example 4
Source File: DynamicColumnTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME,HBASE_DYNAMIC_COLUMNS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] dv = Bytes.toBytes("DV");
        byte[] first = Bytes.toBytes("F");
        byte[] f1v1 = Bytes.toBytes("F1V1");
        byte[] f1v2 = Bytes.toBytes("F1V2");
        byte[] f2v1 = Bytes.toBytes("F2V1");
        byte[] f2v2 = Bytes.toBytes("F2V2");
        byte[] key = Bytes.toBytes("entry1");

        Put put = new Put(key);
        put.add(QueryConstants.EMPTY_COLUMN_BYTES, dv, Bytes.toBytes("default"));
        put.add(QueryConstants.EMPTY_COLUMN_BYTES, first, Bytes.toBytes("first"));
        put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
        put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
        put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
        put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
        mutations.add(put);

        hTable.batch(mutations);

    } finally {
        hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
}
 
Example 5
Source File: KeyValueBuilder.java    From phoenix with Apache License 2.0
/**
 * Helper method for a {@link KeyValueBuilder} that catches an IOException from a {@link Put}
 * when adding a {@link KeyValue} generated by the KeyValueBuilder.
 * @throws RuntimeException if there is an IOException thrown from the underlying {@link Put}
 */
@SuppressWarnings("javadoc")
public static void addQuietly(Put put, KeyValueBuilder builder, KeyValue kv) {
    try {
        put.add(kv);
    } catch (IOException e) {
        throw new RuntimeException("KeyValue Builder " + builder + " created an invalid kv: "
                + kv + "!");
    }
}
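
For context, a hypothetical call site might look like the following (sketch only; the helper name is illustrative, and the KeyValueBuilder import path is assumed from Phoenix's index utilities):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder; // assumed package path

public class AddQuietlyUsage {
    // The builder argument only labels the error message if the add fails.
    static void addCellQuietly(Put put, KeyValueBuilder builder,
            byte[] row, byte[] fam, byte[] qual, byte[] val) {
        KeyValue kv = new KeyValue(row, fam, qual, val);
        KeyValueBuilder.addQuietly(put, builder, kv);
    }
}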
 
Example 6
Source File: EnrichedCDRHbaseOutputOperator.java    From examples with Apache License 2.0
@Override
public Put operationPut(EnrichedCDR ecdr)
{
  Put put = new Put(Bytes.toBytes(ecdr.getImsi()));
  put.add(familyName, Bytes.toBytes("isdn"), Bytes.toBytes(ecdr.getIsdn()));
  put.add(familyName, Bytes.toBytes("imei"), Bytes.toBytes(ecdr.getImei()));
  put.add(familyName, Bytes.toBytes("plan"), Bytes.toBytes(ecdr.getPlan()));
  put.add(familyName, Bytes.toBytes("callType"), Bytes.toBytes(ecdr.getCallType()));
  put.add(familyName, Bytes.toBytes("correspType"), Bytes.toBytes(ecdr.getCorrespType()));
  put.add(familyName, Bytes.toBytes("correspIsdn"), Bytes.toBytes(ecdr.getCorrespIsdn()));
  put.add(familyName, Bytes.toBytes("duration"), Bytes.toBytes(ecdr.getDuration()));
  put.add(familyName, Bytes.toBytes("bytes"), Bytes.toBytes(ecdr.getBytes()));
  put.add(familyName, Bytes.toBytes("dr"), Bytes.toBytes(ecdr.getDr()));
  put.add(familyName, Bytes.toBytes("lat"), Bytes.toBytes(ecdr.getLat()));
  put.add(familyName, Bytes.toBytes("lon"), Bytes.toBytes(ecdr.getLon()));
  put.add(familyName, Bytes.toBytes("date"), Bytes.toBytes(ecdr.getDate()));
  put.add(familyName, Bytes.toBytes("time"), Bytes.toBytes(ecdr.getTimeInDay()));
  if (ecdr.getDrLabel() != null) {
    put.add(familyName, Bytes.toBytes("drLabel"), Bytes.toBytes(ecdr.getDrLabel()));
  }
  put.add(familyName, Bytes.toBytes("operatorCode"), Bytes.toBytes(ecdr.getOperatorCode()));
  put.add(familyName, Bytes.toBytes("deviceBrand"), Bytes.toBytes(ecdr.getDeviceBrand()));
  put.add(familyName, Bytes.toBytes("deviceModel"), Bytes.toBytes(ecdr.getDeviceModel()));
  put.add(familyName, Bytes.toBytes("zipCode"), Bytes.toBytes(ecdr.getZipCode()));
  return put;
}
 
Example 7
Source File: IndexRebuildRegionScanner.java    From phoenix with Apache License 2.0
public static void apply(Put destination, Put source) throws IOException {
    for (List<Cell> cells : source.getFamilyCellMap().values()) {
        for (Cell cell : cells) {
            if (!destination.has(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell))) {
                destination.add(cell);
            }
        }
    }
}
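
Since cells already present in the destination win, apply() effectively overlays the source underneath the destination. A small usage sketch, written as if it sat next to apply() in the same class (names and values are placeholders; assumes the HBase 2.x Put#addColumn API that this Phoenix code builds on):

// Hypothetical helper beside apply() above: cells already present in
// `newer` win; `older` only fills in qualifiers that are missing.
static Put merge(byte[] row) throws IOException {
    byte[] fam = Bytes.toBytes("cf");
    Put newer = new Put(row);
    newer.addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("new"));
    Put older = new Put(row);
    older.addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("old"));  // skipped: q1 already present
    older.addColumn(fam, Bytes.toBytes("q2"), Bytes.toBytes("kept")); // copied: q2 missing
    apply(newer, older); // newer now holds q1="new" and q2="kept"
    return newer;
}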
 
Example 8
Source File: HBaseTestHelper.java    From attic-apex-malhar with Apache License 2.0
public static void populateHBase() throws IOException
{
  Configuration conf = getConfiguration();
  clearHBase(conf);
  HTable table1 = new HTable(conf, "table1");
  for (int i = 0; i < 500; ++i) {
    Put put = new Put(Bytes.toBytes("row" + i));
    for (int j = 0; j < 500; ++j) {
      put.add(colfam0_bytes, Bytes.toBytes("col-" + j), Bytes.toBytes("val-" + i + "-" + j));
    }
    table1.put(put);
  }
  table1.close();
}
 
Example 9
Source File: AbstractConverter.java    From metron with Apache License 2.0
@Override
public Put toPut(String columnFamily, KEY_T key, VALUE_T values) throws IOException {
  Put put = new Put(key.toBytes());
  byte[] cf = Bytes.toBytes(columnFamily);
  for(Map.Entry<byte[], byte[]> kv : values.toColumns()) {
    put.add(cf, kv.getKey(), kv.getValue());
  }
  return put;
}
 
Example 10
Source File: TestReversibleScanners.java    From hbase with Apache License 2.0
private static void loadDataToRegion(HRegion region, byte[] additionalFamily)
    throws IOException {
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    for (int j = 0; j < QUALSIZE; j++) {
      put.add(makeKV(i, j));
      // put additional family
      put.add(makeKV(i, j, additionalFamily));
    }
    region.put(put);
    if (i == ROWSIZE / 3 || i == ROWSIZE * 2 / 3) {
      region.flush(true);
    }
  }
}
 
Example 11
Source File: EndToEndCoveredColumnsIndexBuilderIT.java    From phoenix with Apache License 2.0
/**
 * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
 * Previous implementations managed batches by playing current state against each element in the
 * batch, rather than combining all the per-row updates into a single mutation for the batch. This
 * test ensures that we see the correct expected state.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
    long ts = state.ts;
    // build up a list of puts to make, all on the same row
    Put p1 = new Put(row, ts);
    p1.add(family, qual, Bytes.toBytes("v1"));
    Put p2 = new Put(row, ts + 1);
    p2.add(family, qual, Bytes.toBytes("v2"));

    // setup all the verifiers we need. This is just the same as above, but will be called twice
    // since we need to iterate the batch.

    // get all the underlying kvs for the put
    final List<Cell> allKvs = new ArrayList<Cell>(2);
    allKvs.addAll(p2.getFamilyCellMap().get(family));
    allKvs.addAll(p1.getFamilyCellMap().get(family));

    // setup the verifier for the data we expect to write
    // both puts should be put into a single batch
    final ColumnReference familyRef =
        new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family, ColumnReference.ALL_QUALIFIERS);
    VerifyingIndexCodec codec = state.codec;
    // no previous state in the table
    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", Collections
        .<Cell> emptyList(), familyRef));
    codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyCellMap().get(family),
        familyRef));

    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyCellMap().get(family),
        familyRef));
    // kvs from both puts should be in the table now
    codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

    // do the actual put (no indexing will actually be done)
    HTable primary = state.table;
    primary.setAutoFlush(false);
    primary.put(Arrays.asList(p1, p2));
    primary.flushCommits();

    // cleanup after ourselves
    cleanup(state);
}
 
Example 12
Source File: FailWithoutRetriesIT.java    From phoenix with Apache License 2.0
/**
 * If this test times out, then we didn't fail quickly enough; {@link Indexer} may not be
 * rethrowing the exception correctly.
 * <p>
 * We use a custom codec to enforce the thrown exception.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testQuickFailure() throws Exception {
  // incorrectly set up indexing for the primary table - the target index table doesn't exist,
  // so the failure should return to the client quickly
  byte[] family = Bytes.toBytes("family");
  ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
  // values are [col1]
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  // add the index family
  builder.addIndexGroup(fam1);
  // usually, we would create the index table here, but we don't for the sake of the test.

  // setup the primary table
  String primaryTable = Bytes.toString(table.getTableName());
  @SuppressWarnings("deprecation")
  HTableDescriptor pTable = new HTableDescriptor(primaryTable);
  pTable.addFamily(new HColumnDescriptor(family));
  // override the codec so we can use our test one
  builder.build(pTable, FailingTestCodec.class);

  // create the primary table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.createTable(pTable);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // up the number of retries/wait time to make it obvious that we are failing with retries here
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
  conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
  HTable primary = new HTable(conf, primaryTable);
  primary.setAutoFlush(false, true);

  // do a simple put that should be indexed
  Put p = new Put(Bytes.toBytes("row"));
  p.add(family, null, Bytes.toBytes("value"));
  primary.put(p);
  try {
    primary.flushCommits();
    fail("Shouldn't have gotten a successful write to the primary table");
  } catch (RetriesExhaustedWithDetailsException e) {
    LOG.info("Correclty got a failure of the put!");
  }
  primary.close();
}
 
Example 13
Source File: TestWALReplayWithIndexWritesAndCompressedWAL.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableNameStr), null, null, false);
  final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
  
  //setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getLog().closeAndDelete();
  HLog wal = createWAL(this.conf);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL()).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + "-server-1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  long seqid = region.initialize();
  // HRegionServer usually does this. It knows the largest seqid across all regions.
  wal.setSequenceNumber(seqid);
  
  //make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(new Put[] { p });

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
    Mockito.any(Exception.class));
  region.close(true);
  wal.close();

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf);
  HLog wal2 = createWAL(this.conf);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
 
Example 14
Source File: TestParalleWriterIndexCommitter.java    From phoenix with Apache License 2.0
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
  LOG.info("Starting " + test.getTableNameString());
  LOG.info("Current thread is interrupted: " + Thread.interrupted());
  Abortable abort = new StubAbortable();
  Stoppable stop = Mockito.mock(Stoppable.class);
  ExecutorService exec = Executors.newFixedThreadPool(1);
  Map<ImmutableBytesPtr, HTableInterface> tables =
      new HashMap<ImmutableBytesPtr, HTableInterface>();
  FakeTableFactory factory = new FakeTableFactory(tables);

  ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
  Put m = new Put(row);
  m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
  Multimap<HTableInterfaceReference, Mutation> indexUpdates =
      ArrayListMultimap.<HTableInterfaceReference, Mutation> create();
  indexUpdates.put(new HTableInterfaceReference(tableName), m);

  HTableInterface table = Mockito.mock(HTableInterface.class);
  final boolean[] completed = new boolean[] { false };
  Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      // just keep track that it was called
      completed[0] = true;
      return null;
    }
  });
  Mockito.when(table.getTableName()).thenReturn(test.getTableName());
  // add the table to the set of tables, so its returned to the writer
  tables.put(tableName, table);

  // setup the writer and failure policy
  ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
  writer.setup(factory, exec, abort, stop, 1);
  writer.write(indexUpdates);
  assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
    completed[0]);
  writer.stop(this.test.getTableNameString() + " finished");
  assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
  assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
 
Example 15
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false);

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
 
Example 16
Source File: IndexMaintainerTest.java    From phoenix with Apache License 2.0
private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns,
         String pk, String indexColumns, Object[] values, String includeColumns,
         String dataProps, String indexProps, KeyValueBuilder builder) throws Exception {
     Connection conn = DriverManager.getConnection(getUrl());
     String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName),SchemaUtil.normalizeIdentifier(tableName));
     String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName),SchemaUtil.normalizeIdentifier("idx"));
     conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + "))  " + (dataProps.isEmpty() ? "" : dataProps) );
     try {
         conn.createStatement().execute("CREATE INDEX idx ON " + fullTableName + "(" + indexColumns + ") " + (includeColumns.isEmpty() ? "" : "INCLUDE (" + includeColumns + ") ") + (indexProps.isEmpty() ? "" : indexProps));
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
         PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
         PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(),fullIndexName));
         ImmutableBytesWritable ptr = new ImmutableBytesWritable();
         table.getIndexMaintainers(ptr, pconn);
         List<IndexMaintainer> c1 = IndexMaintainer.deserialize(ptr, builder, true);
         assertEquals(1,c1.size());
         IndexMaintainer im1 = c1.get(0);
         
         StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName  + " VALUES(");
         for (int i = 0; i < values.length; i++) {
             buf.append("?,");
         }
         buf.setCharAt(buf.length()-1, ')');
         PreparedStatement stmt = conn.prepareStatement(buf.toString());
         for (int i = 0; i < values.length; i++) {
             stmt.setObject(i+1, values[i]);
         }
         stmt.execute();
         Iterator<Pair<byte[], List<Cell>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
         List<Cell> dataKeyValues = iterator.next().getSecond();
         Map<ColumnReference,byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
         ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(dataKeyValues.get(0).getRowArray(), dataKeyValues.get(0).getRowOffset(), dataKeyValues.get(0).getRowLength());
         byte[] row = rowKeyPtr.copyBytes();
         Put dataMutation = new Put(row);
         for (Cell kv : dataKeyValues) {
             valueMap.put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), CellUtil.cloneValue(kv));
             dataMutation.add(kv);
         }
         ValueGetter valueGetter = newValueGetter(row, valueMap);
         
         List<Mutation> indexMutations = IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder);
         assertEquals(1,indexMutations.size());
         assertTrue(indexMutations.get(0) instanceof Put);
         Mutation indexMutation = indexMutations.get(0);
         ImmutableBytesWritable indexKeyPtr = new ImmutableBytesWritable(indexMutation.getRow());
         ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength());
         byte[] mutableIndexRowKey = im1.buildRowKey(valueGetter, ptr, null, null, HConstants.LATEST_TIMESTAMP);
         byte[] immutableIndexRowKey = indexKeyPtr.copyBytes();
         assertArrayEquals(immutableIndexRowKey, mutableIndexRowKey);
         for (ColumnReference ref : im1.getCoveredColumns()) {
             valueMap.get(ref);
         }
         byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null);
         assertArrayEquals(dataRowKey, CellUtil.cloneRow(dataKeyValues.get(0)));
     } finally {
         try {
             conn.rollback();
             conn.createStatement().execute("DROP TABLE " + fullTableName);
         } finally {
             conn.close();
         }
     }
 }
 
Example 17
Source File: TestEndToEndCoveredIndexing.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Similar to {@link #testMultipleTimestampsInSinglePut()}, this checks the same behavior with deletes.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSingleDelete() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // check to make sure everything we expect is there
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // ts1, we just have v1
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // at ts2, don't have the above anymore
  pairs.clear();
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);
  // but we do have the new entry at ts2
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
  d.deleteColumn(FAM, indexed_qualifer, ts1);
  // since this doesn't match exactly, we actually shouldn't see a change in table state
  d.deleteColumn(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // at ts1, the put is exactly covered by the delete, from ts1 into the entire future
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
    value1);

  // at ts2, we should just see value3
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // the later delete is a point delete, so we shouldn't see any change at ts3
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
    HConstants.EMPTY_END_ROW);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
 
Example 18
Source File: TransactionalRegion.java    From hbase-secondary-index with GNU General Public License v3.0
protected void doReconstructionLog(final Path oldCoreLogFile, final long minSeqId, final long maxSeqId,
        final Progressable reporter) throws UnsupportedEncodingException, IOException {

    Path trxPath = new Path(oldCoreLogFile.getParent(), THLog.HREGION_OLD_THLOGFILE_NAME);

    // We can ignore doing anything with the Trx Log table, it is
    // not-transactional.
    if (super.getTableDesc().getNameAsString().equals(HBaseBackedTransactionLogger.TABLE_NAME)) {
        return;
    }

    THLogRecoveryManager recoveryManager = new THLogRecoveryManager(this);
    Map<Long, WALEdit> commitedTransactionsById = recoveryManager.getCommitsFromLog(trxPath, minSeqId, reporter);

    if (commitedTransactionsById != null && commitedTransactionsById.size() > 0) {
        LOG.debug("found " + commitedTransactionsById.size() + " COMMITED transactions to recover.");

        for (Entry<Long, WALEdit> entry : commitedTransactionsById.entrySet()) {
            LOG.debug("Writing " + entry.getValue().size() + " updates for transaction " + entry.getKey());
            WALEdit b = entry.getValue();

            for (KeyValue kv : b.getKeyValues()) {
                // FIXME need to convert these into puts and deletes. Not sure this is
                // the right way.
                // Could probably combine multiple KVs into a single put/delete.
                // Also timestamps?
                if (kv.getType() == KeyValue.Type.Put.getCode()) {
                    Put put = new Put(kv.getRow());
                    put.add(kv);
                    super.put(put);
                } else if (kv.isDelete()) {
                    Delete del = new Delete(kv.getRow());
                    if (kv.isDeleteFamily()) {
                        del.deleteFamily(kv.getFamily());
                    } else if (kv.isDeleteType()) {
                        del.deleteColumn(kv.getFamily(), kv.getQualifier());
                    }
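                    // NOTE: `del` is built but never applied; the original source
                    // appears to be missing a super.delete(...) call at this point.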
                }

            }

        }

        LOG.debug("Flushing cache"); // We must trigger a cache flush,
        // otherwise we will would ignore the log on subsequent failure
        if (!super.flushcache()) {
            LOG.warn("Did not flush cache");
        }
    }
}
 
Example 19
Source File: PermissionStorage.java    From hbase with Apache License 2.0
/**
 * Stores a new user permission grant in the access control lists table.
 * @param conf the configuration
 * @param userPerm the details of the permission to be granted
 * @param t acl table instance. It is closed upon method return.
 * @throws IOException in the case of an error accessing the metadata table
 */
public static void addUserPermission(Configuration conf, UserPermission userPerm, Table t,
    boolean mergeExistingPermissions) throws IOException {
  Permission permission = userPerm.getPermission();
  Permission.Action[] actions = permission.getActions();
  byte[] rowKey = userPermissionRowKey(permission);
  Put p = new Put(rowKey);
  byte[] key = userPermissionKey(userPerm);

  if ((actions == null) || (actions.length == 0)) {
    String msg = "No actions associated with user '" + userPerm.getUser() + "'";
    LOG.warn(msg);
    throw new IOException(msg);
  }

  Set<Permission.Action> actionSet = new TreeSet<Permission.Action>();
  if (mergeExistingPermissions) {
    List<UserPermission> perms = getUserPermissions(conf, rowKey, null, null, null, false);
    UserPermission currentPerm = null;
    for (UserPermission perm : perms) {
      if (userPerm.equalsExceptActions(perm)) {
        currentPerm = perm;
        break;
      }
    }

    if (currentPerm != null && currentPerm.getPermission().getActions() != null) {
      actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions()));
    }
  }

  // merge current action with new action.
  actionSet.addAll(Arrays.asList(actions));

  // serialize to byte array.
  byte[] value = new byte[actionSet.size()];
  int index = 0;
  for (Permission.Action action : actionSet) {
    value[index++] = action.code();
  }
  p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(p.getRow())
      .setFamily(ACL_LIST_FAMILY)
      .setQualifier(key)
      .setTimestamp(p.getTimestamp())
      .setType(Type.Put)
      .setValue(value)
      .build());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
        + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
  }
  try {
    t.put(p);
  } finally {
    t.close();
  }
}
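
This last example shows the form of the API that survives in HBase 2.x: the value-based add() overloads are gone, and Put#add(Cell) paired with a CellBuilder is the low-level replacement. A stripped-down sketch of the same pattern (row, family, qualifier, and value here are placeholders):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderPut {
    static Put buildPut() throws IOException {
        Put p = new Put(Bytes.toBytes("row"));
        Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
                .setRow(p.getRow())            // must match the Put's row
                .setFamily(Bytes.toBytes("f"))
                .setQualifier(Bytes.toBytes("q"))
                .setTimestamp(p.getTimestamp())
                .setType(Cell.Type.Put)
                .setValue(Bytes.toBytes("v"))
                .build();
        p.add(cell);
        return p;
    }
}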