Java Code Examples for org.apache.hadoop.hbase.client.Put.add()

The following are Java code examples showing how to use the add() method of the org.apache.hadoop.hbase.client.Put class. Note that in HBase 1.0 and later the add(family, qualifier, value) overload is deprecated in favor of addColumn(family, qualifier, value), while the Cell-based overload (add(KeyValue)/add(Cell)) remains for attaching a pre-built cell.
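Before the examples, here is a minimal, self-contained sketch of both overloads. This is a sketch under assumptions rather than code from any of the projects below: the table name "myTable", the row key, and the family/qualifier/value bytes are placeholders, and it targets the pre-1.0 client API used throughout these examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAddSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "myTable" is a placeholder; the table and its "cf" family must already exist.
    HTable table = new HTable(conf, "myTable");
    Put put = new Put(Bytes.toBytes("row1"));
    // Overload 1: family, qualifier, value -- the client builds the cell.
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // Overload 2: a pre-built KeyValue; its row must match the Put's row.
    // This is also the only overload that accepts tags (see Example 1).
    KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q2"), Bytes.toBytes("v2"));
    put.add(kv);
    table.put(put);
    table.close();
  }
}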
Example 1
Project: ditb   File: PerformanceEvaluation.java
@Override
void testRow(final int i) throws IOException {
  byte[] row = format(i);
  Put put = new Put(row);
  for (int column = 0; column < opts.columns; column++) {
    byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column);
    byte[] value = generateData(this.rand, getValueLength(this.rand));
    if (opts.useTags) {
      byte[] tag = generateData(this.rand, TAG_LENGTH);
      Tag[] tags = new Tag[opts.noOfTags];
      for (int n = 0; n < opts.noOfTags; n++) {
        Tag t = new Tag((byte) n, tag);
        tags[n] = t;
      }
      KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
          value, tags);
      put.add(kv);
      updateValueSize(kv.getValueLength());
    } else {
      put.add(FAMILY_NAME, qualifier, value);
      updateValueSize(value.length);
    }
  }
  put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
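The two branches above exercise the two add() overloads covered on this page: the tag-carrying path has to build a KeyValue itself and pass it to put.add(kv), because the add(family, qualifier, value) overload constructs the cell internally and exposes no tag parameter.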
 
Example 2
Project: ditb   File: TestZooKeeperTableArchiveClient.java
/**
 * Create a new hfile in the passed region
 * @param region region to operate on
 * @param columnFamily family for which to add data
 * @throws IOException
 */
private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
  // put one row in the region
  Put p = new Put(Bytes.toBytes("row"));
  p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
  region.put(p);
  // flush the region to make a store file
  region.flush(true);
}
 
Example 3
Project: ditb   File: TestEncodedSeekers.java
private void doPuts(Region region) throws IOException {
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      put.setDurability(Durability.ASYNC_WAL);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        Tag[] tag = new Tag[1];
        tag[0] = new Tag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.add(CF_BYTES, col, value);
      }
      if (VERBOSE) {
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
      }
      region.put(put);
    }
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
 
Example 4
Project: ditb   File: TestRegionObserverInterface.java
@Test (timeout=300000)
public void testRowMutation() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    verifyMethodResult(SimpleRegionObserver.class,
      new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
          "hadDeleted"},
      tableName,
      new Boolean[] {false, false, false, false, false});
    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);

    RowMutations arm = new RowMutations(ROW);
    arm.add(put);
    arm.add(delete);
    table.mutateRow(arm);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDeleted"},
        tableName,
        new Boolean[] {false, false, true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
 
Example 5
Project: ditb   File: TestHRegion.java
private void putRows(HRegion r, int numRows, String value, String key) throws IOException {
  for (int i = 0; i < numRows; i++) {
    String row = key + "_" + i/* UUID.randomUUID().toString() */;
    System.out.println(String.format("Saving row: %s, with value %s", row, value));
    Put put = new Put(Bytes.toBytes(row));
    put.setDurability(Durability.SKIP_WAL);
    put.add(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob"));
    put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
    put.add(Bytes.toBytes("trans-date"), null, Bytes.toBytes("20090921010101999"));
    put.add(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes.toBytes(value));
    put.add(Bytes.toBytes("trans-group"), null, Bytes.toBytes("adhocTransactionGroupId"));
    r.put(put);
  }
}
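All but one of the columns above pass null as the qualifier; HBase treats a null qualifier as a zero-length (empty) qualifier, so those values are stored in their families under the empty column name.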
 
Example 6
Project: ditb   File: TestBigDecimalColumnInterpreter.java
/**
 * A set up method to start the test cluster. AggregateProtocolImpl is registered and will be
 * loaded during region startup.
 * @throws Exception
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {

  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

  util.startMiniCluster(2);
  final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
  /**
   * The test table has one CQ that is always populated and one variable CQ per row:
   * rowKey1: CF:CQ, CF:CQ1
   * rowKey2: CF:CQ, CF:CQ2
   */
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    BigDecimal bd = new BigDecimal(i);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(bd));
    table.put(put);
    Put p2 = new Put(ROWS[i]);
    p2.setDurability(Durability.SKIP_WAL);
    p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(bd)),
      Bytes.toBytes(bd.multiply(new BigDecimal("0.10"))));
    table.put(p2);
  }
  table.close();
}
 
Example 7
Project: ditb   File: TestFuzzyRowAndColumnRangeFilter.java
@Test
public void Test() throws Exception {
  String cf = "f";
  String table = "TestFuzzyAndColumnRangeFilterClient";
  Table ht = TEST_UTIL.createTable(TableName.valueOf(table),
          Bytes.toBytes(cf), Integer.MAX_VALUE);

  // 10 byte row key - (2 bytes 4 bytes 4 bytes)
  // 4 byte qualifier
  // 4 byte value

  for (int i1 = 0; i1 < 2; i1++) {
    for (int i2 = 0; i2 < 5; i2++) {
      byte[] rk = new byte[10];

      ByteBuffer buf = ByteBuffer.wrap(rk);
      buf.clear();
      buf.putShort((short) 2);
      buf.putInt(i1);
      buf.putInt(i2);

      for (int c = 0; c < 5; c++) {
        byte[] cq = new byte[4];
        Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);

        Put p = new Put(rk);
        p.setDurability(Durability.SKIP_WAL);
        p.add(cf.getBytes(), cq, Bytes.toBytes(c));
        ht.put(p);
        LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
                + Bytes.toStringBinary(cq));
      }
    }
  }

  TEST_UTIL.flush();

  // test passes
  runTest(ht, 0, 10);

  // test fails
  runTest(ht, 1, 8);
}
 
Example 8
Project: ditb   File: TestRegionObserverStacking.java
public void testRegionObserverStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(TABLE, getClass().getName(),
    conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
  h.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);

  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);

  Coprocessor c = h.findCoprocessor(ObserverA.class.getName());
  long idA = ((ObserverA)c).id;
  c = h.findCoprocessor(ObserverB.class.getName());
  long idB = ((ObserverB)c).id;
  c = h.findCoprocessor(ObserverC.class.getName());
  long idC = ((ObserverC)c).id;

  assertTrue(idA < idB);
  assertTrue(idB < idC);
}
 
Example 9
Project: ditb   File: TestForceCacheImportantBlocks.java
private void writeTestData(Region region) throws IOException {
  for (int i = 0; i < NUM_ROWS; ++i) {
    Put put = new Put(Bytes.toBytes("row" + i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
        put.add(CF_BYTES, Bytes.toBytes("col" + j), ts,
            Bytes.toBytes("value" + i + "_" + j + "_" + ts));
      }
    }
    region.put(put);
    if ((i + 1) % ROWS_PER_HFILE == 0) {
      region.flush(true);
    }
  }
}
 
Example 10
Project: ditb   File: TestVisibilityLabels.java
@Test
public void testFlushedFileWithVisibilityTags() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor col = new HColumnDescriptor(fam);
  desc.addFamily(col);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put p1 = new Put(row1);
    p1.add(fam, qual, value);
    p1.setCellVisibility(new CellVisibility(CONFIDENTIAL));

    Put p2 = new Put(row1);
    p2.add(fam, qual2, value);
    p2.setCellVisibility(new CellVisibility(SECRET));

    RowMutations rm = new RowMutations(row1);
    rm.add(p1);
    rm.add(p2);

    table.mutateRow(rm);
  }
  TEST_UTIL.getHBaseAdmin().flush(tableName);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
  Store store = regions.get(0).getStore(fam);
  Collection<StoreFile> storefiles = store.getStorefiles();
  assertTrue(storefiles.size() > 0);
  for (StoreFile storeFile : storefiles) {
    assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}
 
Example 11
Project: ditb   File: TestBatchCoprocessorEndpoint.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set the configuration to indicate which coprocessors should be loaded
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName(),
      ColumnAggregationEndpointWithErrors.class.getName(),
      ColumnAggregationEndpointNullResponse.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);
  Admin admin = new HBaseAdmin(conf);
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);
  admin.close();

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
 
Example 12
Project: QDrill   File: TestTableGenerator.java
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 13
Project: QDrill   File: TestTableGenerator.java
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 14
Project: ditb   File: TestRegionMergeTransactionOnCluster.java
private void loadData(Table table) throws IOException {
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.add(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
}
 
Example 15
Project: ditb   File: TestMultipleColumnPrefixFilter.java
@Test
public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException {
  String family1 = "Family1";
  String family2 = "Family2";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter"));
  HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
  hcd1.setMaxVersions(3);
  htd.addFamily(hcd1);
  HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
  hcd2.setMaxVersions(3);
  htd.addFamily(hcd2);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 3;

  List<Cell> kvList = new ArrayList<Cell>();

  Map<String, List<Cell>> prefixMap = new HashMap<String, List<Cell>>();

  prefixMap.put("p", new ArrayList<Cell>());
  prefixMap.put("q", new ArrayList<Cell>());
  prefixMap.put("s", new ArrayList<Cell>());

  String valueString = "ValueString";

  for (String row: rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column: columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        double rand = Math.random();
        Cell kv;
        if (rand < 0.5) {
          kv = KeyValueTestUtil.create(row, family1, column, timestamp, valueString);
        } else {
          kv = KeyValueTestUtil.create(row, family2, column, timestamp, valueString);
        }
        p.add(kv);
        kvList.add(kv);
        for (String s: prefixMap.keySet()) {
          if (column.startsWith(s)) {
            prefixMap.get(s).add(kv);
          }
        }
      }
    }
    region.put(p);
  }

  MultipleColumnPrefixFilter filter;
  Scan scan = new Scan();
  scan.setMaxVersions();
  byte[][] filter_prefix = new byte[2][];
  filter_prefix[0] = new byte[] {'p'};
  filter_prefix[1] = new byte[] {'q'};

  filter = new MultipleColumnPrefixFilter(filter_prefix);
  scan.setFilter(filter);
  List<Cell> results = new ArrayList<Cell>();
  InternalScanner scanner = region.getScanner(scan);
  while (scanner.next(results)) {
    // keep scanning until the scanner is exhausted; results accumulates the cells
  }
  assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());

  HRegion.closeHRegion(region);
}
 
Example 16
Project: ditb   File: TestHFileOutputFormat2.java
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  util.setJobWithoutMRCluster();
  util.startMiniCluster();
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()){
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    Table table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);

    RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME);
    runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Example 17
Project: ditb   File: TestAsyncIPC.java
public static void main(String[] args) throws IOException, SecurityException,
    NoSuchMethodException, InterruptedException {
  if (args.length != 2) {
    System.out.println("Usage: TestAsyncIPC <CYCLES> <CELLS_PER_CYCLE>");
    return;
  }
  // ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.INFO);
  // ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.INFO);
  int cycles = Integer.parseInt(args[0]);
  int cellcount = Integer.parseInt(args[1]);
  Configuration conf = HBaseConfiguration.create();
  TestRpcServer rpcServer = new TestRpcServer();
  MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
  EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
  AsyncRpcClient client = new AsyncRpcClient(conf);
  KeyValue kv = BIG_CELL;
  Put p = new Put(CellUtil.cloneRow(kv));
  for (int i = 0; i < cellcount; i++) {
    p.add(kv);
  }
  RowMutations rm = new RowMutations(CellUtil.cloneRow(kv));
  rm.add(p);
  try {
    rpcServer.start();
    InetSocketAddress address = rpcServer.getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    long startTime = System.currentTimeMillis();
    User user = User.getCurrent();
    for (int i = 0; i < cycles; i++) {
      List<CellScannable> cells = new ArrayList<CellScannable>();
      // Message param = RequestConverter.buildMultiRequest(HConstants.EMPTY_BYTE_ARRAY, rm);
      ClientProtos.RegionAction.Builder builder =
          RequestConverter.buildNoDataRegionAction(HConstants.EMPTY_BYTE_ARRAY, rm, cells,
            RegionAction.newBuilder(), ClientProtos.Action.newBuilder(),
            MutationProto.newBuilder());
      builder.setRegion(RegionSpecifier
          .newBuilder()
          .setType(RegionSpecifierType.REGION_NAME)
          .setValue(
            ByteString.copyFrom(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())));
      if (i % 100000 == 0) {
        LOG.info("" + i);
        // Uncomment this for a thread dump every so often.
        // ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
        // "Thread dump " + Thread.currentThread().getName());
      }
      PayloadCarryingRpcController pcrc =
          new PayloadCarryingRpcController(CellUtil.createCellScanner(cells));
      // Pair<Message, CellScanner> response =
      client.call(pcrc, md, builder.build(), param, user, address,
          new MetricsConnection.CallStats());
      /*
       * int count = 0; while (p.getSecond().advance()) { count++; } assertEquals(cells.size(),
       * count);
       */
    }
    LOG.info("Cycled " + cycles + " time(s) with " + cellcount + " cell(s) in "
        + (System.currentTimeMillis() - startTime) + "ms");
  } finally {
    client.close();
    rpcServer.stop();
  }
}
 
Example 18
Project: ditb   File: TestSplitTransactionOnCluster.java
@Test (timeout=300000)
public void testSSHCleanupDaugtherRegionsOfAbortedSplit() throws Exception {
  TableName table = TableName.valueOf("testSSHCleanupDaugtherRegionsOfAbortedSplit");
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    admin.createTable(desc);
    HTable hTable = new HTable(cluster.getConfiguration(), desc.getTableName());
    for (int i = 1; i < 5; i++) {
      Put p1 = new Put(("r" + i).getBytes());
      p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
      hTable.put(p1);
    }
    admin.flush(desc.getTableName());
    List<HRegion> regions = cluster.getRegions(desc.getTableName());
    int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverWith);
    cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3"));
    st.prepare();
    st.stepsBeforePONR(regionServer, regionServer, false);
    Path tableDir =
        FSUtils.getTableDir(cluster.getMaster().getMasterFileSystem().getRootDir(),
          desc.getTableName());
    tableDir.getFileSystem(cluster.getConfiguration());
    List<Path> regionDirs =
        FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()), tableDir);
    assertEquals(3, regionDirs.size());
    cluster.startRegionServer();
    regionServer.kill();
    cluster.getRegionServerThreads().get(serverWith).join();
    // Wait until finish processing of shutdown
    while (cluster.getMaster().getServerManager().areDeadServersInProgress()) {
      Thread.sleep(10);
    }
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    while (am.getRegionStates().isRegionsInTransition()) {
      Thread.sleep(10);
    }
    assertEquals(am.getRegionStates().getRegionsInTransition().toString(), 0,
        am.getRegionStates().getRegionsInTransition().size());
    regionDirs =
        FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()), tableDir);
    assertEquals(1, regionDirs.size());
  } finally {
    TESTING_UTIL.deleteTable(table);
  }
}
 
Example 19
Project: ditb   File: TestHRegion.java
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file into more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts: that at least one node has
  // a copy of all replicas -- with a small block size, blocks are spread evenly across
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte[] row = Bytes.toBytes("row1");
    byte[] col = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor of 2 and our 2 HFiles,
    // there will be a total of 4 block replicas on 3 datanodes; thus at
    // least one host must hold replicas of both HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // Use the static method to compute the value; it should be the same.
    // The static method is used by the load balancer and other components.
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Example 20
Project: ditb   File: TestHFileOutputFormat.java
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getTestFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}