Java Code Examples for org.apache.hadoop.hbase.client.Admin#split()

The following examples show how to use org.apache.hadoop.hbase.client.Admin#split(). They are drawn from open source projects; the originating project and source file are noted above each example.
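Before the project examples, here is a minimal sketch of the two split() overloads, assuming a running cluster; the table name and split key are placeholder values. Both calls are asynchronous and return before the split completes, which is why the examples below poll or otherwise wait for the new regions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class AdminSplitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "my_table" and "row-5000" are illustrative placeholders.
    TableName tableName = TableName.valueOf("my_table");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask HBase to pick a split point for each region of the table.
      admin.split(tableName);
      // Or split at an explicit row key. Both calls return immediately;
      // the actual split happens asynchronously on the region server.
      admin.split(tableName, Bytes.toBytes("row-5000"));
    }
  }
}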
Example 1
Source File: SplitAllRegionOfTableAction.java    From hbase with Apache License 2.0
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  // Don't try the split if we're stopping
  if (context.isStopping()) {
    return;
  }


  // Don't always split. This should allow splitting of a full table later in the run
  if (ThreadLocalRandom.current().nextDouble()
      < (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2)) {
    splits++;
    getLogger().info("Performing action: Split all regions of {}", tableName);
    admin.split(tableName);
  } else {
    getLogger().info("Skipping split of all regions.");
  }
}
 
Example 2
Source File: TableSnapshotReadsMapReduceIT.java    From phoenix with Apache License 2.0
private void splitTableSync(Admin admin, TableName hbaseTableName,
                            byte[] splitPoint, int expectedRegions) throws IOException, InterruptedException {
  admin.split(hbaseTableName, splitPoint);
  for (int i = 0; i < 100; i++) {
    List<HRegionInfo> hRegionInfoList = admin.getTableRegions(hbaseTableName);
    if (hRegionInfoList.size() >= expectedRegions) {
      break;
    }
    LOGGER.info("Sleeping for 1000 ms while waiting for "
            + hbaseTableName.getNameAsString() + " to split");
    Thread.sleep(1000);
  }
}
 
Example 3
Source File: UpsertSelectAutoCommitIT.java    From phoenix with Apache License 2.0
@Test
public void testUpsertSelectDoesntSeeUpsertedData() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, Integer.toString(512));
    props.setProperty(QueryServices.SCAN_CACHE_SIZE_ATTRIB, Integer.toString(3));
    props.setProperty(QueryServices.SCAN_RESULT_CHUNK_SIZE, Integer.toString(3));
    props.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS,
        allowServerSideMutations);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(true);
    String tableName = generateUniqueName();
    conn.createStatement().execute("CREATE SEQUENCE "+ tableName + "_seq CACHE 1000");
    conn.createStatement().execute("CREATE TABLE " + tableName
            + " (pk INTEGER PRIMARY KEY, val INTEGER) UPDATE_CACHE_FREQUENCY=3600000");

    conn.createStatement().execute(
        "UPSERT INTO " + tableName + " VALUES (NEXT VALUE FOR "+ tableName + "_seq, 1)");
    PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO " + tableName
                    + " SELECT NEXT VALUE FOR "+ tableName + "_seq, val FROM " + tableName);
    Admin admin =
            driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    for (int i=0; i<12; i++) {
        try {
            admin.split(TableName.valueOf(tableName));
        } catch (IOException ignore) {
            // we don't care if the split sometimes cannot be executed
        }
        int upsertCount = stmt.executeUpdate();
        assertEquals((int)Math.pow(2, i), upsertCount);
    }
    admin.close();
    conn.close();
}
 
Example 4
Source File: RoundRobinResultIteratorIT.java    From phoenix with Apache License 2.0
@Test
public void testRoundRobinAfterTableSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    int numRows = setupTableForSplit(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nRegionsBeforeSplit = nRegions;
    Admin admin = services.getAdmin();
    try {
        // Split is an async operation, so wait for it to complete (up to 10 tries x 2 seconds).
        // If the test tends to flap, you might want to increase the wait time.
        admin.split(TableName.valueOf(tableName));
        CountDownLatch latch = new CountDownLatch(1);
        int nTries = 0;
        long waitTimeMillis = 2000;
        while (nRegions == nRegionsBeforeSplit && nTries < 10) {
            latch.await(waitTimeMillis, TimeUnit.MILLISECONDS);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        
        String query = "SELECT * FROM " + tableName;
        Statement stmt = conn.createStatement();
        stmt.setFetchSize(10); // this makes scanner caches to be replenished in parallel.
        ResultSet rs = stmt.executeQuery(query);
        int numRowsRead = 0;
        while (rs.next()) {
            numRowsRead++;
        }
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nRegionsBeforeSplit);
        assertEquals(numRows, numRowsRead);
    } finally {
        admin.close();
    }

}
 
Example 5
Source File: BaseTest.java    From phoenix with Apache License 2.0
/**
 *  Synchronously split a table at the given split point
 */
protected static void splitRegion(TableName fullTableName, byte[] splitPoint) throws SQLException, IOException, InterruptedException {
    Admin admin =
            driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    admin.split(fullTableName, splitPoint);
    // make sure the split finishes (there's no synchronous splitting before HBase 2.x)
    admin.disableTable(fullTableName);
    admin.enableTable(fullTableName);
}
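Example 5 forces the split to complete by disabling and re-enabling the table. On HBase 2.x, where Admin#getRegions(TableName) is available, a hedged alternative is to poll the region count after the split request; the helper below is only a sketch, with the expected region count and timeout supplied by the caller.

// Sketch only: waits for a requested split of fullTableName to take effect by polling
// the region count (assumes HBase 2.x, where Admin#getRegions(TableName) exists).
protected static void splitRegionAndWait(Admin admin, TableName fullTableName, byte[] splitPoint,
        int expectedRegions, long timeoutMillis) throws IOException, InterruptedException {
    admin.split(fullTableName, splitPoint);
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (admin.getRegions(fullTableName).size() < expectedRegions
            && System.currentTimeMillis() < deadline) {
        Thread.sleep(1000); // split is asynchronous; check again shortly
    }
}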
 
Example 6
Source File: TestTablePermissions.java    From hbase with Apache License 2.0
@Test
public void testPersistence() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    addUserPermission(conf,
      new UserPermission("albert",
          Permission.newBuilder(TEST_TABLE).withActions(Permission.Action.READ).build()),
      connection.getTable(PermissionStorage.ACL_TABLE_NAME));
    addUserPermission(conf,
      new UserPermission("betty",
          Permission.newBuilder(TEST_TABLE)
              .withActions(Permission.Action.READ, Permission.Action.WRITE).build()),
      connection.getTable(PermissionStorage.ACL_TABLE_NAME));
    addUserPermission(conf,
      new UserPermission("clark",
          Permission.newBuilder(TEST_TABLE).withFamily(TEST_FAMILY)
              .withActions(Permission.Action.READ).build()),
      connection.getTable(PermissionStorage.ACL_TABLE_NAME));
    addUserPermission(conf,
      new UserPermission("dwight",
          Permission.newBuilder(TEST_TABLE).withFamily(TEST_FAMILY).withQualifier(TEST_QUALIFIER)
              .withActions(Permission.Action.WRITE).build()),
      connection.getTable(PermissionStorage.ACL_TABLE_NAME));
  }
  // verify permissions survive changes in table metadata
  ListMultimap<String, UserPermission> preperms =
      PermissionStorage.getTablePermissions(conf, TEST_TABLE);

  Table table = UTIL.getConnection().getTable(TEST_TABLE);
  table.put(
    new Put(Bytes.toBytes("row1")).addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v1")));
  table.put(
    new Put(Bytes.toBytes("row2")).addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
  Admin admin = UTIL.getAdmin();
  try {
    admin.split(TEST_TABLE);
  }
  catch (IOException e) {
    // Even if the split fails, it should not affect the following check.
    // With the old split API (before AMv2), no exception was thrown when a region's
    // best split key could not be found; the current API does throw one.
    LOG.debug("region is not splittable, because " + e);
  }

  // wait for split
  Thread.sleep(10000);

  ListMultimap<String, UserPermission> postperms =
      PermissionStorage.getTablePermissions(conf, TEST_TABLE);

  checkMultimapEqual(preperms, postperms);
}
 
Example 7
Source File: TestEndToEndSplitTransaction.java    From hbase with Apache License 2.0
/**
 * Test for HBASE-20940. This test splits the region and tries to open a reference over a
 * store file. Once the store file has an open reference, it verifies that the region
 * cannot be split.
 */
@Test
public void testCanSplitJustAfterASplit() throws Exception {
  LOG.info("Starting testCanSplitJustAfterASplit");
  byte[] fam = Bytes.toBytes("cf_split");

  CompactSplit compactSplit =
    TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread();
  TableName tableName = TableName.valueOf("CanSplitTable");
  Table source = TEST_UTIL.getConnection().getTable(tableName);
  Admin admin = TEST_UTIL.getAdmin();
  // set a large min compaction file count to avoid compaction just after splitting.
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
  Map<String, StoreFileReader> scanner = Maps.newHashMap();
  try {
    admin.createTable(htd);
    TEST_UTIL.loadTable(source, fam);
    compactSplit.setCompactionsEnabled(false);
    admin.split(tableName);
    TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getRegions(tableName).size() == 2);

    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    regions.stream()
      .forEach(r -> r.getStores().get(0).getStorefiles().stream()
        .filter(s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName()))
        .forEach(sf -> {
          StoreFileReader reader = ((HStoreFile) sf).getReader();
          reader.getStoreFileScanner(true, false, false, 0, 0, false);
          scanner.put(r.getRegionInfo().getEncodedName(), reader);
          LOG.info("Got reference to file = " + sf.getPath() + ",for region = " +
            r.getRegionInfo().getEncodedName());
        }));
    assertTrue("Regions did not split properly", regions.size() > 1);
    assertTrue("Could not get reference any of the store file", scanner.size() > 1);
    compactSplit.setCompactionsEnabled(true);
    for (HRegion region : regions) {
      region.compact(true);
    }

    regions.stream()
      .filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName()))
      .forEach(r -> assertFalse("Contains an open file reference which can be split",
        r.getStores().get(0).canSplit()));
  } finally {
    scanner.values().forEach(s -> {
      try {
        s.close(true);
      } catch (IOException ioe) {
        LOG.error("Failed while closing store file", ioe);
      }
    });
    scanner.clear();
    Closeables.close(source, true);
    if (!compactSplit.isCompactionsEnabled()) {
      compactSplit.setCompactionsEnabled(true);
    }
    TEST_UTIL.deleteTableIfAny(tableName);
  }
}
 
Example 8
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
@Test
public void testNoDuplicateResultsWhenSplitting() throws Exception {
  TableName tableName = TableName.valueOf("testNoDuplicateResultsWhenSplitting");
  String snapshotName = "testSnapshotBug";
  try {
    if (UTIL.getAdmin().tableExists(tableName)) {
      UTIL.deleteTable(tableName);
    }

    UTIL.createTable(tableName, FAMILIES);
    Admin admin = UTIL.getAdmin();

    // put some stuff in the table
    Table table = UTIL.getConnection().getTable(tableName);
    UTIL.loadTable(table, FAMILIES);

    // split to 2 regions
    admin.split(tableName, Bytes.toBytes("eee"));
    TestTableSnapshotScanner.blockUntilSplitFinished(UTIL, tableName, 2);

    Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
    FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration());

    SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES),
      null, snapshotName, rootDir, fs, true);

    // load different values
    byte[] value = Bytes.toBytes("after_snapshot_value");
    UTIL.loadTable(table, FAMILIES, value);

    // cause flush to create new files in the region
    admin.flush(tableName);
    table.close();

    Job job = new Job(UTIL.getConfiguration());
    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    // limit the scan
    Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow());

    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan,
      TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
      tmpTableDir);

    verifyWithMockedMapReduce(job, 2, 2, getStartRow(), getEndRow());
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
  }
}
 
Example 9
Source File: SkipScanAfterManualSplitIT.java    From phoenix with Apache License 2.0
@Test
public void testManualSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    initTable(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nInitialRegions = nRegions;
    Admin admin = services.getAdmin();
    try {
        admin.split(TableName.valueOf(tableName));
        int nTries = 0;
        while (nRegions == nInitialRegions && nTries < 10) {
            Thread.sleep(1000);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        // Split finished by this time, but cache isn't updated until
        // table is accessed
        assertEquals(nRegions, nInitialRegions);
        
        int nRows = 2;
        String query = "SELECT count(*) FROM " + tableName + " WHERE a IN ('tl','jt',' a',' b',' c',' d')";
        ResultSet rs1 = conn.createStatement().executeQuery(query);
        assertTrue(rs1.next());
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nInitialRegions);
        /*
        if (nRows != rs1.getInt(1)) {
            // Run the same query again and it always passes now
            // (as region cache is up-to-date)
            ResultSet r2 = conn.createStatement().executeQuery(query);
            assertTrue(r2.next());
            assertEquals(nRows, r2.getInt(1));
        }
        */
        assertEquals(nRows, rs1.getInt(1));
    } finally {
        admin.close();
    }

}
 
Example 10
Source File: AggregateQueryIT.java    From phoenix with Apache License 2.0
@Test
public void testSplitWithCachedMeta() throws Exception {
    // Tests that a query re-executed with cached table metadata still returns correct results after the table is split
    String query = "SELECT a_string, b_string, count(1) FROM " + tableName + " WHERE organization_id=? and entity_id<=? GROUP BY a_string,b_string";
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    Admin admin = null;
    try {
        PreparedStatement statement = conn.prepareStatement(query);
        statement.setString(1, tenantId);
        statement.setString(2, ROW4);
        ResultSet rs = statement.executeQuery();
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(B_VALUE, rs.getString(2));
        assertEquals(2, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(C_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(E_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertFalse(rs.next());
        
        TableName tn = TableName.valueOf(tableName);
        admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
        Configuration configuration = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
        org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(configuration);
        ((ClusterConnection)hbaseConn).clearRegionCache(TableName.valueOf(tableName));
        RegionLocator regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(tableName));
        int nRegions = regionLocator.getAllRegionLocations().size();
        admin.split(tn, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A3")));
        int retryCount = 0;
        do {
            Thread.sleep(2000);
            retryCount++;
            //htable.clearRegionCache();
        } while (retryCount < 10 && regionLocator.getAllRegionLocations().size() == nRegions);
        assertNotEquals(nRegions, regionLocator.getAllRegionLocations().size());
        
        statement.setString(1, tenantId);
        rs = statement.executeQuery();
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(B_VALUE, rs.getString(2));
        assertEquals(2, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(C_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertTrue(rs.next());
        assertEquals(A_VALUE, rs.getString(1));
        assertEquals(E_VALUE, rs.getString(2));
        assertEquals(1, rs.getLong(3));
        assertFalse(rs.next());
    } finally {
        if (admin != null) {
            admin.close();
        }
        conn.close();
    }
}
 
Example 11
Source File: MutableIndexSplitIT.java    From phoenix with Apache License 2.0
private List<RegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
        throws SQLException, IOException, InterruptedException {
    ResultSet rs;

    String query = "SELECT t_id,k1,v1 FROM " + tableName;
    rs = conn1.createStatement().executeQuery(query);
    String[] tIdColumnValues = new String[26]; 
    String[] v1ColumnValues = new String[26];
    int[] k1ColumnValue = new int[26];
    for (int j = 0; j < 5; j++) {
        assertTrue(rs.next());
        tIdColumnValues[j] = rs.getString("t_id");
        k1ColumnValue[j] = rs.getInt("k1");
        v1ColumnValues[j] = rs.getString("V1");
    }

    String[] splitKeys = new String[2];
    splitKeys[0] = strings[4];
    splitKeys[1] = strings[12];

    int[] splitInts = new int[2];
    splitInts[0] = 22;
    splitInts[1] = 4;
    List<RegionInfo> regionsOfUserTable = null;
    for(int i = 0; i <=1; i++) {
        boolean split = false;
        for (int j = 0; j < 150 && !split; j++) {
            try {
                if (localIndex) {
                    // With HBase 2.2, local index splits trigger long compactions, and the test
                    // has to wait for an RS_COMPACTED_FILES_DISCHARGER run before the second
                    // split can succeed.
                    admin.split(TableName.valueOf(tableName),
                            ByteUtil.concat(Bytes.toBytes(splitKeys[i])));
                } else {
                    admin.split(TableName.valueOf(indexName),
                            ByteUtil.concat(Bytes.toBytes(splitInts[i])));
                }
                split = true;
            } catch (IOException x) {
                // wait up to a minute for the split to succeed
                Thread.sleep(1000);
            }
        }
        assertTrue(split);

        regionsOfUserTable =
                MetaTableAccessor.getTableRegions(admin.getConnection(),
                    TableName.valueOf(localIndex ? tableName : indexName), false);

        while (regionsOfUserTable.size() < (i+2)) {
            Thread.sleep(1000);
            regionsOfUserTable =
                    MetaTableAccessor.getTableRegions(admin.getConnection(),
                        TableName.valueOf(localIndex ? tableName : indexName), false);
        }
        assertTrue(regionsOfUserTable.size() >= (i+2));
    }
    for (int j = 5; j < 26; j++) {
        assertTrue(rs.next());
        tIdColumnValues[j] = rs.getString("t_id");
        k1ColumnValue[j] = rs.getInt("k1");
        v1ColumnValues[j] = rs.getString("V1");
    }
    Arrays.sort(tIdColumnValues);
    Arrays.sort(v1ColumnValues);
    Arrays.sort(k1ColumnValue);
    assertTrue(Arrays.equals(strings, tIdColumnValues));
    assertTrue(Arrays.equals(strings, v1ColumnValues));
    for(int i=0;i<26;i++) {
        assertEquals(i, k1ColumnValue[i]);
    }
    assertFalse(rs.next());
    return regionsOfUserTable;
}
 
Example 12
Source File: LocalIndexSplitMergeIT.java    From phoenix with Apache License 2.0
@Test
public void testLocalIndexScanAfterRegionSplit() throws Exception {
    String schemaName = generateUniqueName();
    String tableName = schemaName + "." + generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), false);
    String indexPhysicalTableName = physicalTableName.getNameAsString();

    createBaseTable(tableName, "('e','j','o')");
    Connection conn1 = getConnectionForLocalIndexTest();
    try {
        String[] strings =
                { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o",
                        "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
        for (int i = 0; i < 26; i++) {
            conn1.createStatement()
                    .execute("UPSERT INTO " + tableName + " values('" + strings[i] + "'," + i
                            + "," + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
        }
        conn1.commit();
        conn1.createStatement()
                .execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
        conn1.createStatement()
                .execute("CREATE LOCAL INDEX " + indexName + "_2 ON " + tableName + "(k3)");

        ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + tableName);
        assertTrue(rs.next());

        Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
        for (int i = 1; i < 3; i++) {
            admin.split(physicalTableName, ByteUtil.concat(Bytes.toBytes(strings[3 * i])));
            List<RegionInfo> regionsOfUserTable =
                    MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
                        false);

            while (regionsOfUserTable.size() != (4 + i)) {
                Thread.sleep(100);
                regionsOfUserTable =
                        MetaTableAccessor.getTableRegions(admin.getConnection(),
                            physicalTableName, false);
            }
            assertEquals(4 + i, regionsOfUserTable.size());
            String[] tIdColumnValues = new String[26];
            String[] v1ColumnValues = new String[26];
            int[] k1ColumnValue = new int[26];
            String query = "SELECT t_id,k1,v1 FROM " + tableName;
            rs = conn1.createStatement().executeQuery(query);
            Thread.sleep(1000);
            for (int j = 0; j < 26; j++) {
                assertTrue("No row found at " + j, rs.next());
                tIdColumnValues[j] = rs.getString("t_id");
                k1ColumnValue[j] = rs.getInt("k1");
                v1ColumnValues[j] = rs.getString("V1");
            }
            Arrays.sort(tIdColumnValues);
            Arrays.sort(v1ColumnValues);
            Arrays.sort(k1ColumnValue);
            assertTrue(Arrays.equals(strings, tIdColumnValues));
            assertTrue(Arrays.equals(strings, v1ColumnValues));
            for (int m = 0; m < 26; m++) {
                assertEquals(m, k1ColumnValue[m]);
            }

            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
            assertEquals("CLIENT PARALLEL " + (4 + i) + "-WAY RANGE SCAN OVER "
                    + indexPhysicalTableName + " [1]\n"
                    + "    SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT",
                QueryUtil.getExplainPlan(rs));

            query = "SELECT t_id,k1,k3 FROM " + tableName;
            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
            assertEquals(
                "CLIENT PARALLEL "
                        + ((strings[3 * i].compareTo("j") < 0) ? (4 + i) : (4 + i - 1))
                        + "-WAY RANGE SCAN OVER " + indexPhysicalTableName + " [2]\n"
                        + "    SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT",
                QueryUtil.getExplainPlan(rs));
            rs = conn1.createStatement().executeQuery(query);
            Thread.sleep(1000);
            int[] k3ColumnValue = new int[26];
            for (int j = 0; j < 26; j++) {
                assertTrue(rs.next());
                tIdColumnValues[j] = rs.getString("t_id");
                k1ColumnValue[j] = rs.getInt("k1");
                k3ColumnValue[j] = rs.getInt("k3");
            }
            Arrays.sort(tIdColumnValues);
            Arrays.sort(k1ColumnValue);
            Arrays.sort(k3ColumnValue);
            assertTrue(Arrays.equals(strings, tIdColumnValues));
            for (int m = 0; m < 26; m++) {
                assertEquals(m, k1ColumnValue[m]);
                assertEquals(m + 2, k3ColumnValue[m]);
            }
        }
    } finally {
        conn1.close();
    }
}