Java Code Examples for org.apache.hadoop.hbase.HConstants#EMPTY_START_ROW
The following examples show how to use org.apache.hadoop.hbase.HConstants#EMPTY_START_ROW.
Each example is taken from an open-source project; the source file and license are noted above each snippet.
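For orientation before the examples, here is a minimal sketch of the most common use of this constant: passing EMPTY_START_ROW (together with EMPTY_END_ROW) to a Scan so that it covers the whole table. This sketch is not taken from any of the projects below; the table name "my_table" and the configuration setup are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FullTableScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) { // placeholder table
      // EMPTY_START_ROW and EMPTY_END_ROW are both zero-length byte arrays:
      // an empty start row means "begin at the first region" and an empty
      // stop row means "run to the end of the table".
      Scan scan = new Scan()
          .withStartRow(HConstants.EMPTY_START_ROW)
          .withStopRow(HConstants.EMPTY_END_ROW);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}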
Example 1
Source File: TestEndToEndSplitTransaction.java From hbase with Apache License 2.0 | 6 votes |
void verifyStartEndKeys(Pair<byte[][], byte[][]> keys) {
  byte[][] startKeys = keys.getFirst();
  byte[][] endKeys = keys.getSecond();
  assertEquals(startKeys.length, endKeys.length);
  assertTrue("Found 0 regions for the table", startKeys.length > 0);

  assertArrayEquals("Start key for the first region is not byte[0]",
    HConstants.EMPTY_START_ROW, startKeys[0]);
  byte[] prevEndKey = HConstants.EMPTY_START_ROW;

  // ensure that we do not have any gaps
  for (int i = 0; i < startKeys.length; i++) {
    assertArrayEquals(
      "Hole in hbase:meta is detected. prevEndKey=" + Bytes.toStringBinary(prevEndKey) +
        " ,regionStartKey=" + Bytes.toStringBinary(startKeys[i]),
      prevEndKey, startKeys[i]);
    prevEndKey = endKeys[i];
  }
  assertArrayEquals("End key for the last region is not byte[0]", HConstants.EMPTY_END_ROW,
    endKeys[endKeys.length - 1]);
}
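As a usage note, the Pair handed to verifyStartEndKeys would typically come from a RegionLocator. The helper below is a hypothetical sketch, not part of the HBase test above:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Pair;

public class RegionKeysFetchSketch {
  // Hypothetical helper: fetch the per-region start/end keys for a table so they
  // can be passed to a check like verifyStartEndKeys() above, which asserts that
  // the first start key is EMPTY_START_ROW, the last end key is EMPTY_END_ROW,
  // and adjacent regions leave no gaps.
  static Pair<byte[][], byte[][]> fetchStartEndKeys(Connection connection, TableName tableName)
      throws Exception {
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
      return locator.getStartEndKeys();
    }
  }
}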
Example 2
Source File: ScanUtil.java From phoenix with Apache License 2.0 | 6 votes |
private static byte[] getReversedRow(byte[] startRow) {
    /*
     * Must get previous key because this is going from an inclusive start key to an exclusive stop key, and we need
     * the start key to be included. We get the previous key by decrementing the last byte by one. However, with
     * variable length data types, we need to fill with the max byte value, otherwise, if the start key is 'ab', we
     * lower it to 'aa' which would cause 'aab' to be included (which isn't correct). So we fill with a 0xFF byte to
     * prevent this. A single 0xFF would be enough for our primitive types (as that byte wouldn't occur), but for an
     * arbitrary VARBINARY key we can't know how many bytes to tack on. It's lame of HBase to force us to do this.
     */
    byte[] newStartRow = startRow;
    if (startRow.length != 0) {
        newStartRow = Arrays.copyOf(startRow, startRow.length + MAX_FILL_LENGTH_FOR_PREVIOUS_KEY.length);
        if (ByteUtil.previousKey(newStartRow, startRow.length)) {
            System.arraycopy(MAX_FILL_LENGTH_FOR_PREVIOUS_KEY, 0, newStartRow, startRow.length,
                MAX_FILL_LENGTH_FOR_PREVIOUS_KEY.length);
        } else {
            newStartRow = HConstants.EMPTY_START_ROW;
        }
    }
    return newStartRow;
}
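The comment in getReversedRow is the interesting part. The sketch below is illustrative only; it does not use Phoenix's ByteUtil, and the 16-byte fill length is an arbitrary stand-in for MAX_FILL_LENGTH_FOR_PREVIOUS_KEY. It shows the transformation the comment describes: the inclusive start key 'ab' becomes 'aa' followed by 0xFF padding, a boundary that still includes 'ab' but excludes 'aab'.

import java.util.Arrays;
import org.apache.hadoop.hbase.util.Bytes;

public class PreviousKeySketch {
  public static void main(String[] args) {
    byte[] startRow = Bytes.toBytes("ab");
    int fill = 16; // arbitrary fill length for this sketch
    byte[] boundary = Arrays.copyOf(startRow, startRow.length + fill);
    boundary[startRow.length - 1]--;                                      // 'ab' -> 'aa'
    Arrays.fill(boundary, startRow.length, boundary.length, (byte) 0xFF); // pad with 0xFF

    // 'ab' sorts above the boundary (so it stays included), while 'aab'
    // sorts below it (so it is excluded), which is exactly the point of the
    // 0xFF padding described in the Phoenix comment above.
    System.out.println(Bytes.toStringBinary(boundary));
    System.out.println(Bytes.compareTo(Bytes.toBytes("ab"), boundary) > 0);  // true
    System.out.println(Bytes.compareTo(Bytes.toBytes("aab"), boundary) > 0); // false
  }
}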
Example 3
Source File: HBaseAccessorWithFilter.java From pxf with Apache License 2.0 | 5 votes |
@Override
public void initialize(RequestContext requestContext) {
    super.initialize(requestContext);

    tupleDescription = new HBaseTupleDescription(requestContext);
    splits = new ArrayList<>();
    currentRegionIndex = 0;
    scanStartKey = HConstants.EMPTY_START_ROW;
    scanEndKey = HConstants.EMPTY_END_ROW;
}
Example 4
Source File: HBaseAccessor.java From pxf with Apache License 2.0 | 5 votes |
/**
 * Initializes HBaseAccessor based on GPDB table description and
 * initializes the scan start and end keys of the HBase table to default values.
 *
 * @param requestContext data provided in the request
 */
@Override
public void initialize(RequestContext requestContext) {
    super.initialize(requestContext);

    tupleDescription = new HBaseTupleDescription(context);
    split = null;
    scanStartKey = HConstants.EMPTY_START_ROW;
    scanEndKey = HConstants.EMPTY_END_ROW;
}
Example 5
Source File: TableInputFormat.java From hgraphdb with Apache License 2.0 | 5 votes |
@Override
protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
    if (isMock()) {
        return new Pair<>(new byte[][]{HConstants.EMPTY_START_ROW},
            new byte[][]{HConstants.EMPTY_END_ROW});
    } else {
        return super.getStartEndKeys();
    }
}
Example 6
Source File: ConnectionlessQueryServicesImpl.java From phoenix with Apache License 2.0 | 5 votes |
private static List<HRegionLocation> generateRegionLocations(byte[] physicalName, byte[][] splits) {
    byte[] startKey = HConstants.EMPTY_START_ROW;
    List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
    for (byte[] split : splits) {
        regions.add(new HRegionLocation(
            new HRegionInfo(TableName.valueOf(physicalName), startKey, split),
            SERVER_NAME, -1));
        startKey = split;
    }
    regions.add(new HRegionLocation(
        new HRegionInfo(TableName.valueOf(physicalName), startKey, HConstants.EMPTY_END_ROW),
        SERVER_NAME, -1));
    return regions;
}
Example 7
Source File: ScanUtil.java From phoenix with Apache License 2.0 | 5 votes |
public static void setupReverseScan(Scan scan) {
    if (isReversed(scan)) {
        byte[] startRow = scan.getStartRow();
        byte[] stopRow = scan.getStopRow();
        byte[] newStartRow = startRow;
        byte[] newStopRow = stopRow;
        if (startRow.length != 0) {
            /*
             * Must get previous key because this is going from an inclusive start key to an exclusive stop key, and
             * we need the start key to be included. We get the previous key by decrementing the last byte by one.
             * However, with variable length data types, we need to fill with the max byte value, otherwise, if the
             * start key is 'ab', we lower it to 'aa' which would cause 'aab' to be included (which isn't correct).
             * So we fill with a 0xFF byte to prevent this. A single 0xFF would be enough for our primitive types (as
             * that byte wouldn't occur), but for an arbitrary VARBINARY key we can't know how many bytes to tack
             * on. It's lame of HBase to force us to do this.
             */
            newStartRow = Arrays.copyOf(startRow, startRow.length + MAX_FILL_LENGTH_FOR_PREVIOUS_KEY.length);
            if (ByteUtil.previousKey(newStartRow, startRow.length)) {
                System.arraycopy(MAX_FILL_LENGTH_FOR_PREVIOUS_KEY, 0, newStartRow, startRow.length,
                    MAX_FILL_LENGTH_FOR_PREVIOUS_KEY.length);
            } else {
                newStartRow = HConstants.EMPTY_START_ROW;
            }
        }
        if (stopRow.length != 0) {
            // Must add null byte because we need the start to be exclusive while it was inclusive
            newStopRow = ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY);
        }
        scan.setStartRow(newStopRow);
        scan.setStopRow(newStartRow);
        scan.setReversed(true);
    }
}
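For comparison with the Phoenix logic above, here is a minimal sketch of a reversed scan built directly with the HBase client API; the row keys "aaa" and "zzz" are placeholders.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ReverseScanSketch {
  static Scan buildReverseScan() {
    // A reversed scan walks from the upper boundary down towards the lower one,
    // which is why setupReverseScan() above swaps the original start and stop rows.
    return new Scan()
        .withStartRow(Bytes.toBytes("zzz"), true)   // upper boundary, inclusive
        .withStopRow(Bytes.toBytes("aaa"), false)   // lower boundary, exclusive
        .setReversed(true);
  }
}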
Example 8
Source File: HMobStore.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Creates the writer for the mob file in temp directory.
 * @param date The latest date of written cells.
 * @param maxKeyCount The key count.
 * @param compression The compression algorithm.
 * @param startKey The start key.
 * @param isCompaction If the writer is used in compaction.
 * @return The writer for the mob file.
 * @throws IOException
 */
public StoreFileWriter createWriterInTmp(Date date, long maxKeyCount,
    Compression.Algorithm compression, byte[] startKey,
    boolean isCompaction) throws IOException {
  if (startKey == null) {
    startKey = HConstants.EMPTY_START_ROW;
  }
  Path path = getTempDir();
  return createWriterInTmp(MobUtils.formatDate(date), path, maxKeyCount, compression, startKey,
    isCompaction);
}
Example 9
Source File: TestStripeStoreFileManager.java From hbase with Apache License 2.0 | 5 votes |
private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
    Collection<HStoreFile> results) throws Exception {
  start = start != null ? start : HConstants.EMPTY_START_ROW;
  end = end != null ? end : HConstants.EMPTY_END_ROW;
  Collection<HStoreFile> sfs = manager.getFilesForScan(start, true, end, false);
  assertEquals(results.size(), sfs.size());
  for (HStoreFile result : results) {
    assertTrue(sfs.contains(result));
  }
}
Example 10
Source File: TableInputFormatBase.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Calculates the splits that will serve as input for the map tasks.
 *
 * Splits are created in number equal to the smallest between numSplits and
 * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
 * If the number of splits is smaller than the number of
 * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
 * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
 * and are grouped the most evenly possible. In the
 * case splits are uneven the bigger splits are placed first in the
 * {@link InputSplit} array.
 *
 * @param job the map task {@link JobConf}
 * @param numSplits a hint to calculate the number of splits (mapred.map.tasks).
 *
 * @return the input splits
 *
 * @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
 */
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
  if (this.table == null) {
    initialize(job);
  }
  // null check in case our child overrides getTable to not throw.
  try {
    if (getTable() == null) {
      // initialize() must not have been implemented in the subclass.
      throw new IOException(INITIALIZATION_ERROR);
    }
  } catch (IllegalStateException exception) {
    throw new IOException(INITIALIZATION_ERROR, exception);
  }

  byte [][] startKeys = this.regionLocator.getStartKeys();
  if (startKeys == null || startKeys.length == 0) {
    throw new IOException("Expecting at least one region");
  }
  if (this.inputColumns == null || this.inputColumns.length == 0) {
    throw new IOException("Expecting at least one column");
  }
  int realNumSplits = numSplits > startKeys.length? startKeys.length: numSplits;
  InputSplit[] splits = new InputSplit[realNumSplits];
  int middle = startKeys.length / realNumSplits;
  int startPos = 0;
  for (int i = 0; i < realNumSplits; i++) {
    int lastPos = startPos + middle;
    lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
    String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname();
    splits[i] = new TableSplit(this.table.getName(),
      startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW,
      regionLocation);
    LOG.info("split: " + i + "->" + splits[i]);
    startPos = lastPos;
  }
  return splits;
}
Example 11
Source File: ConnectionQueryServicesImpl.java From phoenix with BSD 3-Clause "New" or "Revised" License | 5 votes |
@Override
public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException {
    /*
     * Use HConnection.getRegionLocation as it uses the cache in HConnection, while getting
     * all region locations from the HTable doesn't.
     */
    int retryCount = 0, maxRetryCount = 1;
    boolean reload = false;
    while (true) {
        try {
            // We could surface the package projected HConnectionImplementation.getNumberOfCachedRegionLocations
            // to get the sizing info we need, but this would require a new class in the same package and a cast
            // to this implementation class, so it's probably not worth it.
            List<HRegionLocation> locations = Lists.newArrayList();
            byte[] currentKey = HConstants.EMPTY_START_ROW;
            do {
                HRegionLocation regionLocation = connection.getRegionLocation(tableName, currentKey, reload);
                locations.add(regionLocation);
                currentKey = regionLocation.getRegionInfo().getEndKey();
            } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
            return locations;
        } catch (IOException e) {
            if (retryCount++ < maxRetryCount) { // One retry, in case split occurs while navigating
                reload = true;
                continue;
            }
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL)
                .setRootCause(e).build().buildException();
        }
    }
}
Example 12
Source File: TableOverAsyncTable.java From hbase with Apache License 2.0 | 5 votes |
private List<byte[]> getStartKeysInRange(byte[] start, byte[] end) throws IOException {
  if (start == null) {
    start = HConstants.EMPTY_START_ROW;
  }
  if (end == null) {
    end = HConstants.EMPTY_END_ROW;
  }
  return getKeysAndRegionsInRange(start, end, true).getFirst();
}
Example 13
Source File: ConnectionlessQueryServicesImpl.java From phoenix with Apache License 2.0 | 5 votes |
private static List<HRegionLocation> generateRegionLocations(byte[] physicalName, byte[][] splits) {
    byte[] startKey = HConstants.EMPTY_START_ROW;
    List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
    for (byte[] split : splits) {
        regions.add(new HRegionLocation(RegionInfoBuilder
            .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey)
            .setEndKey(split).build(), SERVER_NAME, -1));
        startKey = split;
    }
    regions.add(new HRegionLocation(RegionInfoBuilder
        .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey)
        .setEndKey(HConstants.EMPTY_END_ROW).build(), SERVER_NAME, -1));
    return regions;
}
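The boundary convention used above can be checked directly: for any list of split points, the generated regions should start at EMPTY_START_ROW, end at EMPTY_END_ROW, and each region's end key should equal the next region's start key. The following is a self-contained sketch of that invariant, not Phoenix code; the table name "demo" and the split points are placeholders.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionBoundarySketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("demo"); // placeholder table name
    byte[][] splits = { Bytes.toBytes("m"), Bytes.toBytes("t") };

    List<RegionInfo> regions = new ArrayList<>();
    byte[] startKey = HConstants.EMPTY_START_ROW;
    for (byte[] split : splits) {
      regions.add(RegionInfoBuilder.newBuilder(table)
          .setStartKey(startKey).setEndKey(split).build());
      startKey = split;
    }
    // The last region runs from the final split point to the end of the table.
    regions.add(RegionInfoBuilder.newBuilder(table)
        .setStartKey(startKey).setEndKey(HConstants.EMPTY_END_ROW).build());

    // First start key and last end key are the empty byte array; adjacent
    // regions share a boundary, so the key space has no holes.
    System.out.println(regions.get(0).getStartKey().length == 0);                // true
    System.out.println(regions.get(regions.size() - 1).getEndKey().length == 0); // true
    for (int i = 1; i < regions.size(); i++) {
      System.out.println(Bytes.equals(regions.get(i - 1).getEndKey(),
          regions.get(i).getStartKey()));                                        // true
    }
  }
}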
Example 14
Source File: TransactionState.java From hbase-secondary-index with GNU General Public License v3.0 | 4 votes |
public ScanRange(final byte[] startRow, final byte[] endRow) {
  this.startRow = startRow == HConstants.EMPTY_START_ROW ? null : startRow;
  this.endRow = endRow == HConstants.EMPTY_END_ROW ? null : endRow;
}
Example 15
Source File: HBaseSalter.java From SpyGlass with Apache License 2.0 | 4 votes |
public static Pair<byte[], byte[]>[] getDistributedIntervals(
    byte[] originalStartKey, byte[] originalStopKey,
    byte[] regionStartKey, byte[] regionStopKey,
    String prefixList) throws IOException {
  LOG.debug("".format("OSRT: (%s) OSTP: (%s) RSRT: (%s) RSTP: (%s) PRFX: (%s)",
      Bytes.toString(originalStartKey),
      Bytes.toString(originalStopKey),
      Bytes.toString(regionStartKey),
      Bytes.toString(regionStopKey),
      prefixList
  ));

  byte[][] startKeys;
  byte[][] stopKeys;

  if (Arrays.equals(regionStartKey, HConstants.EMPTY_START_ROW)
      && Arrays.equals(regionStopKey, HConstants.EMPTY_END_ROW)) {
    startKeys = getAllKeys(originalStartKey, prefixList);
    stopKeys = getAllKeys(originalStopKey, prefixList);
  } else if (Arrays.equals(regionStartKey, HConstants.EMPTY_START_ROW)) {
    startKeys = getAllKeysWithStop(originalStartKey, prefixList, regionStopKey[0]);
    stopKeys = getAllKeysWithStop(originalStopKey, prefixList, regionStopKey[0]);
  } else if (Arrays.equals(regionStopKey, HConstants.EMPTY_END_ROW)) {
    startKeys = getAllKeysWithStart(originalStartKey, prefixList, regionStartKey[0]);
    stopKeys = getAllKeysWithStart(originalStopKey, prefixList, regionStartKey[0]);
  } else {
    startKeys = getAllKeysInRange(originalStartKey, prefixList, regionStartKey[0], regionStopKey[0]);
    stopKeys = getAllKeysInRange(originalStopKey, prefixList, regionStartKey[0], regionStopKey[0]);
  }

  if (startKeys.length != stopKeys.length) {
    throw new IOException("LENGTH of START Keys and STOP Keys DO NOT match");
  }

  if (Arrays.equals(originalStartKey, HConstants.EMPTY_START_ROW)
      && Arrays.equals(originalStopKey, HConstants.EMPTY_END_ROW)) {
    Arrays.sort(stopKeys, Bytes.BYTES_RAWCOMPARATOR);
    // stop keys are the start key of the next interval
    for (int i = startKeys.length - 1; i >= 1; i--) {
      startKeys[i] = startKeys[i - 1];
    }
    startKeys[0] = HConstants.EMPTY_START_ROW;
    stopKeys[stopKeys.length - 1] = HConstants.EMPTY_END_ROW;
  } else if (Arrays.equals(originalStartKey, HConstants.EMPTY_START_ROW)) {
    Arrays.sort(stopKeys, Bytes.BYTES_RAWCOMPARATOR);
    // stop keys are the start key of the next interval
    for (int i = startKeys.length - 1; i >= 1; i--) {
      startKeys[i] = startKeys[i - 1];
    }
    startKeys[0] = HConstants.EMPTY_START_ROW;
  } else if (Arrays.equals(originalStopKey, HConstants.EMPTY_END_ROW)) {
    Arrays.sort(startKeys, Bytes.BYTES_RAWCOMPARATOR);
    // stop keys are the start key of the next interval
    for (int i = 0; i < stopKeys.length - 1; i++) {
      stopKeys[i] = stopKeys[i + 1];
    }
    stopKeys[stopKeys.length - 1] = HConstants.EMPTY_END_ROW;
  }

  Pair<byte[], byte[]>[] intervals = new Pair[startKeys.length];
  for (int i = 0; i < startKeys.length; i++) {
    intervals[i] = new Pair<byte[], byte[]>(startKeys[i], stopKeys[i]);
  }

  return intervals;
}
Example 16
Source File: RegionInfoBuilder.java From hbase with Apache License 2.0 | 4 votes |
/**
 * Private constructor used constructing MutableRegionInfo for the
 * first meta regions
 */
private MutableRegionInfo(long regionId, TableName tableName, int replicaId) {
  this(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId,
    replicaId, false);
}
Example 17
Source File: RegionInfoBuilder.java From hbase with Apache License 2.0 | 4 votes |
private static byte[] checkStartKey(byte[] startKey) {
  return startKey == null ? HConstants.EMPTY_START_ROW : startKey;
}
Example 18
Source File: AbstractTestRegionLocator.java From hbase with Apache License 2.0 | 4 votes |
private byte[] getStartKey(int index) {
  return index == 0 ? HConstants.EMPTY_START_ROW : SPLIT_KEYS[index - 1];
}
Example 19
Source File: HBaseFilterBuilder.java From pxf with Apache License 2.0 | 4 votes |
public HBaseFilterBuilder(HBaseTupleDescription tupleDescription) {
    this.filterQueue = new LinkedList<>();
    this.tupleDescription = tupleDescription;
    this.startKey = HConstants.EMPTY_START_ROW;
    this.endKey = HConstants.EMPTY_END_ROW;
}
Example 20
Source File: SkeletonHBaseClientPartition.java From spliceengine with GNU Affero General Public License v3.0
@Override
public byte[] getStartKey() {
    return HConstants.EMPTY_START_ROW;
}