Java Code Examples for org.apache.hadoop.hbase.util.Pair

The following examples show how to use org.apache.hadoop.hbase.util.Pair. These examples are extracted from open source projects; the source project, source file, and license are noted above each example.
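Before the examples, here is a minimal, self-contained sketch of the Pair API as it recurs throughout this page: construction via the constructor or the static Pair.newPair factory, and access via getFirst()/getSecond(). The in-place setters are an assumption based on Pair being a simple mutable holder; everything else mirrors calls that appear in the examples below.

import org.apache.hadoop.hbase.util.Pair;

public class PairUsageSketch {
    public static void main(String[] args) {
        // Both construction styles appear in the examples below.
        Pair<String, Long> viaConstructor = new Pair<>("rowsScanned", 42L);
        Pair<String, Long> viaFactory = Pair.newPair("rowsScanned", 42L);

        // Read the two components.
        String label = viaConstructor.getFirst();
        long count = viaConstructor.getSecond();

        // Assumption: Pair exposes in-place setters (setFirst/setSecond).
        viaFactory.setFirst("bytesScanned");
        viaFactory.setSecond(1024L);

        System.out.println(label + "=" + count + "; "
            + viaFactory.getFirst() + "=" + viaFactory.getSecond());
    }
}

As the examples show, Pair is typically used to return two related values from a single method (a start key and its server, an estimate of bytes and of rows) without defining a dedicated holder class.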
Example 1
Source Project: phoenix   Source File: IndexLoadBalancerIT.java    License: Apache License 2.0
public boolean checkForColocation(HMaster master, String tableName, String indexTableName)
        throws IOException, InterruptedException {
    List<Pair<byte[], ServerName>> uTableStartKeysAndLocations =
            getStartKeysAndLocations(master, tableName);
    List<Pair<byte[], ServerName>> iTableStartKeysAndLocations =
            getStartKeysAndLocations(master, indexTableName);

    boolean regionsColocated = true;
    if (uTableStartKeysAndLocations.size() != iTableStartKeysAndLocations.size()) {
        regionsColocated = false;
    } else {
        for (int i = 0; i < uTableStartKeysAndLocations.size(); i++) {
            Pair<byte[], ServerName> uStartKeyAndLocation = uTableStartKeysAndLocations.get(i);
            Pair<byte[], ServerName> iStartKeyAndLocation = iTableStartKeysAndLocations.get(i);

            if (Bytes.compareTo(uStartKeyAndLocation.getFirst(), iStartKeyAndLocation
                    .getFirst()) == 0) {
                if (uStartKeyAndLocation.getSecond().equals(iStartKeyAndLocation.getSecond())) {
                    continue;
                }
            }
            regionsColocated = false;
        }
    }
    return regionsColocated;
}
 
Example 2
Source Project: hbase   Source File: HRegionServer.java    License: Apache License 2.0
protected void initializeMemStoreChunkCreator() {
  if (MemStoreLAB.isEnabled(conf)) {
    // MSLAB is enabled. So initialize MemStoreChunkPool
    // By this time, the MemstoreFlusher is already initialized. We can get the global limits from
    // it.
    Pair<Long, MemoryType> pair = MemorySizeUtil.getGlobalMemStoreSize(conf);
    long globalMemStoreSize = pair.getFirst();
    boolean offheap = this.regionServerAccounting.isOffheap();
    // When off heap memstore in use, take full area for chunk pool.
    float poolSizePercentage = offheap ? 1.0F :
        conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT);
    float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY,
        MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
    int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
    // init the chunkCreator
    ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
      initialCountPercentage, this.hMemManager);
  }
}
 
Example 3
Source Project: Kylin   Source File: CubeManager.java    License: Apache License 2.0
public Pair<CubeSegment, CubeSegment> appendAndMergeSegments(CubeInstance cube, long endDate) throws IOException {
    checkNoBuildingSegment(cube);
    checkCubeIsPartitioned(cube);

    if (cube.getSegments().size() == 0)
        throw new IllegalStateException("expect at least one existing segment");

    long appendStart = calculateStartDateForAppendSegment(cube);
    CubeSegment appendSegment = newSegment(cube, appendStart, endDate);

    long startDate = cube.getDescriptor().getModel().getPartitionDesc().getPartitionDateStart();
    CubeSegment mergeSegment = newSegment(cube, startDate, endDate);

    validateNewSegments(cube, mergeSegment);
    cube.getSegments().add(appendSegment);
    cube.getSegments().add(mergeSegment);
    Collections.sort(cube.getSegments());
    updateCube(cube);

    return new Pair<CubeSegment, CubeSegment>(appendSegment, mergeSegment);
}
 
Example 4
Source Project: phoenix   Source File: ExpressionUtil.java    License: Apache License 2.0
/**
 * <pre>
 * Infer OrderBys from the rowkey columns of the {@link PTable}; a projected table may have no rowkey
 * columns, in which case we move on to inspect its {@link ProjectedColumn}s via
 * {@link #getOrderByFromProjectedTable}.
 * The second part of the returned pair is the rowkey column offset to skip when creating OrderBys,
 * because for a salted/multiTenant/viewIndexId table some leading rowkey columns must be skipped.
 * </pre>
 * @param tableRef
 * @param phoenixConnection
 * @param orderByReverse
 * @return
 * @throws SQLException
 */
public static Pair<OrderBy,Integer> getOrderByFromTable(
        TableRef tableRef,
        PhoenixConnection phoenixConnection,
        boolean orderByReverse) throws SQLException {

    PTable table = tableRef.getTable();
    Pair<OrderBy,Integer> orderByAndRowKeyColumnOffset =
            getOrderByFromTableByRowKeyColumn(table, phoenixConnection, orderByReverse);
    if(orderByAndRowKeyColumnOffset.getFirst() != OrderBy.EMPTY_ORDER_BY) {
        return orderByAndRowKeyColumnOffset;
    }
    if(table.getType() == PTableType.PROJECTED) {
        orderByAndRowKeyColumnOffset =
                getOrderByFromProjectedTable(tableRef, phoenixConnection, orderByReverse);
        if(orderByAndRowKeyColumnOffset.getFirst() != OrderBy.EMPTY_ORDER_BY) {
            return orderByAndRowKeyColumnOffset;
        }
    }
    return new Pair<OrderBy,Integer>(OrderBy.EMPTY_ORDER_BY, 0);
}
 
Example 5
Source Project: phoenix   Source File: IndexScrutinyMapper.java    License: Apache License 2.0
@Override
protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
        throws IOException, InterruptedException {
    try {
        final List<Object> values = record.getValues();

        context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);
        currentBatchValues.add(new Pair<>(record.getRowTs(), values));
        if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize != 0) {
            // if we haven't hit the batch size, just report progress and move on to next record
            context.progress();
            return;
        } else {
            // otherwise, process the batch
            processBatch(context);
        }
        context.progress(); // Make sure progress is reported to Application Master.
    } catch (SQLException | IllegalArgumentException e) {
        LOGGER.error(" Error while read/write of a record ", e);
        context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
        throw new IOException(e);
    }
}
 
Example 6
Source Project: hbase   Source File: TestRSGroupsAdmin2.java    License: Apache License 2.0
private Pair<ServerName, RegionStateNode> createTableWithRegionSplitting(RSGroupInfo rsGroupInfo,
  int tableRegionCount) throws Exception {
  final byte[] familyNameBytes = Bytes.toBytes("f");
  // All the regions created below will be assigned to the default group.
  TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, tableRegionCount);
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> regions = getTableRegionMap().get(tableName);
      if (regions == null) {
        return false;
      }
      return getTableRegionMap().get(tableName).size() >= tableRegionCount;
    }
  });

  return randomlySetOneRegionStateToSplitting(rsGroupInfo);
}
 
Example 7
Source Project: hbase   Source File: BackupObserver.java    License: Apache License 2.0
@Override
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
  Configuration cfg = ctx.getEnvironment().getConfiguration();
  if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
    LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled");
    return;
  }
  try (Connection connection = ConnectionFactory.createConnection(cfg);
      BackupSystemTable tbl = new BackupSystemTable(connection)) {
    List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
    RegionInfo info = ctx.getEnvironment().getRegionInfo();
    TableName tableName = info.getTable();
    if (!fullyBackedUpTables.contains(tableName)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(tableName + " has not gone thru full backup");
      }
      return;
    }
    tbl.writeFilesForBulkLoadPreCommit(tableName, info.getEncodedNameAsBytes(), family, pairs);
    return;
  }
}
 
Example 8
Source Project: hbase   Source File: SplitTableRegionProcedure.java    License: Apache License 2.0
private Pair<Path, Path> splitStoreFile(HRegionFileSystem regionFs, byte[] family, HStoreFile sf)
  throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("pid=" + getProcId() + " splitting started for store file: " +
        sf.getPath() + " for region: " + getParentRegion().getShortNameToLog());
  }

  final byte[] splitRow = getSplitRow();
  final String familyName = Bytes.toString(family);
  final Path path_first = regionFs.splitStoreFile(this.daughterOneRI, familyName, sf, splitRow,
      false, splitPolicy);
  final Path path_second = regionFs.splitStoreFile(this.daughterTwoRI, familyName, sf, splitRow,
     true, splitPolicy);
  if (LOG.isDebugEnabled()) {
    LOG.debug("pid=" + getProcId() + " splitting complete for store file: " +
        sf.getPath() + " for region: " + getParentRegion().getShortNameToLog());
  }
  return new Pair<Path,Path>(path_first, path_second);
}
 
Example 9
Source Project: hbase   Source File: RequestConverter.java    License: Apache License 2.0
/**
 * Create a protocol buffer UpdateFavoredNodesRequest to update a list of favorednode mappings
 * @param updateRegionInfos a list of favored node mappings
 * @return a protocol buffer UpdateFavoredNodesRequest
 */
public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(
    final List<Pair<RegionInfo, List<ServerName>>> updateRegionInfos) {
  UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
  if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) {
    RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
    for (Pair<RegionInfo, List<ServerName>> pair : updateRegionInfos) {
      builder.setRegion(ProtobufUtil.toRegionInfo(pair.getFirst()));
      for (ServerName server : pair.getSecond()) {
        builder.addFavoredNodes(ProtobufUtil.toServerName(server));
      }
      ubuilder.addUpdateInfo(builder.build());
      builder.clear();
    }
  }
  return ubuilder.build();
}
 
Example 10
Source Project: phoenix   Source File: TupleProjector.java    License: Apache License 2.0
/**
 * Iterate over the list of cells returned from the scan and return a tuple projector for the
 * dynamic columns by parsing the metadata stored for the list of dynamic columns
 * @param result list of cells
 * @param dynCols list of dynamic columns to be populated
 * @param dynColCells list of cells corresponding to dynamic columns to be populated
 * @return The tuple projector corresponding to dynamic columns or null if there are no dynamic
 * columns to process
 * @throws InvalidProtocolBufferException Thrown if there is an error parsing byte[] to protobuf
 */
public static TupleProjector getDynamicColumnsTupleProjector(List<Cell> result,
        List<PColumn> dynCols, List<Cell> dynColCells) throws InvalidProtocolBufferException {
    Set<Pair<ByteBuffer, ByteBuffer>> dynColCellQualifiers = new HashSet<>();
    populateDynColsFromResult(result, dynCols, dynColCellQualifiers);
    if (dynCols.isEmpty()) {
        return null;
    }
    populateDynamicColumnCells(result, dynColCellQualifiers, dynColCells);
    if (dynColCells.isEmpty()) {
        return null;
    }
    KeyValueSchema dynColsSchema = PhoenixRuntime.buildKeyValueSchema(dynCols);
    Expression[] expressions = new Expression[dynCols.size()];
    for (int i = 0; i < dynCols.size(); i++) {
        expressions[i] = new KeyValueColumnExpression(dynCols.get(i));
    }
    return new TupleProjector(dynColsSchema, expressions);
}
 
Example 11
Source Project: phoenix   Source File: ConnectionlessQueryServicesImpl.java    License: Apache License 2.0
@Override
public MetaDataMutationResult getFunctions(PName tenantId,
        List<Pair<byte[], Long>> functionNameAndTimeStampPairs, long clientTimestamp)
        throws SQLException {
    List<PFunction> functions = new ArrayList<PFunction>(functionNameAndTimeStampPairs.size());
    for(Pair<byte[], Long> functionInfo: functionNameAndTimeStampPairs) {
        try {
            PFunction function2 = metaData.getFunction(new PTableKey(tenantId, Bytes.toString(functionInfo.getFirst())));
            functions.add(function2);
        } catch (FunctionNotFoundException e) {
            return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0, null);
        }
    }
    if(functions.isEmpty()) {
        return null;
    }
    return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, functions, true);
}
 
Example 12
Source Project: Kylin   Source File: DeployUtil.java    License: Apache License 2.0
public static void deployJobJars() throws IOException {
    Pair<File, File> files = getJobJarFiles();
    File originalJobJar = files.getFirst();
    File originalCoprocessorJar = files.getSecond();

    String jobJarPath = config().getKylinJobJarPath();
    if (StringUtils.isEmpty(jobJarPath)) {
        throw new RuntimeException("deployJobJars cannot find job jar");
    }

    File targetJobJar = new File(jobJarPath);
    File jobJarRenamedAsTarget = new File(originalJobJar.getParentFile(), targetJobJar.getName());
    if (!originalJobJar.equals(jobJarRenamedAsTarget)) {
        FileUtils.copyFile(originalJobJar, jobJarRenamedAsTarget);
    }

    File targetCoprocessorJar = new File(config().getCoprocessorLocalJar());
    File coprocessorJarRenamedAsTarget = new File(originalCoprocessorJar.getParentFile(), targetCoprocessorJar.getName());
    if (!originalCoprocessorJar.equals(coprocessorJarRenamedAsTarget)) {
        FileUtils.copyFile(originalCoprocessorJar, coprocessorJarRenamedAsTarget);
    }

    CliCommandExecutor cmdExec = config().getCliCommandExecutor();
    cmdExec.copyFile(jobJarRenamedAsTarget.getAbsolutePath(), targetJobJar.getParent());
    cmdExec.copyFile(coprocessorJarRenamedAsTarget.getAbsolutePath(), targetCoprocessorJar.getParent());
}
 
Example 13
Source Project: hbase   Source File: FuzzyRowFilter.java    License: Apache License 2.0
/**
 * @return true if and only if the fields of the filter that are serialized are equal to the
 *         corresponding fields in other. Used for testing.
 */
@Override
boolean areSerializedFieldsEqual(Filter o) {
  if (o == this) return true;
  if (!(o instanceof FuzzyRowFilter)) return false;

  FuzzyRowFilter other = (FuzzyRowFilter) o;
  if (this.fuzzyKeysData.size() != other.fuzzyKeysData.size()) return false;
  for (int i = 0; i < fuzzyKeysData.size(); ++i) {
    Pair<byte[], byte[]> thisData = this.fuzzyKeysData.get(i);
    Pair<byte[], byte[]> otherData = other.fuzzyKeysData.get(i);
    if (!(Bytes.equals(thisData.getFirst(), otherData.getFirst()) && Bytes.equals(
      thisData.getSecond(), otherData.getSecond()))) {
      return false;
    }
  }
  return true;
}
 
Example 14
Source Project: hbase   Source File: TestZKProcedureControllers.java    License: Apache License 2.0
@Override
public Pair<ZKProcedureCoordinator, List<ZKProcedureMemberRpcs>> start(
        ZKWatcher watcher, String operationName,
        ProcedureCoordinator coordinator, String controllerName,
        ProcedureMember member, List<String> expected) throws Exception {
  // start the controller
  ZKProcedureCoordinator controller = new ZKProcedureCoordinator(
      watcher, operationName, CONTROLLER_NODE_NAME);
  controller.start(coordinator);

  // make a cohort controller for each expected node

  List<ZKProcedureMemberRpcs> cohortControllers = new ArrayList<>();
  for (String nodeName : expected) {
    ZKProcedureMemberRpcs cc = new ZKProcedureMemberRpcs(watcher, operationName);
    cc.start(nodeName, member);
    cohortControllers.add(cc);
  }
  return new Pair<>(controller, cohortControllers);
}
 
Example 15
Source Project: phoenix   Source File: ScanPlan.java    License: Apache License 2.0
/**
 * @return Pair of numbers in which the first part is estimated number of bytes that will be
 *         scanned and the second part is estimated number of rows. Returned value is null if
 *         estimated size of data to scan is beyond a threshold.
 * @throws SQLException
 */
private static Pair<Long, Long> getEstimateOfDataSizeToScanIfWithinThreshold(StatementContext context, PTable table, Integer perScanLimit) throws SQLException {
    Scan scan = context.getScan();
    ConnectionQueryServices services = context.getConnection().getQueryServices();
    long estRowSize = SchemaUtil.estimateRowSize(table);
    long regionSize = services.getProps().getLong(HConstants.HREGION_MAX_FILESIZE,
            HConstants.DEFAULT_MAX_FILE_SIZE);
    if (perScanLimit == null || scan.getFilter() != null) {
        /*
         * If a limit is not provided or if we have a filter, then we are not able to decide whether
         * the amount of data we need to scan is less than the threshold.
         */
        return null;
    } 
    float factor =
        services.getProps().getFloat(QueryServices.LIMITED_QUERY_SERIAL_THRESHOLD,
            QueryServicesOptions.DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD);
    long threshold = (long)(factor * regionSize);
    long estimatedBytes = perScanLimit * estRowSize;
    long estimatedRows = perScanLimit;
    return (estimatedBytes < threshold) ? new Pair<>(estimatedBytes, estimatedRows) : null;
}
 
Example 16
Source Project: hbase   Source File: ZKReplicationQueueStorage.java    License: Apache License 2.0
/**
 * Return the {lastPushedSequenceId, ZNodeDataVersion} pair. If ZNodeDataVersion is -1, it means
 * that the ZNode does not exist.
 */
@VisibleForTesting
protected Pair<Long, Integer> getLastSequenceIdWithVersion(String encodedRegionName,
    String peerId) throws KeeperException {
  Stat stat = new Stat();
  String path = getSerialReplicationRegionPeerNode(encodedRegionName, peerId);
  byte[] data = ZKUtil.getDataNoWatch(zookeeper, path, stat);
  if (data == null) {
    // ZNode does not exist, so just return version -1 to indicate that no node exists.
    return Pair.newPair(HConstants.NO_SEQNUM, -1);
  }
  try {
    return Pair.newPair(ZKUtil.parseWALPositionFrom(data), stat.getVersion());
  } catch (DeserializationException de) {
    LOG.warn("Failed to parse log position (region=" + encodedRegionName + ", peerId=" + peerId
        + "), data=" + Bytes.toStringBinary(data));
  }
  return Pair.newPair(HConstants.NO_SEQNUM, stat.getVersion());
}
 
Example 17
Source Project: hbase   Source File: TestAsyncTableScanMetrics.java    License: Apache License 2.0
private static Pair<List<Result>, ScanMetrics> doScanWithRawAsyncTable(Scan scan)
    throws IOException, InterruptedException {
  BufferingScanResultConsumer consumer = new BufferingScanResultConsumer();
  CONN.getTable(TABLE_NAME).scan(scan, consumer);
  List<Result> results = new ArrayList<>();
  for (Result result; (result = consumer.take()) != null;) {
    results.add(result);
  }
  return Pair.newPair(results, consumer.getScanMetrics());
}
 
Example 18
Source Project: phoenix   Source File: PhoenixIndexCodec.java    License: Apache License 2.0
@Override
public Iterable<IndexUpdate> getIndexDeletes(TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey) throws IOException {
    PhoenixIndexMetaData metaData = (PhoenixIndexMetaData)context;
    List<IndexMaintainer> indexMaintainers = metaData.getIndexMaintainers();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ptr.set(state.getCurrentRowKey());
    List<IndexUpdate> indexUpdates = Lists.newArrayList();
    for (IndexMaintainer maintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> cols = Sets.newHashSet(maintainer.getAllColumns());
        cols.add(new ColumnReference(indexMaintainers.get(0).getDataEmptyKeyValueCF(), indexMaintainers.get(0).getEmptyKeyValueQualifier()));
        Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(cols, metaData.getReplayWrite() != null, true, context);
        ValueGetter valueGetter = statePair.getFirst();
        if (valueGetter != null) {
            IndexUpdate indexUpdate = statePair.getSecond();
            indexUpdate.setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName());
            Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(),
                    state.getCurrentTimestamp(), regionStartKey, regionEndKey);
            indexUpdate.setUpdate(delete);
            indexUpdates.add(indexUpdate);
        }
    }
    return indexUpdates;
}
 
Example 19
Source Project: hbase-operator-tools   Source File: RegionsMerger.java    License: Apache License 2.0
private boolean canMerge(Path path, RegionInfo region1, RegionInfo region2,
    Collection<Pair<RegionInfo, RegionInfo>> alreadyMerging) throws IOException {
  if(alreadyMerging.stream().anyMatch(regionPair ->
      region1.equals(regionPair.getFirst()) ||
      region2.equals(regionPair.getFirst()) ||
      region1.equals(regionPair.getSecond()) ||
      region2.equals(regionPair.getSecond()))){
    return false;
  }
  if (RegionInfo.areAdjacent(region1, region2)) {
    long size1 = sumSizeInFS(new Path(path, region1.getEncodedName()));
    long size2 = sumSizeInFS(new Path(path, region2.getEncodedName()));
    boolean mergeable = (resultSizeThreshold > (size1 + size2));
    if (!mergeable) {
      LOG.warn("Not merging regions {} and {} because resulting region size would get close to " +
          "the {} limit. {} total size: {}; {} total size:{}", region1.getEncodedName(),
        region2.getEncodedName(), resultSizeThreshold, region1.getEncodedName(), size1,
        region2.getEncodedName(), size2);
    }
    return mergeable;
  } else {
    LOG.warn(
      "WARNING: Can't merge regions {} and {} because those are not adjacent.",
      region1.getEncodedName(),
      region2.getEncodedName());
    return false;
  }
}
 
Example 20
Source Project: hbase   Source File: TestPerColumnFamilyFlush.java    License: Apache License 2.0
private static Pair<HRegion, HRegionServer> getRegionWithName(TableName tableName) {
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (HRegion region : hrs.getRegions(tableName)) {
      return Pair.newPair(region, hrs);
    }
  }
  return null;
}
 
Example 21
Source Project: phoenix   Source File: JoinCompiler.java    License: Apache License 2.0
@Override
public Pair<Table, List<JoinSpec>> visit(BindTableNode boundTableNode) throws SQLException {
    TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName());
    boolean isWildCard = isWildCardSelectForTable(select.getSelect(), tableRef, origResolver);
    Table table = new Table(boundTableNode, isWildCard, Collections.<ColumnDef>emptyList(), boundTableNode.getTableSamplingRate(), tableRef);
    return new Pair<Table, List<JoinSpec>>(table, null);
}
 
Example 22
Source Project: phoenix   Source File: DelegateConnectionQueryServices.java    License: Apache License 2.0
@Override
public MetaDataMutationResult addColumn(List<Mutation> tableMetaData,
                                        PTable table,
                                        PTable parentTable,
                                        Map<String, List<Pair<String, Object>>> properties,
                                        Set<String> colFamiliesForPColumnsToBeAdded,
                                        List<PColumn> columns) throws SQLException {
    return getDelegate().addColumn(tableMetaData, table, parentTable,
            properties, colFamiliesForPColumnsToBeAdded, columns);
}
 
Example 23
Source Project: hbase   Source File: NettyRpcClientConfigHelper.java    License: Apache License 2.0
static Pair<EventLoopGroup, Class<? extends Channel>> getEventLoopConfig(Configuration conf) {
  String name = conf.get(EVENT_LOOP_CONFIG);
  if (name == null) {
    return getDefaultEventLoopConfig(conf);
  }
  if (StringUtils.isBlank(name)) {
    return null;
  }
  return EVENT_LOOP_CONFIG_MAP.get(name);
}
 
Example 24
@Test
public void testParseCreateTablePrimaryKeyConstraintWithOrder() throws Exception {
    for (String order : new String[]{"asc", "desc", ""}) {
        String s = "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150), constraint pk primary key (id ${o}, name ${o}))".replace("${o}", order);
        CreateTableStatement stmt = (CreateTableStatement)new SQLParser(new StringReader(s)).parseStatement();
        PrimaryKeyConstraint pkConstraint = stmt.getPrimaryKeyConstraint();
        List<Pair<ColumnName,ColumnModifier>> columns = pkConstraint.getColumnNames();
        assertEquals(2, columns.size());
        for (Pair<ColumnName,ColumnModifier> pair : columns) {
            assertEquals(ColumnModifier.fromDDLValue(order), pkConstraint.getColumn(pair.getFirst()).getSecond());
        }
    }
}
 
Example 25
Source Project: hbase   Source File: MetaFixer.java    License: Apache License 2.0
/**
 * @return Attempts to calculate a new {@link RegionInfo} that covers the region range described
 *   in {@code hole}.
 */
private static Optional<RegionInfo> getHoleCover(Pair<RegionInfo, RegionInfo> hole) {
  final RegionInfo left = hole.getFirst();
  final RegionInfo right = hole.getSecond();

  if (left.getTable().equals(right.getTable())) {
    // Simple case.
    if (Bytes.compareTo(left.getEndKey(), right.getStartKey()) >= 0) {
      LOG.warn("Skipping hole fix; left-side endKey is not less than right-side startKey;"
        + " left=<{}>, right=<{}>", left, right);
      return Optional.empty();
    }
    return Optional.of(buildRegionInfo(left.getTable(), left.getEndKey(), right.getStartKey()));
  }

  final boolean leftUndefined = left.equals(RegionInfo.UNDEFINED);
  final boolean rightUndefined = right.equals(RegionInfo.UNDEFINED);
  final boolean last = left.isLast();
  final boolean first = right.isFirst();
  if (leftUndefined && rightUndefined) {
    LOG.warn("Skipping hole fix; both the hole left-side and right-side RegionInfos are " +
      "UNDEFINED; left=<{}>, right=<{}>", left, right);
    return Optional.empty();
  }
  if (leftUndefined || last) {
    return Optional.of(
      buildRegionInfo(right.getTable(), HConstants.EMPTY_START_ROW, right.getStartKey()));
  }
  if (rightUndefined || first) {
    return Optional.of(
      buildRegionInfo(left.getTable(), left.getEndKey(), HConstants.EMPTY_END_ROW));
  }
  LOG.warn("Skipping hole fix; don't know what to do with left=<{}>, right=<{}>", left, right);
  return Optional.empty();
}
 
Example 26
Source Project: phoenix   Source File: QuerySchemaParserFunction.java    License: Apache License 2.0
@Override
public Pair<String, String> apply(final String selectStatement) {
    Preconditions.checkNotNull(selectStatement);
    Preconditions.checkArgument(!selectStatement.isEmpty(), "Select Query is empty!!");
    Connection connection = null;
    try {
        connection = ConnectionUtil.getConnection(this.configuration);
        final Statement  statement = connection.createStatement();
        final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
        final QueryPlan queryPlan = pstmt.compileQuery(selectStatement);
        isValidStatement(queryPlan);
        final String tableName = queryPlan.getTableRef().getTable().getName().getString();
        final List<? extends ColumnProjector> projectedColumns = queryPlan.getProjector().getColumnProjectors();
        final List<String> columns = Lists.transform(projectedColumns,
                                                        new Function<ColumnProjector,String>() {
                                                            @Override
                                                            public String apply(ColumnProjector column) {
                                                                return column.getName();
                                                            }
                                                        });
        final String columnsAsStr = Joiner.on(",").join(columns);
        return new Pair<String, String>(tableName, columnsAsStr);
    } catch (SQLException e) {
        LOG.error(String.format(" Error [%s] parsing SELECT query [%s] ",e.getMessage(),selectStatement));
        throw new RuntimeException(e);
    } finally {
        if(connection != null) {
            try {
                connection.close();
            } catch(SQLException sqle) {
                LOG.error(" Error closing connection ");
                throw new RuntimeException(sqle);
            }
        }
    }
}
 
Example 27
Source Project: phoenix   Source File: OrderPreservingTracker.java    License: Apache License 2.0
public TrackOrderPreservingExpressionVisitor(OrderBy orderBy) {
    if(orderBy.isEmpty()) {
        this.expressionToPositionAndOrderByExpression = Collections.<Expression, Pair<Integer,OrderByExpression>> emptyMap();
        return;
    }
    List<OrderByExpression> orderByExpressions = orderBy.getOrderByExpressions();
    this.expressionToPositionAndOrderByExpression = new HashMap<Expression, Pair<Integer,OrderByExpression>>(orderByExpressions.size());
    int index = 0;
    for(OrderByExpression orderByExpression : orderByExpressions) {
        this.expressionToPositionAndOrderByExpression.put(
                orderByExpression.getExpression(),
                new Pair<Integer,OrderByExpression>(index++, orderByExpression));
    }
}
 
Example 28
Source Project: phoenix   Source File: AlterIndexStatement.java    License: Apache License 2.0
public AlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState indexState, boolean isRebuildAll, boolean async, ListMultimap<String,Pair<String,Object>> props) {
    super(indexTableNode,0);
    this.dataTableName = dataTableName;
    this.ifExists = ifExists;
    this.indexState = indexState;
    this.async = async;
    this.isRebuildAll = isRebuildAll;
    this.props= props==null ? ImmutableListMultimap.<String,Pair<String,Object>>of() : props;
}
 
Example 29
@Override
public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
    Collection<KeyValue> filtered) throws IOException {

  // stores all the return values
  IndexUpdateManager updateMap = new IndexUpdateManager();
  // batch the updates by row to make life easier and ordered
  Collection<Batch> batches = batchByRow(filtered);

  for (Batch batch : batches) {
    Put p = new Put(batch.getKvs().iterator().next().getRow());
    for (KeyValue kv : batch.getKvs()) {
      // we only need to cleanup Put entries
      byte type = kv.getType();
      Type t = KeyValue.Type.codeToType(type);
      if (!t.equals(Type.Put)) {
        continue;
      }

      // add the kv independently
      p.add(kv);
    }

    // do the usual thing as for deletes
    Collection<Batch> timeBatch = createTimestampBatchesFromMutation(p);
    LocalTableState state = new LocalTableState(env, localTable, p);
    for (Batch entry : timeBatch) {
      //just set the timestamp on the table - it already has all the future state
      state.setCurrentTimestamp(entry.getTimestamp());
      this.addDeleteUpdatesToMap(updateMap, state, entry.getTimestamp());
    }
  }
  return updateMap.toMap();
}
 
Example 30
Source Project: phoenix   Source File: BaseTenantSpecificViewIndexIT.java    License: Apache License 2.0
private void createAndPopulateTenantView(Connection conn, String tenantId, String baseTable, String valuePrefix) throws SQLException {
    String ddl = "CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM " + baseTable + " WHERE k1 = 1";
    conn.createStatement().execute(ddl);
    tenantViewsToDelete.add(new Pair<String, String>(tenantId, "v"));
    for (int i = 0; i < 10; i++) {
        conn.createStatement().execute("UPSERT INTO v(k2,v1,v2) VALUES(" + i + ",'" + valuePrefix + "v1-" + (i%5) + "','" + valuePrefix + "v2-" + (i%2) + "')");
    }
    conn.commit();
}