Java Code Examples for org.apache.hadoop.hbase.util.Pair#getSecond()

The following examples show how to use org.apache.hadoop.hbase.util.Pair#getSecond(). They are taken from open source projects; you can go to the original project or source file by following the link above each example.
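Before the project examples, here is a minimal standalone sketch of the API itself: Pair is a simple two-element container, getFirst() returns the first component and getSecond() returns the second. The class and variable names below are illustrative only.

import org.apache.hadoop.hbase.util.Pair;

public class PairGetSecondSketch {
  public static void main(String[] args) {
    // A Pair holds two values, possibly of different types.
    Pair<String, Integer> tableAndRegionCount = new Pair<>("users", 12);
    String tableName = tableAndRegionCount.getFirst();   // "users"
    int regionCount = tableAndRegionCount.getSecond();   // 12 (auto-unboxed)
    // setSecond(...) replaces the second component in place.
    tableAndRegionCount.setSecond(regionCount + 1);
    System.out.println(tableName + " -> " + tableAndRegionCount.getSecond());
  }
}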
Example 1
Source File: TsvImporterTextMapper.java    From hbase with Apache License 2.0
/**
 * Convert a line of TSV text into an HBase table row.
 */
@Override
public void map(LongWritable offset, Text value, Context context) throws IOException {
  try {
    Pair<Integer,Integer> rowKeyOffests = parser.parseRowKey(value.getBytes(), value.getLength());
    ImmutableBytesWritable rowKey = new ImmutableBytesWritable(
        value.getBytes(), rowKeyOffests.getFirst(), rowKeyOffests.getSecond());
    context.write(rowKey, value);
  } catch (ImportTsv.TsvParser.BadTsvLineException|IllegalArgumentException badLine) {
    if (logBadLines) {
      System.err.println(value);
    }
    System.err.println("Bad line at offset: " + offset.get() + ":\n" + badLine.getMessage());
    if (skipBadLines) {
      incrementBadLineCount(1);
      return;
    }
    throw new IOException(badLine);
  } catch (InterruptedException e) {
    LOG.error("Interrupted while emitting TSV text", e);
    Thread.currentThread().interrupt();
  }
}
 
Example 2
Source File: IIKeyValueCodec.java    From Kylin with Apache License 2.0
private void goToNext() {
	if (next != null) { // already fetched, not yet consumed
		return;
	}

	// NOTE the input keys are ordered
	while (next == null && iterator.hasNext()) {
		Pair<ImmutableBytesWritable, ImmutableBytesWritable> kv = iterator
				.next();
		ImmutableBytesWritable k = kv.getFirst();
		ImmutableBytesWritable v = kv.getSecond();
		decodeKey(k);

		if (curShard != lastShard
				|| curSliceTimestamp != lastSliceTimestamp) {
			makeNext();
		}
		consumeCurrent(v);
	}
	if (next == null) {
		makeNext();
	}
}
 
Example 3
Source File: DeployUtil.java    From Kylin with Apache License 2.0
public static void deployJobJars() throws IOException {
    Pair<File, File> files = getJobJarFiles();
    File originalJobJar = files.getFirst();
    File originalCoprocessorJar = files.getSecond();

    String jobJarPath = config().getKylinJobJarPath();
    if (StringUtils.isEmpty(jobJarPath)) {
        throw new RuntimeException("deployJobJars cannot find job jar");
    }

    File targetJobJar = new File(jobJarPath);
    File jobJarRenamedAsTarget = new File(originalJobJar.getParentFile(), targetJobJar.getName());
    if (originalJobJar.equals(jobJarRenamedAsTarget) == false) {
        FileUtils.copyFile(originalJobJar, jobJarRenamedAsTarget);
    }

    File targetCoprocessorJar = new File(config().getCoprocessorLocalJar());
    File coprocessorJarRenamedAsTarget = new File(originalCoprocessorJar.getParentFile(), targetCoprocessorJar.getName());
    if (originalCoprocessorJar.equals(coprocessorJarRenamedAsTarget) == false) {
        FileUtils.copyFile(originalCoprocessorJar, coprocessorJarRenamedAsTarget);
    }

    CliCommandExecutor cmdExec = config().getCliCommandExecutor();
    cmdExec.copyFile(jobJarRenamedAsTarget.getAbsolutePath(), targetJobJar.getParent());
    cmdExec.copyFile(coprocessorJarRenamedAsTarget.getAbsolutePath(), targetCoprocessorJar.getParent());
}
 
Example 4
Source File: PhoenixIndexImportDirectMapper.java    From phoenix with Apache License 2.0
private void writeBatch(MutationState mutationState, Context context) throws IOException,
        SQLException, InterruptedException {
    final Iterator<Pair<byte[], List<Mutation>>> iterator = mutationState.toMutations(true, null);
    while (iterator.hasNext()) {
        Pair<byte[], List<Mutation>> mutationPair = iterator.next();
        List<Mutation> batchMutations = mutationPair.getSecond();
        List<List<Mutation>> batchOfBatchMutations =
            MutationState.getMutationBatchList(batchSize, batchSizeBytes, batchMutations);
        for (List<Mutation> mutationList : batchOfBatchMutations) {
            writer.write(mutationList);
        }
        context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(
            mutationPair.getSecond().size());
    }
    connection.rollback();
    currentBatchCount = 0;
}
 
Example 5
Source File: RegionServerAccounting.java    From hbase with Apache License 2.0
public RegionServerAccounting(Configuration conf) {
  Pair<Long, MemoryType> globalMemstoreSizePair = MemorySizeUtil.getGlobalMemStoreSize(conf);
  this.globalMemStoreLimit = globalMemstoreSizePair.getFirst();
  this.memType = globalMemstoreSizePair.getSecond();
  this.globalMemStoreLimitLowMarkPercent =
      MemorySizeUtil.getGlobalMemStoreHeapLowerMark(conf, this.memType == MemoryType.HEAP);
  // When off heap memstore in use we configure the global off heap space for memstore as bytes
  // not as % of max memory size. In such case, the lower water mark should be specified using the
  // key "hbase.regionserver.global.memstore.size.lower.limit" which says % of the global upper
  // bound and defaults to 95%. In on heap case also specifying this way is ideal. But in the past
  // we used to take lower bound also as the % of xmx (38% as default). For backward compatibility
  // for this deprecated config, we will fall back to read that config when new one is missing.
  // Only for on heap case, do this fallback mechanism. For off heap it makes no sense.
  // TODO When to get rid of the deprecated config? ie
  // "hbase.regionserver.global.memstore.lowerLimit". Can get rid of this boolean passing then.
  this.globalMemStoreLimitLowMark =
      (long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent);
  this.globalOnHeapMemstoreLimit = MemorySizeUtil.getOnheapGlobalMemStoreSize(conf);
  this.globalOnHeapMemstoreLimitLowMark =
      (long) (this.globalOnHeapMemstoreLimit * this.globalMemStoreLimitLowMarkPercent);
  this.retainedRegionRWRequestsCnt = new ConcurrentHashMap<>();
}
 
Example 6
Source File: CubeStorageEngine.java    From Kylin with Apache License 2.0
private TupleFilter translateDerivedInCompare(CompareTupleFilter compf, Set<TblColRef> collector) {
    if (compf.getColumn() == null || compf.getValues().isEmpty())
        return compf;

    TblColRef derived = compf.getColumn();
    if (cubeDesc.isDerived(derived) == false)
        return compf;

    DeriveInfo hostInfo = cubeDesc.getHostInfo(derived);
    CubeManager cubeMgr = CubeManager.getInstance(this.cubeInstance.getConfig());
    CubeSegment seg = cubeInstance.getLatestReadySegment();
    LookupStringTable lookup = cubeMgr.getLookupTable(seg, hostInfo.dimension);
    Pair<TupleFilter, Boolean> translated = DerivedFilterTranslator.translate(lookup, hostInfo, compf);
    TupleFilter translatedFilter = translated.getFirst();
    boolean loosened = translated.getSecond();
    if (loosened) {
        collectColumnsRecursively(compf, collector);
    }
    return translatedFilter;
}
 
Example 7
Source File: HFileLink.java    From hbase with Apache License 2.0
/**
 * Get the full path of the HFile referenced by the back reference
 *
 * @param rootDir root hbase directory
 * @param linkRefPath Link Back Reference path
 * @return full path of the referenced hfile
 */
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
  Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());
  TableName linkTableName = p.getFirst();
  String linkRegionName = p.getSecond();

  String hfileName = getBackReferenceFileName(linkRefPath.getParent());
  Path familyPath = linkRefPath.getParent().getParent();
  Path regionPath = familyPath.getParent();
  Path tablePath = regionPath.getParent();

  String linkName = createHFileLinkName(CommonFSUtils.getTableName(tablePath),
          regionPath.getName(), hfileName);
  Path linkTableDir = CommonFSUtils.getTableDir(rootDir, linkTableName);
  Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
  return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
 
Example 8
Source File: IndexWriter.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Convert the passed index updates to {@link HTableInterfaceReference}s.
 * @param indexUpdates from the index builder
 * @return pairs that can then be written by an {@link IndexWriter}.
 */
public static Multimap<HTableInterfaceReference, Mutation> resolveTableReferences(
    Collection<Pair<Mutation, byte[]>> indexUpdates) {
  Multimap<HTableInterfaceReference, Mutation> updates = ArrayListMultimap
      .<HTableInterfaceReference, Mutation> create();
  // simple map to make lookups easy while we build the map of tables to create
  Map<ImmutableBytesPtr, HTableInterfaceReference> tables =
      new HashMap<ImmutableBytesPtr, HTableInterfaceReference>(updates.size());
  for (Pair<Mutation, byte[]> entry : indexUpdates) {
    byte[] tableName = entry.getSecond();
    ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
    HTableInterfaceReference table = tables.get(ptr);
    if (table == null) {
      table = new HTableInterfaceReference(ptr);
      tables.put(ptr, table);
    }
    updates.put(table, entry.getFirst());
  }

  return updates;
}
 
Example 9
Source File: QueryOptimizerTest.java    From phoenix with Apache License 2.0
private static List<String> getColumnNames(List<Pair<String, String>> columns) {
    List<String> columnNames = new ArrayList<String>(columns.size());
    for (Pair<String, String> col : columns) {
        String familyName = col.getFirst();
        String columnName = col.getSecond();
        if (familyName != null) {
            columnName = familyName + QueryConstants.NAME_SEPARATOR + columnName;
        }
        columnNames.add(columnName);
    }
    return columnNames;
}
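A hedged usage sketch for the helper above; the column names are assumptions for illustration, and Phoenix's QueryConstants.NAME_SEPARATOR is typically ".".

List<Pair<String, String>> columns = new ArrayList<>();
columns.add(new Pair<String, String>(null, "ID"));      // no column family
columns.add(new Pair<String, String>("CF1", "NAME"));   // family-qualified column
List<String> names = getColumnNames(columns);           // e.g. ["ID", "CF1.NAME"]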
 
Example 10
Source File: CoveredColumnIndexCodec.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * @param group
 * @param state
 * @return the update that should be made to the table
 */
private IndexUpdate getIndexUpdateForGroup(ColumnGroup group, TableState state) {
  List<CoveredColumn> refs = group.getColumns();
  try {
    Pair<Scanner, IndexUpdate> stateInfo = state.getIndexedColumnsTableState(refs);
    Scanner kvs = stateInfo.getFirst();
    Pair<Integer, List<ColumnEntry>> columns =
        getNextEntries(refs, kvs, state.getCurrentRowKey());
    // make sure we close the scanner
    kvs.close();
    if (columns.getFirst().intValue() == 0) {
      return stateInfo.getSecond();
    }
    // have all the column entries, so just turn it into a Delete for the row
    // convert the entries to the needed values
    byte[] rowKey =
        composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond());
    Put p = new Put(rowKey, state.getCurrentTimestamp());
    // add the columns to the put
    addColumnsToPut(p, columns.getSecond());

    // update the index info
    IndexUpdate update = stateInfo.getSecond();
    update.setTable(Bytes.toBytes(group.getTable()));
    update.setUpdate(p);
    return update;
  } catch (IOException e) {
    throw new RuntimeException("Unexpected exception when getting state for columns: " + refs);
  }
}
 
Example 11
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
/**
 * Get the list of uncommitted KeyValues for the connection. Currently used to write a
 * Phoenix-compliant HFile from a map/reduce job.
 * @param conn an open JDBC connection
 * @return the list of HBase mutations for uncommitted data
 * @throws SQLException
 */
public static Iterator<Pair<byte[],List<KeyValue>>> getUncommittedDataIterator(Connection conn, boolean includeMutableIndexes) throws SQLException {
    final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    final Iterator<Pair<byte[],List<Mutation>>> iterator = pconn.getMutationState().toMutations(includeMutableIndexes);
    return new Iterator<Pair<byte[],List<KeyValue>>>() {

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        @Override
        public Pair<byte[], List<KeyValue>> next() {
            Pair<byte[],List<Mutation>> pair = iterator.next();
            List<KeyValue> keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate 5 key values per row
            for (Mutation mutation : pair.getSecond()) {
                for (List<Cell> keyValueList : mutation.getFamilyCellMap().values()) {
                    for (Cell keyValue : keyValueList) {
                        keyValues.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(keyValue));
                    }
                }
            }
            Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator());
            return new Pair<byte[], List<KeyValue>>(pair.getFirst(),keyValues);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

    };
}
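A hedged caller sketch for the method above; conn is assumed to be an open Phoenix JDBC Connection, and the HFile-writing step is only indicated by a comment.

Iterator<Pair<byte[], List<KeyValue>>> uncommitted =
        PhoenixRuntime.getUncommittedDataIterator(conn, true);
while (uncommitted.hasNext()) {
    Pair<byte[], List<KeyValue>> tableData = uncommitted.next();
    byte[] tableName = tableData.getFirst();
    List<KeyValue> sortedKeyValues = tableData.getSecond();
    // e.g. hand sortedKeyValues to an HFile writer for the table identified by tableName
}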
 
Example 12
Source File: MetaDataClient.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint) throws SQLException {
    try {
        ColumnName columnDefName = def.getColumnDefName();
        ColumnModifier columnModifier = def.getColumnModifier();
        boolean isPK = def.isPK();
        if (pkConstraint != null) {
            Pair<ColumnName,ColumnModifier> pkColumnModifier = pkConstraint.getColumn(columnDefName);
            if (pkColumnModifier != null) {
                isPK = true;
                columnModifier = pkColumnModifier.getSecond();
            }
        }
        
        String columnName = columnDefName.getColumnName();
        PName familyName = null;
        if (def.isPK() && !pkConstraint.getColumnNames().isEmpty() ) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                .setColumnName(columnName).build().buildException();
        }
        if (def.getColumnDefName().getFamilyName() != null) {
            String family = def.getColumnDefName().getFamilyName();
            if (isPK) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME)
                    .setColumnName(columnName).setFamilyName(family).build().buildException();
            } else if (!def.isNull()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL)
                    .setColumnName(columnName).setFamilyName(family).build().buildException();
            }
            familyName = PNameFactory.newName(family);
        } else if (!isPK) {
            familyName = PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
        }
        
        PColumn column = new PColumnImpl(PNameFactory.newName(columnName), familyName, def.getDataType(),
                def.getMaxLength(), def.getScale(), def.isNull(), position, columnModifier);
        return column;
    } catch (IllegalArgumentException e) { // Based on precondition check in constructor
        throw new SQLException(e);
    }
}
 
Example 13
Source File: IndexRegionObserver.java    From phoenix with Apache License 2.0
/**
 * This method applies the pending put mutations to the next row states.
 * Before this method is called, the next row states are set to the current row states.
 */
private void applyPendingPutMutations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
                                      BatchMutateContext context, long now) throws IOException {
    for (Integer i = 0; i < miniBatchOp.size(); i++) {
        if (miniBatchOp.getOperationStatus(i) == IGNORE) {
            continue;
        }
        Mutation m = miniBatchOp.getOperation(i);
        // skip this mutation if we aren't enabling indexing
        if (!this.builder.isEnabled(m)) {
            continue;
        }
        // Unless we're replaying edits to rebuild the index, we update the time stamp
        // of the data table to prevent overlapping time stamps (which prevents index
        // inconsistencies as this case isn't handled correctly currently).
        setTimestamp(m, now);
        if (m instanceof Put) {
            ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(m.getRow());
            Pair<Put, Put> dataRowState = context.dataRowStates.get(rowKeyPtr);
            if (dataRowState == null) {
                dataRowState = new Pair<Put, Put>(null, null);
                context.dataRowStates.put(rowKeyPtr, dataRowState);
            }
            Put nextDataRowState = dataRowState.getSecond();
            dataRowState.setSecond((nextDataRowState != null) ? applyNew((Put) m, nextDataRowState) : new Put((Put) m));
        }
    }
}
 
Example 14
Source File: CompactionScanQueryMatcher.java    From hbase with Apache License 2.0
public static CompactionScanQueryMatcher create(ScanInfo scanInfo, ScanType scanType,
    long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now,
    byte[] dropDeletesFromRow, byte[] dropDeletesToRow,
    RegionCoprocessorHost regionCoprocessorHost) throws IOException {
  Pair<DeleteTracker, ColumnTracker> trackers = getTrackers(regionCoprocessorHost, null,
      scanInfo,oldestUnexpiredTS, null);
  DeleteTracker deleteTracker = trackers.getFirst();
  ColumnTracker columnTracker = trackers.getSecond();
  if (dropDeletesFromRow == null) {
    if (scanType == ScanType.COMPACT_RETAIN_DELETES) {
      if (scanInfo.isNewVersionBehavior()) {
        return new IncludeAllCompactionQueryMatcher(scanInfo, deleteTracker, columnTracker,
            readPointToUse, oldestUnexpiredTS, now);
      } else {
        return new MinorCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker,
            readPointToUse, oldestUnexpiredTS, now);
      }
    } else {
      return new MajorCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker,
          readPointToUse, earliestPutTs, oldestUnexpiredTS, now);
    }
  } else {
    return new StripeCompactionScanQueryMatcher(scanInfo, deleteTracker, columnTracker,
        readPointToUse, earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow,
        dropDeletesToRow);
  }
}
 
Example 15
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
public SortMergeJoinPlan(
        StatementContext context,
        FilterableStatement statement,
        TableRef table,
        JoinType type,
        QueryPlan lhsPlan,
        QueryPlan rhsPlan,
        Pair<List<Expression>,List<Expression>> lhsAndRhsKeyExpressions,
        List<Expression> rhsKeyExpressions,
        PTable joinedTable,
        PTable lhsTable,
        PTable rhsTable,
        int rhsFieldPosition,
        boolean isSingleValueOnly,
        Pair<List<OrderByNode>,List<OrderByNode>> lhsAndRhsOrderByNodes) throws SQLException {
    if (type == JoinType.Right) throw new IllegalArgumentException("JoinType should not be " + type);
    this.context = context;
    this.statement = statement;
    this.table = table;
    this.joinType = type;
    this.lhsPlan = lhsPlan;
    this.rhsPlan = rhsPlan;
    this.lhsKeyExpressions = lhsAndRhsKeyExpressions.getFirst();
    this.rhsKeyExpressions = lhsAndRhsKeyExpressions.getSecond();
    this.joinedSchema = buildSchema(joinedTable);
    this.lhsSchema = buildSchema(lhsTable);
    this.rhsSchema = buildSchema(rhsTable);
    this.rhsFieldPosition = rhsFieldPosition;
    this.isSingleValueOnly = isSingleValueOnly;
    this.tableRefs = Sets.newHashSetWithExpectedSize(lhsPlan.getSourceRefs().size() + rhsPlan.getSourceRefs().size());
    this.tableRefs.addAll(lhsPlan.getSourceRefs());
    this.tableRefs.addAll(rhsPlan.getSourceRefs());
    this.thresholdBytes =
            context.getConnection().getQueryServices().getProps().getLong(
                QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB,
                QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES);
    this.spoolingEnabled =
            context.getConnection().getQueryServices().getProps().getBoolean(
                QueryServices.CLIENT_JOIN_SPOOLING_ENABLED_ATTRIB,
                QueryServicesOptions.DEFAULT_CLIENT_JOIN_SPOOLING_ENABLED);
    this.actualOutputOrderBys = convertActualOutputOrderBy(lhsAndRhsOrderByNodes.getFirst(), lhsAndRhsOrderByNodes.getSecond(), context);
}
 
Example 16
Source File: TestSecureRESTServer.java    From hbase with Apache License 2.0
@Test
public void testPositiveAuthorization() throws Exception {
  // Create a table, write a row to it, grant read perms to the client
  UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath());
  final TableName table = TableName.valueOf("publicTable");
  superuser.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
            .build();
        conn.getAdmin().createTable(desc);
        try (Table t = conn.getTable(table)) {
          Put p = new Put(Bytes.toBytes("a"));
          p.addColumn(Bytes.toBytes("f1"), new byte[0], Bytes.toBytes("1"));
          t.put(p);
        }
        AccessControlClient.grant(conn, CLIENT_PRINCIPAL, Action.READ);
      } catch (Throwable e) {
        if (e instanceof Exception) {
          throw (Exception) e;
        } else {
          throw new Exception(e);
        }
      }
      return null;
    }
  });

  // Read that row as the client
  Pair<CloseableHttpClient,HttpClientContext> pair = getClient();
  CloseableHttpClient client = pair.getFirst();
  HttpClientContext context = pair.getSecond();

  HttpGet get = new HttpGet(new URL("http://localhost:"+ REST_TEST.getServletPort()).toURI()
      + "/" + table + "/a");
  get.addHeader("Accept", "application/json");
  UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath());
  String jsonResponse = user.doAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws Exception {
      try (CloseableHttpResponse response = client.execute(get, context)) {
        final int statusCode = response.getStatusLine().getStatusCode();
        assertEquals(response.getStatusLine().toString(), HttpURLConnection.HTTP_OK, statusCode);
        HttpEntity entity = response.getEntity();
        return EntityUtils.toString(entity);
      }
    }
  });
  ObjectMapper mapper = new JacksonJaxbJsonProvider()
      .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
  CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class);
  assertEquals(1, model.getRows().size());
  RowModel row = model.getRows().get(0);
  assertEquals("a", Bytes.toString(row.getKey()));
  assertEquals(1, row.getCells().size());
  CellModel cell = row.getCells().get(0);
  assertEquals("1", Bytes.toString(cell.getValue()));
}
 
Example 17
Source File: IndexRegionObserver.java    From phoenix with Apache License 2.0
private void preparePostIndexMutations(BatchMutateContext context, long now, PhoenixIndexMetaData indexMetaData,
                                       String tableName)
        throws Throwable {
    context.postIndexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    List<IndexMaintainer> maintainers = indexMetaData.getIndexMaintainers();
    // Check if we need to skip post index update for any of the rows
    for (IndexMaintainer indexMaintainer : maintainers) {
        byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary();
        byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier();
        HTableInterfaceReference hTableInterfaceReference =
                new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName()));
        List<Pair<Mutation, byte[]>> updates = context.indexUpdates.get(hTableInterfaceReference);
        for (Pair<Mutation, byte[]> update : updates) {
            // Are there concurrent updates on the data table row? if so, skip post index updates
            // and let read repair resolve conflicts
            ImmutableBytesPtr rowKey = new ImmutableBytesPtr(update.getSecond());
            PendingRow pendingRow = pendingRows.get(rowKey);
            if (!pendingRow.isConcurrent()) {
                Mutation m = update.getFirst();
                if (m instanceof Put) {
                    Put verifiedPut = new Put(m.getRow());
                    // Set the status of the index row to "verified"
                    verifiedPut.addColumn(emptyCF, emptyCQ, now, VERIFIED_BYTES);
                    context.postIndexUpdates.put(hTableInterfaceReference, verifiedPut);
                } else {
                    context.postIndexUpdates.put(hTableInterfaceReference, m);
                }
            } else {
                if (!hasAllIndexedColumns(indexMaintainer, context.multiMutationMap.get(rowKey))) {
                    // This batch needs to be retried since one of the concurrent mutations does not have the value
                    // for an indexed column. Not including an index column may lead to incorrect index row key
                    // generation for concurrent mutations since concurrent mutations are not serialized entirely
                    // and do not see each other's effect on data table. Throwing an IOException will result in
                    // retries of this batch. Before throwing exception, we need to remove reference counts and
                    // locks for the rows of this batch
                    removePendingRows(context);
                    context.indexUpdates.clear();
                    for (RowLock rowLock : context.rowLocks) {
                        rowLock.release();
                    }
                    context.rowLocks.clear();
                    throw new IOException("One of the concurrent mutations does not have all indexed columns. " +
                            "The batch needs to be retried " + tableName);
                }
            }
        }
    }

    // We are done with handling concurrent mutations. So we can remove the rows of this batch from
    // the collection of pending rows
    removePendingRows(context);
    context.indexUpdates.clear();
}
 
Example 18
Source File: MultiThreadedRunner.java    From phoenix with Apache License 2.0
/**
 * Timed query execution
 *
 * @throws Exception
 * @return true if the query finished without timing out; false otherwise
 */
private boolean timedQuery(long iterationNumber) throws Exception {
    boolean isSelectCountStatement =
            query.getStatement().toUpperCase().trim().contains("COUNT(") ? true : false;

    Connection conn = null;
    PreparedStatement statement = null;
    ResultSet rs = null;
    Long queryStartTime = EnvironmentEdgeManager.currentTimeMillis();
    Date startDate = Calendar.getInstance().getTime();
    String exception = null;
    Long resultRowCount = 0L;
    String queryIteration = threadName + ":" + iterationNumber;
    Long queryElapsedTime = 0L;

    try {
        conn = pUtil.getConnection(query.getTenantId(), scenario.getPhoenixProperties());
        conn.setAutoCommit(true);
        final String statementString = query.getDynamicStatement(ruleApplier, scenario);
        statement = conn.prepareStatement(statementString);
        LOGGER.info("Executing iteration: " + queryIteration + ": " + statementString);
        
        if (scenario.getWriteParams() != null) {
        	Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, scenario, GeneratePhoenixStats.NO);
        	workloadExecutor.add(writes);
        }
        
        boolean isQuery = statement.execute();
        if (isQuery) {
            rs = statement.getResultSet();
            Pair<Long, Long> r = getResults(rs, queryIteration, isSelectCountStatement, queryStartTime);
            resultRowCount = r.getFirst();
            queryElapsedTime = r.getSecond();
        } else {
            conn.commit();
        }
    } catch (Exception e) {
        LOGGER.error("Exception while executing query iteration " + queryIteration, e);
        exception = e.getMessage();
        throw e;
    } finally {
        getThreadTime().getRunTimesInMs().add(new RunTime(exception, startDate, resultRowCount,
                queryElapsedTime, queryElapsedTime > query.getTimeoutDuration()));

        if (rs != null) rs.close();
        if (statement != null) statement.close();
        if (conn != null) conn.close();
    }
    return true;
}
 
Example 19
Source File: RpcConnection.java    From hbase with Apache License 2.0
protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, ConnectionId remoteId,
    String clusterId, boolean isSecurityEnabled, Codec codec, CompressionCodec compressor)
    throws IOException {
  if (remoteId.getAddress().isUnresolved()) {
    throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName());
  }
  this.serverAddress = remoteId.getAddress().getAddress();
  this.timeoutTimer = timeoutTimer;
  this.codec = codec;
  this.compressor = compressor;
  this.conf = conf;

  User ticket = remoteId.getTicket();
  this.securityInfo = SecurityInfo.getInfo(remoteId.getServiceName());
  this.useSasl = isSecurityEnabled;

  // Choose the correct Token and AuthenticationProvider for this client to use
  SaslClientAuthenticationProviders providers =
      SaslClientAuthenticationProviders.getInstance(conf);
  Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>> pair;
  if (useSasl && securityInfo != null) {
    pair = providers.selectProvider(clusterId, ticket);
    if (pair == null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Found no valid authentication method from providers={} with tokens={}",
            providers.toString(), ticket.getTokens());
      }
      throw new RuntimeException("Found no valid authentication method from options");
    }
  } else if (!useSasl) {
    // Hack, while SIMPLE doesn't go via SASL.
    pair = providers.getSimpleProvider();
  } else {
    throw new RuntimeException("Could not compute valid client authentication provider");
  }

  this.provider = pair.getFirst();
  this.token = pair.getSecond();

  LOG.debug("Using {} authentication for service={}, sasl={}",
      provider.getSaslAuthMethod().getName(), remoteId.serviceName, useSasl);
  reloginMaxBackoff = conf.getInt("hbase.security.relogin.maxbackoff", 5000);
  this.remoteId = remoteId;
}
 
Example 20
Source File: HbaseSplitUtil.java    From DataLink with Apache License 2.0
private static List<Configuration> doSplit(Configuration config, byte[] startRowkeyByte,
                                           byte[] endRowkeyByte, Pair<byte[][], byte[][]> regionRanges) {

    List<Configuration> configurations = new ArrayList<Configuration>();

    for (int i = 0; i < regionRanges.getFirst().length; i++) {

        byte[] regionStartKey = regionRanges.getFirst()[i];
        byte[] regionEndKey = regionRanges.getSecond()[i];

        // The current region is the last region.
        // If the last region's start key is greater than the user-specified userEndKey, the last region should not be included.
        // Note: if the user sets userEndKey to "", this check must not apply; an empty userEndKey means reading up to the last region.
        if (Bytes.compareTo(regionEndKey, HConstants.EMPTY_BYTE_ARRAY) == 0
                && (endRowkeyByte.length != 0 && (Bytes.compareTo(
                regionStartKey, endRowkeyByte) > 0))) {
            continue;
        }

        // If the current region is not the last region and the user-configured userStartKey is
        // greater than or equal to the region's end key, this region should not be included.
        if ((Bytes.compareTo(regionEndKey, HConstants.EMPTY_BYTE_ARRAY) != 0)
                && (Bytes.compareTo(startRowkeyByte, regionEndKey) >= 0)) {
            continue;
        }

        // If the user-configured userEndKey is less than or equal to the region's start key, this region should not be included.
        // Note: if the user sets userEndKey to "", this check must not apply; an empty userEndKey means reading up to the last region.
        if (endRowkeyByte.length != 0
                && (Bytes.compareTo(endRowkeyByte, regionStartKey) <= 0)) {
            continue;
        }

        Configuration p = config.clone();

        String thisStartKey = getStartKey(startRowkeyByte, regionStartKey);

        String thisEndKey = getEndKey(endRowkeyByte, regionEndKey);

        p.set(Key.START_ROWKEY, thisStartKey);
        p.set(Key.END_ROWKEY, thisEndKey);

        LOG.debug("startRowkey:[{}], endRowkey:[{}] .", thisStartKey, thisEndKey);

        configurations.add(p);
    }

    return configurations;
}
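For context, the regionRanges pair consumed above holds each region's start key in getFirst() and the matching end key in getSecond(). Below is a hedged sketch of one way to build such a pair with HBase's RegionLocator API; this is an assumption for illustration, not how DataLink obtains it, and hbaseConf is an org.apache.hadoop.conf.Configuration rather than the DataLink Configuration used in doSplit.

// Hedged sketch (not from DataLink): fetch each region's start/end keys for a table.
private static Pair<byte[][], byte[][]> fetchRegionRanges(org.apache.hadoop.conf.Configuration hbaseConf,
                                                          String tableName) throws IOException {
    // Connection and RegionLocator are the org.apache.hadoop.hbase.client types.
    try (Connection connection = ConnectionFactory.createConnection(hbaseConf);
         RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) {
        // getFirst() holds each region's start key, getSecond() the matching end key.
        return locator.getStartEndKeys();
    }
}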