org.apache.kylin.common.util.Pair Java Examples

The following examples show how to use org.apache.kylin.common.util.Pair. Each example is taken from an open source project (kylin or kylin-on-parquet-v2); the source file and license are noted above it.
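
As a quick primer before the project examples, here is a minimal, self-contained sketch of the Pair API those examples rely on: construction via the newPair factory or the plain constructor, and access via getFirst/getSecond. The host, status code, and range values below are illustrative only.

import org.apache.kylin.common.util.Pair;

public class PairBasics {
    public static void main(String[] args) {
        // Factory method, as used in RestClient and Segments below
        Pair<String, Integer> server = Pair.newPair("localhost:7070", 200);

        // Plain constructor, as used in CubeSegment and RawScan below
        Pair<Long, Long> range = new Pair<>(0L, 0L);

        // Element access
        System.out.println(server.getFirst() + " -> " + server.getSecond());
        System.out.println("range = " + range.getFirst() + ".." + range.getSecond());
    }
}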
Example #1
Source File: RestClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public Pair<String, String> getJobServerWithState() throws IOException {
    String url = baseUrl + "/service_discovery/state/is_active_job_node";
    HttpGet get = new HttpGet(url);
    HttpResponse response = null;
    try {
        response = client.execute(get);
        String msg = EntityUtils.toString(response.getEntity());

        if (response.getStatusLine().getStatusCode() != 200) {
            throw new IOException(INVALID_RESPONSE + response.getStatusLine().getStatusCode()
                    + " with getting job server state  " + url + "\n" + msg);
        }
        return Pair.newPair(host + ":" + port, msg);
    } finally {
        cleanup(get, response);
    }
}
 
Example #2
Source File: LookupTable.java    From kylin-on-parquet-v2 with Apache License 2.0
public Pair<T, T> mapRange(String col, T beginValue, T endValue, String returnCol) {
    int colIdx = tableDesc.findColumnByName(col).getZeroBasedIndex();
    int returnIdx = tableDesc.findColumnByName(returnCol).getZeroBasedIndex();
    Comparator<T> colComp = getComparator(colIdx);
    Comparator<T> returnComp = getComparator(returnIdx);

    T returnBegin = null;
    T returnEnd = null;
    for (T[] row : data.values()) {
        if (between(beginValue, row[colIdx], endValue, colComp)) {
            T returnValue = row[returnIdx];
            if (returnBegin == null || returnComp.compare(returnValue, returnBegin) < 0) {
                returnBegin = returnValue;
            }
            if (returnEnd == null || returnComp.compare(returnValue, returnEnd) > 0) {
                returnEnd = returnValue;
            }
        }
    }
    if (returnBegin == null && returnEnd == null)
        return null;
    else
        return Pair.newPair(returnBegin, returnEnd);
}
 
Example #3
Source File: DimensionEncodingFactory.java    From kylin-on-parquet-v2 with Apache License 2.0
public static Map<String, Integer> getValidEncodings() {
    if (factoryMap == null)
        initFactoryMap();

    Map<String, Integer> result = Maps.newTreeMap();
    for (Pair<String, Integer> p : factoryMap.keySet()) {
        if (result.containsKey(p.getFirst())) {
            if (result.get(p.getFirst()) > p.getSecond()) {
                continue;//skip small versions
            }
        }

        result.put(p.getFirst(), p.getSecond());
    }
    result.put(DictionaryDimEnc.ENCODING_NAME, 1);
    return result;
}
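
A possible way to consume the map returned above (a usage sketch only; the package of DimensionEncodingFactory is assumed to be org.apache.kylin.dimension): iterate the entries to list each encoding name with its highest registered version.

import java.util.Map;

import org.apache.kylin.dimension.DimensionEncodingFactory; // package assumed

public class ListEncodings {
    public static void main(String[] args) {
        // Each entry maps an encoding name to its highest registered version;
        // the dictionary encoding is always reported with version 1.
        Map<String, Integer> encodings = DimensionEncodingFactory.getValidEncodings();
        for (Map.Entry<String, Integer> entry : encodings.entrySet()) {
            System.out.println(entry.getKey() + " (max version " + entry.getValue() + ")");
        }
    }
}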
 
Example #4
Source File: CubeHBaseEndpointRPC.java    From kylin-on-parquet-v2 with Apache License 2.0
@SuppressWarnings("unchecked")
private List<Pair<byte[], byte[]>> getEPKeyRanges(short baseShard, short shardNum, int totalShards) {
    if (shardNum == 0) {
        return Lists.newArrayList();
    }

    if (shardNum == totalShards) {
        //all shards
        return Lists.newArrayList(
                Pair.newPair(getByteArrayForShort((short) 0), getByteArrayForShort((short) (shardNum - 1))));
    } else if (baseShard + shardNum <= totalShards) {
        //endpoint end key is inclusive, so no need to append 0 or anything
        return Lists.newArrayList(Pair.newPair(getByteArrayForShort(baseShard),
                getByteArrayForShort((short) (baseShard + shardNum - 1))));
    } else {
        //0,1,2,3,4 wants 4,0
        return Lists.newArrayList(
                Pair.newPair(getByteArrayForShort(baseShard), getByteArrayForShort((short) (totalShards - 1))), //
                Pair.newPair(getByteArrayForShort((short) 0),
                        getByteArrayForShort((short) (baseShard + shardNum - totalShards - 1))));
    }
}
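
The wrap-around branch above is easiest to follow with concrete numbers. The standalone sketch below (hypothetical values; shard boundaries printed as shorts rather than the byte arrays the real method returns) traces baseShard = 4, shardNum = 2, totalShards = 5, which yields the two ranges [4, 4] and [0, 0] hinted at by the "0,1,2,3,4 wants 4,0" comment.

public class ShardRangeDemo {
    public static void main(String[] args) {
        short baseShard = 4;
        short shardNum = 2;
        int totalShards = 5;

        if (baseShard + shardNum <= totalShards) {
            // contiguous case: one range [baseShard, baseShard + shardNum - 1]
            System.out.println("[" + baseShard + ", " + (baseShard + shardNum - 1) + "]");
        } else {
            // wrap-around case: [baseShard, totalShards - 1] plus [0, baseShard + shardNum - totalShards - 1]
            System.out.println("[" + baseShard + ", " + (totalShards - 1) + "]");        // prints [4, 4]
            System.out.println("[0, " + (baseShard + shardNum - totalShards - 1) + "]"); // prints [0, 0]
        }
    }
}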
 
Example #5
Source File: SqlNodeConverter.java    From kylin with Apache License 2.0
private SqlNode convertSqlCall(SqlCall sqlCall) {
    SqlOperator operator = sqlCall.getOperator();
    if (operator != null) {
        Pair<SqlNode, SqlNode> matched = convMaster.matchSqlFunc(sqlCall);

        if (matched != null) {
            Preconditions.checkState(matched.getFirst() instanceof SqlCall);
            SqlCall sourceTmpl = (SqlCall) matched.getFirst();

            Preconditions.checkState(sourceTmpl.operandCount() == sqlCall.operandCount());
            SqlNode targetTmpl = matched.getSecond();

            boolean isWindowCall = sourceTmpl.getOperator() instanceof SqlOverOperator;
            SqlParamsFinder sqlParamsFinder = SqlParamsFinder.newInstance(sourceTmpl, sqlCall, isWindowCall);
            return targetTmpl.accept(new SqlFuncFiller(sqlParamsFinder.getParamNodes(), isWindowCall));
        }
    }
    return null;
}
 
Example #6
Source File: TableService.java    From kylin with Apache License 2.0
public void checkHiveTableCompatibility(String prj, TableDesc tableDesc) throws Exception {
    Preconditions.checkNotNull(tableDesc.getDatabase());
    Preconditions.checkNotNull(tableDesc.getName());

    String database = tableDesc.getDatabase().toUpperCase(Locale.ROOT);
    String tableName = tableDesc.getName().toUpperCase(Locale.ROOT);
    ProjectInstance projectInstance = getProjectManager().getProject(prj);
    ISourceMetadataExplorer explr = SourceManager.getSource(projectInstance).getSourceMetadataExplorer();

    TableDesc hiveTableDesc;
    try {
        Pair<TableDesc, TableExtDesc> pair = explr.loadTableMetadata(database, tableName, prj);
        hiveTableDesc = pair.getFirst();
    } catch (Exception e) {
        logger.error("Fail to get metadata for hive table {} due to ", tableDesc.getIdentity(), e);
        throw new RuntimeException("Fail to get metadata for hive table " + tableDesc.getIdentity());
    }

    TableSchemaUpdateChecker.CheckResult result = getSchemaUpdateChecker().allowMigrate(tableDesc, hiveTableDesc);
    result.raiseExceptionWhenInvalid();
}
 
Example #7
Source File: CubeHBaseRPC.java    From kylin with Apache License 2.0
private RawScan preparedHBaseScan(GTRecord pkStart, GTRecord pkEnd, List<GTRecord> fuzzyKeys, ImmutableBitSet selectedColBlocks) {
    final List<Pair<byte[], byte[]>> selectedColumns = makeHBaseColumns(selectedColBlocks);

    LazyRowKeyEncoder encoder = new LazyRowKeyEncoder(cubeSeg, cuboid);
    byte[] start = encoder.createBuf();
    byte[] end = encoder.createBuf();

    encoder.setBlankByte(RowConstants.ROWKEY_LOWER_BYTE);
    encoder.encode(pkStart, pkStart.getInfo().getPrimaryKey(), start);

    encoder.setBlankByte(RowConstants.ROWKEY_UPPER_BYTE);
    encoder.encode(pkEnd, pkEnd.getInfo().getPrimaryKey(), end);
    byte[] temp = new byte[end.length + 1];//append extra 0 to the end key to make it inclusive while scanning
    System.arraycopy(end, 0, temp, 0, end.length);
    end = temp;

    List<Pair<byte[], byte[]>> hbaseFuzzyKeys = translateFuzzyKeys(fuzzyKeys);

    KylinConfig config = cubeSeg.getCubeDesc().getConfig();
    int hbaseCaching = config.getHBaseScanCacheRows();
    int hbaseMaxResultSize = config.getHBaseScanMaxResultSize();
    //        if (isMemoryHungry(selectedColBlocks))
    //            hbaseCaching /= 10;

    return new RawScan(start, end, selectedColumns, hbaseFuzzyKeys, hbaseCaching, hbaseMaxResultSize);
}
 
Example #8
Source File: TupleExpressionSerializerTest.java    From kylin with Apache License 2.0
@Test
public void testSerialization() {
    TblColRef colD = TblColRef.mockup(t, 1, "C1", "string");
    TblColRef colM = TblColRef.mockup(t, 2, "C2", "decimal");
    BigDecimal value = BigDecimal.valueOf(10L);

    ColumnTupleFilter colFilter = new ColumnTupleFilter(colD);
    ConstantTupleFilter constFilter = new ConstantTupleFilter("col");
    CompareTupleFilter compareFilter = new CompareTupleFilter(TupleFilter.FilterOperatorEnum.EQ);
    compareFilter.addChild(colFilter);
    compareFilter.addChild(constFilter);

    ColumnTupleExpression colTuple = new ColumnTupleExpression(colM);
    ConstantTupleExpression constTuple = new ConstantTupleExpression(value);

    Pair<TupleFilter, TupleExpression> whenEntry = new Pair<TupleFilter, TupleExpression>(compareFilter, colTuple);
    CaseTupleExpression caseTuple = new CaseTupleExpression(Lists.newArrayList(whenEntry), constTuple);

    byte[] result = TupleExpressionSerializer.serialize(caseTuple, StringCodeSystem.INSTANCE);

    TupleExpression desTuple = TupleExpressionSerializer.deserialize(result, StringCodeSystem.INSTANCE);
    assertEquals(caseTuple, desTuple);
}
 
Example #9
Source File: RawScan.java    From kylin with Apache License 2.0
@Override
public void serialize(RawScan value, ByteBuffer out) {
    BytesUtil.writeByteArray(value.startKey, out);
    BytesUtil.writeByteArray(value.endKey, out);
    BytesUtil.writeVInt(value.hbaseColumns.size(), out);
    for (Pair<byte[], byte[]> hbaseColumn : value.hbaseColumns) {
        BytesUtil.writeByteArray(hbaseColumn.getFirst(), out);
        BytesUtil.writeByteArray(hbaseColumn.getSecond(), out);
    }
    BytesUtil.writeVInt(value.fuzzyKeys.size(), out);
    for (Pair<byte[], byte[]> fuzzyKey : value.fuzzyKeys) {
        BytesUtil.writeByteArray(fuzzyKey.getFirst(), out);
        BytesUtil.writeByteArray(fuzzyKey.getSecond(), out);
    }
    BytesUtil.writeVInt(value.hbaseCaching, out);
    BytesUtil.writeVInt(value.hbaseMaxResultSize, out);
}
 
Example #10
Source File: MemcachedChunkingCache.java    From kylin-on-parquet-v2 with Apache License 2.0
protected static Pair<KeyHook, byte[][]> getKeyValuePair(int nSplit, String keyS, byte[] valueB) {
    KeyHook keyHook;
    byte[][] splitValueB = null;
    if (nSplit > 1) {
        if (logger.isDebugEnabled()) {
            logger.debug("Enable chunking for putting large cached object values, chunk size = " + nSplit
                    + ", original value bytes size = " + valueB.length);
        }
        String[] chunkKeySs = new String[nSplit];
        for (int i = 0; i < nSplit; i++) {
            chunkKeySs[i] = keyS + i;
        }
        keyHook = new KeyHook(chunkKeySs, null);
        splitValueB = splitBytes(valueB, nSplit);
    } else {
        if (logger.isDebugEnabled()) {
            logger.debug(
                    "Chunking not enabled, put the original value bytes to keyhook directly, original value bytes size = "
                            + valueB.length);
        }
        keyHook = new KeyHook(null, valueB);
    }

    return new Pair<>(keyHook, splitValueB);
}
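
A quick illustration of the chunk-key naming used above (a standalone sketch with made-up values; the real cache keys and the splitBytes layout are internal to MemcachedChunkingCache): when chunking kicks in, each chunk key is simply the original key with the chunk index appended.

public class ChunkKeyDemo {
    public static void main(String[] args) {
        String keyS = "segment-stats"; // hypothetical original cache key
        int nSplit = 3;                // the value will be stored in 3 chunks

        String[] chunkKeySs = new String[nSplit];
        for (int i = 0; i < nSplit; i++) {
            chunkKeySs[i] = keyS + i;  // "segment-stats0", "segment-stats1", "segment-stats2"
        }
        for (String chunkKey : chunkKeySs) {
            System.out.println(chunkKey);
        }
    }
}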
 
Example #11
Source File: RawScan.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public void serialize(RawScan value, ByteBuffer out) {
    BytesUtil.writeByteArray(value.startKey, out);
    BytesUtil.writeByteArray(value.endKey, out);
    BytesUtil.writeVInt(value.hbaseColumns.size(), out);
    for (Pair<byte[], byte[]> hbaseColumn : value.hbaseColumns) {
        BytesUtil.writeByteArray(hbaseColumn.getFirst(), out);
        BytesUtil.writeByteArray(hbaseColumn.getSecond(), out);
    }
    BytesUtil.writeVInt(value.fuzzyKeys.size(), out);
    for (Pair<byte[], byte[]> fuzzyKey : value.fuzzyKeys) {
        BytesUtil.writeByteArray(fuzzyKey.getFirst(), out);
        BytesUtil.writeByteArray(fuzzyKey.getSecond(), out);
    }
    BytesUtil.writeVInt(value.hbaseCaching, out);
    BytesUtil.writeVInt(value.hbaseMaxResultSize, out);
}
 
Example #12
Source File: QueryService.java    From kylin-on-parquet-v2 with Apache License 2.0
public SQLResponse update(SQLRequest sqlRequest) throws Exception {
    // Non-select operations are only supported when pushdown is enabled.
    logger.debug("Query pushdown enabled, redirect the non-select query to pushdown engine.");
    Connection conn = null;
    try {
        conn = QueryConnection.getConnection(sqlRequest.getProject());
        Pair<List<List<String>>, List<SelectedColumnMeta>> r = PushDownUtil.tryPushDownNonSelectQuery(
                sqlRequest.getProject(), sqlRequest.getSql(), conn.getSchema(), BackdoorToggles.getPrepareOnly());

        List<SelectedColumnMeta> columnMetas = Lists.newArrayList();
        columnMetas.add(new SelectedColumnMeta(false, false, false, false, 1, false, Integer.MAX_VALUE, "c0", "c0",
                null, null, null, Integer.MAX_VALUE, 128, 1, "char", false, false, false));

        return buildSqlResponse(sqlRequest.getProject(), true, r.getFirst(), columnMetas);

    } catch (Exception e) {
        logger.info("pushdown engine failed to finish current non-select query");
        throw e;
    } finally {
        close(null, null, conn);
    }
}
 
Example #13
Source File: TableService.java    From kylin-on-parquet-v2 with Apache License 2.0
public List<Pair<TableDesc, TableExtDesc>> extractHiveTableMeta(String[] tables, String project) throws Exception { // de-dup
    SetMultimap<String, String> db2tables = LinkedHashMultimap.create();
    for (String fullTableName : tables) {
        String[] parts = HadoopUtil.parseHiveTableName(fullTableName);
        db2tables.put(parts[0], parts[1]);
    }

    // load all tables first
    List<Pair<TableDesc, TableExtDesc>> allMeta = Lists.newArrayList();
    ProjectInstance projectInstance = getProjectManager().getProject(project);
    ISourceMetadataExplorer explr = SourceManager.getSource(projectInstance).getSourceMetadataExplorer();
    for (Map.Entry<String, String> entry : db2tables.entries()) {
        Pair<TableDesc, TableExtDesc> pair = explr.loadTableMetadata(entry.getKey(), entry.getValue(), project);
        TableDesc tableDesc = pair.getFirst();
        Preconditions.checkState(tableDesc.getDatabase().equals(entry.getKey().toUpperCase(Locale.ROOT)));
        Preconditions.checkState(tableDesc.getName().equals(entry.getValue().toUpperCase(Locale.ROOT)));
        Preconditions.checkState(tableDesc.getIdentity()
                .equals(entry.getKey().toUpperCase(Locale.ROOT) + "." + entry.getValue().toUpperCase(Locale.ROOT)));
        TableExtDesc extDesc = pair.getSecond();
        Preconditions.checkState(tableDesc.getIdentity().equals(extDesc.getIdentity()));
        allMeta.add(pair);
    }
    return allMeta;
}
 
Example #14
Source File: CubeSegment.java    From kylin with Apache License 2.0
public static Pair<Long, Long> parseSegmentName(String segmentName) {
    if ("FULL".equals(segmentName)) {
        return new Pair<>(0L, 0L);
    }
    String[] startEnd = segmentName.split("_");
    if (startEnd.length != 2) {
        throw new IllegalArgumentException("the segmentName is illegal: " + segmentName);
    }
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMddHHmmss", Locale.ROOT);
    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));

    try {
        long dateRangeStart = dateFormat.parse(startEnd[0]).getTime();
        long dateRangeEnd = dateFormat.parse(startEnd[1]).getTime();
        return new Pair<>(dateRangeStart, dateRangeEnd);
    } catch (ParseException e) {
        throw new IllegalArgumentException("Invalid segmentName for CubeSegment, segmentName = " + segmentName);
    }
}
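
A usage sketch for the parser above (illustrative segment name; CubeSegment is assumed to be org.apache.kylin.cube.CubeSegment): a name of the form yyyyMMddHHmmss_yyyyMMddHHmmss is turned into a pair of GMT epoch milliseconds, while the literal name "FULL" maps to (0, 0).

import org.apache.kylin.common.util.Pair;
import org.apache.kylin.cube.CubeSegment; // package assumed

public class SegmentNameDemo {
    public static void main(String[] args) {
        // hypothetical segment covering 2020-01-01 00:00:00 .. 2020-02-01 00:00:00 GMT
        Pair<Long, Long> range = CubeSegment.parseSegmentName("20200101000000_20200201000000");
        System.out.println("start = " + range.getFirst() + ", end = " + range.getSecond());

        Pair<Long, Long> full = CubeSegment.parseSegmentName("FULL"); // -> (0, 0)
        System.out.println(full.getFirst() + ", " + full.getSecond());
    }
}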
 
Example #15
Source File: InvertIndexSearcher.java    From kylin with Apache License 2.0
public InvertIndexSearcher(CuboidMetaInfo cuboidMetaInfo, TblColRef[] cols, ByteBuffer idxBuffer)
        throws IOException {
    Map<String, Pair<Integer, Integer>> columnMetas = Maps.newHashMap();
    for (DimensionMetaInfo dimensionInfo : cuboidMetaInfo.getDimensionsInfo()) {
        for (TblColRef col : cols) {
            if (dimensionInfo.getName().equals(col.getName())) {
                columnMetas
                        .put(col.getName(), new Pair<>(dimensionInfo.getStartOffset() + dimensionInfo.getDataLength(), dimensionInfo.getIndexLength()));
            }
        }
    }
    for (Map.Entry<String, Pair<Integer, Integer>> columnMeta : columnMetas.entrySet()) {
        String colName = columnMeta.getKey();
        Pair<Integer, Integer> positionInfo = columnMeta.getValue();
        int offset = positionInfo.getFirst();
        int length = positionInfo.getSecond();
        //start offset of this column
        ByteBuffer colIdxBuf = idxBuffer.asReadOnlyBuffer();
        colIdxBuf.position(offset);
        colIdxBuf.limit(offset + length);
        ColInvertIndexSearcher colIndexSearcher = ColInvertIndexSearcher.load(colIdxBuf);
        colIndexSearchers.put(colName, colIndexSearcher);
    }
}
 
Example #16
Source File: QueryService.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * @param correctedSql
 * @param sqlRequest
 * @return
 * @throws Exception
 */
private SQLResponse executeRequest(String correctedSql, SQLRequest sqlRequest, Connection conn) throws Exception {
    Statement stat = null;
    ResultSet resultSet = null;
    boolean isPushDown = false;

    Pair<List<List<String>>, List<SelectedColumnMeta>> r = null;
    try {
        stat = conn.createStatement();
        processStatementAttr(stat, sqlRequest);
        resultSet = stat.executeQuery(correctedSql);

        r = createResponseFromResultSet(resultSet);

    } catch (SQLException sqlException) {
        r = pushDownQuery(sqlRequest, correctedSql, conn, sqlException);
        if (r == null)
            throw sqlException;

        isPushDown = true;
    } finally {
        close(resultSet, stat, null); //conn is passed in, not my duty to close
    }

    return buildSqlResponse(sqlRequest.getProject(), isPushDown, r.getFirst(), r.getSecond());
}
 
Example #17
Source File: Segments.java    From kylin with Apache License 2.0
public Pair<Boolean, Boolean> fitInSegments(ISegment newOne) {
    if (this.isEmpty())
        return null;

    ISegment first = this.get(0);
    ISegment last = this.get(this.size() - 1);
    Endpoint start = newOne.getSegRange().start;
    Endpoint end = newOne.getSegRange().end;
    boolean startFit = false;
    boolean endFit = false;
    for (ISegment sss : this) {
        if (sss == newOne)
            continue;
        startFit = startFit || (start.equals(sss.getSegRange().start) || start.equals(sss.getSegRange().end));
        endFit = endFit || (end.equals(sss.getSegRange().start) || end.equals(sss.getSegRange().end));
    }
    if (!startFit && endFit && newOne == first)
        startFit = true;
    if (!endFit && startFit && newOne == last)
        endFit = true;

    return Pair.newPair(startFit, endFit);
}
 
Example #18
Source File: SegmentReEncoder.java    From kylin with Apache License 2.0
/**
 * Re-encode with both dimension and measure in encoded (Text) format.
 * @param key
 * @param value
 * @return
 * @throws IOException
 */
public Pair<Text, Text> reEncode(Text key, Text value) throws IOException {
    if (!initialized) {
        throw new IllegalStateException("Not initialized");
    }
    Object[] measureObjs = new Object[measureDescs.size()];
    // re-encode measures if dictionary is used
    if (dictMeasures.size() > 0) {
        codec.decode(ByteBuffer.wrap(value.getBytes(), 0, value.getLength()), measureObjs);
        for (Pair<Integer, MeasureIngester> pair : dictMeasures) {
            int i = pair.getFirst();
            MeasureIngester ingester = pair.getSecond();
            measureObjs[i] = ingester.reEncodeDictionary(measureObjs[i], measureDescs.get(i), oldDicts, newDicts);
        }

        ByteBuffer valueBuf = codec.encode(measureObjs);
        textValue.set(valueBuf.array(), 0, valueBuf.position());
        return Pair.newPair(processKey(key), textValue);
    } else {
        return Pair.newPair(processKey(key), value);
    }
}
 
Example #19
Source File: CuboidStatsReaderUtil.java    From kylin-on-parquet-v2 with Apache License 2.0
public static Pair<Map<Long, Long>, Long> readCuboidStatsWithSourceFromSegment(Set<Long> cuboidIds,
        CubeSegment cubeSegment) throws IOException {
    if (cubeSegment == null) {
        logger.warn("The cube segment can not be " + null);
        return null;
    }

    CubeStatsReader cubeStatsReader = new CubeStatsReader(cubeSegment, null, cubeSegment.getConfig());
    if (cubeStatsReader.getCuboidRowEstimatesHLL() == null
            || cubeStatsReader.getCuboidRowEstimatesHLL().isEmpty()) {
        logger.info("Cuboid Statistics is not enabled.");
        return null;
    }

    Map<Long, Long> cuboidsWithStatsAll = cubeStatsReader.getCuboidRowEstimatesHLL();
    Map<Long, Long> cuboidsWithStats = Maps.newHashMapWithExpectedSize(cuboidIds.size());
    for (Long cuboid : cuboidIds) {
        Long rowEstimate = cuboidsWithStatsAll.get(cuboid);
        if (rowEstimate == null) {
            logger.warn("Cannot get the row count stats for cuboid " + cuboid);
        } else {
            cuboidsWithStats.put(cuboid, rowEstimate);
        }
    }
    return new Pair<>(cuboidsWithStats, cubeStatsReader.sourceRowCount);
}
 
Example #20
Source File: NDCuboidMapper.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public void doMap(Text key, Text value, Context context) throws IOException, InterruptedException {
    long cuboidId = rowKeySplitter.split(key.getBytes());
    Cuboid parentCuboid = Cuboid.findForMandatory(cubeDesc, cuboidId);

    Collection<Long> myChildren = cuboidScheduler.getSpanningCuboid(cuboidId);

    // if still empty or null
    if (myChildren == null || myChildren.size() == 0) {
        context.getCounter(BatchConstants.MAPREDUCE_COUNTER_GROUP_NAME, "Skipped records").increment(1L);
        if (skipCounter++ % BatchConstants.NORMAL_RECORD_LOG_THRESHOLD == 0) {
            logger.info("Skipping record with ordinal: " + skipCounter);
        }
        return;
    }

    context.getCounter(BatchConstants.MAPREDUCE_COUNTER_GROUP_NAME, "Processed records").increment(1L);

    if (handleCounter++ % BatchConstants.NORMAL_RECORD_LOG_THRESHOLD == 0) {
        logger.info("Handling record with ordinal: " + handleCounter);
        logger.info("Parent cuboid: " + parentCuboid.getId() + "; Children: " + myChildren);
    }

    Pair<Integer, ByteArray> result;
    for (Long child : myChildren) {
        Cuboid childCuboid = Cuboid.findForMandatory(cubeDesc, child);
        result = ndCuboidBuilder.buildKey(parentCuboid, childCuboid, rowKeySplitter.getSplitBuffers());
        outputKey.set(result.getSecond().array(), 0, result.getFirst());
        context.write(outputKey, value);
    }

}
 
Example #21
Source File: TempStatementUtil.java    From kylin with Apache License 2.0
private static void translateCreateToWith(String sql, KylinConfig config) throws IOException {
    Pair<String, String> translated = translateCreateToWithInternal(sql);
    String identifier = translated.getFirst();
    String sql1 = translated.getSecond();

    TempStatementManager manager = TempStatementManager.getInstance(config);
    if (manager.getTempStatement(identifier) == null || !manager.getTempStatement(identifier).equals(sql1)) {
        manager.updateTempStatement(identifier, sql1);
    }
}
 
Example #22
Source File: QueryService.java    From kylin with Apache License 2.0
private Pair<List<List<String>>, List<SelectedColumnMeta>> createResponseFromResultSet(ResultSet resultSet)
        throws Exception {
    List<List<String>> results = Lists.newArrayList();
    List<SelectedColumnMeta> columnMetas = Lists.newArrayList();

    ResultSetMetaData metaData = resultSet.getMetaData();
    int columnCount = metaData.getColumnCount();

    // Fill in selected column meta
    for (int i = 1; i <= columnCount; ++i) {
        columnMetas.add(new SelectedColumnMeta(metaData.isAutoIncrement(i), metaData.isCaseSensitive(i),
                metaData.isSearchable(i), metaData.isCurrency(i), metaData.isNullable(i), metaData.isSigned(i),
                metaData.getColumnDisplaySize(i), metaData.getColumnLabel(i), metaData.getColumnName(i),
                metaData.getSchemaName(i), metaData.getCatalogName(i), metaData.getTableName(i),
                metaData.getPrecision(i), metaData.getScale(i), metaData.getColumnType(i),
                metaData.getColumnTypeName(i), metaData.isReadOnly(i), metaData.isWritable(i),
                metaData.isDefinitelyWritable(i)));
    }

    // fill in results
    while (resultSet.next()) {
        List<String> oneRow = Lists.newArrayListWithCapacity(columnCount);
        for (int i = 0; i < columnCount; i++) {
            oneRow.add((resultSet.getString(i + 1)));
        }

        results.add(oneRow);
    }

    return new Pair<>(results, columnMetas);
}
 
Example #23
Source File: PushDownExecutor.java    From kylin with Apache License 2.0
private Pair<List<List<String>>, List<SelectedColumnMeta>> queryBySingleRunner(IPushDownRunner runner,
        String project, String sql, String defaultSchema, SQLException sqlException,
        boolean isSelect, boolean isPrepare) throws Exception {

    logger.debug("Query Pushdown runner {}", runner);

    // default schema in calcite does not apply to other engines.
    // since this is a universal requirement, it's not implemented as a converter
    if (defaultSchema != null && !defaultSchema.equals("DEFAULT")) {
        String completed = sql;
        try {
            completed = PushDownUtil.schemaCompletion(sql, defaultSchema);
        } catch (SqlParseException e) {
            // failed to parse the pushdown sql; log and ignore
            logger.debug("Failed to do schema completion on the pushdown sql, ignore it. {}",
                    e.getMessage());
        }
        if (!sql.equals(completed)) {
            logger.info("the query is converted to {} after schema completion", completed);
            sql = completed;
        }
    }

    sql = runner.convertSql(kylinConfig, sql, project, defaultSchema, isPrepare);

    List<List<String>> returnRows = Lists.newArrayList();
    List<SelectedColumnMeta> returnColumnMeta = Lists.newArrayList();

    if (isSelect) {
        runner.executeQuery(sql, returnRows, returnColumnMeta);
    }
    if (!isSelect && !isPrepare && kylinConfig.isPushDownUpdateEnabled()) {
        runner.executeUpdate(sql);
    }
    return Pair.newPair(returnRows, returnColumnMeta);
}
 
Example #24
Source File: CubeService.java    From kylin with Apache License 2.0
public Map<Long, Map<Long, Pair<Long, Long>>> getCuboidRollingUpStats(String cubeName) {
    String cuboidSource = QueryCubePropertyEnum.CUBOID_SOURCE.toString();
    String cuboidTgt = QueryCubePropertyEnum.CUBOID_TARGET.toString();
    String aggCount = QueryCubePropertyEnum.AGGR_COUNT.toString();
    String returnCount = QueryCubePropertyEnum.RETURN_COUNT.toString();
    String table = getMetricsManager().getSystemTableFromSubject(getConfig().getKylinMetricsSubjectQueryCube());
    String sql = "select " + cuboidSource + ", " + cuboidTgt + ", avg(" + aggCount + "), avg(" + returnCount + ")"//
            + " from " + table //
            + " where " + QueryCubePropertyEnum.CUBE.toString() + " = ?" //
            + " group by " + cuboidSource + ", " + cuboidTgt;

    List<List<String>> orgRollingUpCount = getPrepareQueryResult(cubeName, sql);
    return formatRollingUpStats(orgRollingUpCount);
}
 
Example #25
Source File: HiveProducer.java    From kylin-on-parquet-v2 with Apache License 2.0
public HiveProducerRecord parseToHiveProducerRecord(String tableName, Map<String, String> partitionKVs,
                                                    Map<String, Object> rawValue) throws Exception {
    Pair<String, String> tableNameSplits = ActiveReservoirReporter.getTableNameSplits(tableName);
    List<FieldSchema> fields = tableFieldSchemaCache.get(tableNameSplits).getSecond();
    List<Object> columnValues = Lists.newArrayListWithExpectedSize(fields.size());
    for (FieldSchema fieldSchema : fields) {
        columnValues.add(rawValue.get(fieldSchema.getName().toUpperCase(Locale.ROOT)));
    }

    return new HiveProducerRecord(tableNameSplits.getFirst(), tableNameSplits.getSecond(), partitionKVs,
            columnValues);
}
 
Example #26
Source File: KylinTableCreator.java    From kylin with Apache License 2.0
public static TableDesc generateKylinTable(KylinConfig kylinConfig, MetricsSinkDesc sinkDesc, String subject,
                                           List<Pair<String, String>> columns) {
    TableDesc kylinTable = new TableDesc();

    Pair<String, String> tableNameSplits = ActiveReservoirReporter
            .getTableNameSplits(sinkDesc.getTableNameForMetrics(subject));
    kylinTable.setUuid(RandomUtil.randomUUID().toString());
    kylinTable.setDatabase(tableNameSplits.getFirst());
    kylinTable.setName(tableNameSplits.getSecond());
    kylinTable.setTableType(null);
    kylinTable.setLastModified(0L);
    kylinTable.setSourceType(sinkDesc.getSourceType());

    ColumnDesc[] columnDescs = new ColumnDesc[columns.size()];
    for (int i = 0; i < columns.size(); i++) {
        columnDescs[i] = new ColumnDesc();
        Pair<String, String> entry = columns.get(i);
        columnDescs[i].setId(Integer.toString(i + 1));
        columnDescs[i].setName(entry.getFirst());
        columnDescs[i].setDatatype(entry.getSecond());
    }
    kylinTable.setColumns(columnDescs);

    kylinTable.init(kylinConfig, MetricsManager.SYSTEM_PROJECT);

    return kylinTable;
}
 
Example #27
Source File: CubeController.java    From kylin with Apache License 2.0
private Map<Long, Long> getRecommendCuboidList(CubeInstance cube) throws IOException {
    // Get cuboid source info
    Map<Long, Long> optimizeHitFrequencyMap = getSourceCuboidHitFrequency(cube.getName());
    Map<Long, Map<Long, Pair<Long, Long>>> rollingUpCountSourceMap = cubeService
            .getCuboidRollingUpStats(cube.getName());
    return cubeService.getRecommendCuboidStatistics(cube, optimizeHitFrequencyMap, rollingUpCountSourceMap);
}
 
Example #28
Source File: HiveTableCreator.java    From kylin-on-parquet-v2 with Apache License 2.0
public static List<Pair<String, String>> getTimeColumnsForMetrics() {
    List<Pair<String, String>> columns = Lists.newLinkedList();
    columns.add(new Pair<>(RecordEvent.RecordReserveKeyEnum.TIME.toString(), HiveTypeEnum.HBIGINT.toString()));
    columns.add(new Pair<>(TimePropertyEnum.YEAR.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.MONTH.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.WEEK_BEGIN_DATE.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.DAY_TIME.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.TIME_HOUR.toString(), HiveTypeEnum.HINT.toString()));
    columns.add(new Pair<>(TimePropertyEnum.TIME_MINUTE.toString(), HiveTypeEnum.HINT.toString()));
    columns.add(new Pair<>(TimePropertyEnum.TIME_SECOND.toString(), HiveTypeEnum.HINT.toString()));

    return columns;
}
 
Example #29
Source File: CubeDesc.java    From kylin-on-parquet-v2 with Apache License 2.0
private Pair<Boolean, Set<String>> hasOverlap(ArrayList<Set<String>> dimsList, Set<String> Dims) {
    Set<String> existing = new HashSet<>();
    Set<String> overlap = new HashSet<>();
    for (Set<String> dims : dimsList) {
        if (CollectionUtils.containsAny(existing, dims)) {
            overlap.addAll(ensureOrder(CollectionUtils.intersection(existing, dims)));
        }
        existing.addAll(dims);
    }
    return new Pair<>(overlap.size() > 0, overlap);
}
 
Example #30
Source File: HiveTableCreator.java    From kylin with Apache License 2.0
public static List<Pair<String, String>> getTimeColumnsForMetrics() {
    List<Pair<String, String>> columns = Lists.newLinkedList();
    columns.add(new Pair<>(RecordEvent.RecordReserveKeyEnum.TIME.toString(), HiveTypeEnum.HBIGINT.toString()));
    columns.add(new Pair<>(TimePropertyEnum.YEAR.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.MONTH.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.WEEK_BEGIN_DATE.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.DAY_TIME.toString(), HiveTypeEnum.HSTRING.toString()));
    columns.add(new Pair<>(TimePropertyEnum.TIME_HOUR.toString(), HiveTypeEnum.HINT.toString()));
    columns.add(new Pair<>(TimePropertyEnum.TIME_MINUTE.toString(), HiveTypeEnum.HINT.toString()));
    columns.add(new Pair<>(TimePropertyEnum.TIME_SECOND.toString(), HiveTypeEnum.HINT.toString()));

    return columns;
}