Java Code Examples for org.apache.kylin.common.util.Pair#getFirst()

The following examples show how to use org.apache.kylin.common.util.Pair#getFirst(). They are taken from open-source projects; the source file and originating project are noted above each example.
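
Before the project examples, here is a minimal sketch of the Pair contract they all rely on: a pair is typically created with Pair.newPair(first, second) (as in Example 7 below) and read back with getFirst() and getSecond(). The class name and the (database, table) values are made up for illustration, and the snippet assumes the Kylin common module providing org.apache.kylin.common.util.Pair is on the classpath.

import org.apache.kylin.common.util.Pair;

public class PairGetFirstSketch {
    public static void main(String[] args) {
        // Build a pair holding an illustrative (database, table) name split.
        Pair<String, String> tableNameSplits = Pair.newPair("DEFAULT", "TEST_KYLIN_FACT");

        // getFirst() returns the first component, getSecond() the second.
        String database = tableNameSplits.getFirst();
        String table = tableNameSplits.getSecond();

        System.out.println(database + "." + table); // prints DEFAULT.TEST_KYLIN_FACT
    }
}
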
Example 1
Source File: SqlNodeConverter.java    From kylin-on-parquet-v2 with Apache License 2.0
private SqlNode convertSqlCall(SqlCall sqlCall) {
    SqlOperator operator = sqlCall.getOperator();
    if (operator != null) {
        Pair<SqlNode, SqlNode> matched = convMaster.matchSqlFunc(sqlCall);

        if (matched != null) {
            Preconditions.checkState(matched.getFirst() instanceof SqlCall);
            SqlCall sourceTmpl = (SqlCall) matched.getFirst();

            Preconditions.checkState(sourceTmpl.operandCount() == sqlCall.operandCount());
            SqlNode targetTmpl = matched.getSecond();

            boolean isWindowCall = sourceTmpl.getOperator() instanceof SqlOverOperator;
            SqlParamsFinder sqlParamsFinder = SqlParamsFinder.newInstance(sourceTmpl, sqlCall, isWindowCall);
            return targetTmpl.accept(new SqlFuncFiller(sqlParamsFinder.getParamNodes(), isWindowCall));
        }
    }
    return null;
}
 
Example 2
Source File: HiveTable.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public TableSignature getSignature() throws IOException {
    try {
        String path = computeHDFSLocation();
        Pair<Long, Long> sizeAndLastModified = DFSFileTable.getSizeAndLastModified(path);
        long size = sizeAndLastModified.getFirst();
        long lastModified = sizeAndLastModified.getSecond();

        // for non-native hive table, cannot rely on size & last modified on HDFS
        if (!this.hiveTableMeta.isNative) {
            lastModified = System.currentTimeMillis(); // assume table is ever changing
        }

        return new TableSignature(path, size, lastModified);

    } catch (Exception e) {
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw new IOException(e);
    }
}
 
Example 3
Source File: SqlNodeConverter.java    From kylin with Apache License 2.0
private SqlNode convertSqlCall(SqlCall sqlCall) {
    SqlOperator operator = sqlCall.getOperator();
    if (operator != null) {
        Pair<SqlNode, SqlNode> matched = convMaster.matchSqlFunc(sqlCall);

        if (matched != null) {
            Preconditions.checkState(matched.getFirst() instanceof SqlCall);
            SqlCall sourceTmpl = (SqlCall) matched.getFirst();

            Preconditions.checkState(sourceTmpl.operandCount() == sqlCall.operandCount());
            SqlNode targetTmpl = matched.getSecond();

            boolean isWindowCall = sourceTmpl.getOperator() instanceof SqlOverOperator;
            SqlParamsFinder sqlParamsFinder = SqlParamsFinder.newInstance(sourceTmpl, sqlCall, isWindowCall);
            return targetTmpl.accept(new SqlFuncFiller(sqlParamsFinder.getParamNodes(), isWindowCall));
        }
    }
    return null;
}
 
Example 4
Source File: JdbcExplorerTest.java    From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testLoadTableMetadata() throws Exception {
    Pair<TableDesc, TableExtDesc> pair = explorer.loadTableMetadata("DEFAULT", "TEST_KYLIN_FACT", "DEFAULT");
    Assert.assertNotNull(pair.getFirst());
    Assert.assertNotNull(pair.getSecond());

    TableDesc tblDesc = pair.getFirst();
    TableExtDesc tblExtDesc = pair.getSecond();
    Assert.assertEquals("TEST_KYLIN_FACT", tblDesc.getName());
    Assert.assertEquals("TABLE", tblDesc.getTableType());
    Assert.assertEquals("DEFAULT.TEST_KYLIN_FACT", tblDesc.getIdentity());
    Assert.assertEquals("DEFAULT", tblDesc.getDatabase());
    Assert.assertEquals("DEFAULT", tblDesc.getProject());
    Assert.assertEquals(tblDesc.getIdentity(), tblExtDesc.getIdentity());
    Assert.assertEquals(tblDesc.getProject(), tblExtDesc.getProject());

    ColumnDesc[] columnDescs = tblDesc.getColumns();
    Assert.assertEquals(tblDesc.getColumnCount(), columnDescs.length);
    Assert.assertNotNull(columnDescs[0].getName());
    Assert.assertNotNull(columnDescs[0].getDatatype());
    Assert.assertNotNull(columnDescs[0].getType());
    Assert.assertNotNull(columnDescs[0].getId());
}
 
Example 5
Source File: MemcachedChunkingCache.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * This method overrides the parent putBinary() method. It splits large value bytes into multiple chunks that fit into the underlying cache.
 * It generates a KeyHook to store the keys of the split chunks.
 */
@Override
public void putBinary(String keyS, byte[] valueB, int expiration) {
    if (Strings.isNullOrEmpty(keyS)) {
        return;
    }
    int nSplit = getValueSplit(config, keyS, valueB.length);
    Pair<KeyHook, byte[][]> keyValuePair = getKeyValuePair(nSplit, keyS, valueB);
    KeyHook keyHook = keyValuePair.getFirst();
    byte[][] splitValueB = keyValuePair.getSecond();

    if (logger.isDebugEnabled()) {
        logger.debug("put key hook:{} to cache for hash key", keyHook);
    }
    super.putBinary(keyS, serializeValue(keyHook), expiration);
    if (nSplit > 1) {
        for (int i = 0; i < nSplit; i++) {
            if (logger.isDebugEnabled()) {
                logger.debug("Chunk[" + i + "] bytes size before encoding  = " + splitValueB[i].length);
            }
            super.putBinary(keyHook.getChunkskey()[i], splitValueB[i], expiration);
        }
    }
}
 
Example 6
Source File: JdbcExplorerTest.java    From kylin with Apache License 2.0
@Test
public void testLoadTableMetadata() throws Exception {
    Pair<TableDesc, TableExtDesc> pair = explorer.loadTableMetadata("DEFAULT", "TEST_KYLIN_FACT", "DEFAULT");
    Assert.assertNotNull(pair.getFirst());
    Assert.assertNotNull(pair.getSecond());

    TableDesc tblDesc = pair.getFirst();
    TableExtDesc tblExtDesc = pair.getSecond();
    Assert.assertEquals("TEST_KYLIN_FACT", tblDesc.getName());
    Assert.assertEquals("TABLE", tblDesc.getTableType());
    Assert.assertEquals("DEFAULT.TEST_KYLIN_FACT", tblDesc.getIdentity());
    Assert.assertEquals("DEFAULT", tblDesc.getDatabase());
    Assert.assertEquals("DEFAULT", tblDesc.getProject());
    Assert.assertEquals(tblDesc.getIdentity(), tblExtDesc.getIdentity());
    Assert.assertEquals(tblDesc.getProject(), tblExtDesc.getProject());

    ColumnDesc[] columnDescs = tblDesc.getColumns();
    Assert.assertEquals(tblDesc.getColumnCount(), columnDescs.length);
    Assert.assertNotNull(columnDescs[0].getName());
    Assert.assertNotNull(columnDescs[0].getDatatype());
    Assert.assertNotNull(columnDescs[0].getType());
    Assert.assertNotNull(columnDescs[0].getId());
}
 
Example 7
Source File: InMemCubeBuilderUtils.java    From kylin with Apache License 2.0
public static final Pair<ImmutableBitSet, ImmutableBitSet> getDimensionAndMetricColumnBitSet(final long baseCuboidId, final long childCuboidId, final int measureCount) {
    final Pair<ImmutableBitSet, ImmutableBitSet> parentDimensionAndMetricColumnBitSet = getDimensionAndMetricColumnBitSet(baseCuboidId, measureCount);
    ImmutableBitSet parentDimensions = parentDimensionAndMetricColumnBitSet.getFirst();
    ImmutableBitSet measureColumns = parentDimensionAndMetricColumnBitSet.getSecond();
    ImmutableBitSet childDimensions = parentDimensions;
    long mask = Long.highestOneBit(baseCuboidId);
    long parentCuboidIdActualLength = (long)Long.SIZE - Long.numberOfLeadingZeros(baseCuboidId);
    int index = 0;
    for (int i = 0; i < parentCuboidIdActualLength; i++) {
        if ((mask & baseCuboidId) > 0) {
            if ((mask & childCuboidId) == 0) {
                // this dim will be aggregated
                childDimensions = childDimensions.set(index, false);
            }
            index++;
        }
        mask = mask >> 1;
    }
    return Pair.newPair(childDimensions, measureColumns);
}
 
Example 8
Source File: HiveMetadataExplorer.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public void validateSQL(String query) throws Exception {
    final HiveCmdBuilder hiveCmdBuilder = new HiveCmdBuilder();
    hiveCmdBuilder.addStatement(query);

    Pair<Integer, String> response = KylinConfig.getInstanceFromEnv().getCliCommandExecutor()
            .execute(hiveCmdBuilder.toString());
    if (response.getFirst() != 0) {
        throw new IllegalArgumentException(response.getSecond());
    }
}
 
Example 9
Source File: ITAclTableMigrationToolTest.java    From kylin-on-parquet-v2 with Apache License 2.0
private void addRecordsToTable() throws Exception {
    Table htable = HBaseConnection.get(kylinConfig.getStorageUrl()).getTable(userTable);
    Pair<byte[], byte[]> pair = getRandomUserRecord();
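    // getFirst() is the HBase row key; getSecond() is the value written to the user authority column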
    Put put = new Put(pair.getFirst());
    put.addColumn(Bytes.toBytes(AclConstant.USER_AUTHORITY_FAMILY), Bytes.toBytes(AclConstant.USER_AUTHORITY_COLUMN), pair.getSecond());
    htable.put(put);
}
 
Example 10
Source File: CompareFilterTimeRangeChecker.java    From kylin-on-parquet-v2 with Apache License 2.0
private CheckResult checkForEqValue(Pair<Long, Long> timeUnitRange) {
    if (timeUnitRange.getFirst() <= timeStart && timeUnitRange.getSecond() >= timeEnd) {
        return CheckResult.INCLUDED;
    }
    if (timeUnitRange.getSecond() <= timeStart || timeUnitRange.getFirst() >= timeEnd) {
        return CheckResult.EXCLUDED;
    }
    return CheckResult.OVERLAP;
}
 
Example 11
Source File: BuildCubeWithEngine.java    From kylin with Apache License 2.0
@SuppressWarnings("unused")
private void checkHFilesInHBase(CubeSegment segment) throws IOException {
    try (Connection conn = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl())) {
        String tableName = segment.getStorageLocationIdentifier();

        HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(tableName, conn);
        Map<byte[], Long> sizeMap = cal.getRegionSizeMap();
        long totalSize = 0;
        for (Long size : sizeMap.values()) {
            totalSize += size;
        }
        if (totalSize == 0) {
            return;
        }
        Map<byte[], Pair<Integer, Integer>> countMap = cal.getRegionHFileCountMap();
        // check whether any region contains more than one hfile, which means the hfile size config takes effect
        boolean hasMultiHFileRegions = false;
        for (Pair<Integer, Integer> count : countMap.values()) {
            // check if hfile count is greater than store count
            if (count.getSecond() > count.getFirst()) {
                hasMultiHFileRegions = true;
                break;
            }
        }
        if (KylinConfig.getInstanceFromEnv().getHBaseHFileSizeGB() == 0 && hasMultiHFileRegions) {
            throw new IOException("hfile size set to 0, but found region contains more than one hfiles");
        } else if (KylinConfig.getInstanceFromEnv().getHBaseHFileSizeGB() > 0 && !hasMultiHFileRegions) {
            throw new IOException("hfile size set greater than 0, but all regions still has only one hfile");
        }
    }
}
 
Example 12
Source File: TopNMeasureType.java    From kylin-on-parquet-v2 with Apache License 2.0
private static DimensionEncoding[] getDimensionEncodings(FunctionDesc function, List<TblColRef> literalCols,
        Map<TblColRef, Dictionary<String>> dictionaryMap) {
    final DimensionEncoding[] dimensionEncodings = new DimensionEncoding[literalCols.size()];
    for (int i = 0; i < literalCols.size(); i++) {
        TblColRef colRef = literalCols.get(i);

        Pair<String, String> topNEncoding = TopNMeasureType.getEncoding(function, colRef);
        String encoding = topNEncoding.getFirst();
        String encodingVersionStr = topNEncoding.getSecond();
        if (StringUtils.isEmpty(encoding) || DictionaryDimEnc.ENCODING_NAME.equals(encoding)) {
            dimensionEncodings[i] = new DictionaryDimEnc(dictionaryMap.get(colRef));
        } else {
            int encodingVersion = 1;
            if (!StringUtils.isEmpty(encodingVersionStr)) {
                try {
                    encodingVersion = Integer.parseInt(encodingVersionStr);
                } catch (NumberFormatException e) {
                    throw new RuntimeException(TopNMeasureType.CONFIG_ENCODING_VERSION_PREFIX + colRef.getName()
                            + " has to be an integer");
                }
            }
            Object[] encodingConf = DimensionEncoding.parseEncodingConf(encoding);
            String encodingName = (String) encodingConf[0];
            String[] encodingArgs = (String[]) encodingConf[1];

            encodingArgs = DateDimEnc.replaceEncodingArgs(encoding, encodingArgs, encodingName,
                    literalCols.get(i).getType());

            dimensionEncodings[i] = DimensionEncodingFactory.create(encodingName, encodingArgs, encodingVersion);
        }
    }

    return dimensionEncodings;
}
 
Example 13
Source File: CLIHiveClient.java    From kylin with Apache License 2.0
/**
 * only used by Deploy Util
 * @throws IOException 
 */
@Override
public void executeHQL(String hql) throws IOException {
    final HiveCmdBuilder hiveCmdBuilder = new HiveCmdBuilder();
    hiveCmdBuilder.addStatement(hql);
    Pair<Integer, String> response = KylinConfig.getInstanceFromEnv().getCliCommandExecutor()
            .execute(hiveCmdBuilder.toString());
    if (response.getFirst() != 0) {
        throw new IllegalArgumentException("Failed to execute hql [" + hql + "], error message is: " + response.getSecond());
    }

}
 
Example 14
Source File: JdbcExplorerTest.java    From kylin with Apache License 2.0
@Test
public void testLoadTableMetadata() throws SQLException {
    String tableName = "tb1";
    String databaseName = "testdb";
    ResultSet rs1 = mock(ResultSet.class);
    when(rs1.next()).thenReturn(true).thenReturn(false);
    when(rs1.getString("TABLE_TYPE")).thenReturn("TABLE");

    ResultSet rs2 = mock(ResultSet.class);
    when(rs2.next()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
    when(rs2.getString("COLUMN_NAME")).thenReturn("COL1").thenReturn("COL2").thenReturn("COL3");
    when(rs2.getInt("DATA_TYPE")).thenReturn(Types.VARCHAR).thenReturn(Types.INTEGER).thenReturn(Types.DECIMAL);
    when(rs2.getInt("COLUMN_SIZE")).thenReturn(128).thenReturn(10).thenReturn(19);
    when(rs2.getInt("DECIMAL_DIGITS")).thenReturn(0).thenReturn(0).thenReturn(4);
    when(rs2.getInt("ORDINAL_POSITION")).thenReturn(1).thenReturn(3).thenReturn(2);
    when(rs2.getString("REMARKS")).thenReturn("comment1").thenReturn("comment2").thenReturn("comment3");

    when(jdbcMetadata.getTable(dbmd, databaseName, tableName)).thenReturn(rs1);
    when(jdbcMetadata.listColumns(dbmd, databaseName, tableName)).thenReturn(rs2);

    Pair<TableDesc, TableExtDesc> result = jdbcExplorer.loadTableMetadata(databaseName, tableName, "proj");
    TableDesc tableDesc = result.getFirst();
    ColumnDesc columnDesc = tableDesc.getColumns()[1];

    Assert.assertEquals(databaseName.toUpperCase(Locale.ROOT), tableDesc.getDatabase());
    Assert.assertEquals(3, tableDesc.getColumnCount());
    Assert.assertEquals("TABLE", tableDesc.getTableType());
    Assert.assertEquals("COL2", columnDesc.getName());
    Assert.assertEquals("integer", columnDesc.getTypeName());
    Assert.assertEquals("comment2", columnDesc.getComment());
    Assert.assertEquals(databaseName.toUpperCase(Locale.ROOT) + "." + tableName.toUpperCase(Locale.ROOT),
            result.getSecond().getIdentity());
}
 
Example 15
Source File: FragmentFilesMerger.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public RawRecord next() {
    Pair<DecodedRecord, Integer> currRecordEntry = minHeap.poll();
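    // each heap entry pairs a decoded record (getFirst) with the index of the fragment it was read from (getSecond)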
    DecodedRecord currRecord = currRecordEntry.getFirst();

    enqueueFromFragment(currRecordEntry.getSecond());
    boolean needAggregate = false;
    boolean first = true;
    while ((!minHeap.isEmpty()) && StringArrayComparator.INSTANCE.compare(currRecord.dimensions,
            minHeap.peek().getFirst().dimensions) == 0) {
        if (first) {
            doAggregate(currRecord);
            first = false;
            needAggregate = true;
        }
        Pair<DecodedRecord, Integer> nextRecord = minHeap.poll();
        doAggregate(nextRecord.getFirst());
        enqueueFromFragment(nextRecord.getSecond());
    }
    byte[][] newEncodedDimVals = encodeToNewDimValues(currRecord.dimensions);
    if (!needAggregate) {
        return new RawRecord(newEncodedDimVals, currRecord.metrics);
    }

    for (int i = 0; i < oneRawRecord.getDimensions().length; i++) {
        oneRawRecord.setDimension(i, newEncodedDimVals[i]);
    }
    Object[] metricValues = new Object[parsedCubeInfo.measureCount];
    resultAggrs.collectStates(metricValues);
    for (int i = 0; i < metricValues.length; i++) {
        metricsBuf.clear();
        metricsSerializers[i].serialize(metricValues[i], metricsBuf);
        byte[] metricBytes = Arrays.copyOf(metricsBuf.array(), metricsBuf.position());
        oneRawRecord.setMetric(i, metricBytes);
    }
    resultAggrs.reset();
    return oneRawRecord;
}
 
Example 16
Source File: HiveProducer.java    From kylin-on-parquet-v2 with Apache License 2.0
public HiveProducerRecord parseToHiveProducerRecord(String tableName, Map<String, String> partitionKVs,
                                                    Map<String, Object> rawValue) throws Exception {
    Pair<String, String> tableNameSplits = ActiveReservoirReporter.getTableNameSplits(tableName);
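    // getFirst() is the database name and getSecond() the table name, as used in the HiveProducerRecord below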
    List<FieldSchema> fields = tableFieldSchemaCache.get(tableNameSplits).getSecond();
    List<Object> columnValues = Lists.newArrayListWithExpectedSize(fields.size());
    for (FieldSchema fieldSchema : fields) {
        columnValues.add(rawValue.get(fieldSchema.getName().toUpperCase(Locale.ROOT)));
    }

    return new HiveProducerRecord(tableNameSplits.getFirst(), tableNameSplits.getSecond(), partitionKVs,
            columnValues);
}
 
Example 17
Source File: TempStatementUtil.java    From kylin with Apache License 2.0
private static void translateCreateToWith(String sql, KylinConfig config) throws IOException {
    Pair<String, String> translated = translateCreateToWithInternal(sql);
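    // getFirst() is the temp statement identifier and getSecond() the rewritten SQL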
    String identifier = translated.getFirst();
    String sql1 = translated.getSecond();

    TempStatementManager manager = TempStatementManager.getInstance(config);
    if (manager.getTempStatement(identifier) == null || !manager.getTempStatement(identifier).equals(sql1)) {
        manager.updateTempStatement(identifier, sql1);
    }
}
 
Example 18
Source File: CompareFilterTimeRangeChecker.java    From kylin with Apache License 2.0
public CheckResult check(CompareTupleFilter compFilter, TimeDerivedColumnType timeDerivedColumnType, long timezoneOffset) {
    Object timestampValue = compFilter.getFirstValue();
    Set conditionValues = compFilter.getValues();
    Pair<Long, Long> timeUnitRange;
    if (timeDerivedColumnType != TimeDerivedColumnType.MINUTE_START
            && timeDerivedColumnType != TimeDerivedColumnType.HOUR_START) {
        timeUnitRange = timezoneOffset == 0 ? timeDerivedColumnType.getTimeUnitRange(timestampValue)
                : timeDerivedColumnType.getTimeUnitRangeTimezoneAware(timestampValue, timezoneOffset);
    } else {
        timeUnitRange = timeDerivedColumnType.getTimeUnitRange(timestampValue);
    }
    switch (compFilter.getOperator()) {
    case EQ:
        return checkForEqValue(timeUnitRange);
    case NEQ:
        if (timeUnitRange.getFirst() <= timeStart && timeUnitRange.getSecond() >= timeEnd) {
            return CheckResult.EXCLUDED;
        }
        if (timeUnitRange.getSecond() <= timeStart || timeUnitRange.getFirst() >= timeEnd) {
            return CheckResult.INCLUDED;
        }
        return CheckResult.OVERLAP;
    case LT:
        if ((!endClose && timeUnitRange.getFirst() >= timeEnd) || (endClose && timeUnitRange.getFirst() > timeEnd)) {
            return CheckResult.INCLUDED;
        }
        if (timeUnitRange.getFirst() <= timeStart) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case LTE:
        if (timeUnitRange.getFirst() >= timeEnd) {
            return CheckResult.INCLUDED;
        }
        if (timeUnitRange.getSecond() < timeStart) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case GT:
        if (timeUnitRange.getSecond() < timeStart) {
            return CheckResult.INCLUDED;
        }
        if (timeUnitRange.getFirst() >= timeEnd) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case GTE:
        if (timeUnitRange.getFirst() <= timeStart) {
            return CheckResult.INCLUDED;
        }
        if ((!endClose && timeUnitRange.getFirst() >= timeEnd) || (endClose && timeUnitRange.getFirst() > timeEnd)) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case IN:
        return checkForInValues(timeDerivedColumnType, conditionValues, timezoneOffset);
    default:
        return CheckResult.OVERLAP;
    }
}
 
Example 19
Source File: CubeManager.java    From kylin with Apache License 2.0
public CubeSegment refreshSegment(CubeInstance cube, TSRange tsRange, SegmentRange segRange)
        throws IOException {
    CubeInstance cubeCopy = cube.latestCopyForWrite(); // get a latest copy

    checkInputRanges(tsRange, segRange);
    PartitionDesc partitionDesc = cubeCopy.getModel().getPartitionDesc();
    if (partitionDesc == null || !partitionDesc.isPartitioned()) {
        // full build
        tsRange = null;
        segRange = null;
    }

    CubeSegment newSegment = newSegment(cubeCopy, tsRange, segRange);

    Pair<Boolean, Boolean> pair = cubeCopy.getSegments().fitInSegments(newSegment);
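    // both flags must be true for the refreshing segment to match an existing segment (see the error below)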
    if (!pair.getFirst() || !pair.getSecond())
        throw new IllegalArgumentException("The new refreshing segment " + newSegment
                + " does not match any existing segment in cube " + cubeCopy);

    if (segRange != null) {
        CubeSegment toRefreshSeg = null;
        for (CubeSegment cubeSegment : cubeCopy.getSegments()) {
            if (cubeSegment.getSegRange().equals(segRange)) {
                toRefreshSeg = cubeSegment;
                break;
            }
        }

        if (toRefreshSeg == null) {
            throw new IllegalArgumentException(
                    "For streaming cube, only one segment can be refreshed at one time");
        }

        newSegment.setSourcePartitionOffsetStart(toRefreshSeg.getSourcePartitionOffsetStart());
        newSegment.setSourcePartitionOffsetEnd(toRefreshSeg.getSourcePartitionOffsetEnd());
    }

    CubeUpdate update = new CubeUpdate(cubeCopy);
    update.setToAddSegs(newSegment);
    updateCube(update);

    return newSegment;
}
 
Example 20
Source File: CompareFilterTimeRangeChecker.java    From kylin-on-parquet-v2 with Apache License 2.0
public CheckResult check(CompareTupleFilter compFilter, TimeDerivedColumnType timeDerivedColumnType, long timezoneOffset) {
    Object timestampValue = compFilter.getFirstValue();
    Set conditionValues = compFilter.getValues();
    Pair<Long, Long> timeUnitRange;
    if (timeDerivedColumnType != TimeDerivedColumnType.MINUTE_START
            && timeDerivedColumnType != TimeDerivedColumnType.HOUR_START) {
        timeUnitRange = timezoneOffset == 0 ? timeDerivedColumnType.getTimeUnitRange(timestampValue)
                : timeDerivedColumnType.getTimeUnitRangeTimezoneAware(timestampValue, timezoneOffset);
    } else {
        timeUnitRange = timeDerivedColumnType.getTimeUnitRange(timestampValue);
    }
    switch (compFilter.getOperator()) {
    case EQ:
        return checkForEqValue(timeUnitRange);
    case NEQ:
        if (timeUnitRange.getFirst() <= timeStart && timeUnitRange.getSecond() >= timeEnd) {
            return CheckResult.EXCLUDED;
        }
        if (timeUnitRange.getSecond() <= timeStart || timeUnitRange.getFirst() >= timeEnd) {
            return CheckResult.INCLUDED;
        }
        return CheckResult.OVERLAP;
    case LT:
        if ((!endClose && timeUnitRange.getFirst() >= timeEnd) || (endClose && timeUnitRange.getFirst() > timeEnd)) {
            return CheckResult.INCLUDED;
        }
        if (timeUnitRange.getFirst() <= timeStart) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case LTE:
        if (timeUnitRange.getFirst() >= timeEnd) {
            return CheckResult.INCLUDED;
        }
        if (timeUnitRange.getSecond() < timeStart) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case GT:
        if (timeUnitRange.getSecond() < timeStart) {
            return CheckResult.INCLUDED;
        }
        if (timeUnitRange.getFirst() >= timeEnd) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case GTE:
        if (timeUnitRange.getFirst() <= timeStart) {
            return CheckResult.INCLUDED;
        }
        if ((!endClose && timeUnitRange.getFirst() >= timeEnd) || (endClose && timeUnitRange.getFirst() > timeEnd)) {
            return CheckResult.EXCLUDED;
        }
        return CheckResult.OVERLAP;
    case IN:
        return checkForInValues(timeDerivedColumnType, conditionValues, timezoneOffset);
    default:
        return CheckResult.OVERLAP;
    }
}