org.apache.kylin.cube.CubeManager Java Examples
The following examples show how to use
org.apache.kylin.cube.CubeManager.
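Nearly all of the examples share the same access pattern: obtain a KylinConfig, get the per-config CubeManager singleton, and look cubes up by name. A minimal sketch of that pattern before diving in (the cube name "my_cube" is a placeholder; every call used here appears in the examples below):

import org.apache.kylin.common.KylinConfig;
import org.apache.kylin.cube.CubeInstance;
import org.apache.kylin.cube.CubeManager;

public class CubeManagerQuickStart {
    public static void main(String[] args) {
        // CubeManager is a singleton per KylinConfig
        KylinConfig config = KylinConfig.getInstanceFromEnv();
        CubeManager cubeManager = CubeManager.getInstance(config);

        // Look up one cube by name ("my_cube" is hypothetical)
        CubeInstance cube = cubeManager.getCube("my_cube");
        System.out.println("descriptor: " + cube.getDescriptor().getName());

        // Or enumerate every cube in the metadata store
        for (CubeInstance c : cubeManager.listAllCubes()) {
            System.out.println(c.getName());
        }
    }
}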
Example #1
Source File: JdbcHiveMRInputTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testGenSqoopCmd_Partition() throws IOException {
    ISource source = SourceManager.getSource(new JdbcSourceAware());
    IMRInput input = source.adaptToBuildEngine(IMRInput.class);
    Assert.assertNotNull(input);

    CubeManager cubeManager = CubeManager.getInstance(getTestConfig());
    CubeDesc cubeDesc = CubeDescManager.getInstance(getTestConfig()).getCubeDesc("ci_inner_join_cube");
    CubeSegment seg = cubeManager.appendSegment(cubeManager.getCube(cubeDesc.getName()),
            new SegmentRange.TSRange(System.currentTimeMillis() - 100L, System.currentTimeMillis() + 100L));
    CubeJoinedFlatTableDesc flatDesc = new CubeJoinedFlatTableDesc(seg);
    JdbcHiveMRInput.JdbcMRBatchCubingInputSide inputSide = (JdbcHiveMRInput.JdbcMRBatchCubingInputSide) input
            .getBatchCubingInputSide(flatDesc);

    AbstractExecutable executable = new MockInputSide(flatDesc, inputSide).createSqoopToFlatHiveStep("/tmp",
            cubeDesc.getName());
    Assert.assertNotNull(executable);
    String cmd = executable.getParam("cmd");
    Assert.assertTrue(cmd.contains("org.h2.Driver"));
    Assert.assertTrue(cmd.contains(
            "--boundary-query \"SELECT MIN(\\\"TEST_KYLIN_FACT\\\".\\\"LEAF_CATEG_ID\\\"), MAX(\\\"TEST_KYLIN_FACT\\\".\\\"LEAF_CATEG_ID\\\")"
                    + System.lineSeparator()
                    + "FROM \\\"DEFAULT\\\".\\\"TEST_KYLIN_FACT\\\" AS \\\"TEST_KYLIN_FACT\\\""));
    source.close();
}
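The CubeManager-specific part of this test is the appendSegment call, which registers a new segment covering the given time range on the named cube. Distilled from the test above (same cube name and getTestConfig() helper, both assumed from the example):

// Sketch of the segment-creation pattern used in the test above
CubeManager cubeManager = CubeManager.getInstance(getTestConfig());
CubeInstance cube = cubeManager.getCube("ci_inner_join_cube");
long now = System.currentTimeMillis();
// append a new segment whose time range is a small window around "now"
CubeSegment seg = cubeManager.appendSegment(cube, new SegmentRange.TSRange(now - 100L, now + 100L));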
Example #2
Source File: CsvSourceTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testGetSourceDataFromFactTable() {
    CubeManager cubeMgr = CubeManager.getInstance(getTestConfig());
    CubeInstance cube = cubeMgr.getCube(CUBE_NAME);
    TableDesc fact = MetadataConverter.extractFactTable(cube);
    List<ColumnDesc> colDescs = Lists.newArrayList();
    Iterator<ColumnDesc> iterator = fact.columns().iterator();
    while (iterator.hasNext()) {
        colDescs.add(iterator.next());
    }

    NSparkCubingEngine.NSparkCubingSource cubingSource = new CsvSource()
            .adaptToBuildEngine(NSparkCubingEngine.NSparkCubingSource.class);
    Dataset<Row> cubeDS = cubingSource.getSourceData(fact, ss, Maps.newHashMap());
    cubeDS.take(10);
    StructType schema = cubeDS.schema();
    for (int i = 0; i < colDescs.size(); i++) {
        StructField field = schema.fields()[i];
        Assert.assertEquals(field.name(), colDescs.get(i).columnName());
        Assert.assertEquals(field.dataType(), colDescs.get(i).dataType());
    }
}
Example #3
Source File: NDCuboidMapper.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
    segmentID = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);
    String cuboidModeName = context.getConfiguration().get(BatchConstants.CFG_CUBOID_MODE);

    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();

    CubeInstance cube = CubeManager.getInstance(config).getCube(cubeName);
    cubeDesc = cube.getDescriptor();
    cubeSegment = cube.getSegmentById(segmentID);
    ndCuboidBuilder = new NDCuboidBuilder(cubeSegment);
    // initialize CuboidScheduler
    cuboidScheduler = CuboidSchedulerUtil.getCuboidSchedulerByMode(cubeSegment, cuboidModeName);
    rowKeySplitter = new RowKeySplitter(cubeSegment);
}
Example #4
Source File: JdbcHiveMRInputTest.java From kylin with Apache License 2.0
@Test
public void testGenSqoopCmd_WithLookupShardBy() throws IOException {
    ISource source = SourceManager.getSource(new JdbcSourceAware());
    IMRInput input = source.adaptToBuildEngine(IMRInput.class);
    Assert.assertNotNull(input);

    CubeManager cubeManager = CubeManager.getInstance(getTestConfig());
    CubeDesc cubeDesc = CubeDescManager.getInstance(getTestConfig()).getCubeDesc("ut_jdbc_shard");
    CubeSegment seg = cubeManager.appendSegment(cubeManager.getCube(cubeDesc.getName()),
            new SegmentRange.TSRange(System.currentTimeMillis() - 100L, System.currentTimeMillis() + 100L));
    CubeJoinedFlatTableDesc flatDesc = new CubeJoinedFlatTableDesc(seg);
    JdbcHiveMRInput.JdbcMRBatchCubingInputSide inputSide = (JdbcHiveMRInput.JdbcMRBatchCubingInputSide) input
            .getBatchCubingInputSide(flatDesc);

    AbstractExecutable executable = new MockInputSide(flatDesc, inputSide).createSqoopToFlatHiveStep("/tmp",
            cubeDesc.getName());
    Assert.assertNotNull(executable);
    String cmd = executable.getParam("cmd");
    Assert.assertTrue(cmd.contains("org.h2.Driver"));
    Assert.assertTrue(cmd.contains(
            "--boundary-query \"SELECT MIN(\\\"TEST_CATEGORY_GROUPINGS\\\".\\\"META_CATEG_NAME\\\"), MAX(\\\"TEST_CATEGORY_GROUPINGS\\\".\\\"META_CATEG_NAME\\\")"
                    + System.lineSeparator()
                    + "FROM \\\"DEFAULT\\\".\\\"TEST_CATEGORY_GROUPINGS\\\" AS \\\"TEST_CATEGORY_GROUPINGS\\\"\""));
    source.close();
}
Example #5
Source File: UpdateCubeInfoAfterCheckpointStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        List<CubeSegment> newSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        Map<Long, Long> recommendCuboidsWithStats = CuboidStatsReaderUtil
                .readCuboidStatsFromSegments(recommendCuboids, newSegments);
        if (recommendCuboidsWithStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        cubeManager.promoteCheckpointOptimizeSegments(cube, recommendCuboidsWithStats,
                newSegments.toArray(new CubeSegment[newSegments.size()]));
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
Example #6
Source File: InMemCuboidReducer.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();

    String cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME).toUpperCase(Locale.ROOT);
    CubeInstance cube = CubeManager.getInstance(config).getCube(cubeName);
    CubeDesc cubeDesc = cube.getDescriptor();

    List<MeasureDesc> measuresDescs = cubeDesc.getMeasures();

    codec = new BufferedMeasureCodec(measuresDescs);
    aggs = new MeasureAggregators(measuresDescs);

    input = new Object[measuresDescs.size()];
    result = new Object[measuresDescs.size()];

    outputKey = new Text();
    outputValue = new Text();
}
Example #7
Source File: CalculateStatsFromBaseCuboidReducer.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    Configuration conf = context.getConfiguration();
    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();
    String cubeName = conf.get(BatchConstants.CFG_CUBE_NAME);
    CubeInstance cube = CubeManager.getInstance(config).getCube(cubeName);
    cubeConfig = cube.getConfig();

    baseCuboidId = cube.getCuboidScheduler().getBaseCuboidId();
    baseCuboidRowCountInMappers = Lists.newLinkedList();

    output = conf.get(BatchConstants.CFG_OUTPUT_PATH);
    samplingPercentage = Integer
            .parseInt(context.getConfiguration().get(BatchConstants.CFG_STATISTICS_SAMPLING_PERCENT));

    taskId = context.getTaskAttemptID().getTaskID().getId();
    cuboidHLLMap = Maps.newHashMap();
}
Example #8
Source File: DeployUtil.java From kylin-on-parquet-v2 with Apache License 2.0
public static void prepareTestDataForStreamingCube(long startTime, long endTime, int numberOfRecords,
        String cubeName, StreamDataLoader streamDataLoader) throws IOException {
    CubeInstance cubeInstance = CubeManager.getInstance(KylinConfig.getInstanceFromEnv()).getCube(cubeName);
    List<String> data = StreamingTableDataGenerator.generate(numberOfRecords, startTime, endTime,
            cubeInstance.getRootFactTable(), cubeInstance.getProject());
    // load into kafka
    streamDataLoader.loadIntoKafka(data);
    logger.info("Write {} messages into {}", data.size(), streamDataLoader.toString());

    // csv data for H2 use
    TableRef factTable = cubeInstance.getModel().getRootFactTable();
    List<TblColRef> tableColumns = Lists.newArrayList(factTable.getColumns());
    TimedJsonStreamParser timedJsonStreamParser = new TimedJsonStreamParser(tableColumns, null);
    StringBuilder sb = new StringBuilder();
    for (String json : data) {
        List<String> rowColumns = timedJsonStreamParser
                .parse(ByteBuffer.wrap(json.getBytes(StandardCharsets.UTF_8))).get(0).getData();
        sb.append(StringUtils.join(rowColumns, ","));
        sb.append(System.getProperty("line.separator"));
    }
    appendFactTableData(sb.toString(), cubeInstance.getRootFactTable());
}
Example #9
Source File: Coordinator.java From kylin with Apache License 2.0
private CubeAssignment reassignCubeImpl(String cubeName, CubeAssignment preAssignments,
        CubeAssignment newAssignments) {
    logger.info("start cube reBalance, cube:{}, previous assignments:{}, new assignments:{}", cubeName,
            preAssignments, newAssignments);
    if (newAssignments.equals(preAssignments)) {
        logger.info("the new assignment is the same as the previous assignment, do nothing for this reassignment");
        return newAssignments;
    }
    CubeInstance cubeInstance = CubeManager.getInstance(KylinConfig.getInstanceFromEnv()).getCube(cubeName);
    doReassign(cubeInstance, preAssignments, newAssignments);
    MapDifference<Integer, List<Partition>> assignDiff = Maps.difference(preAssignments.getAssignments(),
            newAssignments.getAssignments());

    // add empty partitions to the removed replica sets, means that there's still data in the replica set,
    // but no new data will be consumed.
    Map<Integer, List<Partition>> removedAssign = assignDiff.entriesOnlyOnLeft();
    for (Integer removedReplicaSet : removedAssign.keySet()) {
        newAssignments.addAssignment(removedReplicaSet, Lists.<Partition> newArrayList());
    }

    streamMetadataStore.saveNewCubeAssignment(newAssignments);
    AssignmentsCache.getInstance().clearCubeCache(cubeName);
    return newAssignments;
}
Example #10
Source File: DeployCoprocessorCLI.java From kylin-on-parquet-v2 with Apache License 2.0
private static List<String> getHTableNames(KylinConfig config) {
    CubeManager cubeMgr = CubeManager.getInstance(config);

    ArrayList<String> result = new ArrayList<String>();
    for (CubeInstance cube : cubeMgr.listAllCubes()) {
        if (cube.getStorageType() == IStorageAware.ID_HBASE
                || cube.getStorageType() == IStorageAware.ID_SHARDED_HBASE
                || cube.getStorageType() == IStorageAware.ID_REALTIME_AND_HBASE) {
            for (CubeSegment seg : cube.getSegments(SegmentStatusEnum.READY)) {
                String tableName = seg.getStorageLocationIdentifier();
                if (StringUtils.isBlank(tableName) == false) {
                    result.add(tableName);
                    System.out.println("added new table: " + tableName);
                }
            }
        }
    }

    return result;
}
Example #11
Source File: MergeCuboidMapper.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doSetup(Context context) throws IOException, InterruptedException {
    super.bindCurrentConfiguration(context.getConfiguration());

    String cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
    String segmentID = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);

    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();

    CubeManager cubeManager = CubeManager.getInstance(config);
    CubeInstance cube = cubeManager.getCube(cubeName);
    CubeDesc cubeDesc = cube.getDescriptor();
    CubeSegment mergedCubeSegment = cube.getSegmentById(segmentID);

    // decide which source segment
    FileSplit fileSplit = (FileSplit) context.getInputSplit();
    IMROutput2.IMRMergeOutputFormat outputFormat = MRUtil.getBatchMergeOutputSide2(mergedCubeSegment)
            .getOutputFormat();
    CubeSegment sourceCubeSegment = outputFormat.findSourceSegment(fileSplit, cube);
    reEncoder = new SegmentReEncoder(cubeDesc, sourceCubeSegment, mergedCubeSegment, config);
}
Example #12
Source File: UpdateHTableHostCLI.java From kylin-on-parquet-v2 with Apache License 2.0
private static List<String> getHTableNames(KylinConfig config) {
    CubeManager cubeMgr = CubeManager.getInstance(config);

    ArrayList<String> result = new ArrayList<>();
    for (CubeInstance cube : cubeMgr.listAllCubes()) {
        for (CubeSegment seg : cube.getSegments(SegmentStatusEnum.READY)) {
            String tableName = seg.getStorageLocationIdentifier();
            if (!StringUtils.isBlank(tableName)) {
                result.add(tableName);
                logger.info("added new table: {}", tableName);
            }
        }
    }

    return result;
}
Example #13
Source File: FlinkCubingByLayer.java From kylin with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    KylinConfig kConfig = AbstractHadoopJob.loadKylinConfigFromHdfs(conf, metaUrl);
    try (KylinConfig.SetAndUnsetThreadLocalConfig autoUnset = KylinConfig
            .setAndUnsetThreadLocalConfig(kConfig)) {
        CubeInstance cubeInstance = CubeManager.getInstance(kConfig).getCube(cubeName);
        CubeDesc cubeDesc = cubeInstance.getDescriptor();
        CubeSegment cubeSegment = cubeInstance.getSegmentById(segmentId);
        CubeJoinedFlatTableEnrich interDesc = new CubeJoinedFlatTableEnrich(
                EngineFactory.getJoinedFlatTableDesc(cubeSegment), cubeDesc);
        long baseCuboidId = Cuboid.getBaseCuboidId(cubeDesc);
        Cuboid baseCuboid = Cuboid.findForMandatory(cubeDesc, baseCuboidId);
        baseCuboidBuilder = new BaseCuboidBuilder(kConfig, cubeDesc, cubeSegment, interDesc,
                AbstractRowKeyEncoder.createInstance(cubeSegment, baseCuboid),
                MeasureIngester.create(cubeDesc.getMeasures()), cubeSegment.buildDictionaryMap());
    }
}
Example #14
Source File: RowValueDecoderTest.java From kylin with Apache License 2.0
@Test
public void testDecode() throws Exception {
    CubeDesc cubeDesc = CubeManager.getInstance(getTestConfig()).getCube("test_kylin_cube_with_slr_ready")
            .getDescriptor();
    HBaseColumnDesc hbaseCol = cubeDesc.getHbaseMapping().getColumnFamily()[0].getColumns()[0];

    BufferedMeasureCodec codec = new BufferedMeasureCodec(hbaseCol.getMeasures());
    BigDecimal sum = new BigDecimal("333.1234567");
    BigDecimal min = new BigDecimal("333.1111111");
    BigDecimal max = new BigDecimal("333.1999999");
    Long count = new Long(2);
    Long item_count = new Long(100);
    ByteBuffer buf = codec.encode(new Object[] { sum, min, max, count, item_count });

    buf.flip();
    byte[] valueBytes = new byte[buf.limit()];
    System.arraycopy(buf.array(), 0, valueBytes, 0, buf.limit());

    RowValueDecoder rowValueDecoder = new RowValueDecoder(hbaseCol);
    for (MeasureDesc measure : cubeDesc.getMeasures()) {
        FunctionDesc aggrFunc = measure.getFunction();
        int index = hbaseCol.findMeasure(aggrFunc);
        rowValueDecoder.setProjectIndex(index);
    }
    rowValueDecoder.decodeAndConvertJavaObj(valueBytes);
    Object[] measureValues = rowValueDecoder.getValues();
    // BigDecimal.ROUND_HALF_EVEN in BigDecimalSerializer
    assertEquals("[333.1235, 333.1111, 333.2000, 2, 100]", Arrays.toString(measureValues));
}
Example #15
Source File: DeployUtil.java From Kylin with Apache License 2.0
public static void deployMetadata() throws IOException {
    // install metadata to hbase
    ResourceTool.reset(config());
    ResourceTool.copy(KylinConfig.createInstanceFromUri(AbstractKylinTestCase.LOCALMETA_TEST_DATA), config());

    // update cube desc signature.
    for (CubeInstance cube : CubeManager.getInstance(config()).listAllCubes()) {
        cube.getDescriptor().setSignature(cube.getDescriptor().calculateSignature());
        CubeManager.getInstance(config()).updateCube(cube);
    }
}
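The loop above is the basic read-modify-persist cycle on cube metadata: fetch instances from CubeManager, mutate the in-memory objects, and write them back with updateCube. Note this snippet comes from an early Kylin release in which updateCube takes the instance directly; later releases moved to a CubeUpdate-based variant, so treat the sketch below as illustrative of the cycle rather than of a fixed signature:

// Sketch: read-modify-persist, using the API exactly as the example above does
CubeManager mgr = CubeManager.getInstance(config());   // config() as in DeployUtil
for (CubeInstance cube : mgr.listAllCubes()) {
    // mutate the in-memory descriptor ...
    cube.getDescriptor().setSignature(cube.getDescriptor().calculateSignature());
    // ... then persist the modified cube back to the metadata store
    mgr.updateCube(cube);
}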
Example #16
Source File: ITDoggedCubeBuilderTest.java From kylin-on-parquet-v2 with Apache License 2.0
@BeforeClass
public static void before() throws IOException {
    staticCreateTestMetadata();

    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    CubeManager cubeManager = CubeManager.getInstance(kylinConfig);

    cube = cubeManager.getCube("ssb");
    flatTable = LocalFileMetadataTestCase.LOCALMETA_TEST_DATA + "/data/"
            + kylinConfig.getHiveIntermediateTablePrefix() + "ssb_19920101000000_19920201000000.csv";
    dictionaryMap = ITInMemCubeBuilderTest.getDictionaryMap(cube, flatTable);
}
Example #17
Source File: CubeSegmentTupleIterator.java From Kylin with Apache License 2.0
private TupleInfo buildTupleInfo(Cuboid cuboid) {
    TupleInfo info = new TupleInfo();
    int index = 0;
    rowKeyDecoder.setCuboid(cuboid);
    List<TblColRef> rowColumns = rowKeyDecoder.getColumns();
    List<String> colNames = rowKeyDecoder.getNames(context.getAliasMap());
    for (int i = 0; i < rowColumns.size(); i++) {
        TblColRef column = rowColumns.get(i);
        if (!dimensions.contains(column)) {
            continue;
        }
        // add normal column
        info.setField(colNames.get(i), rowColumns.get(i), rowColumns.get(i).getType().getName(), index++);
    }

    // derived columns and filler
    Map<Array<TblColRef>, List<DeriveInfo>> hostToDerivedInfo = cubeSeg.getCubeDesc()
            .getHostToDerivedInfo(rowColumns, null);
    for (Entry<Array<TblColRef>, List<DeriveInfo>> entry : hostToDerivedInfo.entrySet()) {
        TblColRef[] hostCols = entry.getKey().data;
        for (DeriveInfo deriveInfo : entry.getValue()) {
            // mark name for each derived field
            for (TblColRef derivedCol : deriveInfo.columns) {
                String derivedField = getFieldName(derivedCol, context.getAliasMap());
                info.setField(derivedField, derivedCol, derivedCol.getType().getName(), index++);
            }
            // add filler
            info.addDerivedColumnFiller(Tuple.newDerivedColumnFiller(rowColumns, hostCols, deriveInfo, info,
                    CubeManager.getInstance(this.cube.getConfig()), cubeSeg));
        }
    }

    for (RowValueDecoder rowValueDecoder : this.rowValueDecoders) {
        List<String> names = rowValueDecoder.getNames();
        MeasureDesc[] measures = rowValueDecoder.getMeasures();
        for (int i = 0; i < measures.length; i++) {
            String dataType = measures[i].getFunction().getSQLType();
            info.setField(names.get(i), null, dataType, index++);
        }
    }

    return info;
}
Example #18
Source File: FlinkCubingMerge.java From kylin with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    this.kylinConfig = AbstractHadoopJob.loadKylinConfigFromHdfs(conf, metaUrl);
    final CubeInstance cube = CubeManager.getInstance(kylinConfig).getCube(cubeName);
    final CubeDesc cubeDesc = CubeDescManager.getInstance(kylinConfig).getCubeDesc(cube.getDescName());
    final CubeSegment sourceSeg = cube.getSegmentById(sourceSegmentId);
    final CubeSegment mergedSeg = cube.getSegmentById(mergedSegmentId);
    this.segmentReEncoder = new SegmentReEncoder(cubeDesc, sourceSeg, mergedSeg, kylinConfig);
}
Example #19
Source File: StreamingSegmentManagerTest.java From kylin with Apache License 2.0
@Before
public void setUp() throws Exception {
    this.createTestMetadata();
    setUpTestKylinCube();
    this.baseStorePath = KylinConfig.getInstanceFromEnv().getStreamingIndexPath();
    CubeInstance cubeInstance = CubeManager.getInstance(KylinConfig.getInstanceFromEnv()).getCube(cubeName);
    this.streamingSegmentManager = new StreamingSegmentManager(baseStorePath, cubeInstance,
            new MockPositionHandler(), null);
    this.cleanupSegments();
    this.testHelper = new TestHelper(cubeInstance);
    StreamingQueryProfile.set(new StreamingQueryProfile("test-query-id", System.currentTimeMillis()));
}
Example #20
Source File: CubeController.java From kylin-on-parquet-v2 with Apache License 2.0
/**
 * Force rebuild a cube's lookup table snapshot
 *
 * @throws IOException
 */
@RequestMapping(value = "/{cubeName}/segs/{segmentName}/refresh_lookup", method = {
        RequestMethod.PUT }, produces = { "application/json" })
@ResponseBody
public CubeInstance rebuildLookupSnapshot(@PathVariable String cubeName, @PathVariable String segmentName,
        @RequestParam(value = "lookupTable") String lookupTable) {
    try {
        final CubeManager cubeMgr = cubeService.getCubeManager();
        final CubeInstance cube = cubeMgr.getCube(cubeName);
        return cubeService.rebuildLookupSnapshot(cube, segmentName, lookupTable);
    } catch (IOException e) {
        logger.error(e.getLocalizedMessage(), e);
        throw new InternalErrorException(e.getLocalizedMessage(), e);
    }
}
Example #21
Source File: SparkCubingMerge.java From kylin-on-parquet-v2 with Apache License 2.0
private void init() {
    this.kylinConfig = AbstractHadoopJob.loadKylinConfigFromHdfs(conf, metaUrl);
    final CubeInstance cube = CubeManager.getInstance(kylinConfig).getCube(cubeName);
    final CubeDesc cubeDesc = CubeDescManager.getInstance(kylinConfig).getCubeDesc(cube.getDescName());
    final CubeSegment sourceSeg = cube.getSegmentById(sourceSegmentId);
    final CubeSegment mergedSeg = cube.getSegmentById(mergedSegmentId);
    this.segmentReEncoder = new SegmentReEncoder(cubeDesc, sourceSeg, mergedSeg, kylinConfig);
}
Example #22
Source File: SparkBuildDictionary.java From kylin with Apache License 2.0
private void init() {
    try (KylinConfig.SetAndUnsetThreadLocalConfig autoUnset = KylinConfig
            .setAndUnsetThreadLocalConfig(config)) {
        cubeSegment = CubeManager.getInstance(config).getCube(cubeName).getSegmentById(segmentId);
    }
    initialized = true;
}
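Several examples (#13, #22, #25) wrap CubeManager access in a try-with-resources block like the one above. Judging by its usage, the SetAndUnsetThreadLocalConfig handle pins the given config to the current thread for the duration of the block, so manager lookups inside it resolve against a consistent configuration, and the previous thread-local state is restored on close. The scope, distilled (kConfig loaded as in the examples):

// The thread-local config scope used by the Flink/Spark tasks above
try (KylinConfig.SetAndUnsetThreadLocalConfig autoUnset = KylinConfig.setAndUnsetThreadLocalConfig(kConfig)) {
    // resolve metadata while the config is pinned to this thread
    CubeInstance cube = CubeManager.getInstance(kConfig).getCube(cubeName);
    // ... use cube ...
} // prior thread-local config restored here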
Example #23
Source File: RowKeySplitterTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testWithSlr() throws Exception {
    // has shard
    CubeInstance cube = CubeManager.getInstance(getTestConfig()).getCube("TEST_KYLIN_CUBE_WITH_SLR_READY");
    RowKeySplitter rowKeySplitter = new RowKeySplitter(cube.getFirstSegment(), 11, 20);
    // base cuboid rowkey
    byte[] input = { 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, -104, -106, -128, 11, 54, -105, 55, 9, 9, 9, 9, 9, 9, 9, 9,
            9, 9, 9, 9, 9, 9, 9, 9, 9, 13, 71, 114, 65, 66, 73, 78, 9, 9, 9, 9, 9, 9, 9, 9, 0, 10, 0 };
    rowKeySplitter.split(input);

    assertEquals(11, rowKeySplitter.getBufferSize());
}
Example #24
Source File: NManualBuildAndQueryTest.java From kylin-on-parquet-v2 with Apache License 2.0
private void buildTwoSegementAndMerge(String cubeName) throws Exception {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    CubeManager cubeMgr = CubeManager.getInstance(config);
    Assert.assertTrue(config.getHdfsWorkingDirectory().startsWith("file:"));

    // cleanup all segments first
    cleanupSegments(cubeName);

    /**
     * Round1. Build 2 segment
     */
    ExecutableState state;
    state = buildCuboid(cubeName, new SegmentRange.TSRange(dateToLong("2010-01-01"), dateToLong("2012-01-01")));
    Assert.assertEquals(ExecutableState.SUCCEED, state);
    state = buildCuboid(cubeName, new SegmentRange.TSRange(dateToLong("2012-01-01"), dateToLong("2015-01-01")));
    Assert.assertEquals(ExecutableState.SUCCEED, state);

    /**
     * Round2. Merge two segments
     */
    state = mergeSegments(cubeName, dateToLong("2010-01-01"), dateToLong("2015-01-01"), false);
    Assert.assertEquals(ExecutableState.SUCCEED, state);

    /**
     * validate cube segment info
     */
    CubeSegment firstSegment = cubeMgr.reloadCube(cubeName).getSegments().get(0);
    Assert.assertEquals(new SegmentRange.TSRange(dateToLong("2010-01-01"), dateToLong("2015-01-01")),
            firstSegment.getSegRange());
}
Example #25
Source File: FlinkCubingByLayer.java From kylin with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    KylinConfig kConfig = AbstractHadoopJob.loadKylinConfigFromHdfs(conf, metaUrl);
    try (KylinConfig.SetAndUnsetThreadLocalConfig autoUnset = KylinConfig
            .setAndUnsetThreadLocalConfig(kConfig)) {
        CubeInstance cubeInstance = CubeManager.getInstance(kConfig).getCube(cubeName);
        this.cubeSegment = cubeInstance.getSegmentById(segmentId);
        this.cubeDesc = cubeInstance.getDescriptor();
        this.ndCuboidBuilder = new NDCuboidBuilder(cubeSegment, new RowKeyEncoderProvider(cubeSegment));
        this.rowKeySplitter = new RowKeySplitter(cubeSegment);
    }
}
Example #26
Source File: KylinHealthCheckJob.java From kylin with Apache License 2.0
private void checkErrorMeta() {
    reporter.log("## Checking metadata");

    CubeManager cubeManager = CubeManager.getInstance(config);
    for (String cube : cubeManager.getErrorCubes()) {
        reporter.log("Error loading CubeDesc at " + cube);
    }

    DataModelManager modelManager = DataModelManager.getInstance(config);
    for (String model : modelManager.getErrorModels()) {
        reporter.log("Error loading DataModelDesc at " + model);
    }
}
Example #27
Source File: CubeHFileMapper2Test.java From Kylin with Apache License 2.0
@Before
public void setup() throws Exception {
    this.createTestMetadata();
    // hack for distributed cache
    FileUtils.deleteDirectory(new File("../job/meta"));
    FileUtils.copyDirectory(new File(getTestConfig().getMetadataUrl()), new File("../job/meta"));
    CubeDesc desc = CubeManager.getInstance(getTestConfig()).getCube(cubeName).getDescriptor();
    codec = new MeasureCodec(desc.getMeasures());
}
Example #28
Source File: CubeMetadataUpgrade.java From kylin-on-parquet-v2 with Apache License 2.0
public void verify() {
    logger.info("=================================================================");
    logger.info("The changes are applied, now it's time to verify the new metadata store by reloading all metadata:");
    logger.info("=================================================================");
    config.clearManagers();
    DataModelManager.getInstance(config);
    CubeDescManager.getInstance(config);
    CubeManager.getInstance(config);
    ProjectManager.getInstance(config);
    //cleanup();
}
Example #29
Source File: StreamingServer.java From kylin with Apache License 2.0
public StreamingSegmentManager getStreamingSegmentManager(String cubeName) {
    if (streamingSegmentManagerMap.get(cubeName) == null) {
        synchronized (streamingSegmentManagerMap) {
            if (streamingSegmentManagerMap.get(cubeName) == null) {
                CubeInstance cubeInstance = CubeManager.getInstance(KylinConfig.getInstanceFromEnv())
                        .getCube(cubeName);
                ISourcePositionHandler sourcePositionHandler = StreamingSourceFactory
                        .getStreamingSource(cubeInstance).getSourcePositionHandler();
                StreamingSegmentManager segmentManager = new StreamingSegmentManager(baseStorePath, cubeInstance,
                        sourcePositionHandler, this);
                streamingSegmentManagerMap.put(cubeName, segmentManager);
            }
        }
    }
    return streamingSegmentManagerMap.get(cubeName);
}
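The method above lazily creates one StreamingSegmentManager per cube with a check-lock-recheck idiom, so the expensive construction (which involves a CubeManager lookup) happens at most once per cube name. The same idiom in isolation; this is an illustrative generic sketch, not Kylin API, and it assumes a ConcurrentHashMap so the unsynchronized first read is safe:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Illustrative only: check-lock-recheck lazy initialization keyed by name
class LazyRegistry<V> {
    private final Map<String, V> cache = new ConcurrentHashMap<>();
    private final Function<String, V> factory;

    LazyRegistry(Function<String, V> factory) {
        this.factory = factory;
    }

    V get(String key) {
        if (cache.get(key) == null) {          // cheap first check, no lock taken
            synchronized (cache) {
                if (cache.get(key) == null) {  // re-check under the lock
                    cache.put(key, factory.apply(key));
                }
            }
        }
        return cache.get(key);
    }
}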
Example #30
Source File: CubeMetaIngester.java From kylin with Apache License 2.0
private void injest(File metaRoot) throws IOException {
    KylinConfig srcConfig = KylinConfig.createInstanceFromUri(metaRoot.getAbsolutePath());
    TableMetadataManager srcMetadataManager = TableMetadataManager.getInstance(srcConfig);
    DataModelManager srcModelManager = DataModelManager.getInstance(srcConfig);
    HybridManager srcHybridManager = HybridManager.getInstance(srcConfig);
    CubeManager srcCubeManager = CubeManager.getInstance(srcConfig);
    CubeDescManager srcCubeDescManager = CubeDescManager.getInstance(srcConfig);

    checkAndMark(srcMetadataManager, srcModelManager, srcHybridManager, srcCubeManager, srcCubeDescManager);
    new ResourceTool().copy(srcConfig, kylinConfig, Lists.newArrayList(requiredResources));

    // clear the cache
    Broadcaster.getInstance(kylinConfig).notifyClearAll();

    ProjectManager projectManager = ProjectManager.getInstance(kylinConfig);
    for (TableDesc tableDesc : srcMetadataManager.listAllTables(null)) {
        logger.info("add " + tableDesc + " to " + targetProjectName);
        projectManager.addTableDescToProject(Lists.newArrayList(tableDesc.getIdentity()).toArray(new String[0]),
                targetProjectName);
    }

    for (CubeInstance cube : srcCubeManager.listAllCubes()) {
        logger.info("add " + cube + " to " + targetProjectName);
        projectManager.addModelToProject(cube.getModel().getName(), targetProjectName);
        projectManager.moveRealizationToProject(RealizationType.CUBE, cube.getName(), targetProjectName, null);
    }
}