Java Code Examples for org.apache.kylin.job.JobInstance#getRelatedCube()

The following examples show how to use org.apache.kylin.job.JobInstance#getRelatedCube(). Each example is taken from an open-source project; the source file and originating project are noted above each snippet.
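Before the examples, a minimal sketch of the typical call pattern (the helper class below is illustrative only, not part of the Kylin API): getRelatedCube() returns the name of the cube a job belongs to, and callers normally resolve that name to a CubeInstance through CubeManager, checking for null in case the cube has since been dropped.

import org.apache.kylin.common.KylinConfig;
import org.apache.kylin.cube.CubeInstance;
import org.apache.kylin.cube.CubeManager;
import org.apache.kylin.job.JobInstance;

public class RelatedCubeLookup {

    /** Resolves the cube a job was submitted for, or returns null if the job has no cube or the cube was dropped. */
    public static CubeInstance lookupRelatedCube(JobInstance job) {
        String cubeName = job.getRelatedCube();   // name of the cube this job builds
        if (cubeName == null) {
            return null;                          // job is not associated with a cube
        }
        CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
        return cubeManager.getCube(cubeName);     // null if the cube no longer exists
    }
}
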
Example 1
Source File: JobService.java    From kylin-on-parquet-v2 with Apache License 2.0
public void resubmitJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);

    Coordinator coordinator = Coordinator.getInstance();
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    String cubeName = job.getRelatedCube();
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);

    String segmentName = job.getRelatedSegmentName();
    try {
        // the segment name encodes its start and end timestamps; recover the time range
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = coordinator.getCubeManager().appendSegment(cubeInstance,
                new SegmentRange.TSRange(segmentRange.getFirst(), segmentRange.getSecond()));

        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg, aclEvaluate.getCurrentUserName());
        coordinator.getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());

        // record the build state so the stream coordinator can track the new job
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        coordinator.getStreamMetadataStore().updateSegmentBuildState(cubeName, segmentName, state);
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        throw e;
    }
}
 
Example 2
Source File: JobService.java    From kylin-on-parquet-v2 with Apache License 2.0
public void dropJob(JobInstance job) {
    aclEvaluate.checkProjectOperationPermission(job);
    // only enforce the status check while the job is still tied to an existing cube
    if (job.getRelatedCube() != null && getCubeManager().getCube(job.getRelatedCube()) != null) {
        if (job.getStatus() != JobStatusEnum.FINISHED && job.getStatus() != JobStatusEnum.DISCARDED) {
            throw new BadRequestException(
                    "Only FINISHED and DISCARDED jobs can be deleted. Please wait for the job to finish or discard it first.");
        }
    }
    getExecutableManager().deleteJob(job.getId());
    logger.info("Delete job [" + job.getId() + "] trigger by + "
            + SecurityContextHolder.getContext().getAuthentication().getName());
}
 
Example 3
Source File: JobService.java    From kylin with Apache License 2.0
public void resubmitJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);

    Coordinator coordinator = Coordinator.getInstance();
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    String cubeName = job.getRelatedCube();
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);

    String segmentName = job.getRelatedSegmentName();
    try {
        // the segment name encodes its start and end timestamps; recover the time range
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = coordinator.getCubeManager().appendSegment(cubeInstance,
                new SegmentRange.TSRange(segmentRange.getFirst(), segmentRange.getSecond()));

        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg, aclEvaluate.getCurrentUserName());
        coordinator.getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());

        // record the build state so the stream coordinator can track the new job
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        coordinator.getStreamMetadataStore().updateSegmentBuildState(cubeName, segmentName, state);
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        throw e;
    }
}
 
Example 4
Source File: JobService.java    From kylin with Apache License 2.0
public void dropJob(JobInstance job) {
    aclEvaluate.checkProjectOperationPermission(job);
    // only enforce the status check while the job is still tied to an existing cube
    if (job.getRelatedCube() != null && getCubeManager().getCube(job.getRelatedCube()) != null) {
        if (job.getStatus() != JobStatusEnum.FINISHED && job.getStatus() != JobStatusEnum.DISCARDED) {
            throw new BadRequestException(
                    "Only FINISHED and DISCARDED jobs can be deleted. Please wait for the job to finish or discard it first.");
        }
    }
    getExecutableManager().deleteJob(job.getId());
    logger.info("Delete job [" + job.getId() + "] trigger by + "
            + SecurityContextHolder.getContext().getAuthentication().getName());
}
 
Example 5
Source File: JobService.java    From kylin-on-parquet-v2 with Apache License 2.0
public void cancelJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);
    // if the job is no longer tied to an existing cube or segment, simply discard it
    if (null == job.getRelatedCube() || null == getCubeManager().getCube(job.getRelatedCube())
            || null == job.getRelatedSegment()) {
        getExecutableManager().discardJob(job.getId());
    }

    logger.info("Cancel job [" + job.getId() + "] trigger by "
            + SecurityContextHolder.getContext().getAuthentication().getName());
    if (job.getStatus() == JobStatusEnum.FINISHED) {
        throw new IllegalStateException(
                "The job " + job.getId() + " has already been finished and cannot be discarded.");
    }

    if (job.getStatus() != JobStatusEnum.DISCARDED) {
        AbstractExecutable executable = getExecutableManager().getJob(job.getId());
        if (executable instanceof CubingJob) {
            cancelCubingJobInner((CubingJob) executable);
            // release any distributed locks the job may still hold once it reaches a final state
            if (executable.getStatus().isFinalState()) {
                try {
                    DistributedLock lock = KylinConfig.getInstanceFromEnv().getDistributedLockFactory().lockForCurrentThread();
                    if (lock.isLocked(CubeJobLockUtil.getLockPath(executable.getCubeName(), job.getId()))) { // release cube job dict lock if it exists
                        lock.purgeLocks(CubeJobLockUtil.getLockPath(executable.getCubeName(), null));
                        logger.info("{} unlock cube job dict lock path({}) success", job.getId(), CubeJobLockUtil.getLockPath(executable.getCubeName(), null));

                        if (lock.isLocked(CubeJobLockUtil.getEphemeralLockPath(executable.getCubeName()))) { // release cube job ephemeral lock if it exists
                            lock.purgeLocks(CubeJobLockUtil.getEphemeralLockPath(executable.getCubeName()));
                            logger.info("{} unlock cube job ephemeral lock path({}) success", job.getId(), CubeJobLockUtil.getEphemeralLockPath(executable.getCubeName()));
                        }
                    }
                } catch (Exception e) {
                    logger.error("error when releasing locks for cube {}, job {} (id {})", executable.getCubeName(), job.getName(), job.getId(), e);
                }
            }
        } else if (executable instanceof CheckpointExecutable) {
            cancelCheckpointJobInner((CheckpointExecutable) executable);
        } else {
            getExecutableManager().discardJob(executable.getId());
        }
    }
}
 
Example 6
Source File: JobService.java    From kylin with Apache License 2.0
public void cancelJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);
    // if the job is no longer tied to an existing cube or segment, simply discard it
    if (null == job.getRelatedCube() || null == getCubeManager().getCube(job.getRelatedCube())
            || null == job.getRelatedSegment()) {
        getExecutableManager().discardJob(job.getId());
    }

    logger.info("Cancel job [" + job.getId() + "] trigger by "
            + SecurityContextHolder.getContext().getAuthentication().getName());
    if (job.getStatus() == JobStatusEnum.FINISHED) {
        throw new IllegalStateException(
                "The job " + job.getId() + " has already been finished and cannot be discarded.");
    }

    if (job.getStatus() != JobStatusEnum.DISCARDED) {
        AbstractExecutable executable = getExecutableManager().getJob(job.getId());
        if (executable instanceof CubingJob) {
            cancelCubingJobInner((CubingJob) executable);
            // release any global MR/Hive dict and cube job locks the job may still hold once it reaches a final state
            if (executable.getStatus().isFinalState()) {
                try {
                    DistributedLock lock = KylinConfig.getInstanceFromEnv().getDistributedLockFactory().lockForCurrentThread();
                    if (lock.isLocked(MRHiveDictUtil.getLockPath(executable.getCubeName(), job.getId()))) { // release MR/Hive global dict lock if it exists
                        lock.purgeLocks(MRHiveDictUtil.getLockPath(executable.getCubeName(), null));
                        logger.info("{} unlock global MR/Hive dict lock path({}) success", job.getId(),
                                MRHiveDictUtil.getLockPath(executable.getCubeName(), null));
                        if (lock.isLocked(MRHiveDictUtil.getEphemeralLockPath(executable.getCubeName()))) { // release MR/Hive global dict ephemeral lock if it exists
                            lock.purgeLocks(MRHiveDictUtil.getEphemeralLockPath(executable.getCubeName()));
                            logger.info("{} unlock global MR/Hive dict ephemeral lock path({}) success", job.getId(),
                                    MRHiveDictUtil.getEphemeralLockPath(executable.getCubeName()));
                        }
                    }

                    if (lock.isLocked(CubeJobLockUtil.getLockPath(executable.getCubeName(), job.getId()))) { // release cube job dict lock if it exists
                        lock.purgeLocks(CubeJobLockUtil.getLockPath(executable.getCubeName(), null));
                        logger.info("{} unlock cube job dict lock path({}) success", job.getId(), CubeJobLockUtil.getLockPath(executable.getCubeName(), null));

                        if (lock.isLocked(CubeJobLockUtil.getEphemeralLockPath(executable.getCubeName()))) { // release cube job ephemeral lock if it exists
                            lock.purgeLocks(CubeJobLockUtil.getEphemeralLockPath(executable.getCubeName()));
                            logger.info("{} unlock cube job ephemeral lock path({}) success", job.getId(), CubeJobLockUtil.getEphemeralLockPath(executable.getCubeName()));
                        }
                    }
                } catch (Exception e) {
                    logger.error("error when releasing locks for cube {}, job {} (id {})", executable.getCubeName(), job.getName(), job.getId(), e);
                }
            }
        } else if (executable instanceof CheckpointExecutable) {
            cancelCheckpointJobInner((CheckpointExecutable) executable);
        } else {
            getExecutableManager().discardJob(executable.getId());
        }
    }
}