Java Code Examples for org.apache.kylin.common.KylinConfig#getCoprocessorLocalJar()

The following examples show how to use org.apache.kylin.common.KylinConfig#getCoprocessorLocalJar(). Each example is extracted from an open source project; the source file and project are listed above the code.
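In all of the examples below, getCoprocessorLocalJar() supplies the local path of the Kylin HBase coprocessor jar, which is then uploaded to HDFS and registered on the cube HTables. A minimal sketch of reading the value (the kylin.properties key named in the comment is an assumption; the examples rely only on the getter):

KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
// Typically resolves to the coprocessor jar under $KYLIN_HOME/lib; the value can be
// overridden in kylin.properties (assumed key: kylin.storage.hbase.coprocessor-local-jar).
String localCoprocessorJar = kylinConfig.getCoprocessorLocalJar();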
Example 1
Source File: DeployCoprocessorCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
private static void initHTableCoprocessor(HTableDescriptor desc) throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    FileSystem fileSystem = FileSystem.get(hconf);

    String localCoprocessorJar = kylinConfig.getCoprocessorLocalJar();
    Path hdfsCoprocessorJar = DeployCoprocessorCLI.uploadCoprocessorJar(localCoprocessorJar, fileSystem, null);

    DeployCoprocessorCLI.addCoprocessorOnHTable(desc, hdfsCoprocessorJar);
}
 
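The helper DeployCoprocessorCLI.addCoprocessorOnHTable called in Example 1 is not shown on this page. As a rough sketch of what such a registration looks like with the standard HBase API (the endpoint class name and priority are illustrative assumptions, not necessarily what Kylin's helper registers):

private static void addCoprocessorSketch(HTableDescriptor desc, Path hdfsCoprocessorJar) throws IOException {
    // Register the uploaded HDFS jar on the table descriptor; requires
    // org.apache.hadoop.hbase.Coprocessor for the priority constant.
    desc.addCoprocessor("org.apache.kylin.storage.hbase.cube.v2.coprocessor.endpoint.CubeVisitService",
            hdfsCoprocessorJar, Coprocessor.PRIORITY_USER, null);
}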
Example 2
Source File: DeployCoprocessorCLI.java    From kylin with Apache License 2.0
private static void initHTableCoprocessor(HTableDescriptor desc) throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    FileSystem fileSystem = FileSystem.get(hconf);

    String localCoprocessorJar = kylinConfig.getCoprocessorLocalJar();
    Path hdfsCoprocessorJar = DeployCoprocessorCLI.uploadCoprocessorJar(localCoprocessorJar, fileSystem, null);

    DeployCoprocessorCLI.addCoprocessorOnHTable(desc, hdfsCoprocessorJar);
}
 
Example 3
Source File: DeployCoprocessorCLI.java    From Kylin with Apache License 2.0
private static void initHTableCoprocessor(HTableDescriptor desc) throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    // This variant resolves the FileSystem from the plain Hadoop configuration
    // via HadoopUtil rather than from HBaseConnection.
    Configuration hconf = HadoopUtil.getCurrentConfiguration();
    FileSystem fileSystem = FileSystem.get(hconf);

    String localCoprocessorJar = kylinConfig.getCoprocessorLocalJar();
    Path hdfsCoprocessorJar = DeployCoprocessorCLI.uploadCoprocessorJar(localCoprocessorJar, fileSystem, null);

    DeployCoprocessorCLI.addCoprocessorOnHTable(desc, hdfsCoprocessorJar);
}
 
Example 4
Source File: DeployCoprocessorCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
public static void main(String[] args) throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    FileSystem fileSystem = FileSystem.get(hconf);
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin hbaseAdmin = null;
    DeployCoprocessorCLIOps ops = parseArgs(args);

    try {
        hbaseAdmin = conn.getAdmin();
        String localCoprocessorJar;
        if ("default".equals(ops.getLocalCoprocessorJar())) {
            localCoprocessorJar = kylinConfig.getCoprocessorLocalJar();
        } else {
            localCoprocessorJar = new File(ops.getLocalCoprocessorJar()).getAbsolutePath();
        }
        logger.info("Identify coprocessor jar " + localCoprocessorJar);
        logger.info("Use at most {} threads to do upgrade", ops.getMaxThreads());

        List<String> tableNames = getHTableNames(kylinConfig);
        logger.info("Identify tables " + tableNames);

        String filterType = ops.getFilterType().toLowerCase(Locale.ROOT);
        if (filterType.equals("-table")) {
            tableNames = filterByTables(tableNames, Arrays.asList(ops.getEntities()));
        } else if (filterType.equals("-cube")) {
            tableNames = filterByCubes(tableNames, Arrays.asList(ops.getEntities()));
        } else if (filterType.equals("-project")) {
            tableNames = filterByProjects(tableNames, Arrays.asList(ops.getEntities()));
        } else if (!filterType.equals("all")) {
            printUsageAndExit();
        }
        logger.info("Tables after filtering by type " + filterType + ": " + tableNames);

        tableNames = filterByGitCommit(hbaseAdmin, tableNames);
        logger.info("Will execute tables " + tableNames);

        long start = System.currentTimeMillis();

        Set<String> oldJarPaths = getCoprocessorJarPaths(hbaseAdmin, tableNames);
        logger.info("Old coprocessor jar: " + oldJarPaths);

        Path hdfsCoprocessorJar = uploadCoprocessorJar(localCoprocessorJar, fileSystem, oldJarPaths);
        logger.info("New coprocessor jar: " + hdfsCoprocessorJar);

        Pair<List<String>, List<String>> results = resetCoprocessorOnHTables(hbaseAdmin, hdfsCoprocessorJar,
                tableNames, ops.getMaxThreads());

        // Don't remove old jars, missing coprocessor jar will fail hbase
        // removeOldJars(oldJarPaths, fileSystem);

        logger.info("Processed time: " + (System.currentTimeMillis() - start));
        logger.info("Processed tables count: " + results.getFirst().size());
        logger.info("Processed tables: " + results.getFirst());
        logger.error("Failed tables count: " + results.getSecond().size());
        logger.error("Failed tables : " + results.getSecond());
        logger.info("Active coprocessor jar: " + hdfsCoprocessorJar);
    } finally {
        if (hbaseAdmin != null) {
            hbaseAdmin.close();
        }
    }
}
 
Example 5
Source File: DeployCoprocessorCLI.java    From kylin with Apache License 2.0
public static void main(String[] args) throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    FileSystem fileSystem = FileSystem.get(hconf);
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin hbaseAdmin = null;
    DeployCoprocessorCLIOps ops = parseArgs(args);

    try {
        hbaseAdmin = conn.getAdmin();
        String localCoprocessorJar;
        if ("default".equals(ops.getLocalCoprocessorJar())) {
            localCoprocessorJar = kylinConfig.getCoprocessorLocalJar();
        } else {
            localCoprocessorJar = new File(ops.getLocalCoprocessorJar()).getAbsolutePath();
        }
        logger.info("Identify coprocessor jar " + localCoprocessorJar);
        logger.info("Use at most {} threads to do upgrade", ops.getMaxThreads());

        List<String> tableNames = getHTableNames(kylinConfig);
        logger.info("Identify tables " + tableNames);

        String filterType = ops.getFilterType().toLowerCase(Locale.ROOT);
        if (filterType.equals("-table")) {
            tableNames = filterByTables(tableNames, Arrays.asList(ops.getEntities()));
        } else if (filterType.equals("-cube")) {
            tableNames = filterByCubes(tableNames, Arrays.asList(ops.getEntities()));
        } else if (filterType.equals("-project")) {
            tableNames = filterByProjects(tableNames, Arrays.asList(ops.getEntities()));
        } else if (!filterType.equals("all")) {
            printUsageAndExit();
        }
        logger.info("Tables after filtering by type " + filterType + ": " + tableNames);

        tableNames = filterByGitCommit(hbaseAdmin, tableNames);
        logger.info("Will execute tables " + tableNames);

        long start = System.currentTimeMillis();

        Set<String> oldJarPaths = getCoprocessorJarPaths(hbaseAdmin, tableNames);
        logger.info("Old coprocessor jar: " + oldJarPaths);

        Path hdfsCoprocessorJar = uploadCoprocessorJar(localCoprocessorJar, fileSystem, oldJarPaths);
        logger.info("New coprocessor jar: " + hdfsCoprocessorJar);

        Pair<List<String>, List<String>> results = resetCoprocessorOnHTables(hbaseAdmin, hdfsCoprocessorJar,
                tableNames, ops.getMaxThreads());

        // Don't remove old jars, missing coprocessor jar will fail hbase
        // removeOldJars(oldJarPaths, fileSystem);

        logger.info("Processed time: " + (System.currentTimeMillis() - start));
        logger.info("Processed tables count: " + results.getFirst().size());
        logger.info("Processed tables: " + results.getFirst());
        logger.error("Failed tables count: " + results.getSecond().size());
        logger.error("Failed tables : " + results.getSecond());
        logger.info("Active coprocessor jar: " + hdfsCoprocessorJar);
    } finally {
        if (hbaseAdmin != null) {
            hbaseAdmin.close();
        }
    }
}