Java Code Examples for org.apache.kylin.metadata.model.TableDesc#getIdentity()

The following examples show how to use org.apache.kylin.metadata.model.TableDesc#getIdentity(). Each snippet is taken from the Apache Kylin project (or its kylin-on-parquet-v2 variant); the source file and license are noted above each example.
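Across these snippets, getIdentity() serves as the table's fully qualified name in DATABASE.TABLE form: Example 2 upper-cases database and table name separately before a metastore lookup, and Example 17 compares the identity against "EDW.TEST_SELLER_TYPE_DIM". A minimal sketch of that contract, assuming TableDesc's setDatabase/setName mutators (hypothetical setup, not taken from the examples below):

import org.apache.kylin.metadata.model.TableDesc;

public class TableIdentityDemo {
    public static void main(String[] args) {
        // Hypothetical setup: setDatabase/setName are assumed mutators.
        TableDesc desc = new TableDesc();
        desc.setDatabase("default");
        desc.setName("test_kylin_fact");

        // The examples below treat the identity as the upper-cased
        // DATABASE.TABLE pair, e.g. DEFAULT.TEST_KYLIN_FACT.
        System.out.println(desc.getIdentity());
    }
}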
Example 1
Source File: TableSchemaUpdateChecker.java    From kylin-on-parquet-v2 with Apache License 2.0
private void checkValidationInModel(TableDesc newTableDesc, List<String> issues, DataModelDesc usedModel) {
    final String fullTableName = newTableDesc.getIdentity();
    // if user reloads a fact table used by model, then all used columns must match current schema
    if (usedModel.isFactTable(fullTableName)) {
        TableDesc factTable = usedModel.findFirstTable(fullTableName).getTableDesc();
        List<String> violateColumns = checkAllColumnsInFactTable(usedModel, factTable, newTableDesc);
        if (!violateColumns.isEmpty()) {
            issues.add(format(Locale.ROOT, "Column %s used in model[%s], but changed " + "in hive",
                    violateColumns, usedModel.getName()));
        }
    }

    // if user reloads a lookup table used by a cube, only appending column(s) is allowed; all existing columns
    // must stay the same (except compatible type changes)
    if (usedModel.isLookupTable(fullTableName)) {
        TableDesc lookupTable = usedModel.findFirstTable(fullTableName).getTableDesc();
        if (!checkAllColumnsInTableDesc(lookupTable, newTableDesc)) {
            issues.add(format(Locale.ROOT, "Table '%s' is used as Lookup Table in model[%s], but "
                            + "changed in " + "hive, only append operation are supported on hive table as lookup table",
                    lookupTable.getIdentity(), usedModel.getName()));
        }
    }
}
 
Example 2
Source File: TableService.java    From kylin with Apache License 2.0
public void checkHiveTableCompatibility(String prj, TableDesc tableDesc) throws Exception {
    Preconditions.checkNotNull(tableDesc.getDatabase());
    Preconditions.checkNotNull(tableDesc.getName());

    String database = tableDesc.getDatabase().toUpperCase(Locale.ROOT);
    String tableName = tableDesc.getName().toUpperCase(Locale.ROOT);
    ProjectInstance projectInstance = getProjectManager().getProject(prj);
    ISourceMetadataExplorer explr = SourceManager.getSource(projectInstance).getSourceMetadataExplorer();

    TableDesc hiveTableDesc;
    try {
        Pair<TableDesc, TableExtDesc> pair = explr.loadTableMetadata(database, tableName, prj);
        hiveTableDesc = pair.getFirst();
    } catch (Exception e) {
        logger.error("Fail to get metadata for hive table {} due to ", tableDesc.getIdentity(), e);
        throw new RuntimeException("Fail to get metadata for hive table " + tableDesc.getIdentity());
    }

    TableSchemaUpdateChecker.CheckResult result = getSchemaUpdateChecker().allowMigrate(tableDesc, hiveTableDesc);
    result.raiseExceptionWhenInvalid();
}
 
Example 3
Source File: SnapshotManager.java    From kylin with Apache License 2.0
public SnapshotTable rebuildSnapshot(IReadableTable table, TableDesc tableDesc, String overwriteUUID)
        throws IOException {
    SnapshotTable snapshot = new SnapshotTable(table, tableDesc.getIdentity());
    snapshot.setUuid(overwriteUUID);
    snapshot.takeSnapshot(table, tableDesc);

    try {
        SnapshotTable existing = getSnapshotTable(snapshot.getResourcePath());
        snapshot.setLastModified(existing.getLastModified());
    } catch (Exception ex) {
        logger.error("Error reading {}, delete it and save rebuild", snapshot.getResourcePath(), ex);
    }

    save(snapshot);
    snapshotCache.put(snapshot.getResourcePath(), snapshot);

    return snapshot;
}
 
Example 4
Source File: DeployUtil.java    From Kylin with Apache License 2.0
private static String[] generateCreateTableHql(TableDesc tableDesc) {
    String dropSql = "DROP TABLE IF EXISTS " + tableDesc.getIdentity();

    StringBuilder ddl = new StringBuilder();
    ddl.append("CREATE TABLE ").append(tableDesc.getIdentity()).append("\n");
    ddl.append("(\n");

    for (int i = 0; i < tableDesc.getColumns().length; i++) {
        ColumnDesc col = tableDesc.getColumns()[i];
        if (i > 0) {
            ddl.append(",");
        }
        ddl.append(col.getName()).append(" ").append(SqlHiveDataTypeMapping.getHiveDataType(col.getDatatype())).append("\n");
    }

    ddl.append(")\n");
    ddl.append("ROW FORMAT DELIMITED FIELDS TERMINATED BY ','\n");
    ddl.append("STORED AS TEXTFILE");

    return new String[] { dropSql, ddl.toString() };
}
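For a hypothetical table DEFAULT.TEST_KYLIN_FACT with columns TRANS_ID (bigint) and PRICE (decimal), the method above would return roughly the following pair of statements, with getIdentity() supplying the DATABASE.TABLE name in both (illustrative output, not from the Kylin sources):

// statements[0]:
//   DROP TABLE IF EXISTS DEFAULT.TEST_KYLIN_FACT
//
// statements[1]:
//   CREATE TABLE DEFAULT.TEST_KYLIN_FACT
//   (
//   TRANS_ID bigint
//   ,PRICE decimal
//   )
//   ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
//   STORED AS TEXTFILE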
 
Example 5
Source File: TableSchemaUpdateChecker.java    From kylin with Apache License 2.0
private void checkValidationInCube(TableDesc newTableDesc, List<String> issues, CubeInstance cube) {
    final String fullTableName = newTableDesc.getIdentity();
    String modelName = cube.getModel().getName();
    // if user reloads a fact table used by cube, then all used columns must match current schema
    if (cube.getModel().isFactTable(fullTableName)) {
        TableDesc factTable = cube.getModel().findFirstTable(fullTableName).getTableDesc();
        List<String> violateColumns = checkAllColumnsInCube(cube, factTable, newTableDesc);
        if (!violateColumns.isEmpty()) {
            issues.add(format(Locale.ROOT, "Column %s used in cube[%s] and model[%s], but changed " + "in hive",
                    violateColumns, cube.getName(), modelName));
        }
    }

    // if user reloads a lookup table used by a cube, only appending column(s) is allowed; all existing columns
    // must stay the same (except compatible type changes)
    if (cube.getModel().isLookupTable(fullTableName)) {
        TableDesc lookupTable = cube.getModel().findFirstTable(fullTableName).getTableDesc();
        if (!checkAllColumnsInTableDesc(lookupTable, newTableDesc)) {
            issues.add(format(Locale.ROOT, "Table '%s' is used as Lookup Table in cube[%s] and model[%s], but "
                            + "changed in " + "hive, only append operation are supported on hive table as lookup table",
                    lookupTable.getIdentity(), cube.getName(), modelName));
        }
    }
}
 
Example 6
Source File: TableSchemaUpdateChecker.java    From kylin with Apache License 2.0
private void checkValidationInModel(TableDesc newTableDesc, List<String> issues, DataModelDesc usedModel) {
    List<String> violateColumns = Lists.newArrayList();

    final String fullTableName = newTableDesc.getIdentity();
    if (usedModel.isFactTable(fullTableName)) {
        TableDesc factTable = usedModel.findFirstTable(fullTableName).getTableDesc();
        violateColumns.addAll(checkAllColumnsInFactTable(usedModel, factTable, newTableDesc));
    }

    if (usedModel.isLookupTable(fullTableName)) {
        violateColumns.addAll(checkAllColumnsInLookupTable(usedModel, newTableDesc));
    }

    if (!violateColumns.isEmpty()) {
        issues.add(format(Locale.ROOT, "Column %s used in model[%s], but not exist " + "in hive", violateColumns,
                usedModel.getName()));
    }
}
 
Example 7
Source File: TableSchemaUpdateChecker.java    From kylin-on-parquet-v2 with Apache License 2.0
public CheckResult allowReload(TableDesc newTableDesc, String prj) {
    final String fullTableName = newTableDesc.getIdentity();

    TableDesc existing = metadataManager.getTableDesc(fullTableName, prj);
    if (existing == null) {
        return CheckResult.validOnFirstLoad(fullTableName);
    }
    List<String> issues = Lists.newArrayList();

    for (DataModelDesc usedModel : findModelByTable(newTableDesc, prj)) {
        checkValidationInModel(newTableDesc, issues, usedModel);
    }

    for (CubeInstance cube : findCubeByTable(newTableDesc)) {
        checkValidationInCube(newTableDesc, issues, cube);
    }

    if (issues.isEmpty()) {
        return CheckResult.validOnCompatibleSchema(fullTableName);
    }
    return CheckResult.invalidOnIncompatibleSchema(fullTableName, issues);
}
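The returned CheckResult is consumed just like the allowMigrate result in Example 2; a hypothetical caller (checker and projectName are illustrative names):

TableSchemaUpdateChecker.CheckResult result = checker.allowReload(newTableDesc, projectName);
result.raiseExceptionWhenInvalid(); // throws when the reloaded schema is incompatible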
 
Example 8
Source File: SnapshotManager.java    From kylin-on-parquet-v2 with Apache License 2.0
public SnapshotTable rebuildSnapshot(IReadableTable table, TableDesc tableDesc, String overwriteUUID)
        throws IOException {
    SnapshotTable snapshot = new SnapshotTable(table, tableDesc.getIdentity());
    snapshot.setUuid(overwriteUUID);
    snapshot.takeSnapshot(table, tableDesc);

    try {
        SnapshotTable existing = getSnapshotTable(snapshot.getResourcePath());
        snapshot.setLastModified(existing.getLastModified());
    } catch (Exception ex) {
        logger.error("Error reading {}, delete it and save rebuild", snapshot.getResourcePath(), ex);
    }

    save(snapshot);
    snapshotCache.put(snapshot.getResourcePath(), snapshot);

    return snapshot;
}
 
Example 9
Source File: MetadataManager.java    From Kylin with Apache License 2.0
public String appendDBName(String table) {
    if (table.indexOf(".") > 0)
        return table;

    Map<String, TableDesc> map = getAllTablesMap();

    int count = 0;
    String result = null;
    for (TableDesc t : map.values()) {
        if (t.getName().equalsIgnoreCase(table)) {
            result = t.getIdentity();
            count++;
        }
    }

    if (count == 1)
        return result;

    if (count > 1) {
        logger.warn("There is more than one table named '" + table + "' across databases; the program cannot decide which one, so it picks '" + result + "'");
    }
    return result;
}
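A hypothetical usage sketch, assuming the metadata store holds exactly one table named TEST_KYLIN_FACT, in database DEFAULT:

// Resolves a bare table name to its full identity via getIdentity().
String qualified = metadataManager.appendDBName("TEST_KYLIN_FACT");
// -> "DEFAULT.TEST_KYLIN_FACT"

// Names that already carry a database pass through unchanged.
String untouched = metadataManager.appendDBName("EDW.TEST_CAL_DT");
// -> "EDW.TEST_CAL_DT"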
 
Example 10
Source File: SnapshotManager.java    From kylin-on-parquet-v2 with Apache License 2.0
public SnapshotTable buildSnapshot(IReadableTable table, TableDesc tableDesc, KylinConfig cubeConfig)
        throws IOException {
    SnapshotTable snapshot = new SnapshotTable(table, tableDesc.getIdentity());
    snapshot.updateRandomUuid();

    synchronized (getConcurrentObject(tableDesc.getIdentity())) {
        SnapshotTable reusableSnapshot = getReusableSnapShot(table, snapshot, tableDesc, cubeConfig);
        if (reusableSnapshot != null)
            return updateDictLastModifiedTime(reusableSnapshot.getResourcePath());

        snapshot.takeSnapshot(table, tableDesc);
        return trySaveNewSnapshot(snapshot);
    }
}
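The synchronized block above keys on one canonical object per table identity, so concurrent builds of a snapshot for the same table serialize while different tables proceed in parallel. A minimal sketch of what a helper like getConcurrentObject could look like (an assumption for illustration; Kylin's actual implementation may differ):

// Sketch only: map each table identity to a single lock object so all
// threads building a snapshot of the same table synchronize on one monitor.
private static final java.util.concurrent.ConcurrentMap<String, Object> LOCKS =
        new java.util.concurrent.ConcurrentHashMap<>();

private static Object getConcurrentObject(String tableIdentity) {
    return LOCKS.computeIfAbsent(tableIdentity, k -> new Object());
}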
 
Example 11
Source File: CubeMetadataUpgrade.java    From Kylin with Apache License 2.0
public String appendDBName(String table) {
    if (table.indexOf(".") > 0)
        return table;

    Map<String, TableDesc> map = this.getMetadataManager().getAllTablesMap();

    int count = 0;
    String result = null;
    for (TableDesc t : map.values()) {
        if (t.getName().equalsIgnoreCase(table)) {
            result = t.getIdentity();
            count++;
        }
    }

    if (count == 1)
        return result;

    if (count > 1) {
        errorMsgs.add("There is more than one table named '" + table + "' across databases; the program cannot decide which one, so it picks '" + result + "'");
    }

    if (count == 0) {
        errorMsgs.add("There is no table named '" + table + "'");
    }

    return result;
}
 
Example 12
Source File: SnapshotManager.java    From kylin with Apache License 2.0
public SnapshotTable buildSnapshot(IReadableTable table, TableDesc tableDesc, KylinConfig cubeConfig)
        throws IOException {
    SnapshotTable snapshot = new SnapshotTable(table, tableDesc.getIdentity());
    snapshot.updateRandomUuid();

    synchronized (getConcurrentObject(tableDesc.getIdentity())) {
        SnapshotTable reusableSnapshot = getReusableSnapShot(table, snapshot, tableDesc, cubeConfig);
        if (reusableSnapshot != null)
            return updateDictLastModifiedTime(reusableSnapshot.getResourcePath());

        snapshot.takeSnapshot(table, tableDesc);
        return trySaveNewSnapshot(snapshot);
    }
}
 
Example 13
Source File: StreamingV2Controller.java    From kylin-on-parquet-v2 with Apache License 2.0
private void validateInput(TableDesc tableDesc, StreamingSourceConfig streamingSourceConfig) {
    if (StringUtils.isEmpty(tableDesc.getIdentity()) || StringUtils.isEmpty(streamingSourceConfig.getName())) {
        logger.error("streamingSourceConfig name should not be empty.");
        throw new BadRequestException("streamingSourceConfig name should not be empty.");
    }

    // validate the compatibility between the input table schema and the underlying hive table schema
    if (tableDesc.getSourceType() == ISourceAware.ID_KAFKA_HIVE) {
        List<FieldSchema> fields;
        String db = tableDesc.getDatabase();
        try {
            HiveMetaStoreClient metaStoreClient = new HiveMetaStoreClient(new HiveConf());
            fields = metaStoreClient.getFields(db, tableDesc.getName());
            logger.info("Checking the {} in {}", tableDesc.getName(), db);
        } catch (NoSuchObjectException noObjectException) {
            logger.info("table not exist in hive meta store for table:" + tableDesc.getIdentity(),
                    noObjectException);
            throw new BadRequestException(
                    "table doesn't exist in hive meta store for table:" + tableDesc.getIdentity(),
                    ResponseCode.CODE_UNDEFINED, noObjectException);
        } catch (Exception e) {
            logger.error("error when get metadata from hive meta store for table:" + tableDesc.getIdentity(), e);
            throw new BadRequestException("error when connect hive meta store", ResponseCode.CODE_UNDEFINED, e);
        }
        // check the data type compatibility for each column
        Map<String, FieldSchema> fieldSchemaMap = Maps.newHashMap();
        for (FieldSchema field : fields) {
            fieldSchemaMap.put(field.getName().toUpperCase(Locale.ROOT), field);
        }
        List<String> incompatibleMsgs = Lists.newArrayList();
        for (ColumnDesc columnDesc : tableDesc.getColumns()) {
            FieldSchema fieldSchema = fieldSchemaMap.get(columnDesc.getName().toUpperCase(Locale.ROOT));
            if (fieldSchema == null) {
                // Partition column cannot be fetched via Hive Metadata API.
                if (!TimeDerivedColumnType.isTimeDerivedColumn(columnDesc.getName())) {
                    incompatibleMsgs.add("Column not exist in hive table:" + columnDesc.getName());
                    continue;
                } else {
                    logger.info("Column not exist in hive table: {}.", columnDesc.getName());
                    continue;
                }
            }
            if (!checkHiveTableFieldCompatible(fieldSchema, columnDesc)) {
                String msg = String.format(Locale.ROOT,
                        "column:%s defined in hive type:%s is incompatible with the column definition:%s",
                        columnDesc.getName(), fieldSchema.getType(), columnDesc.getDatatype());
                incompatibleMsgs.add(msg);
            }
        }
        if (!incompatibleMsgs.isEmpty()) {
            logger.info("incompatible for hive and input table schema:{}", incompatibleMsgs);
            throw new BadRequestException(
                    "incompatible for hive schema and input table schema:" + incompatibleMsgs);
        }
    }
}
 
Example 14
Source File: H2Database.java    From Kylin with Apache License 2.0
private void loadH2Table(String tableName, String joinType) throws SQLException {
    MetadataManager metaMgr = MetadataManager.getInstance(config);
    TableDesc tableDesc = metaMgr.getTableDesc(tableName.toUpperCase());
    File tempFile = null;

    String fileNameSuffix = joinType.equalsIgnoreCase("default") ? "" : "." + joinType;

    try {
        tempFile = File.createTempFile("tmp_h2", ".csv");
        FileOutputStream tempFileStream = new FileOutputStream(tempFile);
        String normalPath = "/data/" + tableDesc.getIdentity() + ".csv";

        // If it's the fact table, there will be a facttable.csv.inner or
        // facttable.csv.left in hbase
        // otherwise just use lookup.csv
        InputStream csvStream = metaMgr.getStore().getResource(normalPath + fileNameSuffix);
        if (csvStream == null) {
            csvStream = metaMgr.getStore().getResource(normalPath);
        } else {
            logger.info("H2 decides to load " + (normalPath + fileNameSuffix) + " for table " + tableDesc.getIdentity());
        }

        org.apache.commons.io.IOUtils.copy(csvStream, tempFileStream);

        csvStream.close();
        tempFileStream.close();

    } catch (IOException e) {
        logger.error("Failed to copy CSV data for H2 table " + tableName, e);
    }

    String csvFilePath = tempFile.getPath();
    Statement stmt = h2Connection.createStatement();

    String createDBSql = "CREATE SCHEMA IF NOT EXISTS DEFAULT;\nCREATE SCHEMA IF NOT EXISTS EDW;\nSET SCHEMA DEFAULT;\n";
    stmt.executeUpdate(createDBSql);

    String sql = generateCreateH2TableSql(tableDesc, csvFilePath);
    stmt.executeUpdate(sql);

    if (tempFile != null)
        tempFile.delete();
}
 
Example 15
Source File: CubeMetaIngester.java    From kylin with Apache License 2.0
private void checkAndMark(TableMetadataManager srcMetadataManager, DataModelManager srcModelManager, HybridManager srcHybridManager, CubeManager srcCubeManager, CubeDescManager srcCubeDescManager) {
    if (srcHybridManager.listHybridInstances().size() > 0) {
        throw new IllegalStateException("Does not support ingest hybrid yet");
    }

    ProjectManager projectManager = ProjectManager.getInstance(kylinConfig);
    ProjectInstance targetProject = projectManager.getProject(targetProjectName);
    if (targetProject == null) {
        throw new IllegalStateException("Target project does not exist in target metadata: " + targetProjectName);
    }

    TableMetadataManager metadataManager = TableMetadataManager.getInstance(kylinConfig);
    for (TableDesc tableDesc : srcMetadataManager.listAllTables(null)) {
        TableDesc existing = metadataManager.getTableDesc(tableDesc.getIdentity(), targetProjectName);
        if (existing != null && !existing.equals(tableDesc)) {
            logger.info("Table {} already has a different version in target metadata store", tableDesc.getIdentity());
            logger.info("Existing version: {}", existing);
            logger.info("New version: {}", tableDesc);

            if (!forceIngest && !overwriteTables) {
                throw new IllegalStateException("table already exists with a different version: " + tableDesc.getIdentity() + ". Consider adding -overwriteTables option to force overwriting (with caution)");
            } else {
                logger.warn("Overwriting the old table desc: {}", tableDesc.getIdentity());
            }
        }
        requiredResources.add(tableDesc.getResourcePath());
    }

    DataModelManager modelManager = DataModelManager.getInstance(kylinConfig);
    for (DataModelDesc dataModelDesc : srcModelManager.listDataModels()) {
        checkExesting(modelManager.getDataModelDesc(dataModelDesc.getName()), "model", dataModelDesc.getName());
        requiredResources.add(DataModelDesc.concatResourcePath(dataModelDesc.getName()));
    }

    CubeDescManager cubeDescManager = CubeDescManager.getInstance(kylinConfig);
    for (CubeDesc cubeDesc : srcCubeDescManager.listAllDesc()) {
        checkExesting(cubeDescManager.getCubeDesc(cubeDesc.getName()), "cube desc", cubeDesc.getName());
        requiredResources.add(CubeDesc.concatResourcePath(cubeDesc.getName()));
    }

    CubeManager cubeManager = CubeManager.getInstance(kylinConfig);
    for (CubeInstance cube : srcCubeManager.listAllCubes()) {
        checkExesting(cubeManager.getCube(cube.getName()), "cube", cube.getName());
        requiredResources.add(CubeInstance.concatResourcePath(cube.getName()));
    }

}
 
Example 16
Source File: ModelDataGenerator.java    From kylin with Apache License 2.0
private String path(TableDesc table) {
    return outputPath + "/" + table.getIdentity() + ".csv";
}
 
Example 17
Source File: H2Database.java    From kylin with Apache License 2.0
private String path(TableDesc tableDesc) {
    if ("EDW.TEST_SELLER_TYPE_DIM".equals(tableDesc.getIdentity())) // it is a view of table below
        return "/data/" + "EDW.TEST_SELLER_TYPE_DIM_TABLE" + ".csv";
    else
        return "/data/" + tableDesc.getIdentity() + ".csv";
}
 
Example 18
Source File: ModelDataGenerator.java    From kylin-on-parquet-v2 with Apache License 2.0
private String path(TableDesc table) {
    return outputPath + "/" + table.getIdentity() + ".csv";
}
 
Example 19
Source File: StreamingV2Controller.java    From kylin with Apache License 2.0
private void validateInput(TableDesc tableDesc, StreamingSourceConfig streamingSourceConfig) {
    if (StringUtils.isEmpty(tableDesc.getIdentity()) || StringUtils.isEmpty(streamingSourceConfig.getName())) {
        logger.error("streamingSourceConfig name should not be empty.");
        throw new BadRequestException("streamingSourceConfig name should not be empty.");
    }

    // validate the compatibility between the input table schema and the underlying hive table schema
    if (tableDesc.getSourceType() == ISourceAware.ID_KAFKA_HIVE) {
        List<FieldSchema> fields;
        String db = tableDesc.getDatabase();
        try {
            HiveMetaStoreClient metaStoreClient = new HiveMetaStoreClient(new HiveConf());
            fields = metaStoreClient.getFields(db, tableDesc.getName());
            logger.info("Checking the {} in {}", tableDesc.getName(), db);
        } catch (NoSuchObjectException noObjectException) {
            logger.info("table not exist in hive meta store for table:" + tableDesc.getIdentity(),
                    noObjectException);
            throw new BadRequestException(
                    "table doesn't exist in hive meta store for table:" + tableDesc.getIdentity(),
                    ResponseCode.CODE_UNDEFINED, noObjectException);
        } catch (Exception e) {
            logger.error("error when get metadata from hive meta store for table:" + tableDesc.getIdentity(), e);
            throw new BadRequestException("error when connect hive meta store", ResponseCode.CODE_UNDEFINED, e);
        }
        // check the data type compatibility for each column
        Map<String, FieldSchema> fieldSchemaMap = Maps.newHashMap();
        for (FieldSchema field : fields) {
            fieldSchemaMap.put(field.getName().toUpperCase(Locale.ROOT), field);
        }
        List<String> incompatibleMsgs = Lists.newArrayList();
        for (ColumnDesc columnDesc : tableDesc.getColumns()) {
            FieldSchema fieldSchema = fieldSchemaMap.get(columnDesc.getName().toUpperCase(Locale.ROOT));
            if (fieldSchema == null) {
                // Partition column cannot be fetched via Hive Metadata API.
                if (!TimeDerivedColumnType.isTimeDerivedColumn(columnDesc.getName())) {
                    incompatibleMsgs.add("Column not exist in hive table:" + columnDesc.getName());
                    continue;
                } else {
                    logger.info("Column not exist in hive table: {}.", columnDesc.getName());
                    continue;
                }
            }
            if (!checkHiveTableFieldCompatible(fieldSchema, columnDesc)) {
                String msg = String.format(Locale.ROOT,
                        "column:%s defined in hive type:%s is incompatible with the column definition:%s",
                        columnDesc.getName(), fieldSchema.getType(), columnDesc.getDatatype());
                incompatibleMsgs.add(msg);
            }
        }
        if (!incompatibleMsgs.isEmpty()) {
            logger.info("incompatible for hive and input table schema:{}", incompatibleMsgs);
            throw new BadRequestException(
                    "incompatible for hive schema and input table schema:" + incompatibleMsgs);
        }
    }
}
 
Example 20
Source File: CubeMetaIngester.java    From kylin-on-parquet-v2 with Apache License 2.0
private void checkAndMark(TableMetadataManager srcMetadataManager, DataModelManager srcModelManager, HybridManager srcHybridManager, CubeManager srcCubeManager, CubeDescManager srcCubeDescManager) {
    if (srcHybridManager.listHybridInstances().size() > 0) {
        throw new IllegalStateException("Does not support ingest hybrid yet");
    }

    ProjectManager projectManager = ProjectManager.getInstance(kylinConfig);
    ProjectInstance targetProject = projectManager.getProject(targetProjectName);
    if (targetProject == null) {
        throw new IllegalStateException("Target project does not exist in target metadata: " + targetProjectName);
    }

    TableMetadataManager metadataManager = TableMetadataManager.getInstance(kylinConfig);
    for (TableDesc tableDesc : srcMetadataManager.listAllTables(null)) {
        TableDesc existing = metadataManager.getTableDesc(tableDesc.getIdentity(), targetProjectName);
        if (existing != null && !existing.equals(tableDesc)) {
            logger.info("Table {} already has a different version in target metadata store", tableDesc.getIdentity());
            logger.info("Existing version: {}", existing);
            logger.info("New version: {}", tableDesc);

            if (!forceIngest && !overwriteTables) {
                throw new IllegalStateException("table already exists with a different version: " + tableDesc.getIdentity() + ". Consider adding -overwriteTables option to force overwriting (with caution)");
            } else {
                logger.warn("Overwriting the old table desc: {}", tableDesc.getIdentity());
            }
        }
        requiredResources.add(tableDesc.getResourcePath());
    }

    DataModelManager modelManager = DataModelManager.getInstance(kylinConfig);
    for (DataModelDesc dataModelDesc : srcModelManager.listDataModels()) {
        checkExesting(modelManager.getDataModelDesc(dataModelDesc.getName()), "model", dataModelDesc.getName());
        requiredResources.add(DataModelDesc.concatResourcePath(dataModelDesc.getName()));
    }

    CubeDescManager cubeDescManager = CubeDescManager.getInstance(kylinConfig);
    for (CubeDesc cubeDesc : srcCubeDescManager.listAllDesc()) {
        checkExesting(cubeDescManager.getCubeDesc(cubeDesc.getName()), "cube desc", cubeDesc.getName());
        requiredResources.add(CubeDesc.concatResourcePath(cubeDesc.getName()));
    }

    CubeManager cubeManager = CubeManager.getInstance(kylinConfig);
    for (CubeInstance cube : srcCubeManager.listAllCubes()) {
        checkExesting(cubeManager.getCube(cube.getName()), "cube", cube.getName());
        requiredResources.add(CubeInstance.concatResourcePath(cube.getName()));
    }

}