Java Code Examples for org.apache.sqoop.model.MJob#getDriverConfig()

The following examples show how to use org.apache.sqoop.model.MJob#getDriverConfig().
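All of the examples share one pattern: after the FROM and TO connector configs are filled in, MJob#getDriverConfig() returns the driver-side MDriverConfig, whose throttling inputs control the number of extractors and loaders. Below is a minimal sketch of that pattern, assuming a Sqoop 1.99.x-style SqoopClient; the class name, method name, client, and link IDs are illustrative placeholders, not taken from the examples.

import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.MDriverConfig;
import org.apache.sqoop.model.MJob;

public class DriverConfigSketch {
  // Hypothetical helper showing the common getDriverConfig() usage pattern.
  static MJob createThrottledJob(SqoopClient client, long fromLinkId, long toLinkId) {
    MJob job = client.createJob(fromLinkId, toLinkId);

    // Connector-specific FROM/TO configs would be filled here, as in the examples below.

    // Driver config: throttling inputs live under "throttlingConfig.*".
    MDriverConfig driverConfig = job.getDriverConfig();
    driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);
    driverConfig.getIntegerInput("throttlingConfig.numLoaders").setValue(1);

    client.saveJob(job);
    return job;
  }
}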
Example 1
Source File: JobBean.java    From sqoop-on-spark with Apache License 2.0
@SuppressWarnings("unchecked")
private JSONObject extractJob(boolean skipSensitive, MJob job) {
  JSONObject object = new JSONObject();
  object.put(ID, job.getPersistenceId());
  object.put(NAME, job.getName());
  object.put(ENABLED, job.getEnabled());
  object.put(CREATION_USER, job.getCreationUser());
  object.put(CREATION_DATE, job.getCreationDate().getTime());
  object.put(UPDATE_USER, job.getLastUpdateUser());
  object.put(UPDATE_DATE, job.getLastUpdateDate().getTime());
  // job link associated connectors
  // TODO(SQOOP-1634): fix not to require the connectorIds in the post data
  object.put(FROM_CONNECTOR_ID, job.getConnectorId(Direction.FROM));
  object.put(TO_CONNECTOR_ID, job.getConnectorId(Direction.TO));
  // job associated links
  object.put(FROM_LINK_ID, job.getLinkId(Direction.FROM));
  object.put(TO_LINK_ID, job.getLinkId(Direction.TO));
  // job configs
  MFromConfig fromConfigList = job.getFromJobConfig();
  object.put(FROM_CONFIG_VALUES,
      extractConfigList(fromConfigList.getConfigs(), fromConfigList.getType(), skipSensitive));
  MToConfig toConfigList = job.getToJobConfig();
  object.put(TO_CONFIG_VALUES,
      extractConfigList(toConfigList.getConfigs(), toConfigList.getType(), skipSensitive));
  MDriverConfig driverConfigList = job.getDriverConfig();
  object.put(
      DRIVER_CONFIG_VALUES,
      extractConfigList(driverConfigList.getConfigs(), driverConfigList.getType(),
          skipSensitive));

  return object;
}
 
Example 2
Source File: FromRDBMSToKiteHiveTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testCities() throws Exception {
  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), kiteLink.getPersistenceId());

  // Set rdbms "FROM" config
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fillRdbmsFromConfig(job, "id");
  // TODO: Kite has trouble with some data types, so we're limiting the columns to int only
  fromConfig.getStringInput("fromJobConfig.columns").setValue(provider.escapeColumnName("id"));

  // Fill the Kite "TO" config
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getStringInput("toJobConfig.uri").setValue("dataset:hive:testtable");
  toConfig.getEnumInput("toJobConfig.fileFormat").setValue(FileFormat.AVRO);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);

  executeJob(job);

  // Assert correct output
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 1}, "1");
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 2}, "2");
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 3}, "3");
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 4}, "4");

  hiveProvider.dropTable(new TableName("testtable"));
}
 
Example 3
Source File: FromHDFSToKafkaTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testBasic() throws Exception {
  createHdfsFromFile("input-0001",input);

  // Create Kafka link
  MLink kafkaLink = getClient().createLink("kafka-connector");
  fillKafkaLinkConfig(kafkaLink);
  saveLink(kafkaLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(hdfsLink.getPersistenceId(), kafkaLink.getPersistenceId());

  // Job connector configs
  fillHdfsFromConfig(job);
  fillKafkaToConfig(job);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);
  saveJob(job);

  executeJob(job);

  // this will assert the content of the array matches the content of the topic
  validateContent(input);
}
 
Example 4
Source File: FromRDBMSToKafkaTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testBasic() throws Exception {
  createAndLoadTableCities();

  // Kafka link
  MLink kafkaLink = getClient().createLink("kafka-connector");
  fillKafkaLinkConfig(kafkaLink);
  saveLink(kafkaLink);

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), kafkaLink.getPersistenceId());

  // set rdbms "FROM" job config
  fillRdbmsFromConfig(job, "id");

  // set Kafka  "TO" job config
  fillKafkaToConfig(job);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);
  saveJob(job);

  executeJob(job);

  // this will assert the content of the array matches the content of the topic
  validateContent(input);
}
 
Example 5
Source File: SqoopJDBCKafkaJob.java    From sqoop-on-spark with Apache License 2.0
public static void main(String[] args) throws Exception {

    final SqoopSparkJob sparkJob = new SqoopSparkJob();
    CommandLine cArgs = SqoopSparkJob.parseArgs(createOptions(), args);
    SparkConf conf = sparkJob.init(cArgs);
    JavaSparkContext context = new JavaSparkContext(conf);

    MConnector fromConnector = RepositoryManager.getInstance().getRepository()
        .findConnector("generic-jdbc-connector");

    MLinkConfig fromLinkConfig = fromConnector.getLinkConfig();
    MLink fromLink = new MLink(fromConnector.getPersistenceId(), fromLinkConfig);
    fromLink.setName("jdbcLink-" + System.currentTimeMillis());

    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.jdbcDriver")
        .setValue("com.mysql.jdbc.Driver");

    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.connectionString")
        .setValue(cArgs.getOptionValue("jdbcString"));
    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.username")
        .setValue(cArgs.getOptionValue("u"));
    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.password")
        .setValue(cArgs.getOptionValue("p"));

    RepositoryManager.getInstance().getRepository().createLink(fromLink);

    MConnector toConnector = RepositoryManager.getInstance().getRepository()
        .findConnector("kafka-connector");

    MLinkConfig toLinkConfig = toConnector.getLinkConfig();

    MLink toLink = new MLink(toConnector.getPersistenceId(), toLinkConfig);
    toLink.setName("kafkaLink-" + System.currentTimeMillis());

    toLink.getConnectorLinkConfig().getStringInput("linkConfig.brokerList")
        .setValue(cArgs.getOptionValue("broker"));
    toLink.getConnectorLinkConfig().getStringInput("linkConfig.zookeeperConnect")
        .setValue(cArgs.getOptionValue("zk"));

    RepositoryManager.getInstance().getRepository().createLink(toLink);

    MFromConfig fromJobConfig = fromConnector.getFromConfig();
    MToConfig toJobConfig = toConnector.getToConfig();

    MJob sqoopJob = new MJob(fromConnector.getPersistenceId(), toConnector.getPersistenceId(),
        fromLink.getPersistenceId(), toLink.getPersistenceId(), fromJobConfig, toJobConfig, Driver
            .getInstance().getDriver().getDriverConfig());
    // jdbc configs
    MFromConfig fromConfig = sqoopJob.getFromJobConfig();
    fromConfig.getStringInput("fromJobConfig.tableName").setValue(cArgs.getOptionValue("table"));
    fromConfig.getStringInput("fromJobConfig.partitionColumn").setValue(cArgs.getOptionValue("partitionCol"));
    // kafka configs
    MToConfig toConfig = sqoopJob.getToJobConfig();
    toConfig.getStringInput("toJobConfig.topic").setValue("test-spark-topic");

    MDriverConfig driverConfig = sqoopJob.getDriverConfig();
    if (cArgs.getOptionValue("numE") != null) {
      driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(
          Integer.valueOf(cArgs.getOptionValue("numE")));
    }
    if (cArgs.getOptionValue("numL") != null) {

      driverConfig.getIntegerInput("throttlingConfig.numLoaders").setValue(
          Integer.valueOf(cArgs.getOptionValue("numL")));
    }    RepositoryManager.getInstance().getRepository().createJob(sqoopJob);
    sparkJob.setJob(sqoopJob);
    sparkJob.execute(conf, context);
  }
 
Example 6
Source File: SqoopJDBCHDFSJob.java    From sqoop-on-spark with Apache License 2.0
public static void main(String[] args) throws Exception {

    final SqoopSparkJob sparkJob = new SqoopSparkJob();
    CommandLine cArgs = SqoopSparkJob.parseArgs(createOptions(), args);
    SparkConf conf = sparkJob.init(cArgs);
    JavaSparkContext context = new JavaSparkContext(conf);

    MConnector fromConnector = RepositoryManager.getInstance().getRepository()
        .findConnector("generic-jdbc-connector");
    MConnector toConnector = RepositoryManager.getInstance().getRepository()
        .findConnector("hdfs-connector");

    MLinkConfig fromLinkConfig = fromConnector.getLinkConfig();
    MLinkConfig toLinkConfig = toConnector.getLinkConfig();

    MLink fromLink = new MLink(fromConnector.getPersistenceId(), fromLinkConfig);
    fromLink.setName("jdbcLink-" + System.currentTimeMillis());
    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.jdbcDriver")
        .setValue("com.mysql.jdbc.Driver");

    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.connectionString")
        .setValue(cArgs.getOptionValue("jdbcString"));
    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.username")
        .setValue(cArgs.getOptionValue("u"));
    fromLink.getConnectorLinkConfig().getStringInput("linkConfig.password")
        .setValue(cArgs.getOptionValue("p"));
    RepositoryManager.getInstance().getRepository().createLink(fromLink);

    MLink toLink = new MLink(toConnector.getPersistenceId(), toLinkConfig);
    toLink.setName("hdfsLink-" + System.currentTimeMillis());
    toLink.getConnectorLinkConfig().getStringInput("linkConfig.confDir")
        .setValue(cArgs.getOptionValue("outputDir"));
    RepositoryManager.getInstance().getRepository().createLink(toLink);

    MFromConfig fromJobConfig = fromConnector.getFromConfig();
    MToConfig toJobConfig = toConnector.getToConfig();

    MJob sqoopJob = new MJob(fromConnector.getPersistenceId(), toConnector.getPersistenceId(),
        fromLink.getPersistenceId(), toLink.getPersistenceId(), fromJobConfig, toJobConfig, Driver
            .getInstance().getDriver().getDriverConfig());

    MConfigList fromConfig = sqoopJob.getJobConfig(Direction.FROM);
    fromConfig.getStringInput("fromJobConfig.tableName").setValue(cArgs.getOptionValue("table"));
    fromConfig.getStringInput("fromJobConfig.partitionColumn").setValue(
        cArgs.getOptionValue("paritionCol"));

    MToConfig toConfig = sqoopJob.getToJobConfig();
    toConfig.getStringInput("toJobConfig.outputDirectory").setValue(
        cArgs.getOptionValue("outputDir") + System.currentTimeMillis());
    MDriverConfig driverConfig = sqoopJob.getDriverConfig();
    if (cArgs.getOptionValue("numE") != null) {
      driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(
          Integer.valueOf(cArgs.getOptionValue("numE")));
    }
    if (cArgs.getOptionValue("numL") != null) {

      driverConfig.getIntegerInput("throttlingConfig.numLoaders").setValue(
          Integer.valueOf(cArgs.getOptionValue("numL")));
    }
    RepositoryManager.getInstance().getRepository().createJob(sqoopJob);
    sparkJob.setJob(sqoopJob);
    sparkJob.execute(conf, context);
  }
 
Example 7
Source File: TableStagedRDBMSTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testStagedTransfer() throws Exception {
  final TableName stageTableName = new TableName("STAGE_" + getTableName());
  createTableCities();
  createHdfsFromFile("input-0001",
      "1,'USA','2004-10-23','San Francisco'",
      "2,'USA','2004-10-24','Sunnyvale'",
      "3,'Czech Republic','2004-10-25','Brno'",
      "4,'USA','2004-10-26','Palo Alto'"
    );
  new Cities(provider, stageTableName).createTables();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(hdfsLink.getPersistenceId(),
      rdbmsLink.getPersistenceId());

  // fill HDFS "FROM" config
  fillHdfsFromConfig(job);

  // fill rdbms "TO" config here
  fillRdbmsToConfig(job);
  MConfigList configs = job.getJobConfig(Direction.TO);
  configs.getStringInput("toJobConfig.stageTableName").setValue(provider.escapeTableName(stageTableName.getTableName()));

  // driver config
  MConfigList driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);

  saveJob(job);

  executeJob(job);

  assertEquals(0L, provider.rowCount(stageTableName));
  assertEquals(4L, provider.rowCount(getTableName()));
  assertRowInCities(1, "USA", "2004-10-23", "San Francisco");
  assertRowInCities(2, "USA", "2004-10-24", "Sunnyvale");
  assertRowInCities(3, "Czech Republic", "2004-10-25", "Brno");
  assertRowInCities(4, "USA", "2004-10-26", "Palo Alto");

  // Clean up testing table
  provider.dropTable(stageTableName);
  dropTable();
}
 
Example 8
Source File: AllTypesTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testFrom() throws Exception {
  createTable("id",
    "id", "INT",
    "value", type.name
  );

  int i = 1;
  for(ExampleValue value: type.values) {
    insertRow(false, Integer.toString(i++), value.insertStatement);
  }

  // RDBMS link
  MLink rdbmsConnection = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsConnection);
  saveLink(rdbmsConnection);

  // HDFS link
  MLink hdfsConnection = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsConnection);
  saveLink(hdfsConnection);

  // Job creation
  MJob job = getClient().createJob(rdbmsConnection.getPersistenceId(), hdfsConnection.getPersistenceId());

  // Fill rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.columns").setValue(provider.escapeColumnName("value"));

  // Fill the hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);
  executeJob(job);

  // Assert correct output
  assertHdfsTo(type.escapedStringValues());

  // Clean up testing table
  dropTable();
}
 
Example 9
Source File: AllTypesTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testTo() throws Exception {
  createTable(null,
    "value", type.name
  );

  createHdfsFromFile("input-0001", type.escapedStringValues());

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId());
  fillHdfsFromConfig(job);

  // Set the rdbms "TO" config here
  fillRdbmsToConfig(job);

  // Driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);
  executeJob(job);
  dumpTable();

  assertEquals(type.values.size(), rowCount());
  for(ExampleValue value : type.values) {
    assertRow(
      new Object[] {"value", value.insertStatement},
      false,
      value.objectValue);
  }

  // Clean up testing table
  dropTable();
}
 
Example 10
Source File: PartitionerTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testSplitter() throws Exception {
  createAndLoadTableUbuntuReleases();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  // set the rdbms "FROM" config
  fillRdbmsFromConfig(job, partitionColumn);

  // fill hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);

  // set driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(extractors);

  saveJob(job);

  executeJob(job);

  // Assert correct output
  assertHdfsToFiles((extractors > maxOutputFiles) ? maxOutputFiles : extractors);
  assertHdfsTo(
      "1,'Warty Warthog',4.10,'2004-10-20',false",
      "2,'Hoary Hedgehog',5.04,'2005-04-08',false",
      "3,'Breezy Badger',5.10,'2005-10-13',false",
      "4,'Dapper Drake',6.06,'2006-06-01',true",
      "5,'Edgy Eft',6.10,'2006-10-26',false",
      "6,'Feisty Fawn',7.04,'2007-04-19',false",
      "7,'Gutsy Gibbon',7.10,'2007-10-18',false",
      "8,'Hardy Heron',8.04,'2008-04-24',true",
      "9,'Intrepid Ibex',8.10,'2008-10-18',false",
      "10,'Jaunty Jackalope',9.04,'2009-04-23',false",
      "11,'Karmic Koala',9.10,'2009-10-29',false",
      "12,'Lucid Lynx',10.04,'2010-04-29',true",
      "13,'Maverick Meerkat',10.10,'2010-10-10',false",
      "14,'Natty Narwhal',11.04,'2011-04-28',false",
      "15,'Oneiric Ocelot',11.10,'2011-10-10',false",
      "16,'Precise Pangolin',12.04,'2012-04-26',true",
      "17,'Quantal Quetzal',12.10,'2012-10-18',false",
      "18,'Raring Ringtail',13.04,'2013-04-25',false",
      "19,'Saucy Salamander',13.10,'2013-10-17',false"
    );

  // Clean up testing table
  dropTable();
}
 
Example 11
Source File: FromRDBMSToHDFSTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testCities() throws Exception {
  createAndLoadTableCities();

  // RDBMS link
  MLink rdbmsConnection = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsConnection);
  saveLink(rdbmsConnection);

  // HDFS link
  MLink hdfsConnection = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsConnection);
  saveLink(hdfsConnection);

  // Job creation
  MJob job = getClient().createJob(rdbmsConnection.getPersistenceId(), hdfsConnection.getPersistenceId());

  // Set rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");

  // fill the hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);

  saveJob(job);

  executeJob(job);

  // Assert correct output
  assertHdfsTo(
    "1,'USA','2004-10-23','San Francisco'",
    "2,'USA','2004-10-24','Sunnyvale'",
    "3,'Czech Republic','2004-10-25','Brno'",
    "4,'USA','2004-10-26','Palo Alto'"
  );

  // Clean up testing table
  dropTable();
}
 
Example 12
Source File: FromHDFSToRDBMSTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testBasic() throws Exception {
  createHdfsFromFile("input-0001",
      "1,'USA','2004-10-23','San Francisco'",
      "2,'USA','2004-10-24','Sunnyvale'",
      "3,'Czech Republic','2004-10-25','Brno'",
      "4,'USA','2004-10-26','Palo Alto'"
  );

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId());

  // set hdfs "FROM" config for the job, since the connector test case base class only has utilities for hdfs!
  fillHdfsFromConfig(job);

  // set the rdbms "TO" config here
  fillRdbmsToConfig(job);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);
  saveJob(job);

  executeJob(job);

  assertEquals(4L, provider.rowCount(getTableName()));
  assertRowInCities(1, "USA", "2004-10-23", "San Francisco");
  assertRowInCities(2, "USA", "2004-10-24", "Sunnyvale");
  assertRowInCities(3, "Czech Republic", "2004-10-25", "Brno");
  assertRowInCities(4, "USA", "2004-10-26", "Palo Alto");
}
 
Example 13
Source File: FromRDBMSToKiteTest.java    From sqoop-on-spark with Apache License 2.0
@Test
public void testCities() throws Exception {
  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // Kite link
  MLink kiteLink = getClient().createLink("kite-connector");
  kiteLink.getConnectorLinkConfig().getStringInput("linkConfig.authority").setValue(hdfsClient.getUri().getAuthority());
  saveLink(kiteLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), kiteLink.getPersistenceId());

  // Set rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");
  // TODO: Kite has trouble with some data types, so we're limiting the columns to int only
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.columns").setValue(provider.escapeColumnName("id"));

  // Fill the Kite "TO" config
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getStringInput("toJobConfig.uri").setValue("dataset:hdfs:" + getHadoopTestDirectory());
  toConfig.getEnumInput("toJobConfig.fileFormat").setValue(FileFormat.CSV);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);

  executeJob(job);

  // Assert correct output
  assertHdfsTo(
    "\"1\"",
    "\"2\"",
    "\"3\"",
    "\"4\""
  );
}