org.apache.iceberg.Transaction Java Examples

The following examples show how to use org.apache.iceberg.Transaction. They are drawn from open-source projects; the source file and originating project are noted above each example.
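Before the examples, it helps to see the lifecycle they all share: a Transaction stages one or more table updates (appends, property changes, and so on); each update is committed into the transaction first, and nothing becomes visible to readers until commitTransaction() succeeds, at which point all staged changes are published atomically. The following is a minimal sketch of that lifecycle, assuming a HadoopCatalog; the warehouse path, table name, and data file path are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hadoop.HadoopCatalog;
import org.apache.iceberg.types.Types;

public class TransactionLifecycleSketch {
  public static void main(String[] args) {
    // Placeholder warehouse path and table identifier.
    HadoopCatalog catalog = new HadoopCatalog(new Configuration(), "/tmp/warehouse");
    TableIdentifier identifier = TableIdentifier.of("db", "events");
    Schema schema = new Schema(
        Types.NestedField.required(1, "id", Types.LongType.get()));

    // Stage a table creation plus a property update and an append.
    Transaction txn = catalog.newCreateTableTransaction(
        identifier, schema, PartitionSpec.unpartitioned());
    txn.updateProperties()
        .set("owner", "etl")
        .commit();                            // staged into the transaction

    DataFile dataFile = DataFiles.builder(PartitionSpec.unpartitioned())
        .withPath("/path/to/data-a.parquet")  // placeholder data file
        .withFileSizeInBytes(10)
        .withRecordCount(1)
        .build();
    AppendFiles append = txn.newAppend();
    append.appendFile(dataFile);
    append.commit();                          // still only staged

    txn.commitTransaction();                  // all changes become visible atomically
  }
}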
Example #1
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateTableTxnTableCreatedConcurrently() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newCreateTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());

  // create the table concurrently
  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
  Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));

  AssertHelpers.assertThrows(
      "Create table txn should fail",
      AlreadyExistsException.class,
      "Table already exists: hivedb.tbl",
      txn::commitTransaction);
}
 
Example #2
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateTableTxnAndAppend() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newCreateTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());

  AppendFiles append = txn.newAppend();
  DataFile dataFile = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withRecordCount(1)
      .build();
  append.appendFile(dataFile);
  append.commit();
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Snapshot snapshot = table.currentSnapshot();
  Assert.assertTrue("Table should have one manifest file", snapshot.allManifests().size() == 1);
}
 
Example #3
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateTableTxn() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newCreateTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
  txn.updateProperties()
      .set("prop", "value")
      .commit();

  // verify the table is still not visible before the transaction is committed
  Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));

  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
}
 
Example #4
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testReplaceTableTxnTableDeletedConcurrently() {
  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
  Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false);

  catalog.dropTable(TABLE_IDENTIFIER);

  txn.updateProperties()
      .set("prop", "value")
      .commit();

  AssertHelpers.assertThrows(
      "Replace table txn should fail",
      NoSuchTableException.class,
      "No such table: hivedb.tbl",
      txn::commitTransaction);
}
 
Example #5
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testReplaceTableTxnTableModifiedConcurrently() {
  Table table = catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
  Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false);

  // update the table concurrently
  table.updateProperties()
      .set("another-prop", "another-value")
      .commit();

  txn.updateProperties()
      .set("prop", "value")
      .commit();
  txn.commitTransaction();

  // the replace should still succeed
  table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertNull("Table props should be updated", table.properties().get("another-prop"));
  Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
}
 
Example #6
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateOrReplaceTableTxnTableDeletedConcurrently() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
  Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, PartitionSpec.unpartitioned(), tableLocation, Maps.newHashMap(), true);
  txn.updateProperties()
      .set("prop", "value")
      .commit();

  // drop the table concurrently
  catalog.dropTable(TABLE_IDENTIFIER);

  // expect the transaction to succeed anyway
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
}
 
Example #7
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateOrReplaceTableTxnTableCreatedConcurrently() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, PartitionSpec.unpartitioned(), tableLocation, Maps.newHashMap(), true);
  txn.updateProperties()
      .set("prop", "value")
      .commit();

  // create the table concurrently
  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
  Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));

  // expect the transaction to succeed anyway
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Partition spec should match", PartitionSpec.unpartitioned(), table.spec());
  Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
}
 
Example #8
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateTableTxnWithGlobalTableLocation() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newCreateTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, SPEC, "file:///" + tableLocation, Maps.newHashMap());
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);

  DataFile dataFile = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withRecordCount(1)
      .build();

  table.newAppend()
      .appendFile(dataFile)
      .commit();

  Assert.assertEquals("Write should succeed", 1, Iterables.size(table.snapshots()));
}
 
Example #9
Source File: IcebergMetastoreTables.java    From metacat with Apache License 2.0
@Override
public Transaction newReplaceTableTransaction(final TableIdentifier identifier,
                                              final Schema schema,
                                              final PartitionSpec spec,
                                              final String location,
                                              final Map<String, String> properties,
                                              final boolean orCreate) {
    throw new MetacatNotSupportedException("not supported");
}
 
Example #10
Source File: IcebergMetastoreTables.java    From metacat with Apache License 2.0
@Override
public Transaction newCreateTableTransaction(final TableIdentifier identifier,
                                             final Schema schema,
                                             final PartitionSpec spec,
                                             final String location,
                                             final Map<String, String> properties) {
    throw new MetacatNotSupportedException("not supported");
}
 
Example #11
Source File: TestIcebergPartitions.java    From dremio-oss with Apache License 2.0
@Test
public void testNonIdentityPartitions() throws Exception {
  File root = tempDir.newFolder();
  HadoopTables tables = new HadoopTables(conf);
  PartitionSpec partitionSpec = PartitionSpec
      .builderFor(schema)
      .bucket(NAME, 2)
      .build();
  Table table = tables.create(schema, partitionSpec, root.getAbsolutePath());

  // Append some data files.
  Transaction transaction = table.newTransaction();
  AppendFiles appendFiles = transaction.newAppend();
  appendFiles.appendFile(createDataFile(root, "d1", 1, "jack", 100));
  appendFiles.appendFile(createDataFile(root, "d2", 1, "jack", 200));
  appendFiles.appendFile(createDataFile(root, "d3", 2, "jill", 300));
  appendFiles.appendFile(createDataFile(root, "d4", 2, "jill", 400));
  appendFiles.appendFile(createDataFile(root, "d5", 2, "jill", 500));
  appendFiles.commit();
  transaction.commitTransaction();

  try {
    IcebergTableInfo tableInfo = new IcebergTableWrapper(getSabotContext(),
        HadoopFileSystem.get(fs), conf, root.getAbsolutePath()).getTableInfo();
    fail("Expected error while reading metadata of iceberg table with non-identity partition field");
  } catch (Exception ex) {
    Assert.assertTrue("UserException expected", ex instanceof UserException);
    UserException uex = (UserException) ex;
    Assert.assertEquals("Invalid ErrorType. Expected " + UserBitShared.DremioPBError.ErrorType.UNSUPPORTED_OPERATION
            + " but got " + uex.getErrorType(), UserBitShared.DremioPBError.ErrorType.UNSUPPORTED_OPERATION, uex.getErrorType());
    String expectedErrorMsg = "Column values and partition values are not same for [name] column";
    Assert.assertTrue("Expected message to contain " + expectedErrorMsg + " but was "
        + uex.getOriginalMessage() + " instead", uex.getOriginalMessage().contains(expectedErrorMsg));
  }
}
 
Example #12
Source File: TestHadoopCatalog.java    From iceberg with Apache License 2.0
@Test
public void testCallingLocationProviderWhenNoCurrentMetadata() throws IOException {
  Configuration conf = new Configuration();
  String warehousePath = temp.newFolder().getAbsolutePath();
  HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);

  TableIdentifier tableIdent = TableIdentifier.of("ns1", "ns2", "table1");
  Transaction create = catalog.newCreateTableTransaction(tableIdent, SCHEMA);
  create.table().locationProvider();  // NPE triggered if not handled appropriately
  create.commitTransaction();

  Assert.assertEquals("1 table expected", 1, catalog.listTables(Namespace.of("ns1", "ns2")).size());
  catalog.dropTable(tableIdent, true);
}
 
Example #13
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateOrReplaceTableTxnTableExists() {
  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
  Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, true);
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Partition spec should match", PartitionSpec.unpartitioned(), table.spec());
}
 
Example #14
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testCreateOrReplaceTableTxnTableNotExists() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, true);
  txn.updateProperties()
      .set("prop", "value")
      .commit();
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
}
 
Example #15
Source File: HiveCreateReplaceTableTest.java    From iceberg with Apache License 2.0
@Test
public void testReplaceTableTxn() {
  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
  Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));

  Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, false);
  txn.commitTransaction();

  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Partition spec should match", PartitionSpec.unpartitioned(), table.spec());
}
 
Example #16
Source File: StagedSparkTable.java    From iceberg with Apache License 2.0
public StagedSparkTable(Transaction transaction) {
  super(transaction.table());
  this.transaction = transaction;
}
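This works because Transaction.table() exposes the transaction's in-flight, uncommitted table, so a wrapper can serve table metadata to Spark's staged-table API while the underlying writes remain invisible until the transaction commits. Below is a minimal sketch of the same pattern; StagedTableHandle is a hypothetical name used for illustration, not Iceberg or Spark API.

import org.apache.iceberg.Transaction;

// Hypothetical wrapper in the spirit of StagedSparkTable: hold the
// transaction and read metadata from its uncommitted table.
class StagedTableHandle {
  private final Transaction transaction;

  StagedTableHandle(Transaction transaction) {
    this.transaction = transaction;
  }

  String stagedLocation() {
    return transaction.table().location();  // metadata of the staged, uncommitted table
  }

  void commitStagedChanges() {
    transaction.commitTransaction();  // publish all staged writes atomically
  }
}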
 
Example #17
Source File: TestCreateTable.java    From dremio-oss with Apache License 2.0
@Test
public void testDroppingOfMapTypeColumn() throws Exception {
  String table1 = "iceberg_map_test";
  try {
    File table1Folder = new File(getDfsTestTmpSchemaLocation(), table1);
    HadoopTables hadoopTables = new HadoopTables(new Configuration());

    Schema schema = new Schema(
      Types.NestedField.optional(1, "col1", Types.MapType.ofOptional(1, 2, Types.IntegerType.get(), Types.StringType.get())),
      Types.NestedField.optional(2, "col2", Types.IntegerType.get())
    );
    PartitionSpec spec = PartitionSpec
      .builderFor(schema)
      .build();
    Table table = hadoopTables.create(schema, spec, table1Folder.getPath());
    Transaction transaction = table.newTransaction();
    AppendFiles appendFiles = transaction.newAppend();
    final String testWorkingPath = TestTools.getWorkingPath() + "/src/test/resources/iceberg/mapTest";
    final String parquetFile = "iceberg_map_test.parquet";
    File dataFile = new File(testWorkingPath, parquetFile);
    appendFiles.appendFile(
      DataFiles.builder(spec)
        .withInputFile(Files.localInput(dataFile))
        .withRecordCount(1)
        .withFormat(FileFormat.PARQUET)
        .build()
    );
    appendFiles.commit();
    transaction.commitTransaction();

    testBuilder()
      .sqlQuery("select * from dfs_test.iceberg_map_test")
      .unOrdered()
      .baselineColumns("col2")
      .baselineValues(1)
      .build()
      .run();

    Thread.sleep(1001);
    String insertCommandSql = "insert into  dfs_test.iceberg_map_test select * from (values(2))";
    test(insertCommandSql);
    Thread.sleep(1001);

    testBuilder()
      .sqlQuery("select * from dfs_test.iceberg_map_test")
      .unOrdered()
      .baselineColumns("col2")
      .baselineValues(1)
      .baselineValues(2)
      .build()
      .run();
  } finally {
    FileUtils.deleteQuietly(new File(getDfsTestTmpSchemaLocation(), table1));
  }
}
 
Example #18
Source File: TestIcebergTableDrop.java    From dremio-oss with Apache License 2.0
@Test
public void testDropTable() throws Exception {
  try (AutoCloseable c = enableIcebergTables()) {
    Path rootPath = Paths.get(getDfsTestTmpSchemaLocation(), "iceberg", "nation");
    Files.createDirectories(rootPath);
    String root = rootPath.toString();

    String tableName = "dfs_test.iceberg.nation";

    HadoopTables tables = new HadoopTables(conf);
    Table table = tables.create(schema, null, root);
    IcebergTableInfo tableInfo =
        new IcebergTableWrapper(getSabotContext(), HadoopFileSystem.get(fs), conf, root)
            .getTableInfo();
    assertEquals(0, tableInfo.getRecordCount());

    // Append some data files.
    Transaction transaction = table.newTransaction();
    AppendFiles appendFiles = transaction.newAppend();
    appendFiles.appendFile(createDataFile(rootPath.toFile(), "d1"));
    appendFiles.commit();
    transaction.commitTransaction();

    testBuilder()
        .sqlQuery("select count(*) c from " + tableName)
        .unOrdered()
        .baselineColumns("c")
        .baselineValues(25L)
        .build()
        .run();

    testBuilder()
        .sqlQuery("DROP TABLE " + tableName)
        .unOrdered()
        .baselineColumns("ok", "summary")
        .baselineValues(true, String.format("Table [%s] dropped", tableName))
        .build()
        .run();

    errorMsgTestHelper(
        "select count(*) c from " + tableName, "Table '" + tableName + "' not found");
  }
}
 
Example #19
Source File: TestRefresh.java    From dremio-oss with Apache License 2.0
@Test
public void testRefresh() throws Exception {
  try (AutoCloseable c = enableIcebergTables()) {
    Path rootPath = Paths.get(getDfsTestTmpSchemaLocation(), "iceberg", "metadata_refresh");
    Files.createDirectories(rootPath);
    String root = rootPath.toString();
    String tableName = "dfs_test.iceberg.metadata_refresh";

    HadoopTables tables = new HadoopTables(conf);
    Table table = tables.create(schema, null, root);

    IcebergTableInfo tableInfo =
        new IcebergTableWrapper(getSabotContext(), HadoopFileSystem.get(fs), conf, root)
            .getTableInfo();
    assertEquals(0, tableInfo.getRecordCount());

    // Append some data files.
    Transaction transaction = table.newTransaction();
    AppendFiles appendFiles = transaction.newAppend();
    appendFiles.appendFile(createDataFile(rootPath.toFile(), "d1"));
    appendFiles.commit();
    transaction.commitTransaction();

    testBuilder()
        .sqlQuery("select count(*) c from " + tableName)
        .unOrdered()
        .baselineColumns("c")
        .baselineValues(25L)
        .build()
        .run();

    // sleep so that a later modification produces a detectable mtime change.
    Thread.sleep(1000);

    // refresh without an update
    testBuilder()
        .sqlQuery("ALTER TABLE " + tableName + " REFRESH METADATA")
        .unOrdered()
        .baselineColumns("ok", "summary")
        .baselineValues(
            true,
            String.format(
                "Table '%s' read signature reviewed but source stated metadata is unchanged, no refresh occurred.",
                tableName))
        .build()
        .run();

    // Do another append
    transaction = table.newTransaction();
    appendFiles = transaction.newAppend();
    appendFiles.appendFile(createDataFile(rootPath.toFile(), "d2"));
    appendFiles.commit();
    transaction.commitTransaction();

    // refresh
    testBuilder()
        .sqlQuery("ALTER TABLE " + tableName + " REFRESH METADATA")
        .unOrdered()
        .baselineColumns("ok", "summary")
        .baselineValues(true, String.format("Metadata for table '%s' refreshed.", tableName))
        .build()
        .run();

    // validate increased row count
    testBuilder()
        .sqlQuery("select count(*) c from " + tableName)
        .unOrdered()
        .baselineColumns("c")
        .baselineValues(50L)
        .build()
        .run();
  }
}
 
Example #20
Source File: TestIcebergPartitions.java    From dremio-oss with Apache License 2.0
@Test
public void testPartitions() throws Exception {
  File root = tempDir.newFolder();
  HadoopTables tables = new HadoopTables(conf);
  Table table = tables.create(schema, spec, root.getAbsolutePath());

  // test empty table.
  IcebergTableInfo tableInfo = new IcebergTableWrapper(getSabotContext(),
    HadoopFileSystem.get(fs), conf, root.getAbsolutePath()).getTableInfo();
  assertEquals(0, tableInfo.getRecordCount());

  List<String> expectedColumns = Arrays.asList(ID, NAME);
  assertEquals(expectedColumns, tableInfo.getPartitionColumns());

  assertEquals(0, ImmutableList.copyOf(tableInfo.getPartitionChunkListing().iterator()).size());

  // Append some data files.
  Transaction transaction = table.newTransaction();
  AppendFiles appendFiles = transaction.newAppend();
  appendFiles.appendFile(createDataFile(root, "d1", 1, "jack", 100));
  appendFiles.appendFile(createDataFile(root, "d2", 1, "jack", 200));
  appendFiles.appendFile(createDataFile(root, "d3", 2, "jill", 300));
  appendFiles.appendFile(createDataFile(root, "d4", 2, "jill", 400));
  appendFiles.appendFile(createDataFile(root, "d5", 2, "jill", 500));
  appendFiles.commit();
  transaction.commitTransaction();

  tableInfo = new IcebergTableWrapper(getSabotContext(),
    HadoopFileSystem.get(fs), conf, root.getAbsolutePath()).getTableInfo();
  assertEquals(1500, tableInfo.getRecordCount());
  assertEquals(2, ImmutableList.copyOf(tableInfo.getPartitionChunkListing().iterator()).size());

  // validate first partition
  final AtomicLong recordCount = new AtomicLong(0);
  PartitionChunk p1 = findPartition(ImmutableList.copyOf(tableInfo.getPartitionChunkListing().iterator()), 1, "jack");
  assertNotNull(p1);
  assertEquals(2, p1.getSplitCount());
  p1.getSplits().iterator().forEachRemaining(x -> recordCount.addAndGet(x.getRecordCount()));
  assertEquals(300, recordCount.intValue());

  // validate second partition
  PartitionChunk p2 = findPartition(ImmutableList.copyOf(tableInfo.getPartitionChunkListing().iterator()), 2, "jill");
  assertNotNull(p2);

  assertEquals(3, p2.getSplitCount());
  recordCount.set(0);
  p2.getSplits().iterator().forEachRemaining(x -> recordCount.addAndGet(x.getRecordCount()));
  assertEquals(1200, recordCount.intValue());
}
 
Example #21
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to create a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param spec a partition spec
 * @param properties a string map of table properties
 * @return a {@link Transaction} to create the table
 * @throws AlreadyExistsException if the table already exists
 */
default Transaction newCreateTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    PartitionSpec spec,
    Map<String, String> properties) {
  return newCreateTableTransaction(identifier, schema, spec, null, properties);
}
 
Example #22
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to create a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param spec a partition spec
 * @return a {@link Transaction} to create the table
 * @throws AlreadyExistsException if the table already exists
 */
default Transaction newCreateTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    PartitionSpec spec) {
  return newCreateTableTransaction(identifier, schema, spec, null, null);
}
 
Example #23
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to replace a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param spec a partition spec
 * @param location a location for the table; leave null if unspecified
 * @param properties a string map of table properties
 * @param orCreate whether to create the table if it does not exist
 * @return a {@link Transaction} to replace the table
 * @throws NoSuchTableException if the table doesn't exist and orCreate is false
 */
Transaction newReplaceTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    PartitionSpec spec,
    String location,
    Map<String, String> properties,
    boolean orCreate);
 
Example #24
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to replace a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param spec a partition spec
 * @param properties a string map of table properties
 * @param orCreate whether to create the table if it does not exist
 * @return a {@link Transaction} to replace the table
 * @throws NoSuchTableException if the table doesn't exist and orCreate is false
 */
default Transaction newReplaceTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    PartitionSpec spec,
    Map<String, String> properties,
    boolean orCreate) {
  return newReplaceTableTransaction(identifier, schema, spec, null, properties, orCreate);
}
 
Example #25
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to replace a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param spec a partition spec
 * @param orCreate whether to create the table if it does not exist
 * @return a {@link Transaction} to replace the table
 * @throws NoSuchTableException if the table doesn't exist and orCreate is false
 */
default Transaction newReplaceTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    PartitionSpec spec,
    boolean orCreate) {
  return newReplaceTableTransaction(identifier, schema, spec, null, null, orCreate);
}
 
Example #26
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to replace a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param orCreate whether to create the table if it does not exist
 * @return a {@link Transaction} to replace the table
 * @throws NoSuchTableException if the table doesn't exist and orCreate is false
 */
default Transaction newReplaceTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    boolean orCreate) {
  return newReplaceTableTransaction(identifier, schema, PartitionSpec.unpartitioned(), null, null, orCreate);
}
 
Example #27
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to create a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @param spec a partition spec
 * @param location a location for the table; leave null if unspecified
 * @param properties a string map of table properties
 * @return a {@link Transaction} to create the table
 * @throws AlreadyExistsException if the table already exists
 */
Transaction newCreateTableTransaction(
    TableIdentifier identifier,
    Schema schema,
    PartitionSpec spec,
    String location,
    Map<String, String> properties);
 
Example #28
Source File: Catalog.java    From iceberg with Apache License 2.0
/**
 * Start a transaction to create a table.
 *
 * @param identifier a table identifier
 * @param schema a schema
 * @return a {@link Transaction} to create the table
 * @throws AlreadyExistsException if the table already exists
 */
default Transaction newCreateTableTransaction(
    TableIdentifier identifier,
    Schema schema) {
  return newCreateTableTransaction(identifier, schema, PartitionSpec.unpartitioned(), null, null);
}
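Taken together, these Catalog overloads give three entry points: newCreateTableTransaction fails with AlreadyExistsException if the table already exists (Example #1), newReplaceTableTransaction with orCreate = false fails with NoSuchTableException if it does not (Example #4), and passing orCreate = true succeeds either way (Examples #6 and #7). A short sketch of the distinction, assuming catalog, identifier, and schema are set up as in the lifecycle sketch near the top of this page:

// Create-only: throws AlreadyExistsException if the table already exists.
Transaction createTxn = catalog.newCreateTableTransaction(identifier, schema);
createTxn.commitTransaction();

// Replace-only: throws NoSuchTableException if the table does not exist.
Transaction replaceTxn = catalog.newReplaceTableTransaction(identifier, schema, false);
replaceTxn.commitTransaction();

// Replace-or-create: succeeds whether or not the table exists, so no racy
// tableExists() check is needed before starting the transaction.
Transaction upsertTxn = catalog.newReplaceTableTransaction(identifier, schema, true);
upsertTxn.commitTransaction();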