Java Code Examples for org.apache.iceberg.TableMetadataParser#Codec

The following examples show how to use org.apache.iceberg.TableMetadataParser#Codec. All of them are taken from HadoopTableOperations.java in the Apache Iceberg project (Apache License 2.0) and show how a codec is resolved from table properties and used to build versioned metadata file names.
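Before the individual examples, here is a minimal sketch of the Codec round trip they all rely on: resolving a codec from a name with Codec.fromName and deriving the matching metadata file suffix with getFileExtension. The literal codec name "gzip" is an illustrative assumption; in the examples below the name comes from the TableProperties.METADATA_COMPRESSION table property.

import org.apache.iceberg.TableMetadataParser;

public class CodecExtensionDemo {
  public static void main(String[] args) {
    // Resolve a codec from its name. "gzip" is an illustrative value here; in the
    // examples below the name is read from TableProperties.METADATA_COMPRESSION.
    TableMetadataParser.Codec codec = TableMetadataParser.Codec.fromName("gzip");

    // The codec determines the suffix of the versioned metadata file that
    // HadoopTableOperations reads and writes (plain vs. gzip-compressed JSON).
    String extension = TableMetadataParser.getFileExtension(codec);
    System.out.println(extension);
  }
}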
Example 1
Source File: HadoopTableOperations.java    From iceberg with Apache License 2.0
private Path getMetadataFile(int metadataVersion) throws IOException {
  for (TableMetadataParser.Codec codec : TableMetadataParser.Codec.values()) {
    Path metadataFile = metadataFilePath(metadataVersion, codec);
    FileSystem fs = getFileSystem(metadataFile, conf);
    if (fs.exists(metadataFile)) {
      return metadataFile;
    }

    if (codec.equals(TableMetadataParser.Codec.GZIP)) {
      // we have to be backward-compatible with .metadata.json.gz files
      metadataFile = oldMetadataFilePath(metadataVersion, codec);
      fs = getFileSystem(metadataFile, conf);
      if (fs.exists(metadataFile)) {
        return metadataFile;
      }
    }
  }

  return null;
}
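Because the table's compression setting is not known before its metadata has been read, this lookup probes each codec's file extension for the requested version; for GZIP it additionally checks the legacy .metadata.json.gz naming, as noted in the inline comment, and returns null if no metadata file exists for that version.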
 
Example 2
Source File: HadoopTableOperations.java    From iceberg with Apache License 2.0
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
  Pair<Integer, TableMetadata> current = versionAndMetadata();
  if (base != current.second()) {
    throw new CommitFailedException("Cannot commit changes based on stale table metadata");
  }

  if (base == metadata) {
    LOG.info("Nothing to commit.");
    return;
  }

  Preconditions.checkArgument(base == null || base.location().equals(metadata.location()),
      "Hadoop path-based tables cannot be relocated");
  Preconditions.checkArgument(
      !metadata.properties().containsKey(TableProperties.WRITE_METADATA_LOCATION),
      "Hadoop path-based tables cannot relocate metadata");

  String codecName = metadata.property(
      TableProperties.METADATA_COMPRESSION, TableProperties.METADATA_COMPRESSION_DEFAULT);
  TableMetadataParser.Codec codec = TableMetadataParser.Codec.fromName(codecName);
  String fileExtension = TableMetadataParser.getFileExtension(codec);
  Path tempMetadataFile = metadataPath(UUID.randomUUID().toString() + fileExtension);
  TableMetadataParser.write(metadata, io().newOutputFile(tempMetadataFile.toString()));

  int nextVersion = (current.first() != null ? current.first() : 0) + 1;
  Path finalMetadataFile = metadataFilePath(nextVersion, codec);
  FileSystem fs = getFileSystem(tempMetadataFile, conf);

  try {
    if (fs.exists(finalMetadataFile)) {
      throw new CommitFailedException(
          "Version %d already exists: %s", nextVersion, finalMetadataFile);
    }
  } catch (IOException e) {
    throw new RuntimeIOException(e,
        "Failed to check if next version exists: " + finalMetadataFile);
  }

  // this rename operation is the atomic commit operation
  renameToFinal(fs, tempMetadataFile, finalMetadataFile);

  // update the best-effort version pointer
  writeVersionHint(nextVersion);

  deleteRemovedMetadataFiles(base, metadata);

  this.shouldRefresh = true;
}
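In this commit path the codec is resolved from the TableProperties.METADATA_COMPRESSION property (falling back to the default) and determines the extension of the new metadata file. The metadata is first written to a temporary file, and the rename to the versioned final path is the atomic commit step; the version-hint file is only a best-effort pointer updated afterwards.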
 
Example 3
Source File: HadoopTableOperations.java    From iceberg with Apache License 2.0
private Path metadataFilePath(int metadataVersion, TableMetadataParser.Codec codec) {
  return metadataPath("v" + metadataVersion + TableMetadataParser.getFileExtension(codec));
}
 
Example 4
Source File: HadoopTableOperations.java    From iceberg with Apache License 2.0
private Path oldMetadataFilePath(int metadataVersion, TableMetadataParser.Codec codec) {
  return metadataPath("v" + metadataVersion + TableMetadataParser.getOldFileExtension(codec));
}
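A short sketch of how the two helpers above differ: getFileExtension yields the current suffix used by metadataFilePath, while getOldFileExtension yields the legacy suffix that oldMetadataFilePath and the backward-compatibility branch in Example 1 rely on. The exact suffix strings mentioned in the comments are assumptions based on Example 1's ".metadata.json.gz" comment and may vary by Iceberg version.

import org.apache.iceberg.TableMetadataParser;
import org.apache.iceberg.TableMetadataParser.Codec;

public class MetadataSuffixDemo {
  public static void main(String[] args) {
    // Current naming used by metadataFilePath (for GZIP, a compressed-JSON suffix).
    System.out.println(TableMetadataParser.getFileExtension(Codec.GZIP));

    // Legacy naming used by oldMetadataFilePath, e.g. the ".metadata.json.gz" form
    // that Example 1 falls back to for backward compatibility.
    System.out.println(TableMetadataParser.getOldFileExtension(Codec.GZIP));
  }
}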