org.apache.hadoop.hive.common.FileUtils Java Examples

The following examples show how to use org.apache.hadoop.hive.common.FileUtils. They are drawn from several open source projects; the source file, project, and license for each are noted above the example.
Example #1
Source File: AWSCatalogMetastoreClient.java (from aws-glue-data-catalog-client-for-apache-hive-metastore, Apache License 2.0)
private void verifyDestinationLocation(FileSystem srcFs, FileSystem destFs, Path srcPath, Path destPath, org.apache.hadoop.hive.metastore.api.Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
      throws InvalidOperationException {
    String oldPartLoc = srcPath.toString();
    String newPartLoc = destPath.toString();

    // check that src and dest are on the same file system
    if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
        throw new InvalidOperationException("table new location " + destPath
              + " is on a different file system than the old location "
              + srcPath + ". This operation is not supported");
    }
    try {
        srcFs.exists(srcPath); // check that src exists; also surfaces permission problems as an IOException
        if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
            throw new InvalidOperationException("New location for this partition "
                  + tbl.getDbName() + "." + tbl.getTableName() + "." + newPartition.getValues()
                  + " already exists : " + destPath);
        }
    } catch (IOException e) {
        throw new InvalidOperationException("Unable to access new location "
              + destPath + " for partition " + tbl.getDbName() + "."
              + tbl.getTableName() + " " + newPartition.getValues());
    }
}
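The same-file-system check relies on FileUtils.equalsFileSystem, which effectively compares the two file systems' URIs (scheme and authority). A minimal sketch of that behavior, assuming a Hadoop Configuration named conf:

FileSystem hdfs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
FileSystem s3 = FileSystem.get(URI.create("s3a://warehouse-bucket/"), conf);

// Different schemes, so a rename between them cannot be a cheap metadata move
boolean sameFs = FileUtils.equalsFileSystem(hdfs, s3); // false

// Two paths on the same NameNode compare equal regardless of path
boolean sameHdfs = FileUtils.equalsFileSystem(
    hdfs, FileSystem.get(URI.create("hdfs://namenode:8020/tmp"), conf)); // true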
 
Example #2
Source File: MetastoreUtil.java (from presto, Apache License 2.0)
public static String toPartitionName(List<String> names, List<String> values)
{
    checkArgument(names.size() == values.size(), "partition value count must match partition column count");
    checkArgument(values.stream().allMatch(Objects::nonNull), "partition value must not be null");

    return FileUtils.makePartName(names, values);
}
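Under the hood, FileUtils.makePartName joins escaped key=value pairs with '/'. A small usage sketch (the column names and values are illustrative):

String name = MetastoreUtil.toPartitionName(
        ImmutableList.of("ds", "country"),
        ImmutableList.of("2020-01-01", "US"));
// name is "ds=2020-01-01/country=US"; mismatched list sizes or a null value
// fail fast through the checkArgument guards above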
 
Example #3
Source File: HivePartitionManager.java (from presto, Apache License 2.0)
public static List<String> extractPartitionValues(String partitionName)
{
    ImmutableList.Builder<String> values = ImmutableList.builder();

    boolean inKey = true;
    int valueStart = -1;
    for (int i = 0; i < partitionName.length(); i++) {
        char current = partitionName.charAt(i);
        if (inKey) {
            checkArgument(current != '/', "Invalid partition spec: %s", partitionName);
            if (current == '=') {
                inKey = false;
                valueStart = i + 1;
            }
        }
        else if (current == '/') {
            checkArgument(valueStart != -1, "Invalid partition spec: %s", partitionName);
            values.add(FileUtils.unescapePathName(partitionName.substring(valueStart, i)));
            inKey = true;
            valueStart = -1;
        }
    }
    checkArgument(!inKey, "Invalid partition spec: %s", partitionName);
    values.add(FileUtils.unescapePathName(partitionName.substring(valueStart)));

    return values.build();
}
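This is the inverse of makePartName: the keys are skipped and each value is unescaped. For example, a percent-escaped slash inside a value is decoded back (illustrative input):

List<String> values = HivePartitionManager.extractPartitionValues("ds=2020-01-01/country=U%2FS");
// values is ["2020-01-01", "U/S"] -- FileUtils.unescapePathName decodes the %2F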
 
Example #4
Source File: HiveShimV1.java (from flink, Apache License 2.0)
@Override
public boolean moveToTrash(FileSystem fs, Path path, Configuration conf, boolean purge) throws IOException {
	try {
		Method method = FileUtils.class.getDeclaredMethod("moveToTrash", FileSystem.class, Path.class, Configuration.class);
		return (boolean) method.invoke(null, fs, path, conf);
	} catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
		throw new IOException("Failed to move " + path + " to trash", e);
	}
}
 
Example #5
Source File: HiveShimV2.java (from flink, Apache License 2.0)
@Override
public boolean moveToTrash(FileSystem fs, Path path, Configuration conf, boolean purge) throws IOException {
	try {
		Method method = FileUtils.class.getDeclaredMethod("moveToTrash", FileSystem.class, Path.class,
				Configuration.class, boolean.class);
		return (boolean) method.invoke(null, fs, path, conf, purge);
	} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
		throw new IOException("Failed to move " + path + " to trash", e);
	}
}
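The two shims exist because the signature of FileUtils.moveToTrash changed across Hive versions: Hive 1.x takes (FileSystem, Path, Configuration), while Hive 2.x adds a boolean purge flag, hence the reflective lookups. A hedged sketch of how a caller might pick the right shim (HiveShimLoader.loadHiveShim is Flink's shim factory; the version string, fs, path, and conf are illustrative):

HiveShim shim = HiveShimLoader.loadHiveShim("2.3.4");
boolean moved = shim.moveToTrash(fs, path, conf, false);
// On a 1.x shim the purge flag is effectively ignored, since the underlying
// three-argument FileUtils.moveToTrash has no purge parameter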
 
Example #6
Source File: HiveConvertersImpl.java (from metacat, Apache License 2.0)
/**
 * {@inheritDoc}
 */
@Override
public String getNameFromPartVals(final TableDto tableDto, final List<String> partVals) {
    final List<String> partitionKeys = tableDto.getPartition_keys();
    if (partitionKeys.size() != partVals.size()) {
        throw new IllegalArgumentException("Not the same number of partition columns and partition values");
    }
    return FileUtils.makePartName(partitionKeys, partVals, "");
}
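The third argument to makePartName is the placeholder used for null or empty partition values; passing "" keeps empty values empty rather than substituting Hive's default partition name. A sketch of the difference (assuming Hive's usual __HIVE_DEFAULT_PARTITION__ placeholder):

List<String> keys = ImmutableList.of("ds", "country");
List<String> vals = Arrays.asList("2020-01-01", "");

FileUtils.makePartName(keys, vals, "");  // "ds=2020-01-01/country="
FileUtils.makePartName(keys, vals);      // "ds=2020-01-01/country=__HIVE_DEFAULT_PARTITION__"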
 
Example #7
Source File: CreateEmptyPartitionProcedure.java (from presto, Apache License 2.0)
private void doCreateEmptyPartition(ConnectorSession session, String schemaName, String tableName, List<String> partitionColumnNames, List<String> partitionValues)
{
    TransactionalMetadata hiveMetadata = hiveMetadataFactory.create();
    HiveTableHandle tableHandle = (HiveTableHandle) hiveMetadata.getTableHandle(session, new SchemaTableName(schemaName, tableName));
    if (tableHandle == null) {
        throw new PrestoException(INVALID_PROCEDURE_ARGUMENT, format("Table '%s' does not exist", new SchemaTableName(schemaName, tableName)));
    }

    List<String> actualPartitionColumnNames = tableHandle.getPartitionColumns().stream()
            .map(HiveColumnHandle::getName)
            .collect(toImmutableList());

    if (!Objects.equals(partitionColumnNames, actualPartitionColumnNames)) {
        throw new PrestoException(INVALID_PROCEDURE_ARGUMENT, "Provided partition column names do not match actual partition column names: " + actualPartitionColumnNames);
    }

    if (metastore.getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues).isPresent()) {
        throw new PrestoException(ALREADY_EXISTS, "Partition already exists");
    }
    HiveInsertTableHandle hiveInsertTableHandle = (HiveInsertTableHandle) hiveMetadata.beginInsert(session, tableHandle);
    String partitionName = FileUtils.makePartName(actualPartitionColumnNames, partitionValues);

    WriteInfo writeInfo = locationService.getPartitionWriteInfo(hiveInsertTableHandle.getLocationHandle(), Optional.empty(), partitionName);
    Slice serializedPartitionUpdate = Slices.wrappedBuffer(
            partitionUpdateJsonCodec.toJsonBytes(
                    new PartitionUpdate(
                            partitionName,
                            UpdateMode.NEW,
                            writeInfo.getWritePath(),
                            writeInfo.getTargetPath(),
                            ImmutableList.of(),
                            0,
                            0,
                            0)));

    hiveMetadata.finishInsert(
            session,
            hiveInsertTableHandle,
            ImmutableList.of(serializedPartitionUpdate),
            ImmutableList.of());
    hiveMetadata.commit();
}
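The name returned by makePartName doubles as the partition's relative directory under the table location, which is why it feeds straight into getPartitionWriteInfo. Reserved characters in values are percent-escaped so the name stays a safe path (illustrative value):

String partitionName = FileUtils.makePartName(
        ImmutableList.of("ds"), ImmutableList.of("2020/01/01"));
// "ds=2020%2F01%2F01" -- the slash in the value is escaped, so it does not
// introduce a nested directory level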
 
Example #8
Source File: CatalogThriftHiveMetastore.java (from metacat, Apache License 2.0)
/**
 * {@inheritDoc}
 */
@Override
public DropPartitionsResult drop_partitions_req(final DropPartitionsRequest request) throws TException {
    return requestWrapper("drop_partitions_req",
        new Object[]{request}, () -> {
            final String databaseName = request.getDbName();
            final String tableName = request.getTblName();
            final boolean ifExists = request.isSetIfExists() && request.isIfExists();
            final boolean needResult = !request.isSetNeedResult() || request.isNeedResult();

            final List<Partition> parts = Lists.newArrayList();
            final List<String> partNames = Lists.newArrayList();
            int minCount = 0;
            final RequestPartsSpec spec = request.getParts();
            if (spec.isSetExprs()) {
                final Table table = get_table(databaseName, tableName);
                // Dropping by expressions.
                for (DropPartitionsExpr expr : spec.getExprs()) {
                    ++minCount; // expect at least one matching partition per expression unless ifExists is set
                    final PartitionsByExprResult partitionsByExprResult = get_partitions_by_expr(
                        new PartitionsByExprRequest(databaseName, tableName, expr.bufferForExpr()));
                    if (partitionsByExprResult.isHasUnknownPartitions()) {
                        // Expr is built by the DDL semantic analyzer; it should only contain partition columns and simple ops
                        throw new MetaException("Unexpected unknown partitions to drop");
                    }
                    parts.addAll(partitionsByExprResult.getPartitions());
                }

                final List<String> colNames = new ArrayList<>(table.getPartitionKeys().size());
                for (FieldSchema col : table.getPartitionKeys()) {
                    colNames.add(col.getName());
                }
                if (!colNames.isEmpty()) {
                    parts.forEach(
                        partition -> partNames.add(FileUtils.makePartName(colNames, partition.getValues())));
                }
            } else if (spec.isSetNames()) {
                partNames.addAll(spec.getNames());
                minCount = partNames.size();
                parts.addAll(get_partitions_by_names(databaseName, tableName, partNames));
            } else {
                throw new MetaException("Partition spec is not set");
            }

            if ((parts.size() < minCount) && !ifExists) {
                throw new NoSuchObjectException("Some partitions to drop are missing");
            }

            partV1.deletePartitions(catalogName, databaseName, tableName, partNames);
            final DropPartitionsResult result = new DropPartitionsResult();
            if (needResult) {
                result.setPartitions(parts);
            }
            return result;
        });
}
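Callers can populate RequestPartsSpec either with expressions or with explicit partition names. A minimal, hedged sketch of the name-based path through this method (the Thrift union's names(...) factory is assumed from the generated code; client and identifiers are illustrative):

DropPartitionsRequest request = new DropPartitionsRequest("db", "tbl",
        RequestPartsSpec.names(ImmutableList.of("ds=2020-01-01/country=US")));
request.setIfExists(true);   // tolerate already-missing partitions
request.setNeedResult(true); // ask for the dropped Partition objects back
DropPartitionsResult result = client.drop_partitions_req(request);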
 
Example #9
Source File: PartitionUtil.java (from metacat, Apache License 2.0)
/**
 * Escape partition name.
 *
 * @param partName    partition name
 * @return Escaped partition name
 */
public static String escapePartitionName(final String partName) {
    final LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
    Warehouse.makeSpecFromName(partSpec, new Path(partName));
    return FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()));
}
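In effect this parses the name into an ordered spec and re-joins it, so any characters Hive treats as reserved come back percent-escaped. A small round-trip sketch:

LinkedHashMap<String, String> spec = new LinkedHashMap<>();
Warehouse.makeSpecFromName(spec, new Path("ds=2020-01-01/country=US"));
// spec preserves key order: {ds=2020-01-01, country=US}

String name = PartitionUtil.escapePartitionName("ds=2020-01-01/country=US");
// "ds=2020-01-01/country=US" -- unchanged here; a value containing, say, ':'
// would come back with the colon percent-escaped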