Java Code Examples for org.apache.hadoop.hive.metastore.api.Partition

The following examples show how to use org.apache.hadoop.hive.metastore.api.Partition. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: data-highway   Source File: LanderTaskRunner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Registers the newly landed data with the Hive metastore, then commits the consumed
 * Kafka offsets. If the partition already existed, a mutation counter is bumped and a
 * warning is logged instead of firing the partition-created handler.
 */
void updateMetadata(LanderConfiguration config) {
  changeState(State.UPDATING);
  String instant = config.getAcquisitionInstant();
  log.info("Updating table to add partition {}, {}.", config.getRoadName(), instant);

  String spec = ACQUISITION_INSTANT + "=" + instant;
  List<String> values = singletonList(instant);
  try {
    Optional<Partition> added = hivePartitionManager.addPartition(roadName, values,
        config.getS3KeyPrefix());
    // Commit the end offset of every consumed topic-partition before notifying handlers.
    Map<Integer, Long> endOffsets = new HashMap<>();
    config.getOffsets().forEach((partitionId, range) -> endOffsets.put(partitionId, range.getEnd()));
    offsetManager.commitOffsets(topicName, endOffsets);
    if (!added.isPresent()) {
      // Partition already exists
      partitionMutationCounter.increment();
      log.warn("Data landed into existing partition; road={} partitionSpec={}", roadName, spec);
    } else {
      long recordCount = config.getOffsets().values().stream().mapToLong(range -> range.getEnd() - range.getStart()).sum();
      landingHandler.handlePartitionCreated(roadName, added.get(), spec, recordCount);
    }
  } catch (MetaStoreException e) {
    // Count metastore failures for alerting, but let the caller see the exception.
    metaStoreErrorMeter.increment();
    throw e;
  }
}
 
Example 2
Source Project: circus-train   Source File: ReplicaTableFactoryTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void newPartition() {
  // Build a replica partition from the fixture source table/partition and verify that
  // the factory mirrors identity fields, re-points storage at the replica path, and
  // stamps the circus-train replication bookkeeping parameters.
  Path replicaPartitionPath = new Path(REPLICA_DATA_DESTINATION, REPLICA_PARTITION_SUBPATH);
  Partition replica = factory.newReplicaPartition(EVENT_ID, sourceTable, sourcePartition, DB_NAME, TABLE_NAME,
      replicaPartitionPath, FULL);

  // Identity and storage-format fields come straight from the source table.
  assertThat(replica.getDbName(), is(sourceTable.getDbName()));
  assertThat(replica.getTableName(), is(sourceTable.getTableName()));
  assertThat(replica.getSd().getInputFormat(), is(INPUT_FORMAT));
  assertThat(replica.getSd().getOutputFormat(), is(OUTPUT_FORMAT));
  // The location, however, points at the replica destination path.
  assertThat(replica.getSd().getLocation(), is(replicaPartitionPath.toUri().toString()));
  // Replication provenance parameters recorded on the replica.
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.source.table"), is(DB_NAME + "." + TABLE_NAME));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.source.metastore.uris"),
      is(SOURCE_META_STORE_URIS));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.source.location"), is(PARTITION_LOCATION));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.replication.event"), is(EVENT_ID));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.last.replicated"), is(not(nullValue())));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.replication.mode"), is(FULL.name()));
  // Stats flags mark the replica's statistics as externally managed.
  assertThat(replica.getParameters().get("DO_NOT_UPDATE_STATS"), is("true"));
  assertThat(replica.getParameters().get("STATS_GENERATED_VIA_STATS_TASK"), is("true"));
  assertThat(replica.getParameters().get("STATS_GENERATED"), is("true"));
  assertThat(replica.getParameters().get(StatsSetupConst.ROW_COUNT), is("1"));
}
 
Example 3
Source Project: data-highway   Source File: HivePartitionManagerTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void addPartition() throws Exception {
  // Stub the collaborators: metastore returns a canned partition, location resolution
  // and the clock are pinned so the assertions are deterministic.
  doReturn(addedPartition).when(metaStoreClient).add_partition(any());
  doReturn(URI.create("resolved/location")).when(locationResolver).resolveLocation(LOCATION, false);
  doReturn(Instant.ofEpochSecond(1526462225L)).when(clock).instant();

  Partition returned = underTest.addPartition(TABLE, PARTITION_VALUES, LOCATION).get();

  // Capture what was actually sent to the metastore.
  ArgumentCaptor<Partition> partitionCaptor = ArgumentCaptor.forClass(Partition.class);
  verify(metaStoreClient).add_partition(partitionCaptor.capture());

  Partition sent = partitionCaptor.getValue();
  assertThat(sent.getParameters().get("data-highway.version"), is(DataHighwayVersion.VERSION));
  assertThat(sent.getParameters().get("data-highway.last-revision"), is("2018-05-16T09:17:05Z"));
  assertThat(sent.getSd().getLocation(), is("resolved/location"));

  // The caller gets back whatever the metastore returned.
  assertThat(returned, is(addedPartition));
}
 
Example 4
Source Project: metacat   Source File: CatalogThriftHiveMetastore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Fetches the partitions of {@code dbName.tblName} that match {@code filter} and converts
 * them to Hive {@link Partition} objects.
 *
 * @param dbName   database name; normalized before use
 * @param tblName  table name; normalized before use
 * @param filter   partition filter expression, or {@code null} for all partitions
 * @param maxParts upper bound on returned partitions; values {@code <= 0} mean no limit
 * @return the matching partitions in Hive form
 */
private List<Partition> getPartitionsByFilter(final String dbName, final String tblName,
                                              @Nullable final String filter, final short maxParts) {
    final String databaseName = normalizeIdentifier(dbName);
    final String tableName = normalizeIdentifier(tblName);
    final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);

    // maxParts is a Thrift short; treat non-positive values as "unbounded".
    final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
    final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(filter, null, true, false);
    // Fix: use the normalized identifiers here too — the original passed the raw
    // dbName/tblName, inconsistently with the getTable() call above.
    final List<PartitionDto> metacatPartitions = partV1.getPartitionsForRequest(catalogName, databaseName,
        tableName, null, null, null, maxValues, false, dto);
    final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
    for (PartitionDto partition : metacatPartitions) {
        result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
    }
    return result;
}
 
Example 5
Source Project: circus-train   Source File: HdfsSnapshotLocationManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Wires the collaborators, determines the source data location (an explicit
 * {@code tableBasePath} overrides the table's own storage location), creates the
 * snapshot to copy from, and pre-computes the per-partition sub-paths.
 */
HdfsSnapshotLocationManager(
    HiveConf sourceHiveConf,
    String eventId,
    Table sourceTable,
    List<Partition> sourcePartitions,
    boolean snapshotsDisabled,
    String tableBasePath,
    FileSystemFactory fileSystemFactory,
    SourceCatalogListener sourceCatalogListener) throws IOException {
  // Field wiring first: createSnapshot() below relies on these being set.
  this.sourceHiveConf = sourceHiveConf;
  this.eventId = eventId;
  this.sourceTable = sourceTable;
  this.fileSystemFactory = fileSystemFactory;
  this.sourceCatalogListener = sourceCatalogListener;
  this.snapshotsDisabled = snapshotsDisabled;
  String sourceDataLocation = StringUtils.isNotBlank(tableBasePath)
      ? tableBasePath
      : sourceTable.getSd().getLocation();
  sourceDataPath = new Path(sourceDataLocation);
  copyBasePath = createSnapshot();
  subPaths = calculateSubPaths(sourcePartitions, sourceDataLocation, copyBasePath.toString());
}
 
Example 6
Source Project: metacat   Source File: CatalogThriftHiveMetastore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * {@inheritDoc}
 */
@Override
public void alter_partition_with_environment_context(
    final String dbName,
    final String tblName,
    final Partition newPart,
    @Nullable final EnvironmentContext ec
) throws TException {
    final String databaseName = normalizeIdentifier(dbName);
    final String tableName = normalizeIdentifier(tblName);
    requestWrapper("alter_partition_with_environment_context", new Object[]{databaseName, tableName, ec},
        () -> {
            // "Alter" is implemented as an idempotent add of the new partition definition.
            // Fix: pass the normalized databaseName — the original passed the raw dbName
            // while already using the normalized tableName, an inconsistency.
            addPartitionsCore(databaseName, tableName, ImmutableList.of(newPart), false);
            return null;
        });
}
 
Example 7
/**
 * Creates the test fixture: table1 in db1 with two partitions (and their Hive partition
 * names), plus an unpartitioned-in-practice table2 in db2. Both share the same two
 * string partition columns, p1 and p2.
 */
private void setupHiveTables() throws TException, IOException {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("p1"), newFieldSchema("p2"));

  // table1 in db1 with two partitions.
  File table1Location = new File("db1", "table1");
  table1 = newTable("table1", "db1", partitionKeys, newStorageDescriptor(table1Location, "col0"));
  Partition partition1 = newPartition(table1, "value1", "value2");
  Partition partition2 = newPartition(table1, "value11", "value22");
  table1Partitions = Arrays.asList(partition1, partition2);
  table1PartitionNames = Arrays.asList(
      Warehouse.makePartName(partitionKeys, partition1.getValues()),
      Warehouse.makePartName(partitionKeys, partition2.getValues()));

  // table2 in db2, same columns, no partitions registered.
  File table2Location = new File("db2", "table2");
  table2 = newTable("table2", "db2", partitionKeys, newStorageDescriptor(table2Location, "col0"));
}
 
Example 8
Source Project: incubator-gobblin   Source File: HiveMetaStoreUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Convert a {@link HivePartition} into a native metastore {@link Partition}.
 *
 * <p>Create time comes from the HivePartition itself when present, otherwise from the
 * {@code HiveConstants.CREATE_TIME} property; last-access time only from the property.
 */
public static Partition getPartition(HivePartition hivePartition) {
  State props = hivePartition.getProps();

  Partition nativePartition = new Partition();
  nativePartition.setDbName(hivePartition.getDbName());
  nativePartition.setTableName(hivePartition.getTableName());
  nativePartition.setValues(hivePartition.getValues());
  nativePartition.setParameters(getParameters(props));

  // An explicit create time on the HivePartition wins over the property value.
  if (hivePartition.getCreateTime().isPresent()) {
    nativePartition.setCreateTime(Ints.checkedCast(hivePartition.getCreateTime().get()));
  } else if (props.contains(HiveConstants.CREATE_TIME)) {
    nativePartition.setCreateTime(props.getPropAsInt(HiveConstants.CREATE_TIME));
  }
  if (props.contains(HiveConstants.LAST_ACCESS_TIME)) {
    nativePartition.setLastAccessTime(props.getPropAsInt(HiveConstants.LAST_ACCESS_TIME));
  }
  nativePartition.setSd(getStorageDescriptor(hivePartition));
  return nativePartition;
}
 
Example 9
Source Project: circus-train   Source File: HdfsSnapshotLocationManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Maps each source partition location onto the copy base location by stripping the
 * table's base location prefix and re-rooting the remaining branch.
 *
 * @param sourcePartitions   partitions whose locations are re-rooted
 * @param sourceDataLocation the table's base data location (expected prefix of each partition location)
 * @param copyBaseLocation   the base location the branches are re-rooted under
 * @return one re-rooted path per source partition, in input order
 */
static List<Path> calculateSubPaths(
    List<Partition> sourcePartitions,
    String sourceDataLocation,
    String copyBaseLocation) {
  List<Path> paths = new ArrayList<>(sourcePartitions.size());
  for (Partition partition : sourcePartitions) {
    String partitionLocation = partition.getSd().getLocation();
    // Fix: only strip the base location when it is a true prefix. The original used
    // String.replace, which removes *every* occurrence of the base path and would
    // corrupt branches that happen to repeat it deeper in the path.
    String partitionBranch;
    if (partitionLocation.startsWith(sourceDataLocation)) {
      partitionBranch = partitionLocation.substring(sourceDataLocation.length());
    } else {
      partitionBranch = partitionLocation;
    }
    // Drop leading slashes so the branch is treated as relative to the copy base.
    while (partitionBranch.startsWith("/")) {
      partitionBranch = partitionBranch.substring(1);
    }
    Path copyPartitionPath = new Path(copyBaseLocation, partitionBranch);
    paths.add(copyPartitionPath);
    LOG.debug("Added sub-path {}.", copyPartitionPath);
  }
  return paths;
}
 
Example 10
Source Project: presto   Source File: InMemoryThriftMetastore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Checks whether {@code partition} belongs to the given database/table and whether its
 * values match {@code parts} position by position, where an empty string in
 * {@code parts} acts as a wildcard for that position.
 */
private static boolean partitionMatches(Partition partition, String databaseName, String tableName, List<String> parts)
{
    if (!partition.getDbName().equals(databaseName)) {
        return false;
    }
    if (!partition.getTableName().equals(tableName)) {
        return false;
    }
    List<String> partitionValues = partition.getValues();
    if (partitionValues.size() != parts.size()) {
        return false;
    }
    for (int index = 0; index < partitionValues.size(); index++) {
        String expected = parts.get(index);
        // Empty string means "any value" at this position.
        boolean wildcard = expected.isEmpty();
        if (!wildcard && !partitionValues.get(index).equals(expected)) {
            return false;
        }
    }
    return true;
}
 
Example 11
@Test
public void tablesAreDifferent() throws Exception {
  // Mutate a source-table parameter so the source and replica table metadata differ.
  Table sourceTable = catalog.client().getTable(DATABASE, SOURCE_TABLE);
  sourceTable.getParameters().put("com.company.team", "value");
  catalog.client().alter_table(DATABASE, SOURCE_TABLE, sourceTable);

  // Reload table object
  sourceTable = catalog.client().getTable(DATABASE, SOURCE_TABLE);
  Table replicaTable = catalog.client().getTable(DATABASE, REPLICA_TABLE);

  // Run the diff over the live catalog; partitions are fetched in batches.
  HiveDifferences
      .builder(diffListener)
      .comparatorRegistry(comparatorRegistry)
      .source(configuration, sourceTable, new PartitionIterator(catalog.client(), sourceTable, PARTITION_BATCH_SIZE))
      .replica(Optional.of(replicaTable),
          Optional.of(new BufferedPartitionFetcher(catalog.client(), replicaTable, PARTITION_BATCH_SIZE)))
      .checksumFunction(checksumFunction)
      .build()
      .run();
  // Only a table-level change should be reported; no partition-level callbacks.
  verify(diffListener, times(1)).onChangedTable(anyList());
  verify(diffListener, never()).onNewPartition(anyString(), any(Partition.class));
  verify(diffListener, never()).onChangedPartition(anyString(), any(Partition.class), anyList());
  verify(diffListener, never()).onDataChanged(anyString(), any(Partition.class));
}
 
Example 12
Source Project: metacat   Source File: CatalogThriftHiveMetastore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * {@inheritDoc}
 */
@Override
public Partition append_partition_with_environment_context(
    final String dbName,
    final String tblName,
    final List<String> partVals,
    @Nullable final EnvironmentContext environmentContext
) throws TException {
    // NOTE(review): the wrapper label is "append_partition_by_name_with_environment_context"
    // although this method is append_partition_with_environment_context — looks like a
    // copy-paste of the sibling method's label; confirm before relying on request
    // metrics/log names. The environmentContext argument is accepted but not used here.
    return requestWrapper("append_partition_by_name_with_environment_context",
        new Object[]{dbName, tblName, partVals}, () -> {
            // Derive the partition name from the values, create the partition, then
            // read it back and convert it to a Hive Partition for the caller.
            final TableDto tableDto = getTableDto(dbName, tblName);
            final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
            appendPartitionsCore(dbName, tblName, partName);
            return hiveConverters.metacatToHivePartition(getPartitionDtoByName(tableDto, partName), tableDto);
        });
}
 
Example 13
Source Project: metacat   Source File: CatalogThriftHiveMetastore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * {@inheritDoc}
 */
@Override
public int add_partitions_pspec(final List<PartitionSpec> newParts) throws TException {
    // Nothing to add: report a count of zero.
    if (newParts == null || newParts.isEmpty()) {
        return 0;
    }
    // All specs are assumed to target the same database/table; only the first entry's
    // coordinates are used for request logging.
    // NOTE(review): the wrapper label is "add_partition" (singular) — confirm intended.
    final String dbName = newParts.get(0).getDbName();
    final String tableName = newParts.get(0).getTableName();
    return requestWrapper("add_partition", new Object[]{dbName, tableName}, () -> {
        // Flatten the partition specs into individual partitions and add them all.
        final PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(newParts);
        final PartitionSpecProxy.PartitionIterator partitionIterator = partitionSpecProxy.getPartitionIterator();
        final List<Partition> partitions = addPartitionsCore(dbName, tableName,
            Lists.newArrayList(partitionIterator), false);
        return partitions.size();
    });
}
 
Example 14
Source Project: dremio-oss   Source File: HiveMetadataUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Converts a Hive partition's values into {@link PartitionValue}s, pairing each value
 * with the corresponding partition key of {@code table}. A {@code null} partition yields
 * an empty list; conversions that produce {@code null} are dropped.
 *
 * <p>NOTE(review): assumes partition.getValues() has at least as many entries as the
 * table's partition keys — TODO confirm with callers.
 */
public static List<PartitionValue> getPartitionValues(Table table, Partition partition, boolean enableVarcharWidth) {
  if (partition == null) {
    return Collections.emptyList();
  }

  final List<String> values = partition.getValues();
  final List<FieldSchema> keys = table.getPartitionKeys();
  final List<PartitionValue> result = new ArrayList<>(keys.size());
  for (int i = 0; i < keys.size(); i++) {
    final PartitionValue converted = getPartitionValue(keys.get(i), values.get(i), enableVarcharWidth);
    if (converted == null) {
      continue;
    }
    result.add(converted);
  }
  return result;
}
 
Example 15
@Test
public void noMatchingPartitions() throws Exception {
  // Source reports no partitions at all, so replication should update table-level
  // metadata only and never touch the copier.
  when(replica.getLocationManager(TableType.PARTITIONED, targetTableLocation, EVENT_ID, sourceLocationManager))
      .thenReturn(replicaLocationManager);
  PartitionsAndStatistics emptyPartitionsAndStats = new PartitionsAndStatistics(sourceTable.getPartitionKeys(),
      Collections.<Partition>emptyList(), Collections.<String, List<ColumnStatisticsObj>>emptyMap());
  when(source.getPartitions(sourceTable, PARTITION_PREDICATE, MAX_PARTITIONS)).thenReturn(emptyPartitionsAndStats);
  when(source.getLocationManager(sourceTable, Collections.<Partition>emptyList(), EVENT_ID, copierOptions))
      .thenReturn(sourceLocationManager);

  PartitionedTableReplication replication = new PartitionedTableReplication(DATABASE, TABLE, partitionPredicate,
      source, replica, copierFactoryManager, eventIdFactory, targetTableLocation, DATABASE, TABLE, copierOptions,
      listener, dataManipulatorFactoryManager);
  replication.replicate();

  // No data should have been copied, and the replica interactions must happen in order:
  // validate first, then the metadata update.
  verifyZeroInteractions(copier);
  InOrder replicationOrder = inOrder(sourceLocationManager, replica, replicaLocationManager, listener);
  replicationOrder.verify(replica).validateReplicaTable(DATABASE, TABLE);
  replicationOrder
      .verify(replica)
      .updateMetadata(EVENT_ID, sourceTableAndStatistics, DATABASE, TABLE, replicaLocationManager);
}
 
Example 16
/**
 * Keeps only the source partitions that already exist on the replica (probed via
 * {@code replicaClient.getPartition}), preserving source order, and returns them
 * together with their source statistics.
 */
private PartitionsAndStatistics filterOnReplicatedPartitions(
    CloseableMetaStoreClient replicaClient,
    PartitionsAndStatistics sourcePartitionsAndStatistics,
    List<FieldSchema> partitionKeys)
  throws TException {
  // LinkedHashMap keeps the source iteration order.
  Map<Partition, ColumnStatistics> statisticsByPartition = new LinkedHashMap<>();
  for (Partition sourcePartition : sourcePartitionsAndStatistics.getPartitions()) {
    try {
      // Existence probe: throws NoSuchObjectException when the replica lacks it.
      replicaClient.getPartition(replicaDatabaseName, replicaTableName, sourcePartition.getValues());
      statisticsByPartition.put(sourcePartition,
          sourcePartitionsAndStatistics.getStatisticsForPartition(sourcePartition));
    } catch (NoSuchObjectException e) {
      LOG.debug("Partition {} doesn't exist, skipping it...", Warehouse.getQualifiedName(sourcePartition));
    }
  }
  return new PartitionsAndStatistics(partitionKeys, statisticsByPartition);
}
 
Example 17
/**
 * Adds {@code partition} to {@code table} through the metastore client while holding the
 * partition lock; when the add fails with a TException, falls back to the
 * partition-exists handlers (with or without computing a diff, per configuration).
 *
 * @throws TException  from the metastore interaction
 * @throws IOException propagated from collaborators declared on the signature
 */
private void addOrAlterPartitionWithPushMode(IMetaStoreClient client, Table table, HivePartition partition)
    throws TException, IOException {
  Partition nativePartition = HiveMetaStoreUtils.getPartition(partition);

  // The number of partition values must match the table's partition key count.
  Preconditions.checkArgument(table.getPartitionKeysSize() == nativePartition.getValues().size(),
      String.format("Partition key size is %s but partition value size is %s", table.getPartitionKeys().size(),
          nativePartition.getValues().size()));

  // Serialize concurrent writers to the same partition.
  try (AutoCloseableHiveLock lock =
      this.locks.getPartitionLock(table.getDbName(), table.getTableName(), nativePartition.getValues())) {

    try {
      // Time the add for metrics; the create time is stamped as "now" on the copy sent.
      try (Timer.Context context = this.metricContext.timer(ADD_PARTITION_TIMER).time()) {
        client.add_partition(getPartitionWithCreateTimeNow(nativePartition));
      }
      log.info(String.format("Added partition %s to table %s with location %s", stringifyPartition(nativePartition),
          table.getTableName(), nativePartition.getSd().getLocation()));
    } catch (TException e) {
      // NOTE(review): any TException is treated as "partition may already exist" and
      // routed to the exist-handlers — confirm this is the intended contract.
      try {
        if (this.skipDiffComputation) {
          onPartitionExistWithoutComputingDiff(table, nativePartition, e);
        } else {
          onPartitionExist(client, table, partition, nativePartition, null);
        }
      } catch (Throwable e2) {
        // Log with full context, then rethrow the handler's failure (not the original).
        log.error(String.format(
            "Unable to add or alter partition %s in table %s with location %s: " + e2.getMessage(),
            stringifyPartitionVerbose(nativePartition), table.getTableName(), nativePartition.getSd().getLocation()), e2);
        throw e2;
      }
    }
  }
}
 
Example 18
Source Project: circus-train   Source File: ReplicaTableFactoryTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void newReplicaPartitionStatistics() throws MetaException {
  sourceTable.setPartitionKeys(
      Arrays.asList(new FieldSchema("one", "string", null), new FieldSchema("two", "string", null)));

  // A replica partition mapped onto the replica database/table with values A/B.
  Partition replicaPartition = new Partition(sourcePartition);
  replicaPartition.setDbName(MAPPED_DB_NAME);
  replicaPartition.setTableName(MAPPED_TABLE_NAME);
  replicaPartition.setValues(Arrays.asList("A", "B"));

  // Source statistics keyed by the *source* coordinates and the derived partition name.
  ColumnStatisticsObj statsObjOne = new ColumnStatisticsObj();
  ColumnStatisticsObj statsObjTwo = new ColumnStatisticsObj();
  ColumnStatisticsDesc statisticsDesc = new ColumnStatisticsDesc(false, DB_NAME, TABLE_NAME);
  statisticsDesc
      .setPartName(Warehouse.makePartName(sourceTable.getPartitionKeys(), replicaPartition.getValues()));
  ColumnStatistics sourcePartitionStatistics =
      new ColumnStatistics(statisticsDesc, Arrays.asList(statsObjOne, statsObjTwo));

  ColumnStatistics replicaPartitionStatistics = factory.newReplicaPartitionStatistics(sourceTable, replicaPartition,
      sourcePartitionStatistics);

  // The descriptor is re-pointed at the replica coordinates; the stats objects pass through.
  assertThat(replicaPartitionStatistics.getStatsDesc().getDbName(), is(MAPPED_DB_NAME));
  assertThat(replicaPartitionStatistics.getStatsDesc().getTableName(), is(MAPPED_TABLE_NAME));
  assertThat(replicaPartitionStatistics.getStatsDesc().getPartName(), is("one=A/two=B"));
  assertThat(replicaPartitionStatistics.getStatsObj().size(), is(2));
  assertThat(replicaPartitionStatistics.getStatsObj().get(0), is(statsObjOne));
  assertThat(replicaPartitionStatistics.getStatsObj().get(1), is(statsObjTwo));
}
 
Example 19
@Test
public void onChangedPartition() throws Exception {
  // Two changed partitions should be OR-ed together in the generated partition-spec filter.
  Partition changedOne = new Partition(Lists.newArrayList("val1", "val2"), DB, TABLE, 1, 1, null, null);
  Partition changedTwo = new Partition(Lists.newArrayList("val11", "val22"), DB, TABLE, 1, 1, null, null);
  listener.onDiffStart(source, replica);
  listener.onChangedPartition("p1", changedOne, differences);
  listener.onChangedPartition("p2", changedTwo, differences);
  assertThat(listener.getPartitionSpecFilter(), is("(p1='val1' AND p2=val2) OR (p1='val11' AND p2=val22)"));
}
 
Example 20
Source Project: Hue-Ctrip-DI   Source File: TestHiveCleanService.java    License: MIT License 5 votes vote down vote up
/**
 * Registers a partition of {@code table} at {@code vals}, located at the table's
 * location suffixed with {@code location}.
 *
 * @param client   metastore client used to create the partition
 * @param table    parent table supplying db/table names and the storage descriptor
 * @param vals     partition values
 * @param location suffix appended to the table location to form the partition location
 */
private void add_partition(HiveMetaStoreClient client, Table table,
	      List<String> vals, String location) throws InvalidObjectException,
	        AlreadyExistsException, MetaException, TException {

    Partition part = new Partition();
    part.setDbName(table.getDbName());
    part.setTableName(table.getTableName());
    part.setValues(vals);
    part.setParameters(new HashMap<String, String>());
    // Fix: deep-copy the table's storage descriptor (Thrift structs provide deepCopy()).
    // The original shared the table's own SD object, so setLocation() below mutated the
    // *table's* location as a side effect. The copy also carries over the serde info,
    // making the original explicit setSerdeInfo (a no-op on a shared SD) unnecessary.
    part.setSd(table.getSd().deepCopy());
    part.getSd().setLocation(table.getSd().getLocation() + location);

    client.add_partition(part);
}
 
Example 21
Source Project: circus-train   Source File: HiveDifferencesTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Test-fixture factory: builds a partition with value "01" for the given database/table
 * whose storage descriptor points at {@code location}, with empty parameters.
 */
private static Partition newPartition(String databaseName, String tableName, String location) {
  // Build the storage descriptor first, then attach it to the partition.
  StorageDescriptor storage = new StorageDescriptor();
  storage.setLocation(location);

  Partition partition = new Partition();
  partition.setDbName(databaseName);
  partition.setTableName(tableName);
  partition.setParameters(new HashMap<String, String>());
  partition.setValues(Arrays.asList("01"));
  partition.setSd(storage);

  return partition;
}
 
Example 22
/**
 * Pure delegation to {@code MetaStoreUtils.requireCalStats}. Note that the
 * {@code environmentContext} parameter is accepted but not forwarded to the delegate.
 */
@Override
public boolean requireCalStats(
    Configuration conf,
    Partition oldPart,
    Partition newPart,
    Table tbl,
    EnvironmentContext environmentContext) {
  return MetaStoreUtils.requireCalStats(conf, oldPart, newPart, tbl);
}
 
Example 23
Source Project: incubator-sentry   Source File: AuthorizingObjectStore.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Authorization gate over the parent implementation: the caller may only list
 * partitions of tables it is allowed to see; otherwise a no-access MetaException
 * is thrown.
 */
@Override
public List<Partition> getPartitionsByFilter(String dbName,
    String tblName, String filter, short maxParts) throws MetaException,
    NoSuchObjectException {
  // filterTables returns only the tables the current user may access.
  List<String> accessible = filterTables(dbName, Lists.newArrayList(tblName));
  if (accessible.isEmpty()) {
    throw new MetaException(getNoAccessMessageForTable(dbName, tblName));
  }
  return super.getPartitionsByFilter(dbName, tblName, filter, maxParts);
}
 
Example 24
Source Project: beam   Source File: HCatalogUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Computes how many splits to read the partition with: the estimated partition size
 * divided by the desired bundle size, rounded up, and at least one split when the
 * size estimate is unavailable (non-positive).
 */
static int getSplitCount(Read readRequest, Partition partitionToRead) throws Exception {
  long sizeBytes = getFileSizeForPartition(readRequest, partitionToRead);
  if (sizeBytes <= 0) {
    // No usable size estimate: read the partition as a single split.
    return 1;
  }
  return (int) Math.ceil((double) sizeBytes / DESIRED_BUNDLE_SIZE_BYTES);
}
 
Example 25
Source Project: dremio-oss   Source File: HiveMetadataUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Wrapper around {@link MetaStoreUtils#getPartitionMetadata(Partition, Table)} which also adds
 * the table's parameters to the returned properties, since SerDes (e.g. AvroSerDe reading the
 * schema) rely on table-level parameters that the partition-only call does not include.
 *
 * @param partition the source of partition level parameters
 * @param table     the source of table level parameters
 * @return partition metadata augmented with the table's non-null parameters
 */
public static Properties buildPartitionProperties(final Partition partition, final Table table) {
  final Properties properties = MetaStoreUtils.getPartitionMetadata(partition, table);

  for (Map.Entry<String, String> tableParam : table.getParameters().entrySet()) {
    final String key = tableParam.getKey();
    final String value = tableParam.getValue();
    // Properties is null-hostile; skip entries that could not be stored anyway.
    if (key == null || value == null) {
      continue;
    }
    properties.put(key, value);
  }

  return properties;
}
 
Example 26
Source Project: metacat   Source File: HiveConnectorPartitionService.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName table,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    // Collect the storage location of every partition matching the request
    // (filter, explicit names, sort order, and paging all come from the request).
    final List<String> locations = Lists.newArrayList();
    for (Partition partition : getPartitions(table, partitionsRequest.getFilter(),
        partitionsRequest.getPartitionNames(), partitionsRequest.getSort(), partitionsRequest.getPageable())) {
        locations.add(partition.getSd().getLocation());
    }
    return locations;
}
 
Example 27
Source Project: waggle-dance   Source File: FederatedHMSHandler.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Federated add_partitions_req: resolves the database mapping from the request's
 * database, verifies write permission for it and for every partition's database,
 * then forwards the inbound-transformed request and transforms the result back out.
 */
@Override
@Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME)
public AddPartitionsResult add_partitions_req(AddPartitionsRequest request)
    throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
  DatabaseMapping mapping = checkWritePermissions(request.getDbName());
  // Each partition may name its own database; all must be writable.
  for (Partition newPart : request.getParts()) {
    mapping.checkWritePermissions(newPart.getDbName());
  }
  AddPartitionsRequest transformed = mapping.transformInboundAddPartitionsRequest(request);
  AddPartitionsResult result = mapping.getClient().add_partitions_req(transformed);
  return mapping.transformOutboundAddPartitionsResult(result);
}
 
Example 28
Source Project: waggle-dance   Source File: FederatedHMSHandler.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Federated add_partitions: an empty list is a no-op returning 0. Otherwise the mapping
 * is resolved from the first partition's database and used for permission checks and
 * for the client call.
 */
@Override
@Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME)
public int add_partitions(List<Partition> new_parts)
    throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
  if (new_parts.isEmpty()) {
    return 0;
  }
  // Need to pick one mapping and use that for permissions and getting the client.
  // If the partitions added are for different databases in different clients that won't work with waggle-dance
  DatabaseMapping mapping = databaseMappingService.databaseMapping(new_parts.get(0).getDbName());
  for (Partition newPart : new_parts) {
    mapping.checkWritePermissions(newPart.getDbName());
  }
  return mapping.getClient().add_partitions(mapping.transformInboundPartitions(new_parts));
}
 
Example 29
Source Project: circus-train   Source File: TestUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Test-fixture factory: builds a partition of {@code hiveTable} with the given values,
 * copying the table's storage descriptor and re-pointing it at {@code location}.
 */
public static Partition newTablePartition(Table hiveTable, List<String> values, URI location) {
  // Copy the table's SD so the partition's location change does not touch the table.
  StorageDescriptor sd = new StorageDescriptor(hiveTable.getSd());
  sd.setLocation(location.toString());

  Partition partition = new Partition();
  partition.setDbName(hiveTable.getDbName());
  partition.setTableName(hiveTable.getTableName());
  partition.setValues(values);
  partition.setSd(sd);
  return partition;
}
 
Example 30
Source Project: dremio-oss   Source File: PartitionMetadata.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Canonical constructor: plain wiring of pre-built collaborators.
 * {@code partitionValues} is wrapped in an unmodifiable view so later callers cannot
 * mutate it through this object; the other arguments are stored as-is.
 */
private PartitionMetadata(final int partitionId, final Partition partition, List<PartitionValue> partitionValues,
                          InputSplitBatchIterator inputSplitBatchIterator, DatasetSplitBuildConf datasetSplitBuildConf,
                          PartitionXattr partitionXattr) {
  this.partitionId = partitionId;
  this.partition = partition;
  this.partitionValues = Collections.unmodifiableList(partitionValues);
  this.inputSplitBatchIterator = inputSplitBatchIterator;
  this.datasetSplitBuildConf = datasetSplitBuildConf;
  this.partitionXattr = partitionXattr;
}