Java Code Examples for org.apache.hadoop.hive.ql.metadata.Table

The following examples show how to use org.apache.hadoop.hive.ql.metadata.Table. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
/**
 * Verifies that watermark state written by one watermarker run can be read back by a
 * second run: the first run emits a watermark workunit, whose committed state seeds
 * the previous-watermarks map of a freshly constructed watermarker.
 */
@Test
public void testStateStoreReadWrite() throws Exception {

  String dbName = "testStateStoreReadWrite";
  LocalHiveMetastoreTestUtils.getInstance().dropDatabaseIfExists(dbName);

  // Run 1: observe one table and one partition, then emit the watermark workunit.
  PartitionLevelWatermarker watermarker0 = new PartitionLevelWatermarker(new SourceState());
  Table mockTable = localTestTable(dbName, "table1", true);

  watermarker0.onTableProcessBegin(mockTable, 0L); // uppercase L suffix: lowercase 'l' reads as '1'
  long now = new DateTime().getMillis();
  watermarker0.onPartitionProcessBegin(localTestPartition(mockTable, ImmutableList.of("2016")), 0, now);
  List<WorkUnit> workunits = Lists.newArrayList();
  watermarker0.onGetWorkunitsEnd(workunits);

  // Commit the watermark workunit as if the job had completed successfully.
  @SuppressWarnings("deprecation")
  WorkUnitState previousWus = new WorkUnitState(workunits.get(0));
  watermarker0.setActualHighWatermark(previousWus);

  // Run 2: a new watermarker built from that state must see the stored watermark.
  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
  PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);

  Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 1);
  Assert.assertEquals(watermarker.getPreviousWatermarks().get(dbName + "@table1"), ImmutableMap.of("2016", now));

}
 
Example 2
Source Project: atlas   Source File: HiveMetaStoreBridge.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Registers the given Hive table in Atlas under the supplied database entity,
 * or updates the existing Atlas entity when the table is already registered.
 *
 * @param dbEntity Atlas entity of the owning database
 * @param table    Hive table to register
 * @return the registered or updated table entity
 * @throws AtlasHookException if lookup, registration, or update fails
 */
private AtlasEntityWithExtInfo registerTable(AtlasEntity dbEntity, Table table) throws AtlasHookException {
    try {
        AtlasEntityWithExtInfo existing = findTableEntity(table);

        if (existing != null) {
            LOG.info("Table {}.{} is already registered with id {}. Updating entity.", table.getDbName(), table.getTableName(), existing.getEntity().getGuid());

            AtlasEntityWithExtInfo updated = toTableEntity(dbEntity, table, existing);
            updateInstance(updated);

            return updated;
        }

        // Not yet known to Atlas: build a fresh entity and register it.
        return registerInstance(toTableEntity(dbEntity, table));
    } catch (Exception e) {
        throw new AtlasHookException("HiveMetaStoreBridge.registerTable() failed.", e);
    }
}
 
Example 3
Source Project: incubator-gobblin   Source File: AvroSchemaManagerTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Resolves the Avro schema URL for a test partition and checks the parsed schema
 * matches the expected fixture file byte-for-byte.
 */
@Test
public void testGetSchemaFromUrlUsingHiveSchema() throws IOException, HiveException {
  FileSystem fs = FileSystem.getLocal(new Configuration());

  String jobId = "123";
  State state = new State();
  state.setProp(ConfigurationKeys.JOB_ID_KEY, jobId);

  AvroSchemaManager asm = new AvroSchemaManager(fs, state);
  Partition partition = getTestPartition(new Table("testDb", "testTable"));
  Path schemaPath = asm.getSchemaUrl(partition);

  Schema actualSchema = AvroUtils.parseSchemaFromFile(schemaPath, fs);
  // Decode the fixture explicitly as UTF-8: the no-charset String(byte[]) constructor
  // uses the platform default charset and can corrupt the comparison on non-UTF-8 hosts.
  String expectedSchema = new String(Files.readAllBytes(
      Paths.get(getClass().getClassLoader().getResource("avroSchemaManagerTest/expectedSchema.avsc").getFile())),
      java.nio.charset.StandardCharsets.UTF_8);
  Assert.assertEquals(actualSchema.toString(), expectedSchema);
}
 
Example 4
Source Project: atlas   Source File: HiveMetaStoreBridge.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Renders a HiveQL DDL statement of the form
 * {@code create external table <name>(c1 t1,c2 t2) location '<location>'}.
 * The column list is omitted entirely when the table has no (or null) columns.
 *
 * @param table    table whose name and columns are rendered
 * @param location external data location to embed in the statement
 * @return the DDL string
 */
private String getCreateTableString(Table table, String location){
    // StringBuilder instead of += concatenation: the original rebuilt the string on
    // every column (accidental O(n^2)).
    StringBuilder colString = new StringBuilder();
    List<FieldSchema> colList = table.getAllCols();

    if (colList != null && !colList.isEmpty()) {
        colString.append('(');
        for (int i = 0; i < colList.size(); i++) {
            if (i > 0) {
                colString.append(',');
            }
            FieldSchema col = colList.get(i);
            colString.append(col.getName()).append(' ').append(col.getType());
        }
        colString.append(')');
    }

    return "create external table " + table.getTableName() + colString + " location '" + location + "'";
}
 
Example 5
Source Project: atlas   Source File: HiveMetastoreHookImpl.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Classifies an ALTER TABLE metastore event and forwards it to the hook.
 * Checks run from most to least specific: table rename, column rename, then
 * generic property changes.
 */
@Override
public void onAlterTable(AlterTableEvent tableEvent) {
    HiveOperationContext opContext = new HiveOperationContext(tableEvent);
    Table oldTable = toTable(tableEvent.getOldTable());
    Table newTable = toTable(tableEvent.getNewTable());

    if (isTableRename(oldTable, newTable)) {
        opContext.setOperation(ALTERTABLE_RENAME);
    } else if (isColumnRename(oldTable, newTable, opContext)) {
        opContext.setOperation(ALTERTABLE_RENAMECOL);
    } else if (isAlterTableProperty(tableEvent, "last_modified_time")
            || isAlterTableProperty(tableEvent, "transient_lastDdlTime")) {
        // Map other alter table operations to ALTERTABLE_PROPERTIES.
        opContext.setOperation(ALTERTABLE_PROPERTIES);
    }

    hook.handleEvent(opContext);
}
 
Example 6
/**
 * Per-test fixture: builds a HiveConf with a temp scratch directory and the Sentry
 * authorization task factory, then wires a mocked Hive client so the DDL analyzer
 * resolves the fixed test table and partition.
 */
@Before
public void setup() throws Exception {
  // Fresh conf with a writable temp scratch dir for this test run.
  conf = new HiveConf();
  baseDir = Files.createTempDir();
  baseDir.setWritable(true, false);
  conf.setVar(HiveConf.ConfVars.SCRATCHDIR, baseDir.getAbsolutePath());
  SessionState.start(conf);
  // Route authorization task creation through Sentry's factory.
  conf.setVar(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY,
      SentryHiveAuthorizationTaskFactoryImpl.class.getName());

  // Mock the Hive client so lookups return the fixed table/partition below.
  db = Mockito.mock(Hive.class);
  table = new Table(DB, TABLE);
  partition = new Partition(table);
  context = new Context(conf);
  parseDriver = new ParseDriver();
  analyzer = new DDLSemanticAnalyzer(conf, db);
  // NOTE(review): SessionState.start(conf) is invoked a second time here — possibly to
  // pick up the task-factory setting made after the first start; confirm both are needed.
  SessionState.start(conf);
  Mockito.when(db.getTable(TABLE, false)).thenReturn(table);
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
  .thenReturn(partition);

  // Resolve the current OS user so tests can assert against it.
  HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator();
  auth.setConf(conf);
  currentUser = auth.getUserName();

}
 
Example 7
Source Project: atlas   Source File: BaseHiveEvent.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Extracts the HBase namespace and table name from a Hive table's parameters.
 * A qualified parameter value such as "ns:table" overrides the default namespace;
 * an unqualified value keeps the default.
 */
HBaseTableInfo(Table table) {
    Map<String, String> parameters = table.getParameters();

    if (MapUtils.isNotEmpty(parameters)) {
        hbaseNameSpace = HBASE_DEFAULT_NAMESPACE;
        hbaseTableName = parameters.get(HBASE_PARAM_TABLE_NAME);

        if (hbaseTableName != null && hbaseTableName.contains(HBASE_NAMESPACE_TABLE_DELIMITER)) {
            String[] parts = hbaseTableName.split(HBASE_NAMESPACE_TABLE_DELIMITER);

            if (parts.length > 1) {
                hbaseNameSpace = parts[0];
                hbaseTableName = parts[1];
            }
        }
    }
}
 
Example 8
Source Project: incubator-gobblin   Source File: HiveCopyEntityHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Mutates {@code targetTable} in place so it can be registered at the copy destination:
 * points it at the target database and location, stamps ownership and distcp
 * registration metadata, and clears the create time.
 *
 * @param targetTable    table object to rewrite (modified in place)
 * @param targetLocation data location on the target filesystem
 * @param targetDatabase database name at the destination
 * @param startTime      registration generation timestamp, in millis
 * @throws IOException if the current user cannot be resolved
 */
@VisibleForTesting
static void addMetadataToTargetTable(Table targetTable, Path targetLocation, String targetDatabase, long startTime)
    throws IOException {
  targetTable.setDbName(targetDatabase);
  targetTable.setDataLocation(targetLocation);
  /*
   * Need to set the table owner as the flow executor
   */
  targetTable.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
  // Record that gobblin-distcp registered this table, and when.
  targetTable.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
  targetTable.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
      Long.toString(startTime));

  /**
   * Only set this constant when the source table has it.
   */
  targetTable.getTTable().getSd().getSerdeInfo().getParameters()
      .computeIfPresent(HiveConstants.PATH, (k,v) -> targetLocation.toString());
  // The original create time no longer applies to the copied table.
  targetTable.getTTable().unsetCreateTime();
}
 
Example 9
Source Project: incubator-atlas   Source File: HiveMetaStoreBridgeTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Import must be resilient to a single table failing: the first table lookup throws,
 * and the bridge (invoked with failOnError=false) should skip it and still import
 * the second table without surfacing an exception.
 */
@Test
public void testImportContinuesWhenTableRegistrationFails() throws Exception {
    setupDB(hiveClient, TEST_DB_NAME);
    final String table2Name = TEST_TABLE_NAME + "_1";
    // Single guid reused for every mocked entity reference (was repeated inline 3x).
    final String guid = "82e06b34-9151-4023-aa9d-b82103a50e77";
    List<Table> hiveTables = setupTables(hiveClient, TEST_DB_NAME, TEST_TABLE_NAME, table2Name);

    returnExistingDatabase(TEST_DB_NAME, atlasClient, CLUSTER_NAME);
    // First table lookup blows up; the import should continue with table2.
    when(hiveClient.getTable(TEST_DB_NAME, TEST_TABLE_NAME)).thenThrow(new RuntimeException("Timeout while reading data from hive metastore"));

    when(atlasClient.getEntity(HiveDataTypes.HIVE_TABLE.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, TEST_DB_NAME,
        table2Name))).thenReturn(
        getEntityReference(HiveDataTypes.HIVE_TABLE.getName(), guid));
    when(atlasClient.getEntity(guid)).thenReturn(createTableReference());
    String processQualifiedName = HiveMetaStoreBridge.getTableProcessQualifiedName(CLUSTER_NAME, hiveTables.get(1));
    when(atlasClient.getEntity(HiveDataTypes.HIVE_PROCESS.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
        processQualifiedName)).thenReturn(getEntityReference(HiveDataTypes.HIVE_PROCESS.getName(), guid));

    HiveMetaStoreBridge bridge = new HiveMetaStoreBridge(CLUSTER_NAME, hiveClient, atlasClient);
    try {
        bridge.importHiveMetadata(false);
    } catch (Exception e) {
        Assert.fail("Table registration failed with exception", e);
    }
}
 
Example 10
Source Project: incubator-gobblin   Source File: HiveMaterializerSource.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Looks up a Hive table named in {@code "<db>.<table>"} form in the metastore and
 * wraps it in a {@link HiveDataset}.
 *
 * @param tableString fully qualified table name in "db.table" form
 * @param fs          filesystem backing the dataset
 * @param state       job state supplying the metastore URI and properties
 * @return the resolved HiveDataset
 * @throws IOException if the table string is malformed or the client cannot be obtained
 */
private HiveDataset getHiveDataset(String tableString, FileSystem fs, State state) throws IOException {
  try {
    HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(state.getProperties(),
        Optional.fromNullable(state.getProp(HIVE_METASTORE_URI_KEY)));

    List<String> tokens = Splitter.on(".").splitToList(tableString);
    // Fail fast with a clear message rather than an IndexOutOfBoundsException below.
    if (tokens.size() < 2) {
      throw new IOException("Expected table name in <db>.<table> form but got: " + tableString);
    }
    DbAndTable sourceDbAndTable = new DbAndTable(tokens.get(0), tokens.get(1));

    try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
      Table sourceTable = new Table(client.get().getTable(sourceDbAndTable.getDb(), sourceDbAndTable.getTable()));
      return new HiveDataset(fs, pool, sourceTable, ConfigUtils.propertiesToConfig(state.getProperties()));
    }
  } catch (TException exc) {
    throw new RuntimeException(exc);
  }
}
 
Example 11
Source Project: streamx   Source File: AvroHiveUtil.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds an external Hive table backed by Avro files under the topic directory.
 *
 * @param database    owning Hive database
 * @param tableName   table (topic) name
 * @param schema      Connect schema to translate into Hive columns
 * @param partitioner supplies the partition columns
 * @return the fully configured table, ready for metastore registration
 * @throws HiveMetaStoreException if the Avro input/output format classes cannot be resolved
 */
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  // External table rooted under the topic directory.
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  table.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));

  // Avro storage: SerDe plus matching input/output formats.
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }

  // Columns and partition keys derived from the Connect schema.
  table.setFields(HiveSchemaConverter.convertSchema(schema));
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
 
Example 12
Source Project: streamx   Source File: ParquetHiveUtil.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds an external Hive table backed by Parquet files under the topic directory.
 *
 * @param database    owning Hive database
 * @param tableName   table (topic) name
 * @param schema      Connect schema to translate into Hive columns
 * @param partitioner supplies the partition columns
 * @return the fully configured table, ready for metastore registration
 * @throws HiveMetaStoreException if the Parquet input/output format classes cannot be resolved
 */
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  // External table rooted under the topic directory.
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  table.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));

  // Parquet storage: SerDe plus matching input/output formats.
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }

  // Convert the Connect schema to Hive columns and attach the partition keys.
  table.setFields(HiveSchemaConverter.convertSchema(schema));
  table.setPartCols(partitioner.partitionFields());
  return table;
}
 
Example 13
/**
 * When a new partition declares (via the replaced-partitions parameter) that it
 * supersedes previously-watermarked partitions, the old monthly watermarks must be
 * dropped in favor of the new yearly partition's watermark.
 */
@Test
public void testDroppedPartitions() throws Exception {
  // Previous run committed watermarks for monthly partitions 2015-01 and 2015-02.
  WorkUnitState previousWus = new WorkUnitState();
  // NOTE(review): "[email protected]_dataset_urn" looks like a scraping artifact of the original
  // dataset URN (likely "<db>@test_dataset_urn") — restore the original literal if available.
  previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "[email protected]_dataset_urn");
  previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
  previousWus
      .setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015-01", 100L, "2015-02", 101L)));

  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
  PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);

  Table table = mockTable("test_dataset_urn");
  Mockito.when(table.getPartitionKeys()).thenReturn(ImmutableList.of(new FieldSchema("year", "string", "")));

  Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));

  // partition 2015 replaces 2015-01 and 2015-02
  Mockito.when(partition2015.getParameters()).thenReturn(
      ImmutableMap.of(AbstractAvroToOrcConverter.REPLACED_PARTITIONS_HIVE_METASTORE_KEY, "2015-01|2015-02"));
  watermarker.onPartitionProcessBegin(partition2015, 0L, 0L); // uppercase L: lowercase 'l' reads as '1'

  // The replaced monthly watermarks collapse into the yearly partition's watermark.
  Assert.assertEquals(watermarker.getExpectedHighWatermarks().get("[email protected]_dataset_urn"), ImmutableMap.of("2015", 0L));
}
 
Example 14
Source Project: beam   Source File: HCatalogUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Computes the on-disk size (in bytes) of a single partition by consulting the
 * Hive metastore and filesystem statistics.
 *
 * @param readRequest     read spec carrying connection, database, and table info
 * @param partitionToRead the metastore partition to size
 * @return partition size in bytes
 * @throws Exception if the metastore client cannot be created or the lookup fails
 */
private static long getFileSizeForPartition(Read readRequest, Partition partitionToRead)
    throws Exception {
  IMetaStoreClient metaStoreClient = null;
  try {
    HiveConf hiveConf = HCatalogUtils.createHiveConf(readRequest);
    metaStoreClient = HCatalogUtils.createMetaStoreClient(hiveConf);
    Table table = HCatUtil.getTable(metaStoreClient, readRequest.getDatabase(), readRequest.getTable());
    org.apache.hadoop.hive.ql.metadata.Partition qlPartition =
        new org.apache.hadoop.hive.ql.metadata.Partition(table, partitionToRead);
    List<org.apache.hadoop.hive.ql.metadata.Partition> partitions = new ArrayList<>();
    partitions.add(qlPartition);
    return StatsUtils.getFileSizeForPartitions(hiveConf, partitions).get(0);
  } finally {
    // IMetaStoreClient is not AutoCloseable, so close it manually.
    if (metaStoreClient != null) {
      metaStoreClient.close();
    }
  }
}
 
Example 15
Source Project: beam   Source File: HCatalogIO.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Returns the size of the table in bytes; any filter/partition details in the spec
 * are deliberately ignored for this estimate.
 */
@Override
public long getEstimatedSizeBytes(PipelineOptions pipelineOptions) throws Exception {
  IMetaStoreClient metaStoreClient = null;
  try {
    HiveConf hiveConf = HCatalogUtils.createHiveConf(spec);
    metaStoreClient = HCatalogUtils.createMetaStoreClient(hiveConf);
    Table table = HCatUtil.getTable(metaStoreClient, spec.getDatabase(), spec.getTable());
    return StatsUtils.getFileSizeForTable(hiveConf, table);
  } finally {
    // IMetaStoreClient is not AutoCloseable, so close it manually.
    if (metaStoreClient != null) {
      metaStoreClient.close();
    }
  }
}
 
Example 16
Source Project: incubator-gobblin   Source File: HiveDataset.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Wraps a Hive metastore {@link Table} as a Gobblin dataset, resolving the physical
 * and (optionally pattern-derived) logical db/table names, the dataset config, and
 * a metric context tagged with the database and table names.
 *
 * @param fs            filesystem hosting the table data
 * @param clientPool    pool of metastore clients for later lookups
 * @param table         the Hive table backing this dataset
 * @param properties    job properties (also used to build the metric context)
 * @param datasetConfig dataset-scoped config; placeholders are resolved against db/table names
 */
public HiveDataset(FileSystem fs, HiveMetastoreClientPool clientPool, Table table, Properties properties, Config datasetConfig) {
  this.fs = fs;
  this.clientPool = clientPool;
  this.table = table;
  this.properties = properties;

  // A glob data location cannot serve as a concrete root path.
  this.tableRootPath = PathUtils.isGlob(this.table.getDataLocation()) ? Optional.<Path> absent() :
      Optional.fromNullable(this.table.getDataLocation());

  this.tableIdentifier = this.table.getDbName() + "." + this.table.getTableName();

  // Logical db/table may differ from the physical ones when a name pattern is configured.
  this.datasetNamePattern = Optional.fromNullable(ConfigUtils.getString(datasetConfig, DATASET_NAME_PATTERN_KEY, null));
  this.dbAndTable = new DbAndTable(table.getDbName(), table.getTableName());
  if (this.datasetNamePattern.isPresent()) {
    this.logicalDbAndTable = parseLogicalDbAndTable(this.datasetNamePattern.get(), this.dbAndTable, LOGICAL_DB_TOKEN, LOGICAL_TABLE_TOKEN);
  } else {
    this.logicalDbAndTable = this.dbAndTable;
  }
  // Resolve config placeholders against both physical and logical names.
  this.datasetConfig = resolveConfig(datasetConfig, dbAndTable, logicalDbAndTable);
  this.metricContext = Instrumented.getMetricContext(new State(properties), HiveDataset.class,
      Lists.<Tag<?>> newArrayList(new Tag<>(DATABASE, table.getDbName()), new Tag<>(TABLE, table.getTableName())));
}
 
Example 17
Source Project: incubator-atlas   Source File: HiveMetaStoreBridge.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Registers the given Hive table in Atlas under the supplied database reference,
 * or updates the existing instance when the table is already registered.
 *
 * @param dbReference referenceable of the owning database
 * @param table       Hive table to register
 * @return the registered or updated table referenceable
 * @throws AtlasHookException if lookup, registration, or update fails
 */
private Referenceable registerTable(Referenceable dbReference, Table table) throws AtlasHookException {
    try {
        String dbName = table.getDbName();
        String tableName = table.getTableName();
        LOG.info("Attempting to register table [{}]", tableName);
        Referenceable tableReference = getTableReference(table);
        LOG.info("Found result {}", tableReference);
        if (tableReference == null) {
            tableReference = createTableInstance(dbReference, table);
            tableReference = registerInstance(tableReference);
        } else {
            LOG.info("Table {}.{} is already registered with id {}. Updating entity.", dbName, tableName,
                    tableReference.getId().id);
            tableReference = createOrUpdateTableInstance(dbReference, tableReference, table);
            updateInstance(tableReference);
        }
        return tableReference;
    } catch (Exception e) {
        // Fixed: the message previously blamed getStorageDescQFName(), a different method.
        throw new AtlasHookException("HiveMetaStoreBridge.registerTable() failed.", e);
    }
}
 
Example 18
Source Project: incubator-atlas   Source File: HiveHook.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Handles a table rename in Atlas: re-keys the existing entity under the old
 * name/qualified name, then emits a partial-update that rewrites it to the new
 * qualified name while keeping the old name as an alias.
 *
 * @return the replacement referenceable carrying the new name
 */
private Referenceable replaceTableQFName(HiveEventContext event, Table oldTable, Table newTable, final Referenceable tableEntity, final String oldTableQFName, final String newTableQFName) throws HiveException {
    // Re-key the existing entity under the old identifiers.
    tableEntity.set(AtlasClient.NAME, oldTable.getTableName().toLowerCase());
    tableEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, oldTableQFName);

    // Build the replacement entity that carries the new name.
    final Referenceable renamed = new Referenceable(HiveDataTypes.HIVE_TABLE.getName());
    renamed.set(AtlasClient.NAME, newTable.getTableName().toLowerCase());
    renamed.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, newTableQFName);

    // Preserve the old table name as an alias on the renamed entity.
    ArrayList<String> aliases = new ArrayList<>();
    aliases.add(oldTable.getTableName().toLowerCase());
    renamed.set(HiveMetaStoreBridge.TABLE_ALIAS_LIST, aliases);

    // Ask Atlas to rewrite the entity keyed by the old qualified name.
    event.addMessage(new HookNotification.EntityPartialUpdateRequest(event.getUser(),
        HiveDataTypes.HIVE_TABLE.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
        oldTableQFName, renamed));

    return renamed;
}
 
Example 19
Source Project: incubator-atlas   Source File: HiveMetaStoreBridgeTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * When the table is already registered in Atlas, a (failOnError=true) import must
 * update the existing entity rather than creating a new one.
 */
@Test
public void testImportThatUpdatesRegisteredTable() throws Exception {
    setupDB(hiveClient, TEST_DB_NAME);

    List<Table> hiveTables = setupTables(hiveClient, TEST_DB_NAME, TEST_TABLE_NAME);

    returnExistingDatabase(TEST_DB_NAME, atlasClient, CLUSTER_NAME);

    // Single guid reused for every mocked entity reference (was repeated inline 3x).
    final String guid = "82e06b34-9151-4023-aa9d-b82103a50e77";

    // return existing table
    when(atlasClient.getEntity(HiveDataTypes.HIVE_TABLE.getName(),
        AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, TEST_DB_NAME, TEST_TABLE_NAME)))
        .thenReturn(getEntityReference(HiveDataTypes.HIVE_TABLE.getName(), guid));
    when(atlasClient.getEntity(guid)).thenReturn(createTableReference());
    String processQualifiedName = HiveMetaStoreBridge.getTableProcessQualifiedName(CLUSTER_NAME, hiveTables.get(0));
    when(atlasClient.getEntity(HiveDataTypes.HIVE_PROCESS.getName(),
        AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, processQualifiedName)).thenReturn(getEntityReference(HiveDataTypes.HIVE_PROCESS.getName(), guid));

    HiveMetaStoreBridge bridge = new HiveMetaStoreBridge(CLUSTER_NAME, hiveClient, atlasClient);
    bridge.importHiveMetadata(true);

    // verify update is called on table
    verify(atlasClient).updateEntity(eq(guid),
            (Referenceable) argThat(new MatchesReferenceableProperty(HiveMetaStoreBridge.TABLE_TYPE_ATTR,
                    TableType.EXTERNAL_TABLE.name())));
}
 
Example 20
Source Project: incubator-atlas   Source File: HiveMetaStoreBridgeTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Registers each requested table with the mocked Hive client and returns the
 * created fixtures in the same order as {@code tableNames}.
 */
private List<Table> setupTables(Hive hiveClient, String databaseName, String... tableNames) throws HiveException {
    when(hiveClient.getAllTables(databaseName)).thenReturn(Arrays.asList(tableNames));

    List<Table> created = new ArrayList<>();
    for (String name : tableNames) {
        Table fixture = createTestTable(databaseName, name);
        when(hiveClient.getTable(databaseName, name)).thenReturn(fixture);
        created.add(fixture);
    }
    return created;
}
 
Example 21
/**
 * Tell whether a hive table is actually an Avro table.
 * @param table a hive {@link Table}
 * @return true if the table's SerDe, input format, or output format indicates Avro storage
 */
public static boolean isHiveTableAvroType(Table table) {
  // NOTE(review): any of these storage-descriptor fields may be null for some tables,
  // which would NPE on endsWith below — confirm callers only pass fully-described tables.
  String serializationLib = table.getTTable().getSd().getSerdeInfo().getSerializationLib();
  String inputFormat = table.getTTable().getSd().getInputFormat();
  String outputFormat = table.getTTable().getSd().getOutputFormat();

  // Any one Avro marker (input format, output format, or SerDe) is sufficient.
  return inputFormat.endsWith(AvroContainerInputFormat.class.getSimpleName())
      || outputFormat.endsWith(AvroContainerOutputFormat.class.getSimpleName())
      || serializationLib.endsWith(AvroSerDe.class.getSimpleName());
}
 
Example 22
Source Project: atlas   Source File: AtlasHiveHookContext.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the lower-cased Atlas qualified name for a Hive table, suffixed with the
 * metadata namespace. Temporary tables get a per-session (or random) suffix so
 * their names do not collide across sessions.
 */
public String getQualifiedName(Table table) {
    String tableName = table.getTableName();

    if (table.isTemporary()) {
        SessionState session = SessionState.get();
        String suffix = (session != null && session.getSessionId() != null)
                ? session.getSessionId()
                : RandomStringUtils.random(10);
        tableName = tableName + TEMP_TABLE_PREFIX + suffix;
    }

    String qualifiedName = table.getDbName() + QNAME_SEP_ENTITY_NAME + tableName + QNAME_SEP_METADATA_NAMESPACE;
    return qualifiedName.toLowerCase() + getMetadataNamespace();
}
 
Example 23
Source Project: incubator-gobblin   Source File: HiveCopyEntityHelper.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Appends post-publish steps that deregister the given table: optionally a data-path
 * deletion step (depending on the configured delete method), always followed by the
 * metastore deregistration step.
 *
 * @param copyEntities    list the steps are appended to
 * @param fileSet         file set the steps belong to
 * @param initialPriority starting priority; each added step increments it
 * @param table           table being deregistered
 * @return the next unused priority
 * @throws IOException if target paths cannot be enumerated
 */
@VisibleForTesting
protected int addTableDeregisterSteps(List<CopyEntity> copyEntities, String fileSet, int initialPriority, Table table)
    throws IOException {

  int stepPriority = initialPriority;

  // Resolve which data paths (if any) should be deleted alongside deregistration.
  // Single assignment per branch; the redundant up-front initialization and the
  // duplicated NO_DELETE/default bodies of the original are collapsed.
  Collection<Path> tablePaths;
  switch (this.getDeleteMethod()) {
    case RECURSIVE:
      tablePaths = Lists.newArrayList(table.getDataLocation());
      break;
    case INPUT_FORMAT:
      // Ask the input format which concrete files the table spans on the target fs.
      InputFormat<?, ?> inputFormat = HiveUtils.getInputFormat(table.getSd());
      HiveLocationDescriptor targetLocation = new HiveLocationDescriptor(table.getDataLocation(), inputFormat,
          this.getTargetFs(), this.getDataset().getProperties());
      tablePaths = targetLocation.getPaths().keySet();
      break;
    case NO_DELETE:
    default:
      tablePaths = Lists.newArrayList();
  }

  if (!tablePaths.isEmpty()) {
    DeleteFileCommitStep deletePaths = DeleteFileCommitStep.fromPaths(this.getTargetFs(), tablePaths,
        this.getDataset().getProperties(), table.getDataLocation());
    copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), deletePaths, stepPriority++));
  }

  TableDeregisterStep deregister =
      new TableDeregisterStep(table.getTTable(), this.getTargetURI(), this.getHiveRegProps());
  copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), deregister, stepPriority++));
  return stepPriority;
}
 
Example 24
/**
 * Derives stageable-table metadata from a reference table: the destination table name
 * (plus its "_STAGING" sibling), destination database, and data path are all resolved
 * from templates in {@code config} against {@code referenceTable}.
 *
 * NOTE(review): the DESTINATION_TABLE_KEY template is resolved twice (table name and
 * staging name) — confirm resolveTemplate is cheap and idempotent.
 */
public TableLikeStageableTableMetadata(Table referenceTable, Config config) {
  super(HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_TABLE_KEY), referenceTable),
      HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_TABLE_KEY), referenceTable) + "_STAGING",
      HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_DB_KEY), referenceTable),
      HiveDataset.resolveTemplate(config.getString(DESTINATION_DATA_PATH_KEY), referenceTable),
      // Subdir is added unless the config explicitly resolves to "false".
      (!config.hasPath(StageableTableMetadata.DESTINATION_DATA_PATH_ADD_SUBDIR) ||
          Boolean.parseBoolean(HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_DATA_PATH_ADD_SUBDIR), referenceTable))),
      getTableProperties(referenceTable), new ArrayList<>(), Optional.of(referenceTable.getNumBuckets()),
      new Properties(), false, false, Optional.absent(), new ArrayList<>());
}
 
Example 25
Source Project: atlas   Source File: BaseHiveEvent.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Tells whether the given Hive table is backed by HBase, i.e. its storage-handler
 * parameter names the HBase storage handler class.
 */
protected boolean isHBaseStore(Table table) {
    Map<String, String> parameters = table.getParameters();

    if (!MapUtils.isNotEmpty(parameters)) {
        return false;
    }

    // Constant-first equals also covers a missing (null) storage-handler entry.
    return HBASE_STORAGE_HANDLER_CLASS.equals(parameters.get(ATTRIBUTE_STORAGE_HANDLER));
}
 
Example 26
Source Project: incubator-atlas   Source File: HiveMetaStoreBridgeTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds an external test table with a single string column and a dummy HDFS location.
 *
 * @param databaseName database the table belongs to
 * @param tableName    table name
 * @return a populated {@link Table} fixture
 * @throws HiveException if the input format class cannot be set
 */
private Table createTestTable(String databaseName, String tableName) throws HiveException {
    Table table = new Table(databaseName, tableName);
    table.setInputFormatClass(TextInputFormat.class);
    // Plain ArrayList instead of double-brace initialization: the anonymous subclass
    // created by "new ArrayList<>() {{ ... }}" pins the enclosing test instance.
    List<FieldSchema> columns = new ArrayList<>();
    columns.add(new FieldSchema("col1", "string", "comment1"));
    table.setFields(columns);
    table.setTableType(TableType.EXTERNAL_TABLE);
    table.setDataLocation(new Path("somehdfspath"));
    return table;
}
 
Example 27
Source Project: atlas   Source File: CreateTable.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the Atlas entities for a CREATE TABLE hook event: scans the event outputs
 * for the first non-temporary TABLE entity (refreshed from the metastore), processes
 * it, and returns the accumulated entities.
 */
public AtlasEntitiesWithExtInfo getHiveEntities() throws Exception {
    AtlasEntitiesWithExtInfo ret   = new AtlasEntitiesWithExtInfo();
    Table                    table = null;

    if (CollectionUtils.isNotEmpty(getOutputs())) {
        for (Entity entity : getOutputs()) {
            if (entity.getType() == Entity.Type.TABLE) {
                table = entity.getTable();

                if (table != null) {
                    // Re-fetch from the metastore so we process the current definition.
                    table = getHive().getTable(table.getDbName(), table.getTableName());

                    if (table != null) {
                        if (skipTemporaryTable(table)) {
                            // Reset and keep scanning: a later output may be non-temporary.
                            table = null;
                        } else {
                            break;
                        }
                    }
                }
            }
        }
    }

    // table may still be null here; processTable is expected to handle that case.
    processTable(table, ret);

    addProcessedEntities(ret);

    return ret;
}
 
Example 28
Source Project: atlas   Source File: AlterTableRenameCol.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the notification list for an ALTER TABLE ... RENAME COLUMN metastore event:
 * starts from the base notifications and appends the column-rename messages.
 */
public List<HookNotification> getHiveMetastoreMessages() throws Exception {
    List<HookNotification> notifications = new ArrayList<>(super.getNotificationMessages());

    AlterTableEvent tblEvent = (AlterTableEvent) context.getMetastoreEvent();
    Table oldTable = toTable(tblEvent.getOldTable());
    Table newTable = toTable(tblEvent.getNewTable());

    processColumns(oldTable, newTable, notifications);

    return notifications;
}
 
Example 29
Source Project: tajo   Source File: HiveCatalogUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Rejects schemas containing complex column types: Tajo has no support for
 * ARRAY, STRUCT, or MAP columns.
 *
 * @param tblSchema Hive table whose columns are validated
 * @throws TajoRuntimeException wrapping UnsupportedException on the first complex column
 */
public static void validateSchema(Table tblSchema) {
  for (FieldSchema fieldSchema : tblSchema.getCols()) {
    String type = fieldSchema.getType();
    boolean complex = type.equalsIgnoreCase("ARRAY")
        || type.equalsIgnoreCase("STRUCT")
        || type.equalsIgnoreCase("MAP");
    if (complex) {
      throw new TajoRuntimeException(new UnsupportedException("data type '" + type.toUpperCase() + "'"));
    }
  }
}
 
Example 30
Source Project: atlas   Source File: AlterTableRenameCol.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Finds the column that was renamed between two table versions: the first input
 * column that no longer appears in the output schema.
 *
 * @param inputTable  table before the rename
 * @param outputTable table after the rename
 * @return the renamed column, or null when every input column survived unchanged
 */
public static FieldSchema findRenamedColumn(Table inputTable, Table outputTable) {
    List<FieldSchema> outputColumns = outputTable.getCols();

    for (FieldSchema column : inputTable.getCols()) {
        if (!outputColumns.contains(column)) {
            return column;
        }
    }

    return null;
}