Java Code Examples for org.apache.flink.api.java.hadoop.mapreduce.utils.HadoopUtils

The following examples show how to use org.apache.flink.api.java.hadoop.mapreduce.utils.HadoopUtils. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: flink — Source File: HiveTableOutputFormat.java — License: Apache License 2.0
/**
 * Creates an output format that writes records to the given Hive table (or one of its
 * partitions).
 *
 * @param jobConf            Hadoop job configuration; Flink's Hadoop configuration is merged
 *                           into it via {@code HadoopUtils.mergeHadoopConf} before it is cached.
 * @param tablePath          catalog path (database + object name) of the target table.
 * @param table              catalog metadata of the target table.
 * @param hiveTablePartition descriptor of the target partition; may specify fewer values than
 *                           there are partition columns (dynamic partitioning).
 * @param tableProperties    Hive table properties passed to the writer.
 * @param overwrite          whether existing data should be overwritten.
 */
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table, HiveTablePartition hiveTablePartition,
							Properties tableProperties, boolean overwrite) {
	// super() must be the first statement, so the null check on jobConf is inlined here.
	super(Preconditions.checkNotNull(jobConf, "JobConf cannot be null").getCredentials());

	Preconditions.checkNotNull(tablePath, "Table path cannot be null");
	Preconditions.checkNotNull(table, "table cannot be null");
	Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
	Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");

	HadoopUtils.mergeHadoopConf(jobConf);
	this.jobConf = jobConf;
	this.tablePath = tablePath;
	this.partitionColumns = table.getPartitionKeys();
	TableSchema tableSchema = table.getSchema();
	this.fieldNames = tableSchema.getFieldNames();
	this.fieldTypes = tableSchema.getFieldDataTypes();
	this.hiveTablePartition = hiveTablePartition;
	this.tableProperties = tableProperties;
	this.overwrite = overwrite;
	isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
	// Dynamic partitioning: the caller supplied fewer static partition values than there
	// are partition columns, so the remaining values come from the data itself.
	isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
	hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
			"Hive version is not defined");
}
 
Example 2
Source Project: Flink-CEPplus — Source File: HadoopInputFormatBase.java — License: Apache License 2.0
/**
 * Wraps a Hadoop mapreduce {@code InputFormat} together with its key/value classes,
 * taking credentials and configuration from the given {@code Job}.
 * All arguments must be non-null.
 */
public HadoopInputFormatBase(org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job) {
	super(Preconditions.checkNotNull(job, "Job can not be null").getCredentials());

	// Fail fast on null arguments before caching them.
	this.keyClass = Preconditions.checkNotNull(key);
	this.valueClass = Preconditions.checkNotNull(value);
	this.mapreduceInputFormat = Preconditions.checkNotNull(mapreduceInputFormat);

	this.configuration = job.getConfiguration();
	// Enrich the cached configuration via HadoopUtils (presumably merging Flink's
	// global Hadoop configuration — confirm in HadoopUtils.mergeHadoopConf).
	HadoopUtils.mergeHadoopConf(this.configuration);
}
 
Example 3
Source Project: Flink-CEPplus — Source File: HCatInputFormatBase.java — License: Apache License 2.0
/**
 * Creates a HCatInputFormat reading the given table of the given database under the
 * supplied {@link org.apache.hadoop.conf.Configuration}.
 * Records are emitted as {@link org.apache.hive.hcatalog.data.HCatRecord} by default;
 * call {@link HCatInputFormatBase#asFlinkTuples()} to switch to Flink-native tuples.
 *
 * @param database The name of the database to read from.
 * @param table The name of the table to read.
 * @param config The Configuration for the InputFormat.
 * @throws java.io.IOException
 */
public HCatInputFormatBase(String database, String table, Configuration config) throws IOException {
	super();
	this.configuration = config;
	HadoopUtils.mergeHadoopConf(this.configuration);

	// Bind the wrapped HCatInputFormat to the requested table and capture its schema.
	this.hCatInputFormat = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput(this.configuration, database, table);
	this.outputSchema = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.getTableSchema(this.configuration);

	// Default result type: the writable HCatRecord implementation.
	this.resultType = new WritableTypeInfo(DefaultHCatRecord.class);

	// Hand the serialized output schema to the HCat layer via the configuration.
	this.configuration.set("mapreduce.lib.hcat.output.schema", HCatUtil.serialize(this.outputSchema));
}
 
Example 4
Source Project: flink — Source File: HadoopInputFormatBase.java — License: Apache License 2.0
/**
 * Constructs the wrapper around a Hadoop mapreduce {@code InputFormat}.
 * Credentials and configuration are taken from {@code job}; no argument may be null.
 */
public HadoopInputFormatBase(org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job) {
	// The job null-check has to live inside the super() call, which must come first.
	super(Preconditions.checkNotNull(job, "Job can not be null").getCredentials());

	this.mapreduceInputFormat = Preconditions.checkNotNull(mapreduceInputFormat);
	this.keyClass = Preconditions.checkNotNull(key);
	this.valueClass = Preconditions.checkNotNull(value);

	// Cache the job's configuration, then let HadoopUtils merge additional Hadoop
	// settings into it (exact semantics: see HadoopUtils.mergeHadoopConf).
	this.configuration = job.getConfiguration();
	HadoopUtils.mergeHadoopConf(this.configuration);
}
 
Example 5
Source Project: flink — Source File: HCatInputFormatBase.java — License: Apache License 2.0
/**
 * Builds a HCatInputFormat over {@code database}.{@code table} using the given
 * {@link org.apache.hadoop.conf.Configuration}.
 * The default record type is {@link org.apache.hive.hcatalog.data.HCatRecord}; use
 * {@link HCatInputFormatBase#asFlinkTuples()} to request Flink-native tuples instead.
 *
 * @param database The name of the database to read from.
 * @param table The name of the table to read.
 * @param config The Configuration for the InputFormat.
 * @throws java.io.IOException
 */
public HCatInputFormatBase(String database, String table, Configuration config) throws IOException {
	super();
	this.configuration = config;
	HadoopUtils.mergeHadoopConf(this.configuration);

	// Point the underlying HCatInputFormat at the table and record the table schema.
	this.hCatInputFormat = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput(this.configuration, database, table);
	this.outputSchema = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.getTableSchema(this.configuration);

	// Records come back as writable HCatRecord instances unless asFlinkTuples() is called.
	this.resultType = new WritableTypeInfo(DefaultHCatRecord.class);

	// Publish the serialized output schema for the HCat machinery to pick up.
	this.configuration.set("mapreduce.lib.hcat.output.schema", HCatUtil.serialize(this.outputSchema));
}
 
Example 6
Source Project: flink — Source File: HadoopInputFormatBase.java — License: Apache License 2.0
/**
 * Creates the wrapper for a Hadoop mapreduce {@code InputFormat}, its key/value classes,
 * and the {@code Job} supplying credentials and configuration. Null arguments are rejected.
 */
public HadoopInputFormatBase(org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job) {
	super(Preconditions.checkNotNull(job, "Job can not be null").getCredentials());

	// Validate and store the remaining arguments.
	this.valueClass = Preconditions.checkNotNull(value);
	this.keyClass = Preconditions.checkNotNull(key);
	this.mapreduceInputFormat = Preconditions.checkNotNull(mapreduceInputFormat);
	this.configuration = job.getConfiguration();

	// Let HadoopUtils fold extra Hadoop settings into the cached configuration
	// (see HadoopUtils.mergeHadoopConf for the exact merge behavior).
	HadoopUtils.mergeHadoopConf(this.configuration);
}
 
Example 7
Source Project: flink — Source File: HCatInputFormatBase.java — License: Apache License 2.0
/**
 * Sets up a HCatInputFormat for one table of one database, configured by the given
 * {@link org.apache.hadoop.conf.Configuration}.
 * By default each record is a {@link org.apache.hive.hcatalog.data.HCatRecord};
 * {@link HCatInputFormatBase#asFlinkTuples()} switches the output to Flink-native tuples.
 *
 * @param database The name of the database to read from.
 * @param table The name of the table to read.
 * @param config The Configuration for the InputFormat.
 * @throws java.io.IOException
 */
public HCatInputFormatBase(String database, String table, Configuration config) throws IOException {
	super();
	this.configuration = config;
	HadoopUtils.mergeHadoopConf(this.configuration);

	// Attach the delegate HCatInputFormat to the table, then fetch the table's schema.
	this.hCatInputFormat = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput(this.configuration, database, table);
	this.outputSchema = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.getTableSchema(this.configuration);

	// Until asFlinkTuples() is invoked, results are writable HCatRecord values.
	this.resultType = new WritableTypeInfo(DefaultHCatRecord.class);

	// Make the serialized output schema available to HCat through the configuration.
	this.configuration.set("mapreduce.lib.hcat.output.schema", HCatUtil.serialize(this.outputSchema));
}
 
Example 8
Source Project: Flink-CEPplus — Source File: HadoopOutputFormatBase.java — License: Apache License 2.0
/**
 * Wraps a Hadoop mapreduce {@code OutputFormat}, taking credentials and configuration
 * from the given {@code Job}.
 *
 * @param mapreduceOutputFormat the Hadoop OutputFormat to wrap; must not be null.
 * @param job the Hadoop Job supplying credentials and configuration; must not be null.
 */
public HadoopOutputFormatBase(org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
	// Fail fast on null arguments (mirrors HadoopInputFormatBase); super() must be the
	// first statement, so the job check is inlined into the super call.
	super(java.util.Objects.requireNonNull(job, "Job can not be null").getCredentials());
	this.mapreduceOutputFormat = java.util.Objects.requireNonNull(mapreduceOutputFormat, "OutputFormat can not be null");
	this.configuration = job.getConfiguration();
	HadoopUtils.mergeHadoopConf(configuration);
}
 
Example 9
Source Project: flink — Source File: HadoopOutputFormatBase.java — License: Apache License 2.0
/**
 * Creates an output-format wrapper around a Hadoop mapreduce {@code OutputFormat}.
 *
 * @param mapreduceOutputFormat the Hadoop OutputFormat to wrap; must not be null.
 * @param job the Hadoop Job supplying credentials and configuration; must not be null.
 */
public HadoopOutputFormatBase(org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
	// Reject null arguments up front, consistent with HadoopInputFormatBase. The job
	// check sits inside super(...) because super() must be the first statement.
	super(java.util.Objects.requireNonNull(job, "Job can not be null").getCredentials());
	this.mapreduceOutputFormat = java.util.Objects.requireNonNull(mapreduceOutputFormat, "OutputFormat can not be null");
	this.configuration = job.getConfiguration();
	HadoopUtils.mergeHadoopConf(configuration);
}
 
Example 10
Source Project: flink — Source File: HadoopOutputFormatBase.java — License: Apache License 2.0
/**
 * Builds the wrapper for a Hadoop mapreduce {@code OutputFormat} using the credentials
 * and configuration of the given {@code Job}.
 *
 * @param mapreduceOutputFormat the Hadoop OutputFormat to wrap; must not be null.
 * @param job the Hadoop Job supplying credentials and configuration; must not be null.
 */
public HadoopOutputFormatBase(org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
	// Null checks added to match HadoopInputFormatBase; the job check is inlined
	// because super() is required to be the first statement.
	super(java.util.Objects.requireNonNull(job, "Job can not be null").getCredentials());
	this.mapreduceOutputFormat = java.util.Objects.requireNonNull(mapreduceOutputFormat, "OutputFormat can not be null");
	this.configuration = job.getConfiguration();
	HadoopUtils.mergeHadoopConf(configuration);
}