Java Code Examples for org.apache.flink.shaded.guava18.com.google.common.collect.Lists#newArrayList()

The following examples show how to use org.apache.flink.shaded.guava18.com.google.common.collect.Lists#newArrayList(). You can go to the original project or source file by following the links above each example.
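
The org.apache.flink.shaded.guava18 prefix is Flink's relocated (shaded) copy of Guava 18, so these calls behave just like plain Guava's Lists.newArrayList(). Before the examples, here is a minimal, self-contained sketch, not taken from any of the projects below, of the three overloads they rely on: the empty-list factory, the varargs factory, and the Iterator copy (an Iterable overload exists as well), the last being the usual way to drain a result iterator into a List. The class name NewArrayListSketch is invented for illustration; the only assumption is that a Flink artifact bundling the shaded Guava classes is on the classpath.

import java.util.Iterator;
import java.util.List;

import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;

public class NewArrayListSketch {
	public static void main(String[] args) {
		// 1) Empty, growable ArrayList
		List<String> empty = Lists.newArrayList();
		empty.add("a");

		// 2) ArrayList pre-filled from varargs (or an existing array)
		List<String> letters = Lists.newArrayList("a", "b", "c");

		// 3) ArrayList copied from an Iterator; the examples below use this
		//    to collect query or state iterators into a plain list
		Iterator<String> it = letters.iterator();
		List<String> copied = Lists.newArrayList(it);

		System.out.println(empty + " " + letters + " " + copied);
	}
}
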
Example 1
Source File: HiveTableSourceITCase.java    From flink with Apache License 2.0
/**
 * Test reading from a partitioned table.
 * @throws Exception
 */
@Test
public void testReadPartitionTable() throws Exception {
	final String dbName = "source_db";
	final String tblName = "test_table_pt";
	TableEnvironment tEnv = createTableEnv();
	tEnv.executeSql("CREATE TABLE source_db.test_table_pt " +
					"(`year` STRING, `value` INT) partitioned by (pt int)");
	HiveTestUtils.createTextTableInserter(hiveShell, dbName, tblName)
			.addRow(new Object[]{"2014", 3})
			.addRow(new Object[]{"2014", 4})
			.commit("pt=0");
	HiveTestUtils.createTextTableInserter(hiveShell, dbName, tblName)
			.addRow(new Object[]{"2015", 2})
			.addRow(new Object[]{"2015", 5})
			.commit("pt=1");
	Table src = tEnv.sqlQuery("select * from hive.source_db.test_table_pt");
	List<Row> rows = Lists.newArrayList(src.execute().collect());

	assertEquals(4, rows.size());
	Object[] rowStrings = rows.stream().map(Row::toString).sorted().toArray();
	assertArrayEquals(new String[]{"2014,3,0", "2014,4,0", "2015,2,1", "2015,5,1"}, rowStrings);
}
 
Example 2
Source File: TableEnvHiveConnectorITCase.java    From flink with Apache License 2.0
@Test
public void testNonExistingPartitionFolder() throws Exception {
	TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
	tableEnv.executeSql("create database db1");
	try {
		tableEnv.executeSql("create table db1.part (x int) partitioned by (p int)");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part").addRow(new Object[]{1}).commit("p=1");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part").addRow(new Object[]{2}).commit("p=2");
		tableEnv.executeSql("alter table db1.part add partition (p=3)");
		// remove one partition
		Path toRemove = new Path(hiveCatalog.getHiveTable(new ObjectPath("db1", "part")).getSd().getLocation(), "p=2");
		FileSystem fs = toRemove.getFileSystem(hiveShell.getHiveConf());
		fs.delete(toRemove, true);

		List<Row> results = Lists.newArrayList(tableEnv.sqlQuery("select * from db1.part").execute().collect());
		assertEquals("[1,1]", results.toString());
	} finally {
		tableEnv.executeSql("drop database db1 cascade");
	}
}
 
Example 3
Source File: TableEnvHiveConnectorITCase.java    From flink with Apache License 2.0
@Test
public void testDateTimestampPartitionColumns() throws Exception {
	TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
	tableEnv.executeSql("create database db1");
	try {
		tableEnv.executeSql("create table db1.part(x int) partitioned by (dt date,ts timestamp)");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{1})
				.addRow(new Object[]{2})
				.commit("dt='2019-12-23',ts='2019-12-23 00:00:00'");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{3})
				.commit("dt='2019-12-25',ts='2019-12-25 16:23:43.012'");
		List<Row> results = Lists.newArrayList(tableEnv.sqlQuery("select * from db1.part order by x").execute().collect());
		assertEquals("[1,2019-12-23,2019-12-23T00:00, 2,2019-12-23,2019-12-23T00:00, 3,2019-12-25,2019-12-25T16:23:43.012]", results.toString());

		results = Lists.newArrayList(tableEnv.sqlQuery("select x from db1.part where dt=cast('2019-12-25' as date)").execute().collect());
		assertEquals("[3]", results.toString());

		TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "insert into db1.part select 4,cast('2019-12-31' as date),cast('2019-12-31 12:00:00.0' as timestamp)");
		results = Lists.newArrayList(tableEnv.sqlQuery("select max(dt) from db1.part").execute().collect());
		assertEquals("[2019-12-31]", results.toString());
	} finally {
		tableEnv.executeSql("drop database db1 cascade");
	}
}
 
Example 4
Source File: HiveCatalogUseBlinkITCase.java    From flink with Apache License 2.0
@Test
public void testTimestampUDF() throws Exception {

	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tableEnv.useCatalog(hiveCatalog.getName());
	tableEnv.executeSql(String.format("create function myyear as '%s'", UDFYear.class.getName()));
	tableEnv.executeSql("create table src(ts timestamp)");
	try {
		HiveTestUtils.createTextTableInserter(hiveShell, "default", "src")
				.addRow(new Object[]{Timestamp.valueOf("2013-07-15 10:00:00")})
				.addRow(new Object[]{Timestamp.valueOf("2019-05-23 17:32:55")})
				.commit();

		List<Row> results = Lists.newArrayList(
				tableEnv.sqlQuery("select myyear(ts) as y from src").execute().collect());
		Assert.assertEquals(2, results.size());
		Assert.assertEquals("[2013, 2019]", results.toString());
	} finally {
		tableEnv.executeSql("drop table src");
	}
}
 
Example 5
Source File: YARNSessionFIFOSecuredITCase.java    From flink with Apache License 2.0
private static void verifyResultContainsKerberosKeytab() throws Exception {
	final String[] mustHave = {"Login successful for user", "using keytab file"};
	final boolean jobManagerRunsWithKerberos = verifyStringsInNamedLogFiles(
		mustHave,
		"jobmanager.log");
	final boolean taskManagerRunsWithKerberos = verifyStringsInNamedLogFiles(
		mustHave, "taskmanager.log");

	Assert.assertThat(
		"The JobManager and the TaskManager should both run with Kerberos.",
		jobManagerRunsWithKerberos && taskManagerRunsWithKerberos,
		Matchers.is(true));

	final List<String> amRMTokens = Lists.newArrayList(AMRMTokenIdentifier.KIND_NAME.toString());
	final String jobmanagerContainerId = getContainerIdByLogName("jobmanager.log");
	final String taskmanagerContainerId = getContainerIdByLogName("taskmanager.log");
	final boolean jobmanagerWithAmRmToken = verifyTokenKindInContainerCredentials(amRMTokens, jobmanagerContainerId);
	final boolean taskmanagerWithAmRmToken = verifyTokenKindInContainerCredentials(amRMTokens, taskmanagerContainerId);

	Assert.assertThat(
		"The JobManager should have AMRMToken.",
		jobmanagerWithAmRmToken,
		Matchers.is(true));
	Assert.assertThat(
		"The TaskManager should not have AMRMToken.",
		taskmanagerWithAmRmToken,
		Matchers.is(false));
}
 
Example 6
Source File: YARNSessionFIFOSecuredITCase.java    From Flink-CEPplus with Apache License 2.0
@Test(timeout = 60000) // timeout after a minute.
@Override
public void testDetachedMode() throws InterruptedException, IOException {
	super.testDetachedMode();
	final String[] mustHave = {"Login successful for user", "using keytab file"};
	final boolean jobManagerRunsWithKerberos = verifyStringsInNamedLogFiles(
		mustHave,
		"jobmanager.log");
	final boolean taskManagerRunsWithKerberos = verifyStringsInNamedLogFiles(
		mustHave, "taskmanager.log");

	Assert.assertThat(
		"The JobManager and the TaskManager should both run with Kerberos.",
		jobManagerRunsWithKerberos && taskManagerRunsWithKerberos,
		Matchers.is(true));

	final List<String> amRMTokens = Lists.newArrayList(AMRMTokenIdentifier.KIND_NAME.toString());
	final String jobmanagerContainerId = getContainerIdByLogName("jobmanager.log");
	final String taskmanagerContainerId = getContainerIdByLogName("taskmanager.log");
	final boolean jobmanagerWithAmRmToken = verifyTokenKindInContainerCredentials(amRMTokens, jobmanagerContainerId);
	final boolean taskmanagerWithAmRmToken = verifyTokenKindInContainerCredentials(amRMTokens, taskmanagerContainerId);

	Assert.assertThat(
		"The JobManager should have AMRMToken.",
		jobmanagerWithAmRmToken,
		Matchers.is(true));
	Assert.assertThat(
		"The TaskManager should not have AMRMToken.",
		taskmanagerWithAmRmToken,
		Matchers.is(false));
}
 
Example 7
Source File: SinkTransformation.java    From Flink-CEPplus with Apache License 2.0
@Override
public Collection<StreamTransformation<?>> getTransitivePredecessors() {
	List<StreamTransformation<?>> result = Lists.newArrayList();
	result.add(this);
	result.addAll(input.getTransitivePredecessors());
	return result;
}
 
Example 8
Source File: UnionTransformation.java    From flink with Apache License 2.0
@Override
public Collection<Transformation<?>> getTransitivePredecessors() {
	List<Transformation<?>> result = Lists.newArrayList();
	result.add(this);
	for (Transformation<T> input: inputs) {
		result.addAll(input.getTransitivePredecessors());
	}
	return result;
}
 
Example 9
Source File: UnionTransformation.java    From flink with Apache License 2.0
/**
 * Creates a new {@code UnionTransformation} from the given input {@code Transformations}.
 *
 * <p>The input {@code Transformations} must all have the same type.
 *
 * @param inputs The list of input {@code Transformations}
 */
public UnionTransformation(List<Transformation<T>> inputs) {
	super("Union", inputs.get(0).getOutputType(), inputs.get(0).getParallelism());

	for (Transformation<T> input: inputs) {
		if (!input.getOutputType().equals(getOutputType())) {
			throw new UnsupportedOperationException("Type mismatch in input " + input);
		}
	}

	this.inputs = Lists.newArrayList(inputs);
}
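
For context, here is a brief usage sketch, not taken from the Flink sources on this page (the class name UnionSketch is made up), of the DataStream#union call that is the usual user-facing way such a UnionTransformation gets created. Streams whose output types differ are exactly what the type check in the constructor above rejects.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class UnionSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<Integer> first = env.fromElements(1, 2, 3);
		DataStream<Integer> second = env.fromElements(4, 5);
		// Both inputs have the same output type (Integer), so the
		// type check in the constructor above accepts them.
		first.union(second).print();
		env.execute("union-sketch");
	}
}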
 
Example 10
Source File: SplitTransformation.java    From Flink-CEPplus with Apache License 2.0
@Override
public Collection<StreamTransformation<?>> getTransitivePredecessors() {
	List<StreamTransformation<?>> result = Lists.newArrayList();
	result.add(this);
	result.addAll(input.getTransitivePredecessors());
	return result;
}
 
Example 11
Source File: HiveCatalogITCase.java    From flink with Apache License 2.0
private void testReadWriteCsvWithProctime(boolean isStreaming) {
	TableEnvironment tableEnv = prepareTable(isStreaming);
	ArrayList<Row> rows = Lists.newArrayList(
			tableEnv.executeSql("SELECT * FROM proctime_src").collect());
	Assert.assertEquals(5, rows.size());
	tableEnv.executeSql("DROP TABLE proctime_src");
}
 
Example 12
Source File: XmlOutput.java    From flink with Apache License 2.0
/**
 * Create a mutable escaper from an existing escaper, which may
 * already be immutable.
 */
public StringEscaper getMutableClone() {
	StringEscaper clone = clone();
	if (clone.translationVector == null) {
		clone.translationVector = Lists.newArrayList(clone.translationTable);
		clone.translationTable = null;
	}
	return clone;
}
 
Example 13
Source File: OneInputTransformation.java    From Flink-CEPplus with Apache License 2.0
@Override
public Collection<StreamTransformation<?>> getTransitivePredecessors() {
	List<StreamTransformation<?>> result = Lists.newArrayList();
	result.add(this);
	result.addAll(input.getTransitivePredecessors());
	return result;
}
 
Example 14
Source File: HiveTableSourceITCase.java    From flink with Apache License 2.0
@Test
public void testLimitPushDown() throws Exception {
	TableEnvironment tableEnv = createTableEnv();
	tableEnv.executeSql("create table src (a string)");
	try {
		HiveTestUtils.createTextTableInserter(hiveShell, "default", "src")
					.addRow(new Object[]{"a"})
					.addRow(new Object[]{"b"})
					.addRow(new Object[]{"c"})
					.addRow(new Object[]{"d"})
					.commit();
		// Analyze the table first to obtain correct stats and avoid the FLINK-14965 problem
		hiveShell.execute("analyze table src COMPUTE STATISTICS");
		Table table = tableEnv.sqlQuery("select * from hive.`default`.src limit 1");
		String[] explain = table.explain().split("==.*==\n");
		assertEquals(4, explain.length);
		String logicalPlan = explain[2];
		String physicalPlan = explain[3];
		String expectedExplain = "HiveTableSource(a) TablePath: default.src, PartitionPruned: false, " +
								"PartitionNums: null, LimitPushDown true, Limit 1";
		assertTrue(logicalPlan.contains(expectedExplain));
		assertTrue(physicalPlan.contains(expectedExplain));

		List<Row> rows = Lists.newArrayList(table.execute().collect());
		assertEquals(1, rows.size());
		Object[] rowStrings = rows.stream().map(Row::toString).sorted().toArray();
		assertArrayEquals(new String[]{"a"}, rowStrings);
	} finally {
		tableEnv.executeSql("drop table src");
	}
}
 
Example 15
Source File: OneInputTransformation.java    From flink with Apache License 2.0
@Override
public Collection<Transformation<?>> getTransitivePredecessors() {
	List<Transformation<?>> result = Lists.newArrayList();
	result.add(this);
	result.addAll(input.getTransitivePredecessors());
	return result;
}
 
Example 16
Source File: SplitStream.java    From flink with Apache License 2.0
private DataStream<OUT> selectOutput(String[] outputNames) {
	for (String outName : outputNames) {
		if (outName == null) {
			throw new RuntimeException("Selected names must not be null");
		}
	}

	SelectTransformation<OUT> selectTransform = new SelectTransformation<OUT>(this.getTransformation(), Lists.newArrayList(outputNames));
	return new DataStream<OUT>(this.getExecutionEnvironment(), selectTransform);
}
 
Example 17
Source File: FlinkKafkaProducer.java    From flink with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	if (semantic != FlinkKafkaProducer.Semantic.NONE && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
		LOG.warn("Using {} semantic, but checkpointing is not enabled. Switching to {} semantic.", semantic, FlinkKafkaProducer.Semantic.NONE);
		semantic = FlinkKafkaProducer.Semantic.NONE;
	}

	nextTransactionalIdHintState = context.getOperatorStateStore().getUnionListState(
		NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR_V2);

	if (context.getOperatorStateStore().getRegisteredStateNames().contains(NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR)) {
		migrateNextTransactionalIdHindState(context);
	}

	transactionalIdsGenerator = new TransactionalIdsGenerator(
		getRuntimeContext().getTaskName() + "-" + ((StreamingRuntimeContext) getRuntimeContext()).getOperatorUniqueID(),
		getRuntimeContext().getIndexOfThisSubtask(),
		getRuntimeContext().getNumberOfParallelSubtasks(),
		kafkaProducersPoolSize,
		SAFE_SCALE_DOWN_FACTOR);

	if (semantic != FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
		nextTransactionalIdHint = null;
	} else {
		ArrayList<FlinkKafkaProducer.NextTransactionalIdHint> transactionalIdHints = Lists.newArrayList(nextTransactionalIdHintState.get());
		if (transactionalIdHints.size() > 1) {
			throw new IllegalStateException(
				"There should be at most one next transactional id hint written by the first subtask");
		} else if (transactionalIdHints.size() == 0) {
			nextTransactionalIdHint = new FlinkKafkaProducer.NextTransactionalIdHint(0, 0);

			// this means that this is either:
			// (1) the first execution of this application
			// (2) previous execution has failed before first checkpoint completed
			//
			// in case of (2) we have to abort all previous transactions
			abortTransactions(transactionalIdsGenerator.generateIdsToAbort());
		} else {
			nextTransactionalIdHint = transactionalIdHints.get(0);
		}
	}

	super.initializeState(context);
}
 
Example 18
Source File: PostgresCatalogITCase.java    From flink with Apache License 2.0
@Test
public void testWithSchema() {
	List<Row> results = Lists.newArrayList(
		tEnv.sqlQuery(String.format("select * from `%s`", PostgresTablePath.fromFlinkTableName(TABLE1))).execute().collect());
	assertEquals("[1]", results.toString());
}
 
Example 19
Source File: FeedbackTransformation.java    From flink with Apache License 2.0
/**
 * Creates a new {@code FeedbackTransformation} from the given input.
 *
 * @param input The input {@code Transformation}
 * @param waitTime The wait time of the feedback operator. After the time expires
 *                          the operation will close and not receive any more feedback elements.
 */
public FeedbackTransformation(Transformation<T> input, Long waitTime) {
	super("Feedback", input.getOutputType(), input.getParallelism());
	this.input = input;
	this.waitTime = waitTime;
	this.feedbackEdges = Lists.newArrayList();
}
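
As a closing note, here is a hedged usage sketch, not taken from the Flink sources above (the class name FeedbackSketch is made up), of the DataStream#iterate(waitTime) call that is the usual way a FeedbackTransformation like the one above gets created. The 5000 ms argument is the wait time described in the Javadoc, and closeWith() adds the feedback edges to the list created with Lists.newArrayList() in the constructor.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.IterativeStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FeedbackSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<Long> numbers = env.fromElements(5L, 8L, 3L);
		// iterate(waitTime) creates the feedback loop; the iteration terminates
		// once no feedback element arrives for 5000 ms.
		IterativeStream<Long> iteration = numbers.iterate(5000L);
		DataStream<Long> minusOne = iteration.map(value -> value - 1);
		// Values still above zero are fed back into the iteration head.
		iteration.closeWith(minusOne.filter(value -> value > 0));
		// Values that reached zero (or below) leave the loop.
		minusOne.filter(value -> value <= 0).print();
		env.execute("feedback-sketch");
	}
}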