org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableList Java Examples

The following examples show how to use org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableList, the Guava ImmutableList that Flink relocates under a shaded package to avoid dependency clashes. The examples are taken from open-source projects; the header above each one names the source file and the project it comes from.
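
As a quick orientation before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name ImmutableListQuickStart is invented for illustration) showing the three construction patterns that recur throughout this page: of(...), copyOf(...), and builder(). It assumes only that Flink's shaded Guava classes are on the classpath; the unshaded Guava ImmutableList behaves the same way.

import org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableList;

import java.util.Arrays;
import java.util.List;

public class ImmutableListQuickStart {

	public static void main(String[] args) {
		// Fixed, known elements: ImmutableList.of(...)
		ImmutableList<String> direct = ImmutableList.of("a", "b", "c");

		// Defensive, immutable copy of an existing collection: ImmutableList.copyOf(...)
		List<Integer> source = Arrays.asList(1, 2, 3);
		ImmutableList<Integer> copied = ImmutableList.copyOf(source);

		// Incremental construction: ImmutableList.builder()
		ImmutableList<String> combined = ImmutableList.<String>builder()
			.addAll(direct)
			.add("d")
			.build();

		System.out.println(direct);   // [a, b, c]
		System.out.println(copied);   // [1, 2, 3]
		System.out.println(combined); // [a, b, c, d]
	}
}

Attempting to mutate any of these lists, for example calling combined.add("e"), throws UnsupportedOperationException. That is also why several examples below use copyOf to snapshot a collection before clearing it or removing entries from the backing structure.
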
Example #1
Source File: ResultPartitionManager.java    From Flink-CEPplus with Apache License 2.0
public void releasePartitionsProducedBy(ExecutionAttemptID executionId, Throwable cause) {
	synchronized (registeredPartitions) {
		final Map<IntermediateResultPartitionID, ResultPartition> partitions =
				registeredPartitions.row(executionId);

		for (ResultPartition partition : partitions.values()) {
			partition.release(cause);
		}

		for (IntermediateResultPartitionID partitionId : ImmutableList
				.copyOf(partitions.keySet())) {

			registeredPartitions.remove(executionId, partitionId);
		}

		LOG.debug("Released all partitions produced by {}.", executionId);
	}
}
 
Example #2
Source File: JarManifestParserTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testFindOnlyEntryClassSingleJar() throws IOException {
	File jarFile = TestJob.getTestJobJar();

	JarFileWithEntryClass jarFileWithEntryClass = JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarFile));

	assertThat(jarFileWithEntryClass.getEntryClass(), is(equalTo(TestJob.class.getCanonicalName())));
}
 
Example #3
Source File: RelDescriptionWriterImpl.java    From flink with Apache License 2.0
@Override
public RelWriter done(RelNode node) {
	final List<Pair<String, Object>> valuesCopy = ImmutableList.copyOf(values);
	values.clear();
	explain(node, valuesCopy);
	pw.flush();
	return this;
}
 
Example #4
Source File: FlinkPreparingTableBase.java    From flink with Apache License 2.0
/**
 * Creates a {@link org.apache.calcite.prepare.Prepare.AbstractPreparingTable} instance.
 *
 * @param relOptSchema The RelOptSchema that this table comes from
 * @param rowType      The table row type
 * @param names        The table qualified name
 * @param statistic    The table statistics
 */
public FlinkPreparingTableBase(
		@Nullable RelOptSchema relOptSchema,
		RelDataType rowType,
		Iterable<String> names,
		FlinkStatistic statistic) {
	this.relOptSchema = relOptSchema;
	this.rowType = Objects.requireNonNull(rowType);
	this.names = Objects.requireNonNull(ImmutableList.copyOf(names));
	this.statistic = Objects.requireNonNull(statistic);
}
 
Example #5
Source File: FlinkPreparingTableBase.java    From flink with Apache License 2.0
/**
 * Returns the digest of the {@link TableSource} instance.
 */
protected List<String> explainSourceAsString(TableSource<?> ts) {
	String tsDigest = ts.explainSource();
	if (!Strings.isNullOrEmpty(tsDigest)) {
		return ImmutableList.<String>builder()
			.addAll(Util.skipLast(names))
			.add(String.format("%s, source: [%s]", Util.last(names), tsDigest))
			.build();
	} else {
		return names;
	}
}
 
Example #6
Source File: KubernetesResourceManagerTest.java    From flink with Apache License 2.0
@Test
public void testStartAndRecoverVariousResourceSpec() throws Exception {
	new Context() {{
		final WorkerResourceSpec workerResourceSpec1 = new WorkerResourceSpec.Builder().setTaskHeapMemoryMB(100).build();
		final WorkerResourceSpec workerResourceSpec2 = new WorkerResourceSpec.Builder().setTaskHeapMemoryMB(99).build();
		slotManager = new TestingSlotManagerBuilder()
			.setGetRequiredResourcesSupplier(() -> Collections.singletonMap(workerResourceSpec1, 1))
			.createSlotManager();

		runTest(() -> {
			// Start two workers with different resources
			resourceManager.startNewWorker(workerResourceSpec1);
			resourceManager.startNewWorker(workerResourceSpec2);

			// Verify that two pods, one for each worker resource spec, are started
			final PodList initialPodList = kubeClient.pods().list();
			assertEquals(2, initialPodList.getItems().size());
			final Pod initialPod1 = getPodContainsStrInArgs(initialPodList, TaskManagerOptions.TASK_HEAP_MEMORY.key() + "=" + (100L << 20));
			final Pod initialPod2 = getPodContainsStrInArgs(initialPodList, TaskManagerOptions.TASK_HEAP_MEMORY.key() + "=" + (99L << 20));

			// Notify resource manager about pods added.
			final KubernetesPod initialKubernetesPod1 = new KubernetesPod(initialPod1);
			final KubernetesPod initialKubernetesPod2 = new KubernetesPod(initialPod2);
			resourceManager.onAdded(ImmutableList.of(initialKubernetesPod1, initialKubernetesPod2));

			// Terminate pod1.
			terminatePod(initialPod1);
			resourceManager.onModified(Collections.singletonList(initialKubernetesPod1));

			// Verify that the original pod1 is removed and a new pod with the same worker resource is requested.
			// Meanwhile, pod2 is unchanged.
			final PodList activePodList = kubeClient.pods().list();
			assertEquals(2, activePodList.getItems().size());
			assertFalse(activePodList.getItems().contains(initialPod1));
			assertTrue(activePodList.getItems().contains(initialPod2));
			// The replacement pod should carry the same task heap setting as the terminated pod1.
			getPodContainsStrInArgs(activePodList, TaskManagerOptions.TASK_HEAP_MEMORY.key() + "=" + (100L << 20));
		});
	}};
}
 
Example #7
Source File: YarnResourceManagerTest.java    From flink with Apache License 2.0
/**
 * Tests that YarnResourceManager does not request more containers than it needs
 * when YARN calls back with a completed container.
 */
@Test
public void testOnContainerCompleted() throws Exception {
	new Context() {{
		runTest(() -> {
			CompletableFuture<?> registerSlotRequestFuture = resourceManager.runInMainThread(() -> {
				rmServices.slotManager.registerSlotRequest(
					new SlotRequest(new JobID(), new AllocationID(), resourceProfile1, taskHost));
				return null;
			});

			// wait for the registerSlotRequest completion
			registerSlotRequestFuture.get();

			// Callback from YARN when container is allocated.
			Container testingContainer = mockContainer("container", 1234, 1, resourceManager.getContainerResource());

			doReturn(Collections.singletonList(Collections.singletonList(resourceManager.getContainerRequest())))
				.when(mockResourceManagerClient).getMatchingRequests(any(Priority.class), anyString(), any(Resource.class));

			resourceManager.onContainersAllocated(ImmutableList.of(testingContainer));
			verify(mockResourceManagerClient).addContainerRequest(any(AMRMClient.ContainerRequest.class));
			verify(mockResourceManagerClient).removeContainerRequest(any(AMRMClient.ContainerRequest.class));
			verify(mockNMClient).startContainer(eq(testingContainer), any(ContainerLaunchContext.class));

			// Callback from YARN when the container has completed; the pending request cannot be fulfilled
			// by pending containers, so a new container needs to be requested.
			ContainerStatus testingContainerStatus = mockContainerStatus(testingContainer.getId());

			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));
			verify(mockResourceManagerClient, times(2)).addContainerRequest(any(AMRMClient.ContainerRequest.class));

			// The same completed-container callback arrives again before a global failure; the pending slot
			// request is already fulfilled by pending containers, so no new container is requested.
			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));
			verify(mockResourceManagerClient, times(2)).addContainerRequest(any(AMRMClient.ContainerRequest.class));
		});
	}};
}
 
Example #8
Source File: YarnResourceManagerTest.java    From flink with Apache License 2.0
@Test
public void testStopWorkerBeforeRegistration() throws Exception {
	new Context() {{
		final List<CompletableFuture<Void>> addContainerRequestFutures = new ArrayList<>();
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		final AtomicInteger addContainerRequestFuturesNumCompleted = new AtomicInteger(0);
		final CompletableFuture<Void> removeContainerRequestFuture = new CompletableFuture<>();
		final CompletableFuture<Void> startContainerAsyncFuture = new CompletableFuture<>();

		testingYarnAMRMClientAsync.setGetMatchingRequestsFunction(ignored ->
			Collections.singletonList(Collections.singletonList(resourceManager.getContainerRequest(containerResource))));
		testingYarnAMRMClientAsync.setAddContainerRequestConsumer((ignored1, ignored2) ->
			addContainerRequestFutures.get(addContainerRequestFuturesNumCompleted.getAndIncrement()).complete(null));
		testingYarnAMRMClientAsync.setRemoveContainerRequestConsumer((ignored1, ignored2) -> removeContainerRequestFuture.complete(null));
		testingYarnNMClientAsync.setStartContainerAsyncConsumer((ignored1, ignored2, ignored3) -> startContainerAsyncFuture.complete(null));

		runTest(() -> {
			// Request slot from SlotManager.
			registerSlotRequest(resourceManager, rmServices, resourceProfile1, taskHost);

			// Callback from YARN when container is allocated.
			Container testingContainer = createTestingContainer();
			resourceManager.onContainersAllocated(ImmutableList.of(testingContainer));

			verifyFutureCompleted(addContainerRequestFutures.get(0));
			verifyFutureCompleted(removeContainerRequestFuture);
			verifyFutureCompleted(startContainerAsyncFuture);

			ContainerStatus testingContainerStatus = createTestingContainerStatus(testingContainer.getId());
			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));

			verifyFutureCompleted(addContainerRequestFutures.get(1));
		});
	}};
}
 
Example #9
Source File: JarManifestParserTest.java    From flink with Apache License 2.0
@Test
public void testFindOnlyEntryClassMultipleJarsWithSingleManifestEntry() throws IOException {
	File jarWithNoManifest = createJarFileWithManifest(ImmutableMap.of());
	File jarFile = TestJob.getTestJobJar();

	JarFileWithEntryClass jarFileWithEntryClass = JarManifestParser
		.findOnlyEntryClass(ImmutableList.of(jarWithNoManifest, jarFile));

	assertThat(jarFileWithEntryClass.getEntryClass(), is(equalTo(TestJob.class.getCanonicalName())));
}
 
Example #10
Source File: JarManifestParserTest.java    From flink with Apache License 2.0
@Test
public void testFindOnlyEntryClassSingleJar() throws IOException {
	File jarFile = TestJob.getTestJobJar();

	JarFileWithEntryClass jarFileWithEntryClass = JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarFile));

	assertThat(jarFileWithEntryClass.getEntryClass(), is(equalTo(TestJob.class.getCanonicalName())));
}
 
Example #11
Source File: YarnResourceManagerTest.java    From flink with Apache License 2.0
@Test
public void testOnStartContainerError() throws Exception {
	new Context() {{
		final List<CompletableFuture<Void>> addContainerRequestFutures = new ArrayList<>();
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		final AtomicInteger addContainerRequestFuturesNumCompleted = new AtomicInteger(0);
		final CompletableFuture<Void> removeContainerRequestFuture = new CompletableFuture<>();
		final CompletableFuture<Void> releaseAssignedContainerFuture = new CompletableFuture<>();
		final CompletableFuture<Void> startContainerAsyncFuture = new CompletableFuture<>();

		testingYarnAMRMClientAsync.setGetMatchingRequestsFunction(ignored ->
			Collections.singletonList(Collections.singletonList(resourceManager.getContainerRequest(containerResource))));
		testingYarnAMRMClientAsync.setAddContainerRequestConsumer((ignored1, ignored2) ->
			addContainerRequestFutures.get(addContainerRequestFuturesNumCompleted.getAndIncrement()).complete(null));
		testingYarnAMRMClientAsync.setRemoveContainerRequestConsumer((ignored1, ignored2) -> removeContainerRequestFuture.complete(null));
		testingYarnAMRMClientAsync.setReleaseAssignedContainerConsumer((ignored1, ignored2) -> releaseAssignedContainerFuture.complete(null));
		testingYarnNMClientAsync.setStartContainerAsyncConsumer((ignored1, ignored2, ignored3) -> startContainerAsyncFuture.complete(null));

		runTest(() -> {
			registerSlotRequest(resourceManager, rmServices, resourceProfile1, taskHost);
			Container testingContainer = createTestingContainer();

			resourceManager.onContainersAllocated(ImmutableList.of(testingContainer));
			verifyFutureCompleted(addContainerRequestFutures.get(0));
			verifyFutureCompleted(removeContainerRequestFuture);
			verifyFutureCompleted(startContainerAsyncFuture);

			resourceManager.onStartContainerError(testingContainer.getId(), new Exception("start error"));
			verifyFutureCompleted(releaseAssignedContainerFuture);
			verifyFutureCompleted(addContainerRequestFutures.get(1));
		});
	}};
}
 
Example #12
Source File: JarManifestParserTest.java    From flink with Apache License 2.0
@Test
public void testFindOnlyEntryClassSingleJar() throws IOException {
	File jarFile = TestJob.getTestJobJar();

	JarManifestParser.JarFileWithEntryClass jarFileWithEntryClass = JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarFile));

	assertThat(jarFileWithEntryClass.getEntryClass(), is(equalTo(TestJob.class.getCanonicalName())));
}
 
Example #13
Source File: JarManifestParserTest.java    From flink with Apache License 2.0
@Test
public void testFindOnlyEntryClassMultipleJarsWithSingleManifestEntry() throws IOException {
	File jarWithNoManifest = createJarFileWithManifest(ImmutableMap.of());
	File jarFile = TestJob.getTestJobJar();

	JarManifestParser.JarFileWithEntryClass jarFileWithEntryClass = JarManifestParser
		.findOnlyEntryClass(ImmutableList.of(jarWithNoManifest, jarFile));

	assertThat(jarFileWithEntryClass.getEntryClass(), is(equalTo(TestJob.class.getCanonicalName())));
}
 
Example #14
Source File: YarnResourceManagerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that YarnResourceManager does not request more containers than it needs
 * when YARN calls back with a completed container.
 */
@Test
public void testOnContainerCompleted() throws Exception {
	new Context() {{
		runTest(() -> {
			CompletableFuture<?> registerSlotRequestFuture = resourceManager.runInMainThread(() -> {
				rmServices.slotManager.registerSlotRequest(
					new SlotRequest(new JobID(), new AllocationID(), resourceProfile1, taskHost));
				return null;
			});

			// wait for the registerSlotRequest completion
			registerSlotRequestFuture.get();

			// Callback from YARN when container is allocated.
			Container testingContainer = mockContainer("container", 1234, 1, resourceManager.getContainerResource());

			doReturn(Collections.singletonList(Collections.singletonList(resourceManager.getContainerRequest())))
				.when(mockResourceManagerClient).getMatchingRequests(any(Priority.class), anyString(), any(Resource.class));

			resourceManager.onContainersAllocated(ImmutableList.of(testingContainer));
			verify(mockResourceManagerClient).addContainerRequest(any(AMRMClient.ContainerRequest.class));
			verify(mockResourceManagerClient).removeContainerRequest(any(AMRMClient.ContainerRequest.class));
			verify(mockNMClient).startContainer(eq(testingContainer), any(ContainerLaunchContext.class));

			// Callback from YARN when the container has completed; the pending request cannot be fulfilled
			// by pending containers, so a new container needs to be requested.
			ContainerStatus testingContainerStatus = mockContainerStatus(testingContainer.getId());

			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));
			verify(mockResourceManagerClient, times(2)).addContainerRequest(any(AMRMClient.ContainerRequest.class));

			// The same completed-container callback arrives again before a global failure; the pending slot
			// request is already fulfilled by pending containers, so no new container is requested.
			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));
			verify(mockResourceManagerClient, times(2)).addContainerRequest(any(AMRMClient.ContainerRequest.class));
		});
	}};
}
 
Example #15
Source File: JarManifestParserTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testFindOnlyEntryClassMultipleJarsWithSingleManifestEntry() throws IOException {
	File jarWithNoManifest = createJarFileWithManifest(ImmutableMap.of());
	File jarFile = TestJob.getTestJobJar();

	JarFileWithEntryClass jarFileWithEntryClass = JarManifestParser
		.findOnlyEntryClass(ImmutableList.of(jarWithNoManifest, jarFile));

	assertThat(jarFileWithEntryClass.getEntryClass(), is(equalTo(TestJob.class.getCanonicalName())));
}
 
Example #16
Source File: YarnResourceManagerTest.java    From flink with Apache License 2.0
@Test
public void testStartWorkerVariousSpec_SameContainerResource() throws Exception {
	final WorkerResourceSpec workerResourceSpec1 = new WorkerResourceSpec.Builder()
		.setCpuCores(1)
		.setTaskHeapMemoryMB(100)
		.setTaskOffHeapMemoryMB(0)
		.setNetworkMemoryMB(100)
		.setManagedMemoryMB(100)
		.build();
	final WorkerResourceSpec workerResourceSpec2 = new WorkerResourceSpec.Builder()
		.setCpuCores(1)
		.setTaskHeapMemoryMB(99)
		.setTaskOffHeapMemoryMB(0)
		.setNetworkMemoryMB(100)
		.setManagedMemoryMB(100)
		.build();

	final SlotManager slotManager = new TestingSlotManagerBuilder()
		.setGetRequiredResourcesSupplier(() -> Collections.singletonMap(workerResourceSpec1, 1))
		.createSlotManager();

	new Context(flinkConfig, slotManager) {{
		final Resource containerResource = resourceManager.getContainerResource(workerResourceSpec1).get();

		final List<CompletableFuture<Void>> addContainerRequestFutures = new ArrayList<>();
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		final AtomicInteger addContainerRequestFuturesNumCompleted = new AtomicInteger(0);

		final String startCommand1 = TaskManagerOptions.TASK_HEAP_MEMORY.key() + "=" + (100L << 20);
		final String startCommand2 = TaskManagerOptions.TASK_HEAP_MEMORY.key() + "=" + (99L << 20);
		final CompletableFuture<Void> startContainerAsyncCommandFuture1 = new CompletableFuture<>();
		final CompletableFuture<Void> startContainerAsyncCommandFuture2 = new CompletableFuture<>();

		testingYarnAMRMClientAsync.setGetMatchingRequestsFunction(ignored ->
			Collections.singletonList(ImmutableList.of(
				resourceManager.getContainerRequest(resourceManager.getContainerResource(workerResourceSpec1).get()),
				resourceManager.getContainerRequest(resourceManager.getContainerResource(workerResourceSpec2).get()))));
		testingYarnAMRMClientAsync.setAddContainerRequestConsumer((ignored1, ignored2) ->
			addContainerRequestFutures.get(addContainerRequestFuturesNumCompleted.getAndIncrement()).complete(null));
		testingYarnNMClientAsync.setStartContainerAsyncConsumer((ignored1, context, ignored2) -> {
			if (containsStartCommand(context, startCommand1)) {
				startContainerAsyncCommandFuture1.complete(null);
			} else if (containsStartCommand(context, startCommand2)) {
				startContainerAsyncCommandFuture2.complete(null);
			}
		});

		runTest(() -> {
			// Make sure the two worker resource specs are normalized to the same container resource
			assertEquals(containerResource, resourceManager.getContainerResource(workerResourceSpec2).get());

			resourceManager.startNewWorker(workerResourceSpec1);
			resourceManager.startNewWorker(workerResourceSpec2);

			// Verify both containers requested
			verifyFutureCompleted(addContainerRequestFutures.get(0));
			verifyFutureCompleted(addContainerRequestFutures.get(1));

			// Mock that both containers are allocated
			Container container1 = createTestingContainerWithResource(containerResource);
			Container container2 = createTestingContainerWithResource(containerResource);
			resourceManager.onContainersAllocated(ImmutableList.of(container1, container2));

			// Verify that workers with both specs are started.
			verifyFutureCompleted(startContainerAsyncCommandFuture1);
			verifyFutureCompleted(startContainerAsyncCommandFuture2);

			// Mock that one container is completed, while the worker is still pending
			ContainerStatus testingContainerStatus = createTestingContainerStatus(container1.getId());
			resourceManager.onContainersCompleted(Collections.singletonList(testingContainerStatus));

			// Verify that only one more container is requested.
			verifyFutureCompleted(addContainerRequestFutures.get(2));
			assertFalse(addContainerRequestFutures.get(3).isDone());
		});
	}};
}
 
Example #17
Source File: JarManifestParserTest.java    From flink with Apache License 2.0
@Test(expected = NoSuchElementException.class)
public void testFindOnlyEntryClassSingleJarWithNoManifest() throws IOException {
	File jarWithNoManifest = createJarFileWithManifest(ImmutableMap.of());
	JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarWithNoManifest));
}
 
Example #18
Source File: YarnResourceManagerTest.java    From flink with Apache License 2.0
/**
 * Tests that YarnResourceManager does not request more containers than it needs
 * when YARN calls back with a completed container.
 */
@Test
public void testOnContainerCompleted() throws Exception {
	new Context() {{
		final List<CompletableFuture<Void>> addContainerRequestFutures = new ArrayList<>();
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		addContainerRequestFutures.add(new CompletableFuture<>());
		final AtomicInteger addContainerRequestFuturesNumCompleted = new AtomicInteger(0);
		final CompletableFuture<Void> removeContainerRequestFuture = new CompletableFuture<>();
		final CompletableFuture<Void> startContainerAsyncFuture = new CompletableFuture<>();

		testingYarnAMRMClientAsync.setGetMatchingRequestsFunction(ignored ->
			Collections.singletonList(Collections.singletonList(resourceManager.getContainerRequest(containerResource))));
		testingYarnAMRMClientAsync.setAddContainerRequestConsumer((ignored1, ignored2) ->
			addContainerRequestFutures.get(addContainerRequestFuturesNumCompleted.getAndIncrement()).complete(null));
		testingYarnAMRMClientAsync.setRemoveContainerRequestConsumer((ignored1, ignored2) -> removeContainerRequestFuture.complete(null));
		testingYarnNMClientAsync.setStartContainerAsyncConsumer((ignored1, ignored2, ignored3) -> startContainerAsyncFuture.complete(null));

		runTest(() -> {
			registerSlotRequest(resourceManager, rmServices, resourceProfile1, taskHost);

			// Callback from YARN when container is allocated.
			Container testingContainer = createTestingContainer();

			resourceManager.onContainersAllocated(ImmutableList.of(testingContainer));
			verifyFutureCompleted(addContainerRequestFutures.get(0));
			verifyFutureCompleted(removeContainerRequestFuture);
			verifyFutureCompleted(startContainerAsyncFuture);

			// Callback from YARN when the container has completed; the pending request cannot be fulfilled
			// by pending containers, so a new container needs to be requested.
			ContainerStatus testingContainerStatus = createTestingContainerStatus(testingContainer.getId());

			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));
			verifyFutureCompleted(addContainerRequestFutures.get(1));

			// The same completed-container callback arrives again before a global failure; the pending slot
			// request is already fulfilled by pending containers, so no new container is requested.
			resourceManager.onContainersCompleted(ImmutableList.of(testingContainerStatus));
			assertFalse(addContainerRequestFutures.get(2).isDone());
		});
	}};
}
 
Example #19
Source File: JarManifestParserTest.java    From flink with Apache License 2.0
@Test(expected = IllegalArgumentException.class)
public void testFindOnlyEntryClassMultipleJarsWithMultipleManifestEntries() throws IOException {
	File jarFile = TestJob.getTestJobJar();

	JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarFile, jarFile, jarFile));
}
 
Example #20
Source File: AggregationFunctionTest.java    From flink with Apache License 2.0
@Test
public void minMaxByTest() throws Exception {
	// Tuples are grouped on field 0, aggregated on field 1

	// preparing expected outputs
	List<Tuple3<Integer, Integer, Integer>> maxByFirstExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 1, 1), Tuple3.of(0, 2, 2),
			Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2),
			Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2));

	List<Tuple3<Integer, Integer, Integer>> maxByLastExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 1, 1), Tuple3.of(0, 2, 2),
			Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 5),
			Tuple3.of(0, 2, 5), Tuple3.of(0, 2, 5), Tuple3.of(0, 2, 8));

	List<Tuple3<Integer, Integer, Integer>> minByFirstExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0),
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0),
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0));

	List<Tuple3<Integer, Integer, Integer>> minByLastExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0),
			Tuple3.of(0, 0, 3), Tuple3.of(0, 0, 3), Tuple3.of(0, 0, 3),
			Tuple3.of(0, 0, 6), Tuple3.of(0, 0, 6), Tuple3.of(0, 0, 6));

	// some necessary boilerplate
	TypeInformation<Tuple3<Integer, Integer, Integer>> typeInfo = TypeExtractor
			.getForObject(Tuple3.of(0, 0, 0));

	ExecutionConfig config = new ExecutionConfig();

	KeySelector<Tuple3<Integer, Integer, Integer>, Tuple> keySelector = KeySelectorUtil.getSelectorForKeys(
			new Keys.ExpressionKeys<>(new int[]{0}, typeInfo),
			typeInfo, config);
	TypeInformation<Tuple> keyType = TypeExtractor.getKeySelectorTypes(keySelector, typeInfo);

	// aggregations tested
	ReduceFunction<Tuple3<Integer, Integer, Integer>> maxByFunctionFirst =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MAXBY, true, config);
	ReduceFunction<Tuple3<Integer, Integer, Integer>> maxByFunctionLast =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MAXBY, false, config);
	ReduceFunction<Tuple3<Integer, Integer, Integer>> minByFunctionFirst =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MINBY, true, config);
	ReduceFunction<Tuple3<Integer, Integer, Integer>> minByFunctionLast =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MINBY, false, config);

	assertEquals(maxByFirstExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(maxByFunctionFirst, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));

	assertEquals(maxByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(maxByFunctionLast, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));

	assertEquals(minByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionLast, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));

	assertEquals(minByFirstExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionFirst, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));
}
 
Example #21
Source File: ParquetRecordReaderTest.java    From flink with Apache License 2.0
@Test
public void testNestedArrayGroup() throws IOException {
	Schema nestedArraySchema = unWrapSchema(NESTED_SCHEMA.getField("nestedArray").schema());
	Preconditions.checkState(nestedArraySchema.getType().equals(Schema.Type.ARRAY));

	Schema arrayItemSchema = nestedArraySchema.getElementType();
	GenericRecord item = new GenericRecordBuilder(arrayItemSchema)
		.set("type", "nested")
		.set("value", 1L).build();

	ImmutableList.Builder<GenericRecord> list = ImmutableList.builder();
	list.add(item);

	GenericRecord record = new GenericRecordBuilder(NESTED_SCHEMA)
		.set("nestedArray", list.build())
		.set("foo", 34L).build();

	Path path = createTempParquetFile(tempRoot.getRoot(), NESTED_SCHEMA, Collections.singletonList(record));
	MessageType readSchema = (new AvroSchemaConverter()).convert(NESTED_SCHEMA);
	ParquetRecordReader<Row> rowReader = new ParquetRecordReader<>(new RowReadSupport(), readSchema);

	InputFile inputFile =
		HadoopInputFile.fromPath(new org.apache.hadoop.fs.Path(path.toUri()), testConfig);
	ParquetReadOptions options = ParquetReadOptions.builder().build();
	ParquetFileReader fileReader = new ParquetFileReader(inputFile, options);

	rowReader.initialize(fileReader, testConfig);
	assertFalse(rowReader.reachEnd());

	Row row = rowReader.nextRecord();
	assertEquals(7, row.getArity());

	assertEquals(34L, row.getField(0));
	Object[] result = (Object[]) row.getField(6);

	assertEquals(1, result.length);

	Row nestedRow = (Row) result[0];
	assertEquals("nested", nestedRow.getField(0));
	assertEquals(1L, nestedRow.getField(1));
}
 
Example #22
Source File: JarManifestParserTest.java    From Flink-CEPplus with Apache License 2.0
@Test(expected = NoSuchElementException.class)
public void testFindOnlyEntryClassSingleJarWithNoManifest() throws IOException {
	File jarWithNoManifest = createJarFileWithManifest(ImmutableMap.of());
	JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarWithNoManifest));
}
 
Example #23
Source File: AggregationFunctionTest.java    From flink with Apache License 2.0
@Test
public void pojoMinMaxByTest() throws Exception {
	// Pojos are grouped on field 0, aggregated on field 1

	// preparing expected outputs
	List<MyPojo3> maxByFirstExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(1, 1), new MyPojo3(2, 2),
			new MyPojo3(2, 2), new MyPojo3(2, 2), new MyPojo3(2, 2),
			new MyPojo3(2, 2), new MyPojo3(2, 2), new MyPojo3(2, 2));

	List<MyPojo3> maxByLastExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(1, 1), new MyPojo3(2, 2),
			new MyPojo3(2, 2), new MyPojo3(2, 2), new MyPojo3(2, 5),
			new MyPojo3(2, 5), new MyPojo3(2, 5), new MyPojo3(2, 8));

	List<MyPojo3> minByFirstExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0),
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0),
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0));

	List<MyPojo3> minByLastExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0),
			new MyPojo3(0, 3), new MyPojo3(0, 3), new MyPojo3(0, 3),
			new MyPojo3(0, 6), new MyPojo3(0, 6), new MyPojo3(0, 6));

	// some necessary boilerplate
	TypeInformation<MyPojo3> typeInfo = TypeExtractor.getForObject(new MyPojo3(0, 0));

	ExecutionConfig config = new ExecutionConfig();

	KeySelector<MyPojo3, Tuple> keySelector = KeySelectorUtil.getSelectorForKeys(
			new Keys.ExpressionKeys<>(new String[]{"f0"}, typeInfo),
			typeInfo, config);
	TypeInformation<Tuple> keyType = TypeExtractor.getKeySelectorTypes(keySelector, typeInfo);

	// aggregations tested
	ReduceFunction<MyPojo3> maxByFunctionFirst =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MAXBY, true, config);
	ReduceFunction<MyPojo3> maxByFunctionLast =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MAXBY, false, config);
	ReduceFunction<MyPojo3> minByFunctionFirst =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MINBY, true, config);
	ReduceFunction<MyPojo3> minByFunctionLast =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MINBY, false, config);

	assertEquals(maxByFirstExpected, MockContext.createAndExecuteForKeyedStream(
					new StreamGroupedReduce<>(maxByFunctionFirst, typeInfo.createSerializer(config)),
					getInputByPojoList(),
					keySelector, keyType));

	assertEquals(maxByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(maxByFunctionLast, typeInfo.createSerializer(config)),
			getInputByPojoList(),
			keySelector, keyType));

	assertEquals(minByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionLast, typeInfo.createSerializer(config)),
			getInputByPojoList(),
			keySelector, keyType));

	assertEquals(minByFirstExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionFirst, typeInfo.createSerializer(config)),
			getInputByPojoList(),
			keySelector, keyType));
}
 
Example #24
Source File: JarManifestParserTest.java    From Flink-CEPplus with Apache License 2.0
@Test(expected = IllegalArgumentException.class)
public void testFindOnlyEntryClassMultipleJarsWithMultipleManifestEntries() throws IOException {
	File jarFile = TestJob.getTestJobJar();

	JarManifestParser.findOnlyEntryClass(ImmutableList.of(jarFile, jarFile, jarFile));
}
 
Example #25
Source File: YarnResourceManagerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testStopWorker() throws Exception {
	new Context() {{
		runTest(() -> {
			// Request slot from SlotManager.
			CompletableFuture<?> registerSlotRequestFuture = resourceManager.runInMainThread(() -> {
				rmServices.slotManager.registerSlotRequest(
					new SlotRequest(new JobID(), new AllocationID(), resourceProfile1, taskHost));
				return null;
			});

			// wait for the registerSlotRequest completion
			registerSlotRequestFuture.get();

			// Callback from YARN when container is allocated.
			Container testingContainer = mockContainer("container", 1234, 1, resourceManager.getContainerResource());

			doReturn(Collections.singletonList(Collections.singletonList(resourceManager.getContainerRequest())))
				.when(mockResourceManagerClient).getMatchingRequests(any(Priority.class), anyString(), any(Resource.class));

			resourceManager.onContainersAllocated(ImmutableList.of(testingContainer));
			verify(mockResourceManagerClient).addContainerRequest(any(AMRMClient.ContainerRequest.class));
			verify(mockNMClient).startContainer(eq(testingContainer), any(ContainerLaunchContext.class));

			// Remote task executor registers with YarnResourceManager.
			TaskExecutorGateway mockTaskExecutorGateway = mock(TaskExecutorGateway.class);
			rpcService.registerGateway(taskHost, mockTaskExecutorGateway);

			final ResourceManagerGateway rmGateway = resourceManager.getSelfGateway(ResourceManagerGateway.class);

			final ResourceID taskManagerResourceId = new ResourceID(testingContainer.getId().toString());
			final SlotReport slotReport = new SlotReport(
				new SlotStatus(
					new SlotID(taskManagerResourceId, 1),
					new ResourceProfile(10, 1, 1, 1, 0, Collections.emptyMap())));

			CompletableFuture<Integer> numberRegisteredSlotsFuture = rmGateway
				.registerTaskExecutor(
					taskHost,
					taskManagerResourceId,
					dataPort,
					hardwareDescription,
					Time.seconds(10L))
				.thenCompose(
					(RegistrationResponse response) -> {
						assertThat(response, instanceOf(TaskExecutorRegistrationSuccess.class));
						final TaskExecutorRegistrationSuccess success = (TaskExecutorRegistrationSuccess) response;
						return rmGateway.sendSlotReport(
							taskManagerResourceId,
							success.getRegistrationId(),
							slotReport,
							Time.seconds(10L));
					})
				.handleAsync(
					(Acknowledge ignored, Throwable throwable) -> rmServices.slotManager.getNumberRegisteredSlots(),
					resourceManager.getMainThreadExecutorForTesting());

			final int numberRegisteredSlots = numberRegisteredSlotsFuture.get();

			assertEquals(1, numberRegisteredSlots);

			// Unregister all task executors and release all containers.
			CompletableFuture<?> unregisterAndReleaseFuture = resourceManager.runInMainThread(() -> {
				rmServices.slotManager.unregisterTaskManagersAndReleaseResources();
				return null;
			});

			unregisterAndReleaseFuture.get();

			verify(mockNMClient).stopContainer(any(ContainerId.class), any(NodeId.class));
			verify(mockResourceManagerClient).releaseAssignedContainer(any(ContainerId.class));
		});

		// It's now safe to access the SlotManager state since the ResourceManager has been stopped.
		assertThat(rmServices.slotManager.getNumberRegisteredSlots(), Matchers.equalTo(0));
		assertThat(resourceManager.getNumberOfRegisteredTaskManagers().get(), Matchers.equalTo(0));
	}};
}
 
Example #26
Source File: AggregationFunctionTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void minMaxByTest() throws Exception {
	// Tuples are grouped on field 0, aggregated on field 1

	// preparing expected outputs
	List<Tuple3<Integer, Integer, Integer>> maxByFirstExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 1, 1), Tuple3.of(0, 2, 2),
			Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2),
			Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2));

	List<Tuple3<Integer, Integer, Integer>> maxByLastExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 1, 1), Tuple3.of(0, 2, 2),
			Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 2), Tuple3.of(0, 2, 5),
			Tuple3.of(0, 2, 5), Tuple3.of(0, 2, 5), Tuple3.of(0, 2, 8));

	List<Tuple3<Integer, Integer, Integer>> minByFirstExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0),
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0),
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0));

	List<Tuple3<Integer, Integer, Integer>> minByLastExpected = ImmutableList.of(
			Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0), Tuple3.of(0, 0, 0),
			Tuple3.of(0, 0, 3), Tuple3.of(0, 0, 3), Tuple3.of(0, 0, 3),
			Tuple3.of(0, 0, 6), Tuple3.of(0, 0, 6), Tuple3.of(0, 0, 6));

	// some necessary boilerplate
	TypeInformation<Tuple3<Integer, Integer, Integer>> typeInfo = TypeExtractor
			.getForObject(Tuple3.of(0, 0, 0));

	ExecutionConfig config = new ExecutionConfig();

	KeySelector<Tuple3<Integer, Integer, Integer>, Tuple> keySelector = KeySelectorUtil.getSelectorForKeys(
			new Keys.ExpressionKeys<>(new int[]{0}, typeInfo),
			typeInfo, config);
	TypeInformation<Tuple> keyType = TypeExtractor.getKeySelectorTypes(keySelector, typeInfo);

	// aggregations tested
	ReduceFunction<Tuple3<Integer, Integer, Integer>> maxByFunctionFirst =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MAXBY, true, config);
	ReduceFunction<Tuple3<Integer, Integer, Integer>> maxByFunctionLast =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MAXBY, false, config);
	ReduceFunction<Tuple3<Integer, Integer, Integer>> minByFunctionFirst =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MINBY, true, config);
	ReduceFunction<Tuple3<Integer, Integer, Integer>> minByFunctionLast =
			new ComparableAggregator<>(1, typeInfo, AggregationType.MINBY, false, config);

	assertEquals(maxByFirstExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(maxByFunctionFirst, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));

	assertEquals(maxByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(maxByFunctionLast, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));

	assertEquals(minByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionLast, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));

	assertEquals(minByFirstExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionFirst, typeInfo.createSerializer(config)),
			getInputByList(),
			keySelector, keyType));
}
 
Example #27
Source File: AggregationFunctionTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void pojoMinMaxByTest() throws Exception {
	// Pojos are grouped on field 0, aggregated on field 1

	// preparing expected outputs
	List<MyPojo3> maxByFirstExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(1, 1), new MyPojo3(2, 2),
			new MyPojo3(2, 2), new MyPojo3(2, 2), new MyPojo3(2, 2),
			new MyPojo3(2, 2), new MyPojo3(2, 2), new MyPojo3(2, 2));

	List<MyPojo3> maxByLastExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(1, 1), new MyPojo3(2, 2),
			new MyPojo3(2, 2), new MyPojo3(2, 2), new MyPojo3(2, 5),
			new MyPojo3(2, 5), new MyPojo3(2, 5), new MyPojo3(2, 8));

	List<MyPojo3> minByFirstExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0),
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0),
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0));

	List<MyPojo3> minByLastExpected = ImmutableList.of(
			new MyPojo3(0, 0), new MyPojo3(0, 0), new MyPojo3(0, 0),
			new MyPojo3(0, 3), new MyPojo3(0, 3), new MyPojo3(0, 3),
			new MyPojo3(0, 6), new MyPojo3(0, 6), new MyPojo3(0, 6));

	// some necessary boilerplate
	TypeInformation<MyPojo3> typeInfo = TypeExtractor.getForObject(new MyPojo3(0, 0));

	ExecutionConfig config = new ExecutionConfig();

	KeySelector<MyPojo3, Tuple> keySelector = KeySelectorUtil.getSelectorForKeys(
			new Keys.ExpressionKeys<>(new String[]{"f0"}, typeInfo),
			typeInfo, config);
	TypeInformation<Tuple> keyType = TypeExtractor.getKeySelectorTypes(keySelector, typeInfo);

	// aggregations tested
	ReduceFunction<MyPojo3> maxByFunctionFirst =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MAXBY, true, config);
	ReduceFunction<MyPojo3> maxByFunctionLast =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MAXBY, false, config);
	ReduceFunction<MyPojo3> minByFunctionFirst =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MINBY, true, config);
	ReduceFunction<MyPojo3> minByFunctionLast =
			new ComparableAggregator<>("f1", typeInfo, AggregationType.MINBY, false, config);

	assertEquals(maxByFirstExpected, MockContext.createAndExecuteForKeyedStream(
					new StreamGroupedReduce<>(maxByFunctionFirst, typeInfo.createSerializer(config)),
					getInputByPojoList(),
					keySelector, keyType));

	assertEquals(maxByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(maxByFunctionLast, typeInfo.createSerializer(config)),
			getInputByPojoList(),
			keySelector, keyType));

	assertEquals(minByLastExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionLast, typeInfo.createSerializer(config)),
			getInputByPojoList(),
			keySelector, keyType));

	assertEquals(minByFirstExpected, MockContext.createAndExecuteForKeyedStream(
			new StreamGroupedReduce<>(minByFunctionFirst, typeInfo.createSerializer(config)),
			getInputByPojoList(),
			keySelector, keyType));
}
 
Example #28
Source File: SqlFirstLastValueAggFunction.java    From flink with Apache License 2.0
@SuppressWarnings("deprecation")
public List<RelDataType> getParameterTypes(RelDataTypeFactory typeFactory) {
	return ImmutableList.of(
			typeFactory.createTypeWithNullability(
					typeFactory.createSqlType(SqlTypeName.ANY), true));
}