Java Code Examples for org.apache.flink.util.NetUtils

The following examples show how to use org.apache.flink.util.NetUtils. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: Flink-CEPplus   Source File: Kafka08PartitionDiscoverer.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Checks, after a {@link ClosedChannelException}, that at least one of the
 * configured seed brokers resolves to a known host.
 *
 * @param seedBrokers the configured seed brokers, e.g. ["host1:port1", "host2:port2"]
 * @param exception the exception that triggered this validation
 * @throws IllegalArgumentException if every seed broker host is unresolvable
 */
private static void validateSeedBrokers(String[] seedBrokers, Exception exception) {
	// Only a ClosedChannelException hints at a connectivity problem worth validating.
	if (!(exception instanceof ClosedChannelException)) {
		return;
	}

	int resolvableHosts = 0;
	for (String seedBroker : seedBrokers) {
		// Parse "host:port"; getCorrectHostnamePort rejects malformed entries.
		URL hostPort = NetUtils.getCorrectHostnamePort(seedBroker.trim());
		try {
			InetAddress.getByName(hostPort.getHost());
			resolvableHosts++;
		} catch (UnknownHostException ignored) {
			// This host did not resolve; keep checking the remaining brokers.
		}
	}

	// Surface a meaningful error when not a single configured host resolves.
	if (resolvableHosts == 0) {
		throw new IllegalArgumentException("All the servers provided in: '"
			+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + "' config are invalid. (unknown hosts)");
	}
}
 
Example 2
Source Project: Flink-CEPplus   Source File: RollingSinkITCase.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a MiniDFSCluster in a fresh temporary folder and a Flink mini
 * cluster (1 task manager, 4 slots) before any test in this class runs.
 */
@BeforeClass
public static void setup() throws Exception {

	LOG.info("In RollingSinkITCase: Starting MiniDFSCluster ");

	// Fresh base directory for HDFS data; cleaned up with the temp folder rule.
	dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// Base URI of the test HDFS cluster, e.g. hdfs://host:port/
	hdfsURI = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/";

	miniClusterResource = new MiniClusterResource(
		new MiniClusterResourceConfiguration.Builder()
			.setNumberTaskManagers(1)
			.setNumberSlotsPerTaskManager(4)
			.build());

	// Start the Flink mini cluster.
	// NOTE(review): assumes a matching teardown exists elsewhere in the class — confirm.
	miniClusterResource.before();
}
 
Example 3
/**
 * Spins up a mini HDFS cluster rooted in a fresh temporary directory and
 * records the sink output path on that cluster.
 */
@BeforeClass
public static void createHDFS() throws IOException {
	final Configuration clusterConf = new Configuration();

	final File baseFolder = tempFolder.newFolder();
	clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseFolder.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(clusterConf).build();
	dfs = hdfsCluster.getFileSystem();

	// Output path for the non-rolling string sink, addressed via the name node.
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example 4
Source Project: Flink-CEPplus   Source File: BucketingSinkTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a MiniDFSCluster backed by a temporary folder and stores its base
 * URI in {@code hdfsURI}. Skipped on Windows, where the cluster cannot start
 * without native extensions.
 */
@BeforeClass
public static void createHDFS() throws IOException {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	Configuration conf = new Configuration();

	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// Base URI of the cluster, e.g. hdfs://host:port/
	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example 5
/**
 * Starts a mini HDFS cluster in a per-run temporary directory and builds the
 * sink's output URI from the name node's host and port.
 */
@BeforeClass
public static void createHDFS() throws IOException {
	// Each run gets its own HDFS base directory.
	final File hdfsBaseDir = tempFolder.newFolder();

	final Configuration hadoopConf = new Configuration();
	hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBaseDir.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(hadoopConf).build();
	dfs = hdfsCluster.getFileSystem();

	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example 6
Source Project: Flink-CEPplus   Source File: AkkaRpcServiceUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the Akka URL under which the given remote RPC endpoint is reachable.
 *
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param akkaProtocol The transport protocol; SSL_TCP when security/encryption is enabled, TCP otherwise.
 *
 * @return The RPC URL of the specified RPC endpoint.
 * @throws UnknownHostException if address resolution was requested and the hostname cannot be resolved
 */
public static String getRpcUrl(
		String hostname,
		int port,
		String endpointName,
		HighAvailabilityServicesUtils.AddressResolution addressResolution,
		AkkaProtocol akkaProtocol) throws UnknownHostException {

	// Validate inputs before building the URL.
	checkNotNull(hostname, "hostname is null");
	checkNotNull(endpointName, "endpointName is null");
	checkArgument(port > 0 && port <= 65535, "port must be in [1, 65535]");

	final String protocolPrefix = akkaProtocol == AkkaProtocol.SSL_TCP ? AKKA_SSL_TCP : AKKA_TCP;

	if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
		// Fail fast if the hostname cannot be resolved
		//noinspection ResultOfMethodCallIgnored
		InetAddress.getByName(hostname);
	}

	// Normalize host:port (e.g. bracket IPv6 literals) without resolving the name.
	final String hostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);

	return String.format("%s://[email protected]%s/user/%s", protocolPrefix, hostPort, endpointName);
}
 
Example 7
Source Project: flink   Source File: QueryableStateConfiguration.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the {@link QueryableStateConfiguration} from the given Configuration.
 * Returns {@code null} when the queryable state proxy server is disabled.
 */
public static QueryableStateConfiguration fromConfiguration(Configuration config) {
	final boolean proxyEnabled = config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER);
	if (!proxyEnabled) {
		return null;
	}

	// Candidate port ranges for the proxy and the state server.
	final Iterator<Integer> proxyPortIterator = NetUtils.getPortRangeFromString(
		config.getString(QueryableStateOptions.PROXY_PORT_RANGE));
	final Iterator<Integer> serverPortIterator = NetUtils.getPortRangeFromString(
		config.getString(QueryableStateOptions.SERVER_PORT_RANGE));

	// Thread pool sizes for the proxy and the state server.
	final int proxyNetworkThreads = config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS);
	final int proxyQueryThreads = config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS);
	final int serverNetworkThreads = config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS);
	final int serverQueryThreads = config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS);

	return new QueryableStateConfiguration(
		proxyPortIterator,
		serverPortIterator,
		proxyNetworkThreads,
		proxyQueryThreads,
		serverNetworkThreads,
		serverQueryThreads);
}
 
Example 8
Source Project: flink   Source File: PrometheusReporter.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts the Prometheus HTTP endpoint on the first free port from the
 * configured port range.
 */
@Override
public void open(MetricConfig config) {
	super.open(config);

	final String portsConfig = config.getString(ARG_PORT, DEFAULT_PORT);

	// Probe the configured ports one by one until the server binds successfully.
	for (Iterator<Integer> ports = NetUtils.getPortRangeFromString(portsConfig); ports.hasNext(); ) {
		final int candidatePort = ports.next();
		try {
			// internally accesses CollectorRegistry.defaultRegistry
			httpServer = new HTTPServer(candidatePort);
			this.port = candidatePort;
			log.info("Started PrometheusReporter HTTP server on port {}.", candidatePort);
			break;
		} catch (IOException ioe) { //assume port conflict
			log.debug("Could not start PrometheusReporter HTTP server on port {}.", candidatePort, ioe);
		}
	}

	// If no port could be bound, starting the reporter is a hard failure.
	if (httpServer == null) {
		throw new RuntimeException("Could not start PrometheusReporter HTTP server on any configured port. Ports: " + portsConfig);
	}
}
 
Example 9
Source Project: flink   Source File: Kafka08PartitionDiscoverer.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Validate that at least one seed broker is valid in case of a
 * ClosedChannelException.
 *
 * @param seedBrokers
 *            array containing the seed brokers e.g. ["host1:port1",
 *            "host2:port2"]
 * @param exception
 *            instance
 * @throws IllegalArgumentException if none of the seed broker hosts can be resolved
 */
private static void validateSeedBrokers(String[] seedBrokers, Exception exception) {
	// Only validate when the failure looks like a connectivity problem.
	if (!(exception instanceof ClosedChannelException)) {
		return;
	}
	int unknownHosts = 0;
	for (String broker : seedBrokers) {
		// Parse "host:port" (getCorrectHostnamePort throws on malformed entries).
		URL brokerUrl = NetUtils.getCorrectHostnamePort(broker.trim());
		try {
			// The resolved address is discarded; only resolvability matters here.
			InetAddress.getByName(brokerUrl.getHost());
		} catch (UnknownHostException e) {
			unknownHosts++;
		}
	}
	// throw meaningful exception if all the provided hosts are invalid
	if (unknownHosts == seedBrokers.length) {
		throw new IllegalArgumentException("All the servers provided in: '"
			+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + "' config are invalid. (unknown hosts)");
	}
}
 
Example 10
Source Project: flink   Source File: BucketingSinkFaultToleranceITCase.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a fresh MiniDFSCluster before each test and records the output
 * path used by the sink under test.
 */
@Before
public void createHDFS() throws IOException {
	Configuration conf = new Configuration();

	// Per-test base directory, cleaned up with the temp folder rule.
	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// e.g. hdfs://host:port/string-non-rolling-out
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example 11
Source Project: flink   Source File: BucketingSinkTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a mini HDFS cluster for the test class and stores its base URI.
 * Skipped on Windows, where MiniDFSCluster needs native extensions.
 */
@BeforeClass
public static void createHDFS() throws IOException {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	final Configuration hadoopConfig = new Configuration();
	final File clusterBaseDir = tempFolder.newFolder();
	hadoopConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, clusterBaseDir.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(hadoopConfig).build();
	dfs = hdfsCluster.getFileSystem();

	// Base URI of the test cluster, e.g. hdfs://host:port/
	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example 12
Source Project: flink   Source File: DistributedCacheDfsTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a MiniDFSCluster and seeds it with one file in the user's home
 * directory plus a directory containing two more files, for use by the
 * distributed-cache tests.
 */
@BeforeClass
public static void setup() throws Exception {
	File dataDir = TEMP_FOLDER.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	// Base URI of the test cluster, e.g. hdfs://host:port/
	String hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";

	FileSystem dfs = FileSystem.get(new URI(hdfsURI));
	testFile = writeFile(dfs, dfs.getHomeDirectory(), "testFile");

	// A directory containing two files, to exercise directory distribution.
	testDir = new Path(dfs.getHomeDirectory(), "testDir");
	dfs.mkdirs(testDir);
	writeFile(dfs, testDir, "testFile1");
	writeFile(dfs, testDir, "testFile2");
}
 
Example 13
Source Project: flink   Source File: TaskManagerLocation.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Gets the hostname of the TaskManager based on the network address.
 *
 * @param inetAddress the network address that the TaskManager binds its sockets to
 * @return hostname of the TaskManager
 */
public static String getHostName(InetAddress inetAddress) {
	final String fqdnHostName = getFqdnHostName(inetAddress);

	if (!fqdnHostName.equals(inetAddress.getHostAddress())) {
		// A real name was resolved; strip the domain part to get the plain host name.
		return NetUtils.getHostnameFromFQDN(fqdnHostName);
	}

	// The name lookup failed (exception, or no hostname exists for the address),
	// so the "FQDN" is just the textual IP; use it and warn about locality impact.
	LOG.warn("No hostname could be resolved for the IP address {}, using IP address as host name. "
		+ "Local input split assignment (such as for HDFS files) may be impacted.", inetAddress.getHostAddress());
	return fqdnHostName;
}
 
Example 14
Source Project: flink   Source File: AkkaRpcServiceUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Constructs the Akka URL under which the given remote RPC endpoint can be reached.
 *
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param akkaProtocol The transport protocol; SSL_TCP when security/encryption is enabled.
 *
 * @return The RPC URL of the specified RPC endpoint.
 * @throws UnknownHostException if resolution was requested and the hostname is unknown
 */
public static String getRpcUrl(
		String hostname,
		int port,
		String endpointName,
		HighAvailabilityServicesUtils.AddressResolution addressResolution,
		AkkaProtocol akkaProtocol) throws UnknownHostException {

	checkNotNull(hostname, "hostname is null");
	checkNotNull(endpointName, "endpointName is null");
	checkArgument(port > 0 && port <= 65535, "port must be in [1, 65535]");

	if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
		// Fail fast if the hostname cannot be resolved.
		//noinspection ResultOfMethodCallIgnored
		InetAddress.getByName(hostname);
	}

	final String scheme = (akkaProtocol == AkkaProtocol.SSL_TCP) ? AKKA_SSL_TCP : AKKA_TCP;
	final String normalizedHostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);

	return String.format("%s://[email protected]%s/user/%s", scheme, normalizedHostPort, endpointName);
}
 
Example 15
Source Project: flink   Source File: QueryableStateConfiguration.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the {@link QueryableStateConfiguration} from the given Configuration.
 *
 * @param config the Flink configuration to read the queryable-state options from
 * @return the parsed configuration, or {@code null} if the proxy server is disabled
 */
public static QueryableStateConfiguration fromConfiguration(Configuration config) {
	if (!config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) {
		return null;
	}

	// Port ranges are expanded lazily into candidate ports for proxy and server.
	final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString(
		config.getString(QueryableStateOptions.PROXY_PORT_RANGE));
	final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString(
		config.getString(QueryableStateOptions.SERVER_PORT_RANGE));

	// Thread pool sizes for network I/O and asynchronous query handling.
	final int numProxyServerNetworkThreads = config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS);
	final int numProxyServerQueryThreads = config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS);

	final int numStateServerNetworkThreads = config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS);
	final int numStateServerQueryThreads = config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS);

	return new QueryableStateConfiguration(
		proxyPorts,
		serverPorts,
		numProxyServerNetworkThreads,
		numProxyServerQueryThreads,
		numStateServerNetworkThreads,
		numStateServerQueryThreads);
}
 
Example 16
Source Project: flink   Source File: TaskExecutorTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that requesting the LOG file from a TaskExecutor whose log path
 * does not exist yields a meaningful error rather than a successful upload.
 */
@Test(timeout = 10000L)
public void testLogNotFoundHandling() throws Throwable {
	// Use a currently-free port for the shuffle environment's data server.
	final int dataPort = NetUtils.getAvailablePort();
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
	// Point the log path at a location that is guaranteed not to exist.
	config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, "/i/dont/exist");

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setConfiguration(config)
			.setLocalCommunication(false)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		try {
			CompletableFuture<TransientBlobKey> logFuture =
				tmGateway.requestFileUpload(FileType.LOG, timeout);
			logFuture.get();
			// Bug fix: the original test passed silently when no exception was
			// thrown; fail explicitly so a missing error surfaces as a failure.
			// (AssertionError is not caught by the catch (Exception) below.)
			throw new AssertionError("Expected requestFileUpload to fail for a non-existing log file.");
		} catch (Exception e) {
			assertThat(e.getMessage(), containsString("The file LOG does not exist on the TaskExecutor."));
		}
	}
}
 
Example 17
Source Project: flink   Source File: ClientTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a mocked {@link PackagedProgram} around a trivial plan and a
 * configuration pointing at a free local JobManager port.
 */
@Before
public void setUp() throws Exception {

	ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
	// Trivial job: generate a sequence and discard the output.
	env.generateSequence(1, 1000).output(new DiscardingOutputFormat<Long>());

	Plan plan = env.createProgramPlan();
	JobWithJars jobWithJars = new JobWithJars(plan, Collections.<URL>emptyList(),  Collections.<URL>emptyList());

	// The packaged program is mocked to return the plan built above.
	program = mock(PackagedProgram.class);
	when(program.getPlanWithJars()).thenReturn(jobWithJars);

	// Pick a currently-free port. NOTE(review): the port could be re-taken
	// before the test uses it — an inherent race of getAvailablePort().
	final int freePort = NetUtils.getAvailablePort();
	config = new Configuration();
	config.setString(JobManagerOptions.ADDRESS, "localhost");
	config.setInteger(JobManagerOptions.PORT, freePort);
	config.setString(AkkaOptions.ASK_TIMEOUT, AkkaOptions.ASK_TIMEOUT.defaultValue());
}
 
Example 18
Source Project: flink   Source File: PrometheusReporter.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Opens the reporter: binds an HTTP server for Prometheus scraping on the
 * first available port of the configured range.
 */
@Override
public void open(MetricConfig config) {
	super.open(config);

	String portsConfig = config.getString(ARG_PORT, DEFAULT_PORT);
	Iterator<Integer> ports = NetUtils.getPortRangeFromString(portsConfig);

	// Try each configured port in order until one can be bound.
	while (ports.hasNext()) {
		int port = ports.next();
		try {
			// internally accesses CollectorRegistry.defaultRegistry
			httpServer = new HTTPServer(port);
			this.port = port;
			log.info("Started PrometheusReporter HTTP server on port {}.", port);
			break;
		} catch (IOException ioe) { //assume port conflict
			log.debug("Could not start PrometheusReporter HTTP server on port {}.", port, ioe);
		}
	}
	// If no port in the range could be bound, starting the reporter is a hard failure.
	if (httpServer == null) {
		throw new RuntimeException("Could not start PrometheusReporter HTTP server on any configured port. Ports: " + portsConfig);
	}
}
 
Example 19
Source Project: flink   Source File: AkkaRpcServiceUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the RPC URL for the given remote endpoint.
 *
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param akkaProtocol The Akka transport protocol (SSL_TCP when security/encryption is enabled).
 *
 * @return The RPC URL of the specified RPC endpoint.
 * @throws UnknownHostException if address resolution was requested and fails
 */
public static String getRpcUrl(
		String hostname,
		int port,
		String endpointName,
		HighAvailabilityServicesUtils.AddressResolution addressResolution,
		AkkaProtocol akkaProtocol) throws UnknownHostException {

	checkNotNull(hostname, "hostname is null");
	checkNotNull(endpointName, "endpointName is null");
	checkArgument(isValidClientPort(port), "port must be in [1, 65535]");

	if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
		// Fail fast if the hostname cannot be resolved
		//noinspection ResultOfMethodCallIgnored
		InetAddress.getByName(hostname);
	}

	// Normalize host:port (e.g. bracket IPv6 literals) without resolving the name.
	final String hostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);

	return internalRpcUrl(endpointName, Optional.of(new RemoteAddressInformation(hostPort, akkaProtocol)));
}
 
Example 20
Source Project: flink   Source File: TaskExecutorTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that requesting the LOG file from a TaskExecutor whose log path
 * does not exist yields a meaningful error rather than a successful upload.
 */
@Test(timeout = 10000L)
public void testLogNotFoundHandling() throws Throwable {
	// Use a currently-free port for the shuffle environment's data server.
	final int dataPort = NetUtils.getAvailablePort();
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
	// Point the log path at a location that is guaranteed not to exist.
	config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, "/i/dont/exist");

	try (TaskSubmissionTestEnvironment env =
		new Builder(jobId)
			.setConfiguration(config)
			.setLocalCommunication(false)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		try {
			CompletableFuture<TransientBlobKey> logFuture =
				tmGateway.requestFileUploadByType(FileType.LOG, timeout);
			logFuture.get();
			// Bug fix: the original test passed silently when no exception was
			// thrown; fail explicitly so a missing error surfaces as a failure.
			// (AssertionError is not caught by the catch (Exception) below.)
			throw new AssertionError("Expected requestFileUploadByType to fail for a non-existing log file.");
		} catch (Exception e) {
			assertThat(e.getMessage(), containsString("The file LOG does not exist on the TaskExecutor."));
		}
	}
}
 
Example 21
Source Project: flink   Source File: BucketingSinkFaultToleranceITCase.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a fresh MiniDFSCluster before each test and records the output
 * path used by the bucketing sink under test.
 */
@Before
public void createHDFS() throws IOException {
	Configuration conf = new Configuration();

	// Per-test base directory, cleaned up with the temp folder rule.
	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// e.g. hdfs://host:port/string-non-rolling-out
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example 22
Source Project: flink   Source File: BucketingSinkTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a MiniDFSCluster backed by a temporary folder and stores its base
 * URI in {@code hdfsURI}. Skipped on Windows, where the cluster cannot start
 * without native extensions.
 */
@BeforeClass
public static void createHDFS() throws IOException {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	Configuration conf = new Configuration();

	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// Base URI of the cluster, e.g. hdfs://host:port/
	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example 23
Source Project: flink   Source File: QueryableStateClient.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create the Queryable State Client.
 * @param remoteAddress the {@link InetAddress address} of the {@code Client Proxy} to connect to.
 * @param remotePort the port of the proxy to connect to.
 * @throws IllegalArgumentException if {@code remotePort} is outside the valid port range [0, 65535]
 */
public QueryableStateClient(final InetAddress remoteAddress, final int remotePort) {
	Preconditions.checkArgument(NetUtils.isValidHostPort(remotePort),
			"Remote Port " + remotePort + " is out of valid port range [0-65535].");

	this.remoteAddress = new InetSocketAddress(remoteAddress, remotePort);

	// Serializer for the request/response messages exchanged with the proxy.
	final MessageSerializer<KvStateRequest, KvStateResponse> messageSerializer =
			new MessageSerializer<>(
					new KvStateRequest.KvStateRequestDeserializer(),
					new KvStateResponse.KvStateResponseDeserializer());

	// Network client with a single event-loop thread; request statistics disabled.
	this.client = new Client<>(
			"Queryable State Client",
			1,
			messageSerializer,
			new DisabledKvStateRequestStats());
}
 
Example 24
Source Project: flink   Source File: DistributedCacheDfsTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a MiniDFSCluster and seeds it with one file in the user's home
 * directory plus a directory containing two more files, for use by the
 * distributed-cache tests.
 */
@BeforeClass
public static void setup() throws Exception {
	File dataDir = TEMP_FOLDER.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	// Base URI of the test cluster, e.g. hdfs://host:port/
	String hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";

	FileSystem dfs = FileSystem.get(new URI(hdfsURI));
	testFile = writeFile(dfs, dfs.getHomeDirectory(), "testFile");

	// A directory containing two files, to exercise directory distribution.
	testDir = new Path(dfs.getHomeDirectory(), "testDir");
	dfs.mkdirs(testDir);
	writeFile(dfs, testDir, "testFile1");
	writeFile(dfs, testDir, "testFile2");
}
 
Example 25
Source Project: flink   Source File: BootstrapTools.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a remote ActorSystem at given address and specific port range.
 * Convenience overload that binds to the wildcard IP address and uses the
 * default Flink actor system name.
 * @param configuration The Flink configuration
 * @param externalAddress The external address to access the ActorSystem.
 * @param externalPortRange The choosing range of the external port to access the ActorSystem.
 * @param logger The logger to output log information.
 * @return The ActorSystem which has been started
 * @throws Exception Thrown when actor system cannot be started in specified port range
 */
@VisibleForTesting
public static ActorSystem startRemoteActorSystem(
	Configuration configuration,
	String externalAddress,
	String externalPortRange,
	Logger logger) throws Exception {
	// Delegate to the full variant, filling in defaults: default actor system
	// name, wildcard bind address, no explicit bind port (empty Optional),
	// fork-join executor derived from the configuration, and no custom config.
	return startRemoteActorSystem(
		configuration,
		AkkaUtils.getFlinkActorSystemName(),
		externalAddress,
		externalPortRange,
		NetUtils.getWildcardIPAddress(),
		Optional.empty(),
		logger,
		ForkJoinExecutorConfiguration.fromConfiguration(configuration),
		null);
}
 
Example 26
Source Project: flink   Source File: TaskManagerLocation.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Gets the hostname of the TaskManager based on the network address.
 *
 * @param inetAddress the network address that the TaskManager binds its sockets to
 * @return hostname of the TaskManager
 */
public static String getHostName(InetAddress inetAddress) {
	String hostName;
	String fqdnHostName = getFqdnHostName(inetAddress);

	// NOTE(review): this relies on getFqdnHostName returning the textual IP
	// when reverse lookup fails — confirm against its implementation.
	if (fqdnHostName.equals(inetAddress.getHostAddress())) {
		// this happens when the name lookup fails, either due to an exception,
		// or because no hostname can be found for the address
		// take IP textual representation
		hostName = fqdnHostName;
		LOG.warn("No hostname could be resolved for the IP address {}, using IP address as host name. "
			+ "Local input split assignment (such as for HDFS files) may be impacted.", inetAddress.getHostAddress());
	} else {
		// Strip the domain suffix from the FQDN to obtain the plain host name.
		hostName = NetUtils.getHostnameFromFQDN(fqdnHostName);
	}

	return hostName;
}
 
Example 27
Source Project: Flink-CEPplus   Source File: Kafka08PartitionDiscoverer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Re-establish broker connection using the next available seed broker address.
 */
private void useNextAddressAsNewContactSeedBroker() {
	// Advance to the next seed broker, wrapping around at the end of the list.
	currentContactSeedBrokerIndex = (currentContactSeedBrokerIndex + 1) % seedBrokerAddresses.length;

	final URL nextContact = NetUtils.getCorrectHostnamePort(seedBrokerAddresses[currentContactSeedBrokerIndex]);
	this.consumer = new SimpleConsumer(nextContact.getHost(), nextContact.getPort(), soTimeout, bufferSize, dummyClientId);
}
 
Example 28
Source Project: Flink-CEPplus   Source File: KafkaConsumer08Test.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that opening the consumer fails with a meaningful error when every
 * configured bootstrap server host is unresolvable.
 * NOTE(review): the method name contains a typo ("Boostrap"); left unchanged
 * here to keep the test's identifier stable.
 */
@Test
public void testAllBoostrapServerHostsAreInvalid() {
	try {
		String unknownHost = "foobar:11111";

		URL unknownHostURL = NetUtils.getCorrectHostnamePort(unknownHost);

		// Force InetAddress.getByName to fail for the unknown host via PowerMock.
		PowerMockito.mockStatic(InetAddress.class);
		when(InetAddress.getByName(Matchers.eq(unknownHostURL.getHost()))).thenThrow(new UnknownHostException("Test exception"));

		String zookeeperConnect = "localhost:56794";
		String groupId = "non-existent-group";
		Properties props = createKafkaProps(zookeeperConnect, unknownHost, groupId);

		FlinkKafkaConsumer08<String> consumer = new FlinkKafkaConsumer08<>(
			Collections.singletonList("no op topic"), new SimpleStringSchema(), props);
		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
		Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);
		consumer.setRuntimeContext(mockRuntimeContext);

		// Opening the consumer should fail during bootstrap-server validation.
		consumer.open(new Configuration());

		// fail() throws AssertionError, which the catch (Exception) below does not swallow.
		fail();
	} catch (Exception expected) {
		assertTrue("Exception should be thrown containing 'all bootstrap servers invalid' message!",
				expected.getMessage().contains("All the servers provided in: '" + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG
						+ "' config are invalid"));
	}
}
 
Example 29
Source Project: Flink-CEPplus   Source File: KafkaConsumer08Test.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that opening the consumer succeeds (no "all servers invalid"
 * error) as long as at least one bootstrap server host is resolvable.
 */
@Test
public void testAtLeastOneBootstrapServerHostIsValid() throws Exception {
	try {
		String zookeeperConnect = "localhost:56794";
		String unknownHost = "foobar:11111";
		// we declare one valid bootstrap server, namely the one with 'localhost'
		String bootstrapServers = unknownHost + ", localhost:22222";

		URL unknownHostURL = NetUtils.getCorrectHostnamePort(unknownHost);

		// Force name resolution to fail for the unknown host only.
		PowerMockito.mockStatic(InetAddress.class);
		when(InetAddress.getByName(Matchers.eq(unknownHostURL.getHost()))).thenThrow(new UnknownHostException("Test exception"));

		String groupId = "non-existent-group";
		Properties props = createKafkaProps(zookeeperConnect, bootstrapServers, groupId);
		DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer(
			"no op topic",
			new SimpleStringSchema(),
			props);
		consumer.open(new Configuration());

		// no exception should be thrown, because we have one valid bootstrap server; test passes if we reach here
	} catch (Exception e) {
		// Any other failure is tolerated, as long as it is not the
		// "all bootstrap servers invalid" validation error.
		assertFalse("No exception should be thrown containing 'all bootstrap servers invalid' message!",
			e.getMessage().contains("All the servers provided in: '" + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG
				+ "' config are invalid"));
	}
}
 
Example 30
Source Project: Flink-CEPplus   Source File: FlinkKafkaProducer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds Kafka producer {@link Properties} from a comma-separated broker
 * list, validating that every entry is a well-formed "host:port" address.
 */
private static Properties getPropertiesFromBrokerList(String brokerList) {
	// Every address must parse as host:port; getCorrectHostnamePort throws otherwise.
	for (String brokerAddress : brokerList.split(",")) {
		NetUtils.getCorrectHostnamePort(brokerAddress);
	}

	final Properties producerProperties = new Properties();
	producerProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
	return producerProperties;
}