org.apache.flink.util.NetUtils Java Examples

The following examples show how to use org.apache.flink.util.NetUtils. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: BucketingSinkFaultToleranceITCase.java    From flink with Apache License 2.0 6 votes vote down vote up
@Before
public void createHDFS() throws IOException {
	// Start an in-process MiniDFSCluster before each test so the sink can
	// write to a real (but local) HDFS namespace.
	Configuration conf = new Configuration();

	// A fresh temp directory per test keeps cluster state isolated between runs.
	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// The NameNode port is assigned dynamically, so read host/port back from
	// the running cluster when building the output path.
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example #2
Source File: TaskExecutorTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@Test(timeout = 10000L)
public void testLogNotFoundHandling() throws Throwable {
	// Pick a currently-free port for the data transport so the environment can start.
	final int dataPort = NetUtils.getAvailablePort();
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
	// Point the log path at a location that is guaranteed not to exist.
	config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, "/i/dont/exist");

	try (TaskSubmissionTestEnvironment env =
		new Builder(jobId)
			.setConfiguration(config)
			.setLocalCommunication(false)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		try {
			CompletableFuture<TransientBlobKey> logFuture =
				tmGateway.requestFileUploadByType(FileType.LOG, timeout);
			logFuture.get();
			// BUG FIX: previously the test silently passed when the request
			// unexpectedly succeeded. AssertionError is an Error, so it is not
			// swallowed by the catch block below.
			throw new AssertionError("Expected the LOG upload request to fail for a non-existent file.");
		} catch (Exception e) {
			assertThat(e.getMessage(), containsString("The file LOG does not exist on the TaskExecutor."));
		}
	}
}
 
Example #3
Source File: BucketingSinkFaultToleranceITCase.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void createHDFS() throws IOException {
	// Start one in-process MiniDFSCluster for the whole test class.
	Configuration conf = new Configuration();

	// A fresh temp directory keeps cluster state isolated per test-class run.
	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// The NameNode port is assigned dynamically, so read host/port back from
	// the running cluster when building the output path.
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example #4
Source File: AkkaRpcServiceUtils.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param akkaProtocol True, if security/encryption is enabled, false otherwise.
 *
 * @return The RPC URL of the specified RPC endpoint.
 */
public static String getRpcUrl(
		String hostname,
		int port,
		String endpointName,
		HighAvailabilityServicesUtils.AddressResolution addressResolution,
		AkkaProtocol akkaProtocol) throws UnknownHostException {

	checkNotNull(hostname, "hostname is null");
	checkNotNull(endpointName, "endpointName is null");
	checkArgument(isValidClientPort(port), "port must be in [1, 65535]");

	if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
		// Fail fast if the hostname cannot be resolved
		//noinspection ResultOfMethodCallIgnored
		InetAddress.getByName(hostname);
	}

	// Combine host and port into one string; per the method name this does
	// not perform DNS resolution.
	final String hostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);

	return internalRpcUrl(endpointName, Optional.of(new RemoteAddressInformation(hostPort, akkaProtocol)));
}
 
Example #5
Source File: BucketingSinkFaultToleranceITCase.java    From flink with Apache License 2.0 6 votes vote down vote up
@Before
public void createHDFS() throws IOException {
	// Each test gets its own mini HDFS, rooted in a fresh temp directory.
	final File baseDir = tempFolder.newFolder();

	final Configuration hdfsConfig = new Configuration();
	hdfsConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig).build();
	dfs = hdfsCluster.getFileSystem();

	// Read the dynamically assigned NameNode host/port back from the cluster.
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example #6
Source File: BucketingSinkTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void createHDFS() throws IOException {
	// MiniDFSCluster requires native extensions on Windows; skip the suite there.
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	Configuration conf = new Configuration();

	// A fresh temp directory keeps cluster state isolated per test-class run.
	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// The NameNode port is assigned dynamically; read host/port back from the cluster.
	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example #7
Source File: QueryableStateClient.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Create the Queryable State Client.
 * @param remoteAddress the {@link InetAddress address} of the {@code Client Proxy} to connect to.
 * @param remotePort the port of the proxy to connect to.
 */
public QueryableStateClient(final InetAddress remoteAddress, final int remotePort) {
	// Validate the port up front; the full [0, 65535] range is accepted here.
	Preconditions.checkArgument(NetUtils.isValidHostPort(remotePort),
			"Remote Port " + remotePort + " is out of valid port range [0-65535].");

	this.remoteAddress = new InetSocketAddress(remoteAddress, remotePort);

	// Pairs the request/response (de)serializers used for the proxy protocol.
	final MessageSerializer<KvStateRequest, KvStateResponse> messageSerializer =
			new MessageSerializer<>(
					new KvStateRequest.KvStateRequestDeserializer(),
					new KvStateResponse.KvStateResponseDeserializer());

	// NOTE(review): the literal 1 is presumably a thread/event-loop count —
	// confirm against the Client constructor. Request stats are disabled.
	this.client = new Client<>(
			"Queryable State Client",
			1,
			messageSerializer,
			new DisabledKvStateRequestStats());
}
 
Example #8
Source File: DistributedCacheDfsTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
	// Root the MiniDFSCluster in a throwaway temp directory.
	File dataDir = TEMP_FOLDER.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	// NameNode host/port are assigned at startup; read them back for the URI.
	String hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";

	FileSystem dfs = FileSystem.get(new URI(hdfsURI));
	// Fixtures: one file in the home directory plus a directory with two files.
	testFile = writeFile(dfs, dfs.getHomeDirectory(), "testFile");

	testDir = new Path(dfs.getHomeDirectory(), "testDir");
	dfs.mkdirs(testDir);
	writeFile(dfs, testDir, "testFile1");
	writeFile(dfs, testDir, "testFile2");
}
 
Example #9
Source File: BootstrapTools.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Starts a remote ActorSystem at given address and specific port range.
 * @param configuration The Flink configuration
 * @param externalAddress The external address to access the ActorSystem.
 * @param externalPortRange The choosing range of the external port to access the ActorSystem.
 * @param logger The logger to output log information.
 * @return The ActorSystem which has been started
 * @throws Exception Thrown when actor system cannot be started in specified port range
 */
@VisibleForTesting
public static ActorSystem startRemoteActorSystem(
	Configuration configuration,
	String externalAddress,
	String externalPortRange,
	Logger logger) throws Exception {
	// Convenience overload: default Flink actor system name, wildcard bind
	// address, no custom Akka config, executor settings derived from the
	// Flink configuration.
	return startRemoteActorSystem(
		configuration,
		AkkaUtils.getFlinkActorSystemName(),
		externalAddress,
		externalPortRange,
		NetUtils.getWildcardIPAddress(),
		Optional.empty(),
		logger,
		ForkJoinExecutorConfiguration.fromConfiguration(configuration),
		null);
}
 
Example #10
Source File: Kafka08PartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Validate that at least one seed broker is valid in case of a
 * ClosedChannelException.
 *
 * @param seedBrokers
 *            array containing the seed brokers e.g. ["host1:port1",
 *            "host2:port2"]
 * @param exception
 *            instance
 */
private static void validateSeedBrokers(String[] seedBrokers, Exception exception) {
	// Only diagnose broker addresses when the failure was a closed channel.
	if (!(exception instanceof ClosedChannelException)) {
		return;
	}
	int unknownHosts = 0;
	for (String broker : seedBrokers) {
		// Parse the trimmed "host:port" entry into a URL.
		URL brokerUrl = NetUtils.getCorrectHostnamePort(broker.trim());
		try {
			// Resolution attempt only; the resolved address itself is not needed.
			InetAddress.getByName(brokerUrl.getHost());
		} catch (UnknownHostException e) {
			unknownHosts++;
		}
	}
	// throw meaningful exception if all the provided hosts are invalid
	if (unknownHosts == seedBrokers.length) {
		throw new IllegalArgumentException("All the servers provided in: '"
			+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + "' config are invalid. (unknown hosts)");
	}
}
 
Example #11
Source File: RollingSinkITCase.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {

	LOG.info("In RollingSinkITCase: Starting MiniDFSCluster ");

	// A fresh temp directory keeps HDFS state isolated per test-class run.
	dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// NameNode port is assigned dynamically; read it back from the cluster.
	hdfsURI = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/";

	// Flink mini cluster: one TaskManager with four slots.
	miniClusterResource = new MiniClusterResource(
		new MiniClusterResourceConfiguration.Builder()
			.setNumberTaskManagers(1)
			.setNumberSlotsPerTaskManager(4)
			.build());

	miniClusterResource.before();
}
 
Example #12
Source File: BucketingSinkTest.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void createHDFS() throws IOException {
	// MiniDFSCluster needs native extensions on Windows; skip the suite there.
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	final File baseDir = tempFolder.newFolder();

	final Configuration hdfsConfig = new Configuration();
	hdfsConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig).build();
	dfs = hdfsCluster.getFileSystem();

	// Read the dynamically assigned NameNode host/port back from the cluster.
	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example #13
Source File: RollingSinkFaultToleranceITCase.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void createHDFS() throws IOException {
	// One shared mini HDFS per test class, rooted in a fresh temp directory.
	final File baseDir = tempFolder.newFolder();

	final Configuration hdfsConfig = new Configuration();
	hdfsConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig).build();
	dfs = hdfsCluster.getFileSystem();

	// Read the dynamically assigned NameNode host/port back from the cluster.
	outPath = "hdfs://"
			+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
			+ "/string-non-rolling-out";
}
 
Example #14
Source File: ClientTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@Before
public void setUp() throws Exception {

	// Build a trivial plan (sequence source -> discarding sink) to submit.
	ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
	env.generateSequence(1, 1000).output(new DiscardingOutputFormat<Long>());

	Plan plan = env.createProgramPlan();
	JobWithJars jobWithJars = new JobWithJars(plan, Collections.<URL>emptyList(),  Collections.<URL>emptyList());

	// Mock the packaged program so the tests control the returned plan.
	program = mock(PackagedProgram.class);
	when(program.getPlanWithJars()).thenReturn(jobWithJars);

	// Use a port that is free right now. NOTE(review): it could be taken
	// again before the client connects (TOCTOU) — acceptable for tests.
	final int freePort = NetUtils.getAvailablePort();
	config = new Configuration();
	config.setString(JobManagerOptions.ADDRESS, "localhost");
	config.setInteger(JobManagerOptions.PORT, freePort);
	config.setString(AkkaOptions.ASK_TIMEOUT, AkkaOptions.ASK_TIMEOUT.defaultValue());
}
 
Example #15
Source File: TaskManagerLocation.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Gets the hostname of the TaskManager based on the network address.
 *
 * @param inetAddress the network address that the TaskManager binds its sockets to
 * @return hostname of the TaskManager
 */
public static String getHostName(InetAddress inetAddress) {
	String hostName;
	String fqdnHostName = getFqdnHostName(inetAddress);

	if (fqdnHostName.equals(inetAddress.getHostAddress())) {
		// this happens when the name lookup fails, either due to an exception,
		// or because no hostname can be found for the address
		// take IP textual representation
		hostName = fqdnHostName;
		LOG.warn("No hostname could be resolved for the IP address {}, using IP address as host name. "
			+ "Local input split assignment (such as for HDFS files) may be impacted.", inetAddress.getHostAddress());
	} else {
		// Reduce the fully-qualified name to its hostname portion.
		hostName = NetUtils.getHostnameFromFQDN(fqdnHostName);
	}

	return hostName;
}
 
Example #16
Source File: BucketingSinkTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void createHDFS() throws IOException {
	// MiniDFSCluster needs native extensions on Windows; skip the suite there.
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	final File baseDir = tempFolder.newFolder();

	final Configuration hdfsConfig = new Configuration();
	hdfsConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

	hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig).build();
	dfs = hdfsCluster.getFileSystem();

	// Read the dynamically assigned NameNode host/port back from the cluster.
	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
 
Example #17
Source File: Kafka08PartitionDiscoverer.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Validate that at least one seed broker is valid in case of a
 * ClosedChannelException.
 *
 * @param seedBrokers
 *            array containing the seed brokers e.g. ["host1:port1",
 *            "host2:port2"]
 * @param exception
 *            instance
 */
private static void validateSeedBrokers(String[] seedBrokers, Exception exception) {
	// Only a ClosedChannelException warrants diagnosing the broker addresses.
	if (!(exception instanceof ClosedChannelException)) {
		return;
	}

	int resolvableHosts = 0;
	for (String seedBroker : seedBrokers) {
		URL url = NetUtils.getCorrectHostnamePort(seedBroker.trim());
		try {
			InetAddress.getByName(url.getHost());
			resolvableHosts++;
		} catch (UnknownHostException e) {
			// host could not be resolved; keep checking the remaining brokers
		}
	}

	// Fail loudly only when none of the configured hosts resolve.
	if (resolvableHosts == 0) {
		throw new IllegalArgumentException("All the servers provided in: '"
			+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + "' config are invalid. (unknown hosts)");
	}
}
 
Example #18
Source File: DistributedCacheDfsTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
	// Root the mini HDFS cluster in a throwaway temp directory.
	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEMP_FOLDER.newFolder().getAbsolutePath());
	hdfsCluster = new MiniDFSCluster.Builder(conf).build();

	// NameNode host/port are assigned at startup; read them back for the URI.
	final String hdfsUri = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";

	final FileSystem fileSystem = FileSystem.get(new URI(hdfsUri));

	// Fixtures: one file in the home directory plus a directory with two files.
	testFile = writeFile(fileSystem, fileSystem.getHomeDirectory(), "testFile");

	testDir = new Path(fileSystem.getHomeDirectory(), "testDir");
	fileSystem.mkdirs(testDir);
	writeFile(fileSystem, testDir, "testFile1");
	writeFile(fileSystem, testDir, "testFile2");
}
 
Example #19
Source File: TaskManagerLocation.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Gets the hostname of the TaskManager based on the network address.
 *
 * @param inetAddress the network address that the TaskManager binds its sockets to
 * @return hostname of the TaskManager
 */
public static String getHostName(InetAddress inetAddress) {
	final String fqdnHostName = getFqdnHostName(inetAddress);

	// A successful lookup yields something other than the bare IP text;
	// reduce the fully-qualified name to its hostname portion.
	if (!fqdnHostName.equals(inetAddress.getHostAddress())) {
		return NetUtils.getHostnameFromFQDN(fqdnHostName);
	}

	// Name lookup failed (exception, or no reverse mapping exists for the
	// address) — fall back to the textual IP representation.
	LOG.warn("No hostname could be resolved for the IP address {}, using IP address as host name. "
		+ "Local input split assignment (such as for HDFS files) may be impacted.", inetAddress.getHostAddress());
	return fqdnHostName;
}
 
Example #20
Source File: AkkaRpcServiceUtils.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param akkaProtocol True, if security/encryption is enabled, false otherwise.
 *
 * @return The RPC URL of the specified RPC endpoint.
 */
public static String getRpcUrl(
		String hostname,
		int port,
		String endpointName,
		HighAvailabilityServicesUtils.AddressResolution addressResolution,
		AkkaProtocol akkaProtocol) throws UnknownHostException {

	checkNotNull(hostname, "hostname is null");
	checkNotNull(endpointName, "endpointName is null");
	checkArgument(port > 0 && port <= 65535, "port must be in [1, 65535]");

	// Pick the SSL transport prefix when encryption is enabled.
	final String protocolPrefix = akkaProtocol == AkkaProtocol.SSL_TCP ? AKKA_SSL_TCP : AKKA_TCP;

	if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
		// Fail fast if the hostname cannot be resolved
		//noinspection ResultOfMethodCallIgnored
		InetAddress.getByName(hostname);
	}

	// Combine host and port into one string; per the method name this does
	// not perform DNS resolution.
	final String hostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);

	return String.format("%s://flink@%s/user/%s", protocolPrefix, hostPort, endpointName);
}
 
Example #21
Source File: QueryableStateConfiguration.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the {@link QueryableStateConfiguration} from the given Configuration.
 */
public static QueryableStateConfiguration fromConfiguration(Configuration config) {
	// Queryable state is opt-in; callers must handle the null "disabled" result.
	if (!config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) {
		return null;
	}

	// Port-range strings are expanded into candidate-port iterators.
	final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString(
		config.getString(QueryableStateOptions.PROXY_PORT_RANGE));
	final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString(
		config.getString(QueryableStateOptions.SERVER_PORT_RANGE));

	final int numProxyServerNetworkThreads = config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS);
	final int numProxyServerQueryThreads = config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS);

	final int numStateServerNetworkThreads = config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS);
	final int numStateServerQueryThreads = config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS);

	return new QueryableStateConfiguration(
		proxyPorts,
		serverPorts,
		numProxyServerNetworkThreads,
		numProxyServerQueryThreads,
		numStateServerNetworkThreads,
		numStateServerQueryThreads);
}
 
Example #22
Source File: PrometheusReporter.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public void open(MetricConfig config) {
	super.open(config);

	// The port option may describe several candidate ports; try each in order
	// and bind to the first free one.
	String portsConfig = config.getString(ARG_PORT, DEFAULT_PORT);
	Iterator<Integer> ports = NetUtils.getPortRangeFromString(portsConfig);

	while (ports.hasNext()) {
		int port = ports.next();
		try {
			// internally accesses CollectorRegistry.defaultRegistry
			httpServer = new HTTPServer(port);
			this.port = port;
			log.info("Started PrometheusReporter HTTP server on port {}.", port);
			break;
		} catch (IOException ioe) { //assume port conflict
			log.debug("Could not start PrometheusReporter HTTP server on port {}.", port, ioe);
		}
	}
	// No port in the configured range could be bound.
	if (httpServer == null) {
		throw new RuntimeException("Could not start PrometheusReporter HTTP server on any configured port. Ports: " + portsConfig);
	}
}
 
Example #23
Source File: TaskExecutorTest.java    From flink with Apache License 2.0 6 votes vote down vote up
@Test(timeout = 10000L)
public void testLogNotFoundHandling() throws Throwable {
	// Pick a currently-free port for the data transport so the environment can start.
	final int dataPort = NetUtils.getAvailablePort();
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
	// Point the log path at a location that is guaranteed not to exist.
	config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, "/i/dont/exist");

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setConfiguration(config)
			.setLocalCommunication(false)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		try {
			CompletableFuture<TransientBlobKey> logFuture =
				tmGateway.requestFileUpload(FileType.LOG, timeout);
			logFuture.get();
			// BUG FIX: previously the test silently passed when the request
			// unexpectedly succeeded. AssertionError is an Error, so it is not
			// swallowed by the catch block below.
			throw new AssertionError("Expected the LOG upload request to fail for a non-existent file.");
		} catch (Exception e) {
			assertThat(e.getMessage(), containsString("The file LOG does not exist on the TaskExecutor."));
		}
	}
}
 
Example #24
Source File: QueryableStateConfiguration.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the {@link QueryableStateConfiguration} from the given Configuration.
 */
public static QueryableStateConfiguration fromConfiguration(Configuration config) {
	// Queryable state is opt-in; signal "disabled" with null.
	if (!config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) {
		return null;
	}

	// Port-range strings are expanded into candidate-port iterators; thread
	// counts are read directly from the configuration.
	return new QueryableStateConfiguration(
		NetUtils.getPortRangeFromString(config.getString(QueryableStateOptions.PROXY_PORT_RANGE)),
		NetUtils.getPortRangeFromString(config.getString(QueryableStateOptions.SERVER_PORT_RANGE)),
		config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS),
		config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS),
		config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS),
		config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS));
}
 
Example #25
Source File: AkkaRpcServiceUtils.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param akkaProtocol True, if security/encryption is enabled, false otherwise.
 *
 * @return The RPC URL of the specified RPC endpoint.
 */
public static String getRpcUrl(
		String hostname,
		int port,
		String endpointName,
		HighAvailabilityServicesUtils.AddressResolution addressResolution,
		AkkaProtocol akkaProtocol) throws UnknownHostException {

	checkNotNull(hostname, "hostname is null");
	checkNotNull(endpointName, "endpointName is null");
	checkArgument(port > 0 && port <= 65535, "port must be in [1, 65535]");

	if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
		// Fail fast if the hostname cannot be resolved
		//noinspection ResultOfMethodCallIgnored
		InetAddress.getByName(hostname);
	}

	// Pick the SSL transport prefix when encryption is enabled, and combine
	// host and port without performing DNS resolution.
	final String transportPrefix = akkaProtocol == AkkaProtocol.SSL_TCP ? AKKA_SSL_TCP : AKKA_TCP;
	final String normalizedHostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);

	return String.format("%s://flink@%s/user/%s", transportPrefix, normalizedHostPort, endpointName);
}
 
Example #26
Source File: PrometheusReporter.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public void open(MetricConfig config) {
	super.open(config);

	final String portsConfig = config.getString(ARG_PORT, DEFAULT_PORT);

	// Walk the configured candidate ports and bind to the first free one.
	final Iterator<Integer> candidatePorts = NetUtils.getPortRangeFromString(portsConfig);
	while (candidatePorts.hasNext()) {
		final int candidate = candidatePorts.next();
		try {
			// internally accesses CollectorRegistry.defaultRegistry
			httpServer = new HTTPServer(candidate);
			this.port = candidate;
			log.info("Started PrometheusReporter HTTP server on port {}.", candidate);
			return;
		} catch (IOException ioe) { //assume port conflict
			log.debug("Could not start PrometheusReporter HTTP server on port {}.", candidate, ioe);
		}
	}

	// Every port in the range was rejected.
	throw new RuntimeException("Could not start PrometheusReporter HTTP server on any configured port. Ports: " + portsConfig);
}
 
Example #27
Source File: BootstrapTools.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Starts a remote Actor System at given address and specific port.
 * @param configuration The Flink configuration.
 * @param actorSystemName Name of the started {@link ActorSystem}
 * @param externalAddress The external address to access the ActorSystem.
 * @param externalPort The external port to access the ActorSystem.
 * @param bindAddress The local address to bind to.
 * @param bindPort The local port to bind to.
 * @param logger the logger to output log information.
 * @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying executor
 * @param customConfig Custom Akka config to be combined with the config derived from Flink configuration.
 * @return The ActorSystem which has been started.
 * @throws Exception Thrown when the actor system cannot be started, e.g. because the bind address is in use.
 */
private static ActorSystem startRemoteActorSystem(
	Configuration configuration,
	String actorSystemName,
	String externalAddress,
	int externalPort,
	String bindAddress,
	int bindPort,
	Logger logger,
	ActorSystemExecutorConfiguration actorSystemExecutorConfiguration,
	Config customConfig) throws Exception {

	String externalHostPortUrl = NetUtils.unresolvedHostAndPortToNormalizedString(externalAddress, externalPort);
	String bindHostPortUrl = NetUtils.unresolvedHostAndPortToNormalizedString(bindAddress, bindPort);
	logger.info("Trying to start actor system, external address {}, bind address {}.", externalHostPortUrl, bindHostPortUrl);

	try {
		Config akkaConfig = AkkaUtils.getAkkaConfig(
			configuration,
			new Some<>(new Tuple2<>(externalAddress, externalPort)),
			new Some<>(new Tuple2<>(bindAddress, bindPort)),
			actorSystemExecutorConfiguration.getAkkaConfig());

		// Custom settings take precedence over the derived Akka config.
		if (customConfig != null) {
			akkaConfig = customConfig.withFallback(akkaConfig);
		}

		return startActorSystem(akkaConfig, actorSystemName, logger);
	}
	catch (Throwable t) {
		if (t instanceof ChannelException) {
			// A BindException buried in the ChannelException means the
			// configured bind address/port is unavailable.
			// CLEANUP: instanceof already implies non-null, and getCause()
			// is now called only once.
			Throwable cause = t.getCause();
			if (cause instanceof BindException) {
				throw new IOException("Unable to create ActorSystem at address " + bindHostPortUrl +
					" : " + cause.getMessage(), t);
			}
		}
		throw new Exception("Could not create actor system", t);
	}
}
 
Example #28
Source File: FlinkKafkaProducer.java    From flink with Apache License 2.0 5 votes vote down vote up
private static Properties getPropertiesFromBrokerList(String brokerList) {
	// Fail fast on malformed "host:port" entries before handing the raw
	// comma-separated list to the Kafka producer configuration.
	for (String broker : brokerList.split(",")) {
		NetUtils.getCorrectHostnamePort(broker);
	}

	Properties properties = new Properties();
	properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
	return properties;
}
 
Example #29
Source File: LocatableInputSplitAssigner.java    From flink with Apache License 2.0 5 votes vote down vote up
private static final boolean isLocal(String flinkHost, String[] hosts) {
	// Nothing to compare against when either side is missing.
	if (flinkHost == null || hosts == null) {
		return false;
	}

	for (int i = 0; i < hosts.length; i++) {
		final String candidate = hosts[i];
		if (candidate == null) {
			continue;
		}
		// Normalize the candidate (lower-case, strip domain) before comparing.
		if (NetUtils.getHostnameFromFQDN(candidate.toLowerCase()).equals(flinkHost)) {
			return true;
		}
	}

	return false;
}
 
Example #30
Source File: StringWriterTest.java    From flink with Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void createHDFS() throws IOException {
	org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();

	// A fresh temp directory isolates the mini cluster's storage per test-class run.
	File dataDir = TEMPORARY_FOLDER.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	// The NameNode port is chosen dynamically; read it back for the URI.
	outputDir = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort());
}