Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNode#DEFAULT_PORT
The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNode#DEFAULT_PORT. The source project and license are noted above each example.
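All of these examples revolve around the same idea: NameNode.DEFAULT_PORT is the NameNode's default RPC port (8020 in the Hadoop code bases shown here), and a URI that omits the port should be treated as equivalent to one that spells it out. A minimal sketch of that equivalence, assuming a made-up host name:

import java.net.URI;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class DefaultPortDemo {
  public static void main(String[] args) {
    // A URI with no port reports -1; one with the default reports 8020.
    URI bare = URI.create("hdfs://nn.example.com/user/alice");
    URI explicit = URI.create(
        "hdfs://nn.example.com:" + NameNode.DEFAULT_PORT + "/user/alice");
    System.out.println(bare.getPort());      // -1
    System.out.println(explicit.getPort());  // 8020
  }
}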
Example 1
Source File: DistributedFileSystem.java From hadoop-gpu with Apache License 2.0
/** Normalize paths that explicitly specify the default port. */
public Path makeQualified(Path path) {
  URI thisUri = this.getUri();
  URI thatUri = path.toUri();
  String thatAuthority = thatUri.getAuthority();
  if (thatUri.getScheme() != null
      && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
      && thatUri.getPort() == NameNode.DEFAULT_PORT
      && thisUri.getPort() == -1
      && thatAuthority.substring(0, thatAuthority.indexOf(":"))
           .equalsIgnoreCase(thisUri.getAuthority())) {
    path = new Path(thisUri.getScheme(), thisUri.getAuthority(),
                    thatUri.getPath());
  }
  return super.makeQualified(path);
}
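A hedged usage sketch of the normalization above (host and path are invented): when the file system was created from a port-less URI, an explicit :8020 in a path is stripped so both spellings qualify to the same authority.

// Assumes fs was obtained from a port-less URI.
FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com"), conf);
Path explicit = new Path("hdfs://nn.example.com:8020/data/file.txt");
// The override rewrites the authority to the file system's own,
// yielding hdfs://nn.example.com/data/file.txt.
Path qualified = fs.makeQualified(explicit);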
Example 2
Source File: DelegationTokenSelector.java From hadoop with Apache License 2.0
/**
 * Select the delegation token for hdfs. The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
 * This method should only be called by non-hdfs filesystems that do not
 * use the rpc port to acquire tokens. Ex. webhdfs, hftp
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // This guesses the remote cluster's rpc service port. The current token
  // design assumes it's the same as the local cluster's rpc port unless a
  // config key is set. There should be a way to automatically and
  // correctly determine the value.
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);

  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
  }
  // Use the original hostname from the uri to avoid unintentional host
  // resolving.
  serviceName = SecurityUtil.buildTokenService(
      NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));

  return selectToken(serviceName, tokens);
}
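To make the config-driven rewrite concrete, a hedged sketch (host names and ports are invented, and a DelegationTokenSelector instance selector and UserGroupInformation ugi are assumed to be in scope; the exact service string depends on SecurityUtil.buildTokenService): if hdfs.service.host_<service> is set, its port wins; otherwise NameNode.DEFAULT_PORT is assumed.

Configuration conf = new Configuration();
// Optional override: the remote cluster's RPC service actually listens
// on port 9000 rather than the default 8020.
conf.set("hdfs.service.host_nn.remote.example.com:50070",
    "nn.remote.example.com:9000");
URI webhdfsUri = URI.create("webhdfs://nn.remote.example.com:50070");
Token<DelegationTokenIdentifier> token =
    selector.selectToken(webhdfsUri, ugi.getTokens(), conf);
// Without the override, the selector would look for a token whose
// service ends in :8020 (NameNode.DEFAULT_PORT).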
Example 3
Source File: DistributedFileSystem.java From RDFS with Apache License 2.0
/** Normalize paths that explicitly specify the default port. */
public Path makeQualified(Path path) {
  URI thisUri = this.getUri();
  URI thatUri = path.toUri();
  String thatAuthority = thatUri.getAuthority();
  if (thatUri.getScheme() != null
      && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
      && thatUri.getPort() == NameNode.DEFAULT_PORT
      && thisUri.getPort() == -1
      && thatAuthority.substring(0, thatAuthority.indexOf(":"))
           .equalsIgnoreCase(thisUri.getAuthority())) {
    path = new Path(thisUri.getScheme(), thisUri.getAuthority(),
                    thatUri.getPath());
  }
  return super.makeQualified(path);
}
Example 4
Source File: TestDFSClientFailover.java From hadoop with Apache License 2.0
/**
 * Make sure that client failover works when an active NN dies and the
 * standby takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
      + ":" + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
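For context, a hedged sketch of the client-side configuration that makes a logical host name like the one HATestUtil generates resolvable outside the test harness (nameservice and host names are invented):

Configuration conf = new Configuration();
conf.set("dfs.nameservices", "mycluster");
conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1.example.com:8020");
conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2.example.com:8020");
conf.set("dfs.client.failover.proxy.provider.mycluster",
    "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
// The logical URI carries no port; the proxy provider resolves it.
FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster"), conf);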
Example 5
Source File: DelegationTokenSelector.java From big-c with Apache License 2.0
/**
 * Select the delegation token for hdfs. The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
 * This method should only be called by non-hdfs filesystems that do not
 * use the rpc port to acquire tokens. Ex. webhdfs, hftp
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // This guesses the remote cluster's rpc service port. The current token
  // design assumes it's the same as the local cluster's rpc port unless a
  // config key is set. There should be a way to automatically and
  // correctly determine the value.
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);

  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
  }
  // Use the original hostname from the uri to avoid unintentional host
  // resolving.
  serviceName = SecurityUtil.buildTokenService(
      NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));

  return selectToken(serviceName, tokens);
}
Example 6
Source File: TestDFSClientFailover.java From big-c with Apache License 2.0
/**
 * Make sure that client failover works when an active NN dies and the
 * standby takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
      + ":" + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Example 7
Source File: DistributedFileSystem.java From hadoop-gpu with Apache License 2.0
/** Permit paths which explicitly specify the default port. */
protected void checkPath(Path path) {
  URI thisUri = this.getUri();
  URI thatUri = path.toUri();
  String thatAuthority = thatUri.getAuthority();
  if (thatUri.getScheme() != null
      && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
      && thatUri.getPort() == NameNode.DEFAULT_PORT
      && thisUri.getPort() == -1
      && thatAuthority.substring(0, thatAuthority.indexOf(":"))
           .equalsIgnoreCase(thisUri.getAuthority())) {
    return;
  }
  super.checkPath(path);
}
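A hedged illustration of what the override buys (host and paths are invented): without it, the second open() below would fail the generic authority check with an IllegalArgumentException ("Wrong FS"), because the two authorities differ textually.

FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com"), conf);
// Accepted by the plain authority comparison.
fs.open(new Path("hdfs://nn.example.com/data/file.txt"));
// Accepted only because checkPath() permits the explicit default port.
fs.open(new Path("hdfs://nn.example.com:8020/data/file.txt"));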
Example 8
Source File: Hdfs.java From big-c with Apache License 2.0
/**
 * This constructor has the signature needed by
 * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
 *
 * @param theUri which must be that of Hdfs
 * @param conf configuration
 * @throws IOException
 */
Hdfs(final URI theUri, final Configuration conf)
    throws IOException, URISyntaxException {
  super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

  if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
    throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
  }

  String host = theUri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + theUri);
  }

  this.dfs = new DFSClient(theUri, conf, getStatistics());
}
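The constructor is invoked reflectively via AbstractFileSystem.createFileSystem(), so client code normally reaches it through FileContext. A hedged sketch, assuming a made-up host:

Configuration conf = new Configuration();
// FileContext resolves the hdfs scheme to the Hdfs AbstractFileSystem,
// which is constructed with NameNode.DEFAULT_PORT as its default port.
FileContext fc = FileContext.getFileContext(
    URI.create("hdfs://nn.example.com"), conf);
FileStatus status = fc.getFileStatus(new Path("/user/alice"));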
Example 9
Source File: DistributedFileSystem.java From RDFS with Apache License 2.0
/** Permit paths which explicitly specify the default port. */
protected void checkPath(Path path) {
  URI thisUri = this.getUri();
  URI thatUri = path.toUri();
  String thatAuthority = thatUri.getAuthority();
  if (thatUri.getScheme() != null
      && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
      && thatUri.getPort() == NameNode.DEFAULT_PORT
      && thisUri.getPort() == -1
      && thatAuthority.substring(0, thatAuthority.indexOf(":"))
           .equalsIgnoreCase(thisUri.getAuthority())) {
    return;
  }
  super.checkPath(path);
}
Example 10
Source File: Hdfs.java From hadoop with Apache License 2.0
/**
 * This constructor has the signature needed by
 * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
 *
 * @param theUri which must be that of Hdfs
 * @param conf configuration
 * @throws IOException
 */
Hdfs(final URI theUri, final Configuration conf)
    throws IOException, URISyntaxException {
  super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

  if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
    throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
  }

  String host = theUri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + theUri);
  }

  this.dfs = new DFSClient(theUri, conf, getStatistics());
}
Example 11
Source File: PathParts.java From examples with Apache License 2.0
public int getPort() {
  int port = normalizedPath.toUri().getPort();
  if (port == -1) {
    port = fs.getWorkingDirectory().toUri().getPort();
    if (port == -1) {
      port = NameNode.DEFAULT_PORT;
    }
  }
  return port;
}
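The fallback chain reads: port on the path itself, then port on the file system's working directory, then the HDFS default. A hypothetical standalone version of the same logic:

// Hypothetical helper, not part of PathParts.
static int effectivePort(URI pathUri, URI workingDirUri) {
  int port = pathUri.getPort();       // 1. port spelled out on the path
  if (port == -1) {
    port = workingDirUri.getPort();   // 2. port on the working directory
  }
  if (port == -1) {
    port = NameNode.DEFAULT_PORT;     // 3. the HDFS default (8020)
  }
  return port;
}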
Example 12
Source File: NameNodeProxies.java From big-c with Apache License 2.0
/** Creates the Failover proxy provider instance. */
@VisibleForTesting
public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
    Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort,
    AtomicBoolean fallbackToSimpleAuth) throws IOException {
  Class<FailoverProxyProvider<T>> failoverProxyProviderClass = null;
  AbstractNNFailoverProxyProvider<T> providerNN;
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface %s is not a NameNode protocol", xface);
  try {
    // Obtain the class of the proxy provider
    failoverProxyProviderClass = getFailoverProxyProviderClass(conf,
        nameNodeUri);
    if (failoverProxyProviderClass == null) {
      return null;
    }
    // Create a proxy provider instance.
    Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
        .getConstructor(Configuration.class, URI.class, Class.class);
    FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
        xface);

    // If the proxy provider is of an old implementation, wrap it.
    if (!(provider instanceof AbstractNNFailoverProxyProvider)) {
      providerNN = new WrappedFailoverProxyProvider<T>(provider);
    } else {
      providerNN = (AbstractNNFailoverProxyProvider<T>) provider;
    }
  } catch (Exception e) {
    String message = "Couldn't create proxy provider "
        + failoverProxyProviderClass;
    if (LOG.isDebugEnabled()) {
      LOG.debug(message, e);
    }
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(message, e);
    }
  }

  // Check the port in the URI, if it is logical.
  if (checkPort && providerNN.useLogicalURI()) {
    int port = nameNodeUri.getPort();
    if (port > 0 && port != NameNode.DEFAULT_PORT) {
      // Throwing here without any cleanup is fine since we have not
      // actually created the underlying proxies yet.
      throw new IOException("Port " + port + " specified in URI "
          + nameNodeUri + " but host '" + nameNodeUri.getHost()
          + "' is a logical (HA) namenode"
          + " and does not use port information.");
    }
  }
  providerNN.setFallbackToSimpleAuth(fallbackToSimpleAuth);
  return providerNN;
}
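A hedged illustration of the final port check (nameservice invented, with an HA configuration like the one sketched after Example 4): a logical URI must not carry a port, unless that port happens to be the default.

try {
  NameNodeProxies.createFailoverProxyProvider(conf,
      URI.create("hdfs://mycluster:8021"), ClientProtocol.class, true, null);
} catch (IOException e) {
  // "Port 8021 specified in URI hdfs://mycluster:8021 but host 'mycluster'
  // is a logical (HA) namenode and does not use port information."
}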
Example 13
Source File: DistributedFileSystem.java From big-c with Apache License 2.0
@Override
protected int getDefaultPort() {
  return NameNode.DEFAULT_PORT;
}
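This one-liner is what lets the generic FileSystem machinery fill in a missing port. For instance, in this Hadoop line getCanonicalServiceName() builds the delegation-token service name from getUri() and getDefaultPort(), so a hedged sketch (host invented) prints a stable host:port pair whether or not the client URI spelled out :8020:

FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com"), conf);
// Typically resolves to something like "10.0.0.5:8020".
System.out.println(fs.getCanonicalServiceName());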
Example 14
Source File: Hdfs.java From big-c with Apache License 2.0
@Override
public int getUriDefaultPort() {
  return NameNode.DEFAULT_PORT;
}
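getUriDefaultPort() is the AbstractFileSystem counterpart of getDefaultPort() above, and AbstractFileSystem.checkPath() consults it the same way. A hedged sketch (host invented):

Configuration conf = new Configuration();
FileContext fc = FileContext.getFileContext(
    URI.create("hdfs://nn.example.com"), conf);
// Accepted because checkPath() treats the explicit :8020 as equivalent
// to the port-less authority the context was created with.
boolean exists = fc.util().exists(
    new Path("hdfs://nn.example.com:8020/user/alice"));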
Example 15
Source File: DistributedFileSystem.java From hadoop with Apache License 2.0
@Override
protected int getDefaultPort() {
  return NameNode.DEFAULT_PORT;
}
Example 16
Source File: NameNodeProxies.java From hadoop with Apache License 2.0
/** Creates the Failover proxy provider instance. */
@VisibleForTesting
public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
    Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort,
    AtomicBoolean fallbackToSimpleAuth) throws IOException {
  Class<FailoverProxyProvider<T>> failoverProxyProviderClass = null;
  AbstractNNFailoverProxyProvider<T> providerNN;
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface %s is not a NameNode protocol", xface);
  try {
    // Obtain the class of the proxy provider
    failoverProxyProviderClass = getFailoverProxyProviderClass(conf,
        nameNodeUri);
    if (failoverProxyProviderClass == null) {
      return null;
    }
    // Create a proxy provider instance.
    Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
        .getConstructor(Configuration.class, URI.class, Class.class);
    FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
        xface);

    // If the proxy provider is of an old implementation, wrap it.
    if (!(provider instanceof AbstractNNFailoverProxyProvider)) {
      providerNN = new WrappedFailoverProxyProvider<T>(provider);
    } else {
      providerNN = (AbstractNNFailoverProxyProvider<T>) provider;
    }
  } catch (Exception e) {
    String message = "Couldn't create proxy provider "
        + failoverProxyProviderClass;
    if (LOG.isDebugEnabled()) {
      LOG.debug(message, e);
    }
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(message, e);
    }
  }

  // Check the port in the URI, if it is logical.
  if (checkPort && providerNN.useLogicalURI()) {
    int port = nameNodeUri.getPort();
    if (port > 0 && port != NameNode.DEFAULT_PORT) {
      // Throwing here without any cleanup is fine since we have not
      // actually created the underlying proxies yet.
      throw new IOException("Port " + port + " specified in URI "
          + nameNodeUri + " but host '" + nameNodeUri.getHost()
          + "' is a logical (HA) namenode"
          + " and does not use port information.");
    }
  }
  providerNN.setFallbackToSimpleAuth(fallbackToSimpleAuth);
  return providerNN;
}
Example 17
Source File: Hdfs.java From hadoop with Apache License 2.0
@Override
public int getUriDefaultPort() {
  return NameNode.DEFAULT_PORT;
}