Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNode#getAddress()
The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNode#getAddress(). The source project, file, and license are noted above each snippet.
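Before the project-specific examples, here is a minimal, self-contained sketch of the two overloads that recur throughout this page: resolving the NameNode's InetSocketAddress from a Configuration and from a bare "host:port" authority string, then seeding a DFSClient with it. This is a hedged sketch, not code from any of the projects below; the host name and port are placeholders, and it assumes a classic (non-HA) fs.defaultFS.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class GetAddressSketch {
  public static void main(String[] args) throws Exception {
    // Resolve the NameNode RPC address from the configured default filesystem.
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://nn.example.com:8020"); // placeholder authority
    InetSocketAddress fromConf = NameNode.getAddress(conf);

    // The same address can be resolved from a bare "host:port" authority string.
    InetSocketAddress fromAuthority = NameNode.getAddress("nn.example.com:8020");
    System.out.println("NameNode at " + fromAuthority);

    // Either form can seed a DFSClient, as the examples below do.
    DFSClient client = new DFSClient(fromConf, conf);
    client.close();
  }
}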
Example 1
Source File: BootstrapStandby.java (from big-c, Apache License 2.0)
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
Example 2
Source File: IPFailoverProxyProvider.java (from hadoop, Apache License 2.0)
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-HA proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
Example 3
Source File: TestDFSShellGenericOptions.java (from RDFS, Apache License 2.0)
private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = new DistributedFileSystem(NameNode.getAddress(namenode),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example 4
Source File: DistributedFileSystemMetadata.java (from hdfs-metadata, GNU General Public License v3.0)
public HashMap<String, Integer> getNumberOfDataDirsPerHost() {
  HashMap<String, Integer> disksPerHost = new HashMap<>();

  try {
    @SuppressWarnings("resource")
    DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf());

    DatanodeStorageReport[] datanodeStorageReports =
        dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);

    for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
      disksPerHost.put(
          datanodeStorageReport.getDatanodeInfo().getHostName(),
          datanodeStorageReport.getStorageReports().length);
    }
  } catch (IOException e) {
    LOG.warn("Number of data directories (disks) per node could not be "
        + "collected (requires higher privileges).");
  }

  return disksPerHost;
}
Example 5
Source File: NameNodeProxies.java (from hadoop, Apache License 2.0)
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending on
 * whether the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
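As a usage note for the factory above: a caller typically passes a logical HA URI and receives both the retrying proxy and the delegation-token service it maps to. The sketch below is hedged, not taken from the source project; the nameservice name "mycluster" is hypothetical, and passing null for fallbackToSimpleAuth is assumed to be tolerated (the class also exposes an overload without that argument).

static ClientProtocol connectToNameNode(Configuration conf) throws IOException {
  URI nnUri = URI.create("hdfs://mycluster"); // hypothetical logical nameservice
  NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
      NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class, null);
  // info also carries the delegation token service and the resolved address.
  return info.getProxy();
}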
Example 6
Source File: NameNodeProxies.java (from big-c, Apache License 2.0)
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending on
 * whether the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
Example 7
Source File: DistributedFileSystem.java (from hadoop-gpu, Apache License 2.0)
public void initialize(URI uri, Configuration conf) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);

  String host = uri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + uri);
  }

  InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
  this.dfs = new DFSClient(namenode, conf, statistics);
  this.uri = NameNode.getUri(namenode);
  this.workingDir = getHomeDirectory();
}
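For context, initialize(...) above normally runs indirectly: client code asks FileSystem for an hdfs:// URI, and the returned DistributedFileSystem resolves the NameNode via NameNode.getAddress(uri.getAuthority()). A minimal sketch with a placeholder authority:

static void openViaFileSystemApi() throws IOException {
  Configuration conf = new Configuration();
  // FileSystem.get() instantiates DistributedFileSystem and calls the
  // initialize(uri, conf) shown above.
  FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com:8020/"), conf);
  fs.close();
}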
Example 8
Source File: DataNode.java (from RDFS, Apache License 2.0)
/**
 * Returns the address the namenode uses to communicate with datanodes.
 * If this address is not configured, the default NameNode address is used,
 * since the namenode is then running only one RPC server. If the namenode
 * is running multiple servers, this address cannot be used by clients!
 * @param conf
 * @return the namenode address for datanode communication
 */
public static InetSocketAddress getNameNodeAddress(Configuration conf) {
  InetSocketAddress addr = NameNode.getDNProtocolAddress(conf);
  if (addr != null) {
    return addr;
  }
  return NameNode.getAddress(conf);
}
Example 9
Source File: TestDataNodeRollingUpgrade.java (from hadoop, Apache License 2.0)
@Test (timeout=600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);

    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);

    byte[] toWrite = new byte[1024 * 1024 * 8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);

    s1.write(toWrite, 0, 1024 * 1024 * 8);
    s1.flush();
    s2.write(toWrite, 0, 1024 * 1024 * 8);
    s2.flush();
    s3.write(toWrite, 0, 1024 * 1024 * 8);
    s3.flush();

    // Every open peer should be paired with exactly one xceiver.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());

    s1.close();
    s2.close();
    s3.close();

    // After the streams close, the counts should still match.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());

    client1.close();
    client2.close();
    client3.close();
  } finally {
    shutdownCluster();
  }
}
Example 10
Source File: DFSZKFailoverController.java (from big-c, Apache License 2.0)
@Override
public void loginAsFCUser() throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
Example 11
Source File: NameNodeProxies.java (from big-c, Apache License 2.0)
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instances generated by this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setups; null will be returned if the given configuration is
 * not for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if
 *         the given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
        numResponseToDrop, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
            Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
            maxCap));

    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { xface }, dummyHandler);

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
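A hedged usage sketch for the lossy-retry factory above, roughly as a fault-injection test might call it. The logical URI "mycluster" is hypothetical, and since the method returns null for non-HA configurations, the caller must check the result.

static ClientProtocol lossyNameNodeProxy(Configuration conf) throws IOException {
  URI haUri = URI.create("hdfs://mycluster"); // hypothetical HA logical URI
  NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
      NameNodeProxies.createProxyWithLossyRetryHandler(
          conf, haUri, ClientProtocol.class,
          2 /* responses to drop per RPC */, null);
  if (info == null) {
    throw new IOException("LossyRetryInvocationHandler requires an HA setup");
  }
  return info.getProxy();
}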
Example 12
Source File: TestWrites.java (from big-c, Apache License 2.0)
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;

  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    // Issue the writes in reverse offset order so they arrive out of order.
    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);

    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 13
Source File: TestDataNodeRollingUpgrade.java (from big-c, Apache License 2.0)
@Test (timeout=600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);

    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);

    byte[] toWrite = new byte[1024 * 1024 * 8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);

    s1.write(toWrite, 0, 1024 * 1024 * 8);
    s1.flush();
    s2.write(toWrite, 0, 1024 * 1024 * 8);
    s2.flush();
    s3.write(toWrite, 0, 1024 * 1024 * 8);
    s3.flush();

    // Every open peer should be paired with exactly one xceiver.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());

    s1.close();
    s2.close();
    s3.close();

    // After the streams close, the counts should still match.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());

    client1.close();
    client2.close();
    client3.close();
  } finally {
    shutdownCluster();
  }
}
Example 14
Source File: TestFileStatus.java (from hadoop-gpu, Apache License 2.0)
/**
 * Tests various options of DFSShell.
 */
public void testFileStatus() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
  try {
    // check that / exists
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    System.out.println(fs.isDirectory(path));
    System.out.println(fs.getFileStatus(path).isDir());
    assertTrue("/ should be a directory",
        fs.getFileStatus(path).isDir() == true);

    // make sure getFileInfo returns null for files which do not exist
    FileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
    assertTrue(fileInfo == null);

    // create a file in the home directory
    Path file1 = new Path("filestatus.dat");
    writeFile(fs, file1, 1, fileSize, blockSize);
    System.out.println("Created file filestatus.dat with one replica.");
    checkFile(fs, file1, 1);
    assertTrue(file1 + " should be a file",
        fs.getFileStatus(file1).isDir() == false);
    assertTrue(fs.getFileStatus(file1).getBlockSize() == blockSize);
    assertTrue(fs.getFileStatus(file1).getReplication() == 1);
    assertTrue(fs.getFileStatus(file1).getLen() == fileSize);
    System.out.println("Path : \"" + file1 + "\"");

    // create an empty directory
    Path parentDir = new Path("/test");
    Path dir = new Path("/test/mkdirs");
    assertTrue(fs.mkdirs(dir));
    assertTrue(fs.exists(dir));
    assertTrue(dir + " should be a directory",
        fs.getFileStatus(dir).isDir() == true);
    assertTrue(dir + " should be zero size ",
        fs.getContentSummary(dir).getLength() == 0);
    assertTrue(dir + " should be zero size ",
        fs.getFileStatus(dir).getLen() == 0);
    System.out.println("Dir : \"" + dir + "\"");

    // create another file that is smaller than a block
    Path file2 = new Path("/test/mkdirs/filestatus2.dat");
    writeFile(fs, file2, 1, blockSize / 4, blockSize);
    System.out.println("Created file filestatus2.dat with one replica.");
    checkFile(fs, file2, 1);
    System.out.println("Path : \"" + file2 + "\"");

    // verify file attributes
    assertTrue(fs.getFileStatus(file2).getBlockSize() == blockSize);
    assertTrue(fs.getFileStatus(file2).getReplication() == 1);

    // create another file in the same directory
    Path file3 = new Path("/test/mkdirs/filestatus3.dat");
    writeFile(fs, file3, 1, blockSize / 4, blockSize);
    System.out.println("Created file filestatus3.dat with one replica.");
    checkFile(fs, file3, 1);

    // verify that the size of the directory increased by the size
    // of the two files
    assertTrue(dir + " size should be " + (blockSize / 2),
        blockSize / 2 == fs.getContentSummary(dir).getLength());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Example 15
Source File: NameNodeProxies.java (from hadoop, Apache License 2.0)
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instances generated by this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setups; null will be returned if the given configuration is
 * not for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if
 *         the given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
        numResponseToDrop, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
            Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
            maxCap));

    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { xface }, dummyHandler);

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
Example 16
Source File: TestWrites.java (from hadoop, Apache License 2.0)
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;

  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    // Issue the writes in reverse offset order so they arrive out of order.
    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);

    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 17
Source File: DFSClient.java (from hadoop-gpu, Apache License 2.0)
/**
 * Same as this(NameNode.getAddress(conf), conf);
 * @see #DFSClient(InetSocketAddress, Configuration)
 */
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
Example 18
Source File: DFSClient.java (from RDFS, Apache License 2.0)
/**
 * Same as this(NameNode.getAddress(conf), conf);
 * @see #DFSClient(InetSocketAddress, Configuration)
 */
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
Example 19
Source File: DFSClient.java (from hadoop, Apache License 2.0)
/**
 * Same as this(NameNode.getAddress(conf), conf);
 * @see #DFSClient(InetSocketAddress, Configuration)
 * @deprecated Deprecated at 0.21
 */
@Deprecated
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
Example 20
Source File: DFSClient.java (from big-c, Apache License 2.0)
/**
 * Same as this(NameNode.getAddress(conf), conf);
 * @see #DFSClient(InetSocketAddress, Configuration)
 * @deprecated Deprecated at 0.21
 */
@Deprecated
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}