org.apache.hadoop.hdfs.protocol.ClientProtocol Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.ClientProtocol.
Each example notes the project it comes from, along with the source file and license.
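All of the examples revolve around obtaining and calling a ClientProtocol proxy, the RPC interface a DFS client uses to talk to the NameNode. As a quick orientation, here is a minimal sketch of the common pattern, assuming a Hadoop 2.x classpath; the class name and NameNode URI are hypothetical placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ClientProtocolExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode URI; substitute your cluster's address.
    URI nnUri = URI.create("hdfs://namenode.example.com:8020");
    // createProxy resolves HA vs. non-HA configuration automatically,
    // as Examples #2 and #7 below show in context.
    ClientProtocol namenode = NameNodeProxies
        .createProxy(conf, nnUri, ClientProtocol.class)
        .getProxy();
    // A representative RPC: block locations for the first byte of a file.
    LocatedBlocks blocks = namenode.getBlockLocations("/tmp/file.txt", 0, 1);
    System.out.println("located blocks: " + blocks.getLocatedBlocks().size());
  }
}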
Example #1
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked") private static void mockCreate(ClientProtocol mcp, CipherSuite suite, CryptoProtocolVersion version) throws Exception { Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, new FileEncryptionInfo(suite, version, new byte[suite.getAlgorithmBlockSize()], new byte[suite.getAlgorithmBlockSize()], "fakeKey", "fakeVersion"), (byte) 0)) .when(mcp) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject()); }
Example #2
Source File: DFSClient.java From hadoop with Apache License 2.0
private static ClientProtocol getNNProxy(
    Token<DelegationTokenIdentifier> token, Configuration conf)
    throws IOException {
  URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME,
      token);
  if (HAUtil.isTokenForLogicalUri(token) &&
      !HAUtil.isLogicalUri(conf, uri)) {
    // If the token is for a logical nameservice, but the configuration
    // we have disagrees about that, we can't actually renew it.
    // This can be the case in MR, for example, if the RM doesn't
    // have all of the HA clusters configured in its configuration.
    throw new IOException("Unable to map logical nameservice URI '" +
        uri + "' to a NameNode. Local configuration does not have " +
        "a failover proxy provider configured.");
  }

  NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
      NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
  assert info.getDelegationTokenService().equals(token.getService()) :
      "Returned service '" + info.getDelegationTokenService().toString() +
      "' doesn't match expected service '" +
      token.getService().toString() + "'";

  return info.getProxy();
}
Example #3
Source File: NameNode.java From big-c with Apache License 2.0
public long getProtocolVersion(String protocol,
                               long clientVersion) throws IOException {
  if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID;
  } else if (protocol.equals(DatanodeProtocol.class.getName())) {
    return DatanodeProtocol.versionID;
  } else if (protocol.equals(NamenodeProtocol.class.getName())) {
    return NamenodeProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())) {
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())) {
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
    return RefreshCallQueueProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())) {
    return GetUserMappingsProtocol.versionID;
  } else if (protocol.equals(TraceAdminProtocol.class.getName())) {
    return TraceAdminProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to name node: " + protocol);
  }
}
Example #4
Source File: DFSClient.java From RDFS with Apache License 2.0
/**
 * Create a NameNode proxy for the client if the client and NameNode
 * are compatible.
 *
 * @param nameNodeAddr NameNode address
 * @param conf configuration
 * @param ugi ticket
 */
private void createRPCNamenodeIfCompatible(
    InetSocketAddress nameNodeAddr,
    Configuration conf,
    UnixUserGroupInformation ugi) throws IOException {
  try {
    this.namenodeProtocolProxy = createRPCNamenode(nameNodeAddr, conf, ugi);
    this.rpcNamenode = namenodeProtocolProxy.getProxy();
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    namenodeVersion = e.getServerVersion();
    if (clientVersion > namenodeVersion &&
        !ProtocolCompatible.isCompatibleClientProtocol(
            clientVersion, namenodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientProtocol.class.getName(), clientVersion, namenodeVersion);
    }
    this.rpcNamenode = (ClientProtocol) e.getProxy();
  }
}
Example #5
Source File: DataNodeLocatorUtils.java From twister2 with Apache License 2.0
/**
 * This method retrieves all the datanodes of an HDFS cluster.
 */
private List<String> getDataNodes() throws IOException {
  Configuration conf = new Configuration(false);
  conf.addResource(new org.apache.hadoop.fs.Path(
      HdfsDataContext.getHdfsConfigDirectory(config)));

  List<String> datanodesList = new ArrayList<>();
  InetSocketAddress namenodeAddress = new InetSocketAddress(
      HdfsDataContext.getHdfsNamenodeDefault(config),
      HdfsDataContext.getHdfsNamenodePortDefault(config));
  DFSClient dfsClient = new DFSClient(namenodeAddress, conf);
  ClientProtocol nameNode = dfsClient.getNamenode();
  DatanodeInfo[] datanodeReport =
      nameNode.getDatanodeReport(HdfsConstants.DatanodeReportType.ALL);
  for (DatanodeInfo di : datanodeReport) {
    datanodesList.add(di.getHostName());
  }
  return datanodesList;
}
Example #6
Source File: TestNameNodePorts.java From RDFS with Apache License 2.0
public void testSinglePortStartup() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  NameNode nn = cluster.getNameNode();
  InetSocketAddress dnAddress = nn.getNameNodeDNAddress();
  InetSocketAddress clientAddress = nn.getNameNodeAddress();

  assertEquals(clientAddress, dnAddress);

  DatanodeProtocol dnProtocol = (DatanodeProtocol) RPC.waitForProxy(
      DatanodeProtocol.class, DatanodeProtocol.versionID, dnAddress, conf);
  // perform a dummy call
  dnProtocol.getProtocolVersion(DatanodeProtocol.class.getName(),
      DatanodeProtocol.versionID);
  ClientProtocol client = (ClientProtocol) RPC.waitForProxy(
      ClientProtocol.class, ClientProtocol.versionID, dnAddress, conf);
  client.getProtocolVersion(ClientProtocol.class.getName(),
      ClientProtocol.versionID);

  cluster.shutdown();
}
Example #7
Source File: DfsServlet.java From hadoop with Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}.
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();

  // if we are running in the Name Node, use it directly rather than via rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
      NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
Example #8
Source File: FileDataServlet.java From RDFS with Apache License 2.0
/**
 * Select a datanode to service this request, which is the first one
 * in the returned array. The rest of the elements in the array are
 * possible candidates if the first one fails.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 */
private static DatanodeInfo[] pickSrcDatanode(FileStatus i,
    ClientProtocol nnproxy) throws IOException {
  // a race condition can happen by initializing a static member this way.
  // A proper fix should make JspHelper a singleton. Since it doesn't affect
  // correctness, we leave it as is for now.
  if (jspHelper == null) {
    jspHelper = new JspHelper();
  }
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      i.getPath().toUri().getPath(), 0, 1);
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    return new DatanodeInfo[] { jspHelper.randomNode() };
  }
  return jspHelper.bestNode(blks);
}
Example #9
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
      NameNodeProxies.createFailoverProxyProvider(conf,
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
          .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
              Integer.MAX_VALUE,
              DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
              DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
Example #10
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Dumps DFS data structures into the specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname
          + " in the log directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname
        + " in the log directory of namenode " + dfs.getUri());
  }
  return 0;
}
Example #11
Source File: FileChecksumServlets.java From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final Configuration conf =
      new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf,
      ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        filename, nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch (IOException ioe) {
    new RemoteException(ioe.getClass().getName(), ioe.getMessage())
        .writeXml(filename, xml);
  }
  xml.endDocument();
}
Example #12
Source File: TestRetryCacheWithHA.java From hadoop with Apache License 2.0
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
      NameNodeProxies.createFailoverProxyProvider(conf,
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
          .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
              Integer.MAX_VALUE,
              DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
              DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
Example #13
Source File: DFSClient.java From RDFS with Apache License 2.0
private FileStatus[] versionBasedListPath(String src) throws IOException {
  if (namenodeVersion >= ClientProtocol.ITERATIVE_LISTING_VERSION) {
    return iterativeListing(src);
  } else if (namenodeVersion >= ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION) {
    HdfsFileStatus[] hdfsStats = namenode.getHdfsListing(src);
    if (hdfsStats == null) {
      return null;
    }
    FileStatus[] stats = new FileStatus[hdfsStats.length];
    for (int i = 0; i < stats.length; i++) {
      stats[i] = toFileStatus(hdfsStats[i], src);
    }
    return stats;
  } else {
    return namenode.getListing(src);
  }
}
Example #14
Source File: FanOutOneBlockAsyncDFSOutput.java From hbase with Apache License 2.0
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs,
    DFSClient client, ClientProtocol namenode, String clientName, String src,
    long fileId, LocatedBlock locatedBlock, Encryptor encryptor,
    List<Channel> datanodeList, DataChecksum summer, ByteBufAllocator alloc) {
  this.conf = conf;
  this.dfs = dfs;
  this.client = client;
  this.namenode = namenode;
  this.fileId = fileId;
  this.clientName = clientName;
  this.src = src;
  this.block = locatedBlock.getBlock();
  this.locations = locatedBlock.getLocations();
  this.encryptor = encryptor;
  this.datanodeList = datanodeList;
  this.summer = summer;
  this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
  this.alloc = alloc;
  this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
  this.state = State.STREAMING;
  setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
Example #15
Source File: NameNodeProxies.java From big-c with Apache License 2.0
/**
 * Creates an explicitly non-HA-enabled proxy object. Most of the time you
 * don't want to use this, and should instead use
 * {@link NameNodeProxies#createProxy}.
 *
 * @param conf the configuration object
 * @param nnAddr address of the remote NN to connect to
 * @param xface the IPC interface which should be created
 * @param ugi the user who is making the calls on the proxy object
 * @param withRetries certain interfaces have a non-standard retry policy
 * @param fallbackToSimpleAuth set to true or false during this method to
 *        indicate if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createNonHAProxy(
    Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
    UserGroupInformation ugi, boolean withRetries,
    AtomicBoolean fallbackToSimpleAuth) throws IOException {
  Text dtService = SecurityUtil.buildTokenService(nnAddr);

  T proxy;
  if (xface == ClientProtocol.class) {
    proxy = (T) createNNProxyWithClientProtocol(nnAddr, conf, ugi,
        withRetries, fallbackToSimpleAuth);
  } else if (xface == JournalProtocol.class) {
    proxy = (T) createNNProxyWithJournalProtocol(nnAddr, conf, ugi);
  } else if (xface == NamenodeProtocol.class) {
    proxy = (T) createNNProxyWithNamenodeProtocol(nnAddr, conf, ugi,
        withRetries);
  } else if (xface == GetUserMappingsProtocol.class) {
    proxy = (T) createNNProxyWithGetUserMappingsProtocol(nnAddr, conf, ugi);
  } else if (xface == RefreshUserMappingsProtocol.class) {
    proxy = (T) createNNProxyWithRefreshUserMappingsProtocol(nnAddr, conf, ugi);
  } else if (xface == RefreshAuthorizationPolicyProtocol.class) {
    proxy = (T) createNNProxyWithRefreshAuthorizationPolicyProtocol(nnAddr,
        conf, ugi);
  } else if (xface == RefreshCallQueueProtocol.class) {
    proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
  } else {
    String message = "Unsupported protocol found when creating the proxy " +
        "connection to NameNode: " +
        ((xface != null) ? xface.getClass().getName() : "null");
    LOG.error(message);
    throw new IllegalStateException(message);
  }

  return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
}
Example #16
Source File: Router.java From nnproxy with Apache License 2.0
ClientProtocol getProtocol(String fs) throws IOException {
  try {
    return getUpstreamProtocol(Server.getRemoteUser().getUserName(), fs);
  } catch (ExecutionException e) {
    throw new WrappedExecutionException(e.getCause());
  }
}
Example #17
Source File: MiniNNProxy.java From nnproxy with Apache License 2.0
public MiniNNProxy(Configuration conf, String mountTable,
    MiniDFSCluster[] clusters) throws Exception {
  super(conf);
  this.mountTable = mountTable;
  this.clusters = clusters;
  this.mounts = new MockedMountsManager();
  this.start();
  UserGroupInformation curr = UserGroupInformation.getCurrentUser();
  clientProtocol = NameNodeProxies.createNonHAProxy(conf,
      getRpcAddress(), ClientProtocol.class, curr, false).getProxy();
  dfs = new DFSClient(
      URI.create("hdfs://127.0.0.1:" + getRpcAddress().getPort()), conf);
  fs = FileSystem.newInstance(
      URI.create("hdfs://127.0.0.1:" + getRpcAddress().getPort()),
      conf, curr.getUserName());
}
Example #18
Source File: TestAvatarCreateFile.java From RDFS with Apache License 2.0
@Test
public void testCreateFileWithoutOverwrite() throws Exception {
  InjectionHandler.set(new TestHandler());
  cluster.clearZooKeeperNode(0);
  ClientProtocol namenode = ((DistributedAvatarFileSystem) fs).getClient()
      .getNameNodeRPC();
  new FailoverThread().start();
  FsPermission perm = new FsPermission((short) 0264);
  namenode.create("/test1", perm, ((DistributedAvatarFileSystem) fs)
      .getClient().getClientName(), false, true, (short) 3, (long) 1024);
  LOG.info("Done with create");
  assertTrue(fs.exists(new Path("/test1")));
  assertTrue(pass);
}
Example #19
Source File: UpstreamManager.java From nnproxy with Apache License 2.0
public Upstream(ClientProtocol protocol,
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyAndInfo,
    NameNodeProxies.ProxyAndInfo<NamenodeProtocol> nnProxyAndInfo) {
  this.protocol = protocol;
  this.proxyAndInfo = proxyAndInfo;
  this.nnProxyAndInfo = nnProxyAndInfo;
}
Example #20
Source File: TestInterDatanodeProtocol.java From hadoop with Apache License 2.0
public static LocatedBlock getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  // get block info for the last block
  LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);

  return blocks.get(blocks.size() - 1);
}
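A hypothetical call site for this helper, sketched under the assumption of a started MiniDFSCluster named cluster; its getNameNodeRpc() accessor returns the NameNode's RPC server, which implements ClientProtocol:

// Hypothetical usage in a test with a running MiniDFSCluster `cluster`:
LocatedBlock last = getLastLocatedBlock(cluster.getNameNodeRpc(), "/test/file");
DataNode.LOG.info("last block: " + last.getBlock());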
Example #21
Source File: DfsServlet.java From RDFS with Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}.
 */
protected ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi)
    throws IOException {
  ServletContext context = getServletContext();
  InetSocketAddress nnAddr =
      (InetSocketAddress) context.getAttribute("name.node.address");
  if (nnAddr == null) {
    throw new IOException("The namenode is not out of safemode yet");
  }
  Configuration conf = new Configuration(
      (Configuration) context.getAttribute("name.conf"));
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  return DFSClient.createNamenode(nnAddr, conf);
}
Example #22
Source File: FileDataServlet.java From RDFS with Apache License 2.0
/** Create a redirection URI */
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request)
    throws IOException, URISyntaxException {
  return createUri(i.getPath().toString(), pickSrcDatanode(i, nnproxy),
      ugi, request);
}
Example #23
Source File: FileDataServlet.java From big-c with Apache License 2.0
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath,
    HdfsFileStatus status, UserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request, String dt)
    throws IOException {
  String scheme = request.getScheme();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
  final Configuration conf =
      NameNodeHttpServer.getConfFromContext(getServletContext());
  final DatanodeID host = pickSrcDatanode(blks, status, conf);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = host.getHostName();
  } else {
    hostname = host.getIpAddr();
  }

  int port = "https".equals(scheme) ? host.getInfoSecurePort()
      : host.getInfoPort();

  String dtParam = "";
  if (dt != null) {
    dtParam = JspHelper.getDelegationTokenUrlParam(dt);
  }

  // Add namenode address to the url params
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(getServletContext());
  String addr = nn.getNameNodeAddressHostPortString();
  String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

  return new URL(scheme, hostname, port,
      "/streamFile" + encodedPath + '?' +
      "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
      dtParam + addrParam);
}
Example #24
Source File: TestHAStateTransitions.java From hadoop with Apache License 2.0
/**
 * This test also serves to test
 * {@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration, String)}
 * and
 * {@link DFSUtil#getRpcAddressesForNameserviceId(Configuration, String, String)}
 * by virtue of the fact that it wouldn't work properly if the proxies
 * returned were not for the correct NNs.
 */
@Test(timeout = 300000)
public void testIsAtLeastOneActive() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .build();
  try {
    Configuration conf = new HdfsConfiguration();
    HATestUtil.setFailoverConfigurations(cluster, conf);

    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf,
            HATestUtil.getLogicalHostname(cluster));

    assertEquals(2, namenodes.size());

    assertFalse(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToActive(0);
    assertTrue(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToStandby(0);
    assertFalse(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToActive(1);
    assertTrue(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToStandby(1);
    assertFalse(HAUtil.isAtLeastOneActive(namenodes));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #25
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Example #26
Source File: TestFsckWithMultipleNameNodes.java From hadoop with Apache License 2.0
Suite(MiniDFSCluster cluster, final int nNameNodes, final int nDataNodes)
    throws IOException {
  this.cluster = cluster;
  clients = new ClientProtocol[nNameNodes];
  for (int i = 0; i < nNameNodes; i++) {
    clients[i] = cluster.getNameNode(i).getRpcServer();
  }
  replication = (short) Math.max(1, nDataNodes - 1);
}
Example #27
Source File: TestIsMethodSupported.java From hadoop with Apache License 2.0
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientProtocol cp = NameNodeProxies.createNonHAProxy(
      conf, nnAddress, ClientProtocol.class,
      UserGroupInformation.getCurrentUser(), true).getProxy();
  RpcClientUtil.isMethodSupported(cp,
      ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
      RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}
Example #28
Source File: DFSClient.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked") @Override public long renew(Token<?> token, Configuration conf) throws IOException { Token<DelegationTokenIdentifier> delToken = (Token<DelegationTokenIdentifier>) token; ClientProtocol nn = getNNProxy(delToken, conf); try { return nn.renewDelegationToken(delToken); } catch (RemoteException re) { throw re.unwrapRemoteException(InvalidToken.class, AccessControlException.class); } }
Example #29
Source File: DFSClient.java From RDFS with Apache License 2.0
/** Version-based save namespace */
private void versionBasedSaveNamespace(boolean force, boolean uncompressed)
    throws AccessControlException, IOException {
  if (namenodeVersion >= ClientProtocol.SAVENAMESPACE_FORCE) {
    namenode.saveNamespace(force, uncompressed);
  } else {
    namenode.saveNamespace();
  }
}
Example #30
Source File: TestDFSClientUpdateNameNodeSignature.java From RDFS with Apache License 2.0
/**
 * This function tests that the method signature fingerprint passed back
 * from the name-node with MetaInfo is correct.
 */
@SuppressWarnings("unchecked")
public void testNameNodeFingerprintSent() throws IOException {
  InetSocketAddress addr = cluster.getNameNode().getNameNodeDNAddress();
  DFSClient client = new DFSClient(addr, cluster.getNameNode().getConf());

  client.namenode.create("/testNameNodeFingerprintSent.txt",
      FsPermission.getDefault(), client.getClientName(), true,
      (short) 1, 65535L);

  Class<? extends VersionedProtocol> inter;
  try {
    inter = (Class<? extends VersionedProtocol>)
        Class.forName(ClientProtocol.class.getName());
  } catch (Exception e) {
    throw new IOException(e);
  }
  long serverVersion = ClientProtocol.versionID;
  int serverFpFromNn = ProtocolSignature.getFingerprint(
      ProtocolSignature.getProtocolSignature(0, serverVersion, inter)
          .getMethods());

  LocatedBlockWithMetaInfo loc = client.namenode.addBlockAndFetchMetaInfo(
      "/testNameNodeFingerprintSent.txt", client.getClientName(), null, 0L);

  int serverFp = loc.getMethodFingerPrint();
  TestCase.assertEquals(serverFpFromNn, serverFp);

  FileSystem fs = cluster.getFileSystem();
  Path f = new Path("/testNameNodeFingerprintSent1.txt");
  DataOutputStream a_out = fs.create(f);
  a_out.writeBytes("something");
  a_out.close();

  LocatedBlocksWithMetaInfo locs = client.namenode.openAndFetchMetaInfo(
      "/testNameNodeFingerprintSent.txt", 0L, 0L);
  TestCase.assertEquals(locs.getMethodFingerPrint(), serverFp);
}