org.apache.hadoop.ipc.Client Java Examples
The following examples show how to use
org.apache.hadoop.ipc.Client.
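Most of the examples below share the same construction pattern: register ProtobufRpcEngine for a protocol class, look up the protocol version, and build an RPC proxy whose per-call timeout comes from Client.getRpcTimeout(conf). The fragment below is a minimal sketch of that pattern, not code from any of the projects listed here; MyProtocolPB and the address string are hypothetical placeholders, and the surrounding method is assumed to declare throws IOException.

// Minimal sketch of the shared proxy-construction pattern (MyProtocolPB is hypothetical).
Configuration conf = new Configuration();
RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine.class);
long version = RPC.getProtocolVersion(MyProtocolPB.class);
InetSocketAddress address = NetUtils.createSocketAddr("scm.example.com:9860");
MyProtocolPB proxy = RPC.getProxy(MyProtocolPB.class, version, address,
    UserGroupInformation.getCurrentUser(), conf,
    NetUtils.getDefaultSocketFactory(conf),
    Client.getRpcTimeout(conf));  // client-side RPC timeout read from the configuration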
Example #1
Source File: ContainerOperationClient.java From hadoop-ozone with Apache License 2.0
public static StorageContainerLocationProtocol newContainerRpcClient(
    ConfigurationSource configSource) throws IOException {

  Class<StorageContainerLocationProtocolPB> protocol =
      StorageContainerLocationProtocolPB.class;
  Configuration conf =
      LegacyHadoopConfigurationSource.asHadoopConfiguration(configSource);
  RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
  long version = RPC.getProtocolVersion(protocol);
  InetSocketAddress scmAddress = getScmAddressForClients(configSource);
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(conf);
  int rpcTimeOut = Client.getRpcTimeout(conf);

  StorageContainerLocationProtocolPB rpcProxy =
      RPC.getProxy(protocol, version, scmAddress, user, conf, socketFactory,
          rpcTimeOut);

  StorageContainerLocationProtocolClientSideTranslatorPB client =
      new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy);
  return TracingUtil.createProxy(
      client, StorageContainerLocationProtocol.class, configSource);
}
Example #2
Source File: DefaultCertificateClient.java From hadoop-ozone with Apache License 2.0
/**
 * Create a scm security client, used to get SCM signed certificate.
 *
 * @return {@link SCMSecurityProtocol}
 */
private static SCMSecurityProtocol getScmSecurityClient(
    OzoneConfiguration conf) throws IOException {
  RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
      ProtobufRpcEngine.class);
  long scmVersion =
      RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
  InetSocketAddress scmSecurityProtoAdd =
      HddsServerUtil.getScmAddressForSecurityProtocol(conf);
  SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
      new SCMSecurityProtocolClientSideTranslatorPB(
          RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
              scmSecurityProtoAdd, UserGroupInformation.getCurrentUser(),
              conf, NetUtils.getDefaultSocketFactory(conf),
              Client.getRpcTimeout(conf)));
  return scmSecurityClient;
}
Example #3
Source File: HddsServerUtil.java From hadoop-ozone with Apache License 2.0
/**
 * Create a scm security client.
 * @param conf - Ozone configuration.
 *
 * @return {@link SCMSecurityProtocol}
 * @throws IOException
 */
public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
    OzoneConfiguration conf) throws IOException {
  RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
      ProtobufRpcEngine.class);
  long scmVersion =
      RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
  InetSocketAddress address =
      getScmAddressForSecurityProtocol(conf);
  RetryPolicy retryPolicy =
      RetryPolicies.retryForeverWithFixedSleep(
          1000, TimeUnit.MILLISECONDS);
  return new SCMSecurityProtocolClientSideTranslatorPB(
      RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion,
          address, UserGroupInformation.getCurrentUser(),
          conf, NetUtils.getDefaultSocketFactory(conf),
          Client.getRpcTimeout(conf), retryPolicy).getProxy());
}
Example #4
Source File: MiniOzoneClusterImpl.java From hadoop-ozone with Apache License 2.0
/**
 * Returns an RPC proxy connected to this cluster's StorageContainerManager
 * for accessing container location information. Callers take ownership of
 * the proxy and must close it when done.
 *
 * @return RPC proxy for accessing container location information
 * @throws IOException if there is an I/O error
 */
@Override
public StorageContainerLocationProtocolClientSideTranslatorPB
    getStorageContainerLocationClient() throws IOException {
  long version = RPC.getProtocolVersion(
      StorageContainerLocationProtocolPB.class);
  InetSocketAddress address = scm.getClientRpcAddress();
  LOG.info(
      "Creating StorageContainerLocationProtocol RPC client with address {}",
      address);
  return new StorageContainerLocationProtocolClientSideTranslatorPB(
      RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
          address, UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf),
          Client.getRpcTimeout(conf)));
}
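The javadoc above notes that callers own the returned proxy and must close it. A minimal, hedged sketch of that cleanup follows; the cluster variable is assumed to be a MiniOzoneCluster from surrounding test code, and the translator's close() is expected to release the underlying RPC proxy (typically via RPC.stopProxy).

// Hedged sketch: try-with-resources closes the translator and releases the RPC connection.
try (StorageContainerLocationProtocolClientSideTranslatorPB locationClient =
         cluster.getStorageContainerLocationClient()) {
  // ... issue container location calls through locationClient ...
}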
Example #5
Source File: BaseFreonGenerator.java From hadoop-ozone with Apache License 2.0
public StorageContainerLocationProtocol createStorageContainerLocationClient(
    OzoneConfiguration ozoneConf) throws IOException {
  long version = RPC.getProtocolVersion(
      StorageContainerLocationProtocolPB.class);
  InetSocketAddress scmAddress =
      getScmAddressForClients(ozoneConf);

  RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
      ProtobufRpcEngine.class);
  StorageContainerLocationProtocol client =
      TracingUtil.createProxy(
          new StorageContainerLocationProtocolClientSideTranslatorPB(
              RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
                  scmAddress, UserGroupInformation.getCurrentUser(), ozoneConf,
                  NetUtils.getDefaultSocketFactory(ozoneConf),
                  Client.getRpcTimeout(ozoneConf))),
          StorageContainerLocationProtocol.class, ozoneConf);
  return client;
}
Example #6
Source File: OzoneManager.java From hadoop-ozone with Apache License 2.0
/**
 * Create a scm block client, used by putKey() and getKey().
 *
 * @return {@link ScmBlockLocationProtocol}
 * @throws IOException
 */
private static ScmBlockLocationProtocol getScmBlockClient(
    OzoneConfiguration conf) throws IOException {
  RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
      ProtobufRpcEngine.class);
  long scmVersion =
      RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
  InetSocketAddress scmBlockAddress =
      getScmAddressForBlockClients(conf);
  ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
      new ScmBlockLocationProtocolClientSideTranslatorPB(
          RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
              scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
              NetUtils.getDefaultSocketFactory(conf),
              Client.getRpcTimeout(conf)));
  return TracingUtil
      .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
          conf);
}
Example #7
Source File: OzoneManager.java From hadoop-ozone with Apache License 2.0
/**
 * Returns a scm container client.
 *
 * @return {@link StorageContainerLocationProtocol}
 * @throws IOException
 */
private static StorageContainerLocationProtocol getScmContainerClient(
    OzoneConfiguration conf) throws IOException {
  RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
      ProtobufRpcEngine.class);
  long scmVersion =
      RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
  InetSocketAddress scmAddr = getScmAddressForClients(conf);
  StorageContainerLocationProtocol scmContainerClient =
      TracingUtil.createProxy(
          new StorageContainerLocationProtocolClientSideTranslatorPB(
              RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
                  scmAddr, UserGroupInformation.getCurrentUser(), conf,
                  NetUtils.getDefaultSocketFactory(conf),
                  Client.getRpcTimeout(conf))),
          StorageContainerLocationProtocol.class, conf);
  return scmContainerClient;
}
Example #8
Source File: HddsServerUtil.java From hadoop-ozone with Apache License 2.0
/**
 * Create a scm security client for the given user.
 *
 * @return {@link SCMSecurityProtocol}
 * @throws IOException
 */
public static SCMSecurityProtocol getScmSecurityClient(
    OzoneConfiguration conf, UserGroupInformation ugi) throws IOException {
  RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
      ProtobufRpcEngine.class);
  long scmVersion =
      RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
  InetSocketAddress scmSecurityProtoAdd =
      getScmAddressForSecurityProtocol(conf);
  return new SCMSecurityProtocolClientSideTranslatorPB(
      RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
          scmSecurityProtoAdd, ugi, conf,
          NetUtils.getDefaultSocketFactory(conf),
          Client.getRpcTimeout(conf)));
}
Example #9
Source File: TestLeaseRecovery.java From RDFS with Apache License 2.0
/**
 * test the recoverBlock does not leak clients when creating
 * InterDatanodeProtocol RPC instances
 */
public void testForClientLeak() throws Exception {
  Client client = ClientAdapter.getClient(
      conf, NetUtils.getSocketFactory(conf, InterDatanodeProtocol.class));

  DistributedFileSystem fileSystem =
      (DistributedFileSystem) cluster.getFileSystem();
  int initialRefCount = ClientAdapter.getRefCount(client);
  String filename = "/file1";
  DFSOutputStream out = (DFSOutputStream)
      ((DistributedFileSystem) fileSystem).getClient().create(
          filename, FsPermission.getDefault(), true, (short) 5, 1024,
          new Progressable() {
            @Override
            public void progress() {
            }
          },
          64 * 1024);
  out.write(DFSTestUtil.generateSequentialBytes(0, 512));
  out.sync();

  DatanodeInfo[] dataNodeInfos = ((DFSOutputStream) out).getPipeline();

  // killing one DN in the pipe and doing a write triggers lease recovery
  // and will result in the refcount being adjusted; if there's a lease
  // in Datanode.recoverBlock(), this will trigger it
  cluster.stopDataNode(dataNodeInfos[0].getName());
  out.write(DFSTestUtil.generateSequentialBytes(0, 512));
  assertEquals(
      "Client refcount leak!",
      initialRefCount - 1, // -1 since we stop a DN above
      ClientAdapter.getRefCount(client));

  out.close();
}
Example #10
Source File: LeaseRenewal.java From RDFS with Apache License 2.0
/**
 * Computes the renewal period for the lease.
 *
 * @return the renewal period in ms
 */
private long computeRenewalPeriod() {
  long hardLeaseLimit = conf.getLong(
      FSConstants.DFS_HARD_LEASE_KEY, FSConstants.LEASE_HARDLIMIT_PERIOD);
  long softLeaseLimit = conf.getLong(
      FSConstants.DFS_SOFT_LEASE_KEY, FSConstants.LEASE_SOFTLIMIT_PERIOD);
  long renewal = Math.min(hardLeaseLimit, softLeaseLimit) / 2;
  long hdfsTimeout = Client.getTimeout(conf);
  if (hdfsTimeout > 0) {
    renewal = Math.min(renewal, hdfsTimeout / 2);
  }
  return renewal;
}
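Unlike the Ozone examples, which read a per-call timeout with Client.getRpcTimeout(conf), this older code uses Client.getTimeout(conf), which may return -1 when no overall IPC timeout is configured; hence the guard before dividing. The same guard is shown in isolation below; the base period is an arbitrary illustrative value, not taken from the source.

long renewal = 30 * 1000L;                   // illustrative base period in ms
long hdfsTimeout = Client.getTimeout(conf);  // may be -1 if no IPC timeout is configured
if (hdfsTimeout > 0) {
  renewal = Math.min(renewal, hdfsTimeout / 2);
}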
Example #11
Source File: RetryInvocationHandler.java From hadoop with Apache License 2.0
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
  if (policy == null) {
    policy = defaultPolicy;
  }

  // The number of times this method invocation has been failed over.
  int invocationFailoverCount = 0;
  final boolean isRpc = isRpcInvocation(currentProxy.proxy);
  final int callId = isRpc ? Client.nextCallId() : RpcConstants.INVALID_CALL_ID;
  int retries = 0;
  while (true) {
    // The number of times this invocation handler has ever been failed over,
    // before this method invocation attempt. Used to prevent concurrent
    // failed method invocations from triggering multiple failover attempts.
    long invocationAttemptFailoverCount;
    synchronized (proxyProvider) {
      invocationAttemptFailoverCount = proxyProviderFailoverCount;
    }

    if (isRpc) {
      Client.setCallIdAndRetryCount(callId, retries);
    }
    try {
      Object ret = invokeMethod(method, args);
      hasMadeASuccessfulCall = true;
      return ret;
    } catch (Exception e) {
      boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
          .getMethod(method.getName(), method.getParameterTypes())
          .isAnnotationPresent(Idempotent.class);
      if (!isIdempotentOrAtMostOnce) {
        isIdempotentOrAtMostOnce = proxyProvider.getInterface()
            .getMethod(method.getName(), method.getParameterTypes())
            .isAnnotationPresent(AtMostOnce.class);
      }
      RetryAction action = policy.shouldRetry(e, retries++,
          invocationFailoverCount, isIdempotentOrAtMostOnce);
      if (action.action == RetryAction.RetryDecision.FAIL) {
        if (action.reason != null) {
          LOG.warn("Exception while invoking " + currentProxy.proxy.getClass()
              + "." + method.getName() + " over " + currentProxy.proxyInfo
              + ". Not retrying because " + action.reason, e);
        }
        throw e;
      } else { // retry or failover
        // avoid logging the failover if this is the first call on this
        // proxy object, and we successfully achieve the failover without
        // any flip-flopping
        boolean worthLogging =
            !(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
        worthLogging |= LOG.isDebugEnabled();
        if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY &&
            worthLogging) {
          String msg = "Exception while invoking " + method.getName()
              + " of class " + currentProxy.proxy.getClass().getSimpleName()
              + " over " + currentProxy.proxyInfo;

          if (invocationFailoverCount > 0) {
            msg += " after " + invocationFailoverCount + " fail over attempts";
          }
          msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
          LOG.info(msg, e);
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Exception while invoking " + method.getName()
                + " of class " + currentProxy.proxy.getClass().getSimpleName()
                + " over " + currentProxy.proxyInfo + ". Retrying "
                + formatSleepMessage(action.delayMillis), e);
          }
        }

        if (action.delayMillis > 0) {
          Thread.sleep(action.delayMillis);
        }

        if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
          // Make sure that concurrent failed method invocations only cause a
          // single actual fail over.
          synchronized (proxyProvider) {
            if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
              proxyProvider.performFailover(currentProxy.proxy);
              proxyProviderFailoverCount++;
            } else {
              LOG.warn("A failover has occurred since the start of this method"
                  + " invocation attempt.");
            }
            currentProxy = proxyProvider.getProxy();
          }
          invocationFailoverCount++;
        }
      }
    }
  }
}
Example #12
Source File: DFSClient.java From RDFS with Apache License 2.0
/**
 * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode.
 * Exactly one of nameNodeAddr or rpcNamenode must be null.
 */
DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
    throws IOException {
  this.conf = conf;
  this.stats = stats;
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.timeoutValue = this.socketTimeout;
  this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.datanodeWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  // dfs.write.packet.size is an internal config variable
  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);
  this.minReadSpeedBps = conf.getLong("dfs.min.read.speed.bps", -1);
  this.maxBlockAcquireFailures = getMaxBlockAcquireFailures(conf);
  this.localHost = InetAddress.getLocalHost();

  // fetch network location of localhost
  this.pseuDatanodeInfoForLocalhost = new DatanodeInfo(new DatanodeID(
      this.localHost.getHostAddress()));
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl",
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  ArrayList<String> tempList = new ArrayList<String>();
  tempList.add(this.localHost.getHostName());
  List<String> retList = dnsToSwitchMapping.resolve(tempList);
  if (retList != null && retList.size() > 0) {
    localhostNetworkLocation = retList.get(0);
    this.pseuDatanodeInfoForLocalhost.setNetworkLocation(
        localhostNetworkLocation);
  }

  // The hdfsTimeout is currently the same as the ipc timeout
  this.hdfsTimeout = Client.getTimeout(conf);
  this.closeFileTimeout = conf.getLong("dfs.client.closefile.timeout",
      this.hdfsTimeout);
  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    throw (IOException) (new IOException().initCause(e));
  }

  String taskId = conf.get("mapred.task.id");
  if (taskId != null) {
    this.clientName = "DFSClient_" + taskId + "_" + r.nextInt() + "_"
        + Thread.currentThread().getId();
  } else {
    this.clientName = "DFSClient_" + r.nextInt();
  }
  defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  defaultReplication = (short) conf.getInt("dfs.replication", 3);

  if (nameNodeAddr != null && rpcNamenode == null) {
    this.nameNodeAddr = nameNodeAddr;
    getNameNode();
  } else if (nameNodeAddr == null && rpcNamenode != null) {
    // This case is used for testing.
    if (rpcNamenode instanceof NameNode) {
      this.namenodeProtocolProxy = createRPCNamenode(
          ((NameNode) rpcNamenode).getNameNodeAddress(), conf, ugi);
    }
    this.namenode = this.rpcNamenode = rpcNamenode;
  } else {
    throw new IllegalArgumentException(
        "Expecting exactly one of nameNodeAddr and rpcNamenode being null: "
            + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode);
  }

  // read directly from the block file if configured.
  this.shortCircuitLocalReads = conf.getBoolean("dfs.read.shortcircuit", false);
  if (this.shortCircuitLocalReads) {
    LOG.debug("Configured to shortcircuit reads to " + localHost);
  }
  this.leasechecker = new LeaseChecker(this.clientName, this.conf);

  // by default, if the ipTosValue is less than 0 (for example -1),
  // we will not set it in the socket.
  this.ipTosValue = conf.getInt("dfs.client.tos.value",
      NetUtils.NOT_SET_IP_TOS);
  if (this.ipTosValue > NetUtils.IP_TOS_MAX_VALUE) {
    LOG.warn("dfs.client.tos.value " + ipTosValue
        + " exceeds the max allowed value " + NetUtils.IP_TOS_MAX_VALUE
        + ", will not take affect");
    this.ipTosValue = NetUtils.NOT_SET_IP_TOS;
  }
}