Java Code Examples for io.grpc.ManagedChannel#newCall()

The following examples show how to use io.grpc.ManagedChannel#newCall(). They are drawn from open-source projects; the project and source file for each snippet are noted in the header above it.
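Before the full examples, here is a minimal sketch of the bare ClientCall lifecycle that newCall() returns, which every snippet below builds on. It is not taken from the projects above: the host, port, and echoMethod MethodDescriptor are hypothetical placeholders (real code usually obtains the descriptor from generated stub classes), so treat it as an illustration of the call sequence rather than a drop-in snippet.

// Minimal sketch of the raw ClientCall lifecycle. "echoMethod" is a hypothetical
// MethodDescriptor<String, String>; real code normally gets one from generated stubs.
ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 50051)
    .usePlaintext()
    .build();
ClientCall<String, String> call = channel.newCall(echoMethod, CallOptions.DEFAULT);
call.start(new ClientCall.Listener<String>() {
  @Override
  public void onMessage(String message) {
    System.out.println("Response: " + message);
  }

  @Override
  public void onClose(Status status, Metadata trailers) {
    System.out.println("Closed with " + status);
  }
}, new Metadata());
call.sendMessage("hello");  // send the single request
call.request(1);            // manual flow control: ask for at most one response message
call.halfClose();           // signal that no more requests will be sent
channel.shutdown();         // orderly shutdown; in-flight calls are allowed to finish

Higher-level helpers such as ClientCalls (used in the benchmark examples) and the generated stubs wrap exactly this sequence; calling newCall() directly is mainly useful when you need manual flow control or custom listeners, as the tests below demonstrate.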
Example 1
Source File: AbstractBenchmark.java    From grpc-nebula-java with Apache License 2.0
/**
 * Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
 * {@code done.get()} is true. Each completed call will increment the counter by the specified
 * delta which benchmarks can use to measure messages per second or bandwidth.
 */
protected CountDownLatch startStreamingCalls(int callsPerChannel, final AtomicLong counter,
    final AtomicBoolean record, final AtomicBoolean done, final long counterDelta) {
  final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
  for (final ManagedChannel channel : channels) {
    for (int i = 0; i < callsPerChannel; i++) {
      final ClientCall<ByteBuf, ByteBuf> streamingCall =
          channel.newCall(pingPongMethod, CALL_OPTIONS);
      final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
          new AtomicReference<StreamObserver<ByteBuf>>();
      final AtomicBoolean ignoreMessages = new AtomicBoolean();
      StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
          streamingCall,
          new StreamObserver<ByteBuf>() {
            @Override
            public void onNext(ByteBuf value) {
              if (done.get()) {
                if (!ignoreMessages.getAndSet(true)) {
                  requestObserverRef.get().onCompleted();
                }
                return;
              }
              requestObserverRef.get().onNext(request.slice());
              if (record.get()) {
                counter.addAndGet(counterDelta);
              }
              // request is called automatically because the observer implicitly has auto
              // inbound flow control
            }

            @Override
            public void onError(Throwable t) {
              logger.log(Level.WARNING, "call error", t);
              latch.countDown();
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }
          });
      requestObserverRef.set(requestObserver);
      requestObserver.onNext(request.slice());
      requestObserver.onNext(request.slice());
    }
  }
  return latch;
}
 
Example 2
Source File: AbstractBenchmark.java    From grpc-nebula-java with Apache License 2.0
/**
 * Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
 * {@code done.get()} is true. Each completed call will increment the counter by the specified
 * delta which benchmarks can use to measure messages per second or bandwidth.
 */
protected CountDownLatch startFlowControlledStreamingCalls(int callsPerChannel,
    final AtomicLong counter, final AtomicBoolean record, final AtomicBoolean done,
    final long counterDelta) {
  final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
  for (final ManagedChannel channel : channels) {
    for (int i = 0; i < callsPerChannel; i++) {
      final ClientCall<ByteBuf, ByteBuf> streamingCall =
          channel.newCall(flowControlledStreaming, CALL_OPTIONS);
      final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
          new AtomicReference<StreamObserver<ByteBuf>>();
      final AtomicBoolean ignoreMessages = new AtomicBoolean();
      StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
          streamingCall,
          new StreamObserver<ByteBuf>() {
            @Override
            public void onNext(ByteBuf value) {
              StreamObserver<ByteBuf> obs = requestObserverRef.get();
              if (done.get()) {
                if (!ignoreMessages.getAndSet(true)) {
                  obs.onCompleted();
                }
                return;
              }
              if (record.get()) {
                counter.addAndGet(counterDelta);
              }
              // request is called automatically because the observer implicitly has auto
              // inbound flow control
            }

            @Override
            public void onError(Throwable t) {
              logger.log(Level.WARNING, "call error", t);
              latch.countDown();
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }
          });
      requestObserverRef.set(requestObserver);

      // Add some outstanding requests to ensure the server is filling the connection
      streamingCall.request(5);
      requestObserver.onNext(request.slice());
    }
  }
  return latch;
}
 
Example 3
Source File: ManagedChannelImplIdlenessTest.java    From grpc-nebula-java with Apache License 2.0
@Test
public void oobTransportDoesNotAffectIdleness() {
  // Start a call, which goes to delayed transport
  ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
  call.start(mockCallListener, new Metadata());

  // Verify that we have exited the idle mode
  ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(null);
  verify(mockLoadBalancerFactory).newLoadBalancer(helperCaptor.capture());
  Helper helper = helperCaptor.getValue();

  // Fail the RPC
  SubchannelPicker failingPicker = mock(SubchannelPicker.class);
  when(failingPicker.pickSubchannel(any(PickSubchannelArgs.class)))
      .thenReturn(PickResult.withError(Status.UNAVAILABLE));
  helper.updateBalancingState(TRANSIENT_FAILURE, failingPicker);
  executor.runDueTasks();
  verify(mockCallListener).onClose(same(Status.UNAVAILABLE), any(Metadata.class));

  // ... so that the channel resets its in-use state
  assertFalse(channel.inUseStateAggregator.isInUse());

  // Now make an RPC on an OOB channel
  ManagedChannel oob = helper.createOobChannel(servers.get(0), "oobauthority");
  verify(mockTransportFactory, never())
      .newClientTransport(
          any(SocketAddress.class),
          eq(new ClientTransportFactory.ClientTransportOptions()
            .setAuthority("oobauthority")
            .setUserAgent(USER_AGENT)));
  ClientCall<String, Integer> oobCall = oob.newCall(method, CallOptions.DEFAULT);
  oobCall.start(mockCallListener2, new Metadata());
  verify(mockTransportFactory)
      .newClientTransport(
          any(SocketAddress.class),
          eq(new ClientTransportFactory.ClientTransportOptions()
            .setAuthority("oobauthority")
            .setUserAgent(USER_AGENT)));
  MockClientTransportInfo oobTransportInfo = newTransports.poll();
  assertEquals(0, newTransports.size());
  // The OOB transport reports in-use state
  oobTransportInfo.listener.transportInUse(true);

  // But it won't stop the channel from going idle
  verify(mockLoadBalancer, never()).shutdown();
  timer.forwardTime(IDLE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
  verify(mockLoadBalancer).shutdown();
}
 
Example 4
Source File: ServerCallsTest.java    From grpc-nebula-java with Apache License 2.0
@Test
public void inprocessTransportManualFlow() throws Exception {
  final Semaphore semaphore = new Semaphore(1);
  ServerServiceDefinition service = ServerServiceDefinition.builder(
      new ServiceDescriptor("some", STREAMING_METHOD))
      .addMethod(STREAMING_METHOD, ServerCalls.asyncBidiStreamingCall(
          new ServerCalls.BidiStreamingMethod<Integer, Integer>() {
            int iteration;

            @Override
            public StreamObserver<Integer> invoke(StreamObserver<Integer> responseObserver) {
              final ServerCallStreamObserver<Integer> serverCallObserver =
                  (ServerCallStreamObserver<Integer>) responseObserver;
              serverCallObserver.setOnReadyHandler(new Runnable() {
                @Override
                public void run() {
                  while (serverCallObserver.isReady()) {
                    serverCallObserver.onNext(iteration);
                  }
                  iteration++;
                  semaphore.release();
                }
              });
              return new ServerCalls.NoopStreamObserver<Integer>() {
                @Override
                public void onCompleted() {
                  serverCallObserver.onCompleted();
                }
              };
            }
          }))
      .build();
  long tag = System.nanoTime();
  InProcessServerBuilder.forName("go-with-the-flow" + tag).addService(service).build().start();
  ManagedChannel channel = InProcessChannelBuilder.forName("go-with-the-flow" + tag).build();
  final ClientCall<Integer, Integer> clientCall = channel.newCall(STREAMING_METHOD,
      CallOptions.DEFAULT);
  final CountDownLatch latch = new CountDownLatch(1);
  final int[] receivedMessages = new int[6];
  clientCall.start(new ClientCall.Listener<Integer>() {
    int index;

    @Override
    public void onMessage(Integer message) {
      receivedMessages[index++] = message;
    }

    @Override
    public void onClose(Status status, Metadata trailers) {
      latch.countDown();
    }
  }, new Metadata());
  semaphore.acquire();
  clientCall.request(1);
  semaphore.acquire();
  clientCall.request(2);
  semaphore.acquire();
  clientCall.request(3);
  clientCall.halfClose();
  latch.await(5, TimeUnit.SECONDS);
  // Verify that the number of messages produced in each onReady handler call matches the
  // number requested by the client.
  assertArrayEquals(new int[]{0, 1, 1, 2, 2, 2}, receivedMessages);
}
 
Example 5
Source File: XdsTestClient.java    From grpc-java with Apache License 2.0
private void runQps() throws InterruptedException, ExecutionException {
  final SettableFuture<Void> failure = SettableFuture.create();
  final class PeriodicRpc implements Runnable {

    @Override
    public void run() {
      final long requestId;
      final Set<XdsStatsWatcher> savedWatchers = new HashSet<>();
      synchronized (lock) {
        currentRequestId += 1;
        requestId = currentRequestId;
        savedWatchers.addAll(watchers);
      }

      SimpleRequest request = SimpleRequest.newBuilder().setFillServerId(true).build();
      ManagedChannel channel = channels.get((int) (requestId % channels.size()));
      final ClientCall<SimpleRequest, SimpleResponse> call =
          channel.newCall(
              TestServiceGrpc.getUnaryCallMethod(),
              CallOptions.DEFAULT.withDeadlineAfter(rpcTimeoutSec, TimeUnit.SECONDS));
      call.start(
          new ClientCall.Listener<SimpleResponse>() {
            private String hostname;

            @Override
            public void onMessage(SimpleResponse response) {
              hostname = response.getHostname();
              // TODO(ericgribkoff) Currently some test environments cannot access the stats RPC
              // service and rely on parsing stdout.
              if (printResponse) {
                System.out.println(
                    "Greeting: Hello world, this is "
                        + hostname
                        + ", from "
                        + call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR));
              }
            }

            @Override
            public void onClose(Status status, Metadata trailers) {
              if (printResponse && !status.isOk()) {
                logger.log(Level.WARNING, "Greeting RPC failed with status {0}", status);
              }
              for (XdsStatsWatcher watcher : savedWatchers) {
                watcher.rpcCompleted(requestId, hostname);
              }
            }
          },
          new Metadata());

      call.sendMessage(request);
      call.request(1);
      call.halfClose();
    }
  }

  long nanosPerQuery = TimeUnit.SECONDS.toNanos(1) / qps;
  ListenableScheduledFuture<?> future =
      exec.scheduleAtFixedRate(new PeriodicRpc(), 0, nanosPerQuery, TimeUnit.NANOSECONDS);

  Futures.addCallback(
      future,
      new FutureCallback<Object>() {

        @Override
        public void onFailure(Throwable t) {
          failure.setException(t);
        }

        @Override
        public void onSuccess(Object o) {}
      },
      MoreExecutors.directExecutor());

  failure.get();
}
 
Example 6
Source File: AbstractBenchmark.java    From grpc-java with Apache License 2.0
/**
 * Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
 * {@code done.get()} is true. Each completed call will increment the counter by the specified
 * delta which benchmarks can use to measure messages per second or bandwidth.
 */
protected CountDownLatch startStreamingCalls(int callsPerChannel, final AtomicLong counter,
    final AtomicBoolean record, final AtomicBoolean done, final long counterDelta) {
  final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
  for (final ManagedChannel channel : channels) {
    for (int i = 0; i < callsPerChannel; i++) {
      final ClientCall<ByteBuf, ByteBuf> streamingCall =
          channel.newCall(pingPongMethod, CALL_OPTIONS);
      final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
          new AtomicReference<>();
      final AtomicBoolean ignoreMessages = new AtomicBoolean();
      StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
          streamingCall,
          new StreamObserver<ByteBuf>() {
            @Override
            public void onNext(ByteBuf value) {
              if (done.get()) {
                if (!ignoreMessages.getAndSet(true)) {
                  requestObserverRef.get().onCompleted();
                }
                return;
              }
              requestObserverRef.get().onNext(request.slice());
              if (record.get()) {
                counter.addAndGet(counterDelta);
              }
              // request is called automatically because the observer implicitly has auto
              // inbound flow control
            }

            @Override
            public void onError(Throwable t) {
              logger.log(Level.WARNING, "call error", t);
              latch.countDown();
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }
          });
      requestObserverRef.set(requestObserver);
      requestObserver.onNext(request.slice());
      requestObserver.onNext(request.slice());
    }
  }
  return latch;
}
 
Example 7
Source File: AbstractBenchmark.java    From grpc-java with Apache License 2.0
/**
 * Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
 * {@code done.get()} is true. Each completed call will increment the counter by the specified
 * delta which benchmarks can use to measure messages per second or bandwidth.
 */
protected CountDownLatch startFlowControlledStreamingCalls(int callsPerChannel,
    final AtomicLong counter, final AtomicBoolean record, final AtomicBoolean done,
    final long counterDelta) {
  final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
  for (final ManagedChannel channel : channels) {
    for (int i = 0; i < callsPerChannel; i++) {
      final ClientCall<ByteBuf, ByteBuf> streamingCall =
          channel.newCall(flowControlledStreaming, CALL_OPTIONS);
      final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
          new AtomicReference<>();
      final AtomicBoolean ignoreMessages = new AtomicBoolean();
      StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
          streamingCall,
          new StreamObserver<ByteBuf>() {
            @Override
            public void onNext(ByteBuf value) {
              StreamObserver<ByteBuf> obs = requestObserverRef.get();
              if (done.get()) {
                if (!ignoreMessages.getAndSet(true)) {
                  obs.onCompleted();
                }
                return;
              }
              if (record.get()) {
                counter.addAndGet(counterDelta);
              }
              // request is called automatically because the observer implicitly has auto
              // inbound flow control
            }

            @Override
            public void onError(Throwable t) {
              logger.log(Level.WARNING, "call error", t);
              latch.countDown();
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }
          });
      requestObserverRef.set(requestObserver);

      // Add some outstanding requests to ensure the server is filling the connection
      streamingCall.request(5);
      requestObserver.onNext(request.slice());
    }
  }
  return latch;
}
 
Example 8
Source File: ManagedChannelImplIdlenessTest.java    From grpc-java with Apache License 2.0
@Test
public void oobTransportDoesNotAffectIdleness() {
  // Start a call, which goes to delayed transport
  ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
  call.start(mockCallListener, new Metadata());

  // Verify that we have exited the idle mode
  ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(null);
  verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
  Helper helper = helperCaptor.getValue();

  // Fail the RPC
  SubchannelPicker failingPicker = mock(SubchannelPicker.class);
  when(failingPicker.pickSubchannel(any(PickSubchannelArgs.class)))
      .thenReturn(PickResult.withError(Status.UNAVAILABLE));
  updateBalancingStateSafely(helper, TRANSIENT_FAILURE, failingPicker);
  executor.runDueTasks();
  verify(mockCallListener).onClose(same(Status.UNAVAILABLE), any(Metadata.class));

  // ... so that the channel resets its in-use state
  assertFalse(channel.inUseStateAggregator.isInUse());

  // Now make an RPC on an OOB channel
  ManagedChannel oob = helper.createOobChannel(servers.get(0), "oobauthority");
  verify(mockTransportFactory, never())
      .newClientTransport(
          any(SocketAddress.class),
          eq(new ClientTransportFactory.ClientTransportOptions()
            .setAuthority("oobauthority")
            .setUserAgent(USER_AGENT)),
          any(ChannelLogger.class));
  ClientCall<String, Integer> oobCall = oob.newCall(method, CallOptions.DEFAULT);
  oobCall.start(mockCallListener2, new Metadata());
  verify(mockTransportFactory)
      .newClientTransport(
          any(SocketAddress.class),
          eq(new ClientTransportFactory.ClientTransportOptions()
            .setAuthority("oobauthority")
            .setUserAgent(USER_AGENT)),
          any(ChannelLogger.class));
  MockClientTransportInfo oobTransportInfo = newTransports.poll();
  assertEquals(0, newTransports.size());
  // The OOB transport reports in-use state
  oobTransportInfo.listener.transportInUse(true);

  // But it won't stop the channel from going idle
  verify(mockLoadBalancer, never()).shutdown();
  timer.forwardTime(IDLE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
  verify(mockLoadBalancer).shutdown();
}
 
Example 9
Source File: ServerCallsTest.java    From grpc-java with Apache License 2.0
@Test
public void inprocessTransportManualFlow() throws Exception {
  final Semaphore semaphore = new Semaphore(1);
  ServerServiceDefinition service = ServerServiceDefinition.builder(
      new ServiceDescriptor("some", STREAMING_METHOD))
      .addMethod(STREAMING_METHOD, ServerCalls.asyncBidiStreamingCall(
          new ServerCalls.BidiStreamingMethod<Integer, Integer>() {
            int iteration;

            @Override
            public StreamObserver<Integer> invoke(StreamObserver<Integer> responseObserver) {
              final ServerCallStreamObserver<Integer> serverCallObserver =
                  (ServerCallStreamObserver<Integer>) responseObserver;
              serverCallObserver.setOnReadyHandler(new Runnable() {
                @Override
                public void run() {
                  while (serverCallObserver.isReady()) {
                    serverCallObserver.onNext(iteration);
                  }
                  iteration++;
                  semaphore.release();
                }
              });
              return new ServerCalls.NoopStreamObserver<Integer>() {
                @Override
                public void onCompleted() {
                  serverCallObserver.onCompleted();
                }
              };
            }
          }))
      .build();
  long tag = System.nanoTime();
  InProcessServerBuilder.forName("go-with-the-flow" + tag).addService(service).build().start();
  ManagedChannel channel = InProcessChannelBuilder.forName("go-with-the-flow" + tag).build();
  final ClientCall<Integer, Integer> clientCall = channel.newCall(STREAMING_METHOD,
      CallOptions.DEFAULT);
  final CountDownLatch latch = new CountDownLatch(1);
  final int[] receivedMessages = new int[6];
  clientCall.start(new ClientCall.Listener<Integer>() {
    int index;

    @Override
    public void onMessage(Integer message) {
      receivedMessages[index++] = message;
    }

    @Override
    public void onClose(Status status, Metadata trailers) {
      latch.countDown();
    }
  }, new Metadata());
  semaphore.acquire();
  clientCall.request(1);
  semaphore.acquire();
  clientCall.request(2);
  semaphore.acquire();
  clientCall.request(3);
  clientCall.halfClose();
  assertThat(latch.await(5, TimeUnit.SECONDS)).isTrue();
  // Verify that the number of messages produced in each onReady handler call matches the
  // number requested by the client.
  assertArrayEquals(new int[]{0, 1, 1, 2, 2, 2}, receivedMessages);
}