org.apache.twill.common.Cancellable Java Examples

The following examples show how to use org.apache.twill.common.Cancellable. Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
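Before looking at the examples, note that Cancellable declares a single method, void cancel(): calling it undoes whatever registration produced it. The minimal sketch below shows the typical pattern used throughout Twill, where an add-style method returns a Cancellable that removes what was added (the ListenerRegistry class and its field names are illustrative, not part of Twill):

import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

import org.apache.twill.common.Cancellable;

public final class ListenerRegistry<T> {
  private final Set<T> listeners = new CopyOnWriteArraySet<>();

  // Adds a listener and returns a Cancellable that removes it again.
  public Cancellable add(final T listener) {
    listeners.add(listener);
    return new Cancellable() {
      @Override
      public void cancel() {
        listeners.remove(listener);
      }
    };
  }
}
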
Example #1
Source File: TephraZKClientService.java    From phoenix-tephra with Apache License 2.0
@Override
public Cancellable addConnectionWatcher(final Watcher watcher) {
  if (watcher == null) {
    return new Cancellable() {
      @Override
      public void cancel() {
        // No-op
      }
    };
  }

  // Invocation of connection watchers is already done inside the event thread,
  // hence no need to wrap the watcher again.
  connectionWatchers.add(watcher);
  return new Cancellable() {
    @Override
    public void cancel() {
      connectionWatchers.remove(watcher);
    }
  };
}
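The caller side of this pattern is symmetric: keep the returned Cancellable and invoke cancel() when the watcher is no longer needed. A hedged usage sketch (the zkClientService variable, LOG, and the watcher body are illustrative):

Cancellable watcherCancellable = zkClientService.addConnectionWatcher(new Watcher() {
  @Override
  public void process(WatchedEvent event) {
    LOG.info("ZooKeeper connection event: {}", event.getState());
  }
});

// ... later, when connection events are no longer of interest:
watcherCancellable.cancel();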
 
Example #2
Source File: DefaultZKClientService.java    From twill with Apache License 2.0
@Override
public Cancellable addConnectionWatcher(final Watcher watcher) {
  if (watcher == null) {
    return new Cancellable() {
      @Override
      public void cancel() {
        // No-op
      }
    };
  }

  // Invocation of connection watchers is already done inside the event thread,
  // hence no need to wrap the watcher again.
  connectionWatchers.add(watcher);
  return new Cancellable() {
    @Override
    public void cancel() {
      connectionWatchers.remove(watcher);
    }
  };
}
 
Example #3
Source File: SimpleKafkaPublisher.java    From twill with Apache License 2.0
/**
 * Starts the publisher. This method must be called before any other method, and is only to be called
 * by the KafkaClientService that owns this object.
 * @return A Cancellable for closing this publisher.
 */
Cancellable start() {
  ExecutorService listenerExecutor
    = Executors.newSingleThreadExecutor(Threads.createDaemonThreadFactory("kafka-publisher"));

  // Listen to changes in broker list
  final BrokerListChangeListener listener = new BrokerListChangeListener(listenerCancelled, producer,
                                                                         ack, compression);
  Cancellable cancelChangeListener = brokerService.addChangeListener(listener, listenerExecutor);

  // Invoke the change listener at least once. Since every call to the listener goes through the single-thread
  // executor, there is no race and the listener is guaranteed to see the latest change, either through this call
  // or from the BrokerService callback.
  Future<?> completion = listenerExecutor.submit(new Runnable() {
    @Override
    public void run() {
      listener.changed(brokerService);
    }
  });

  Futures.getUnchecked(completion);
  return new ProducerCancellable(listenerExecutor, listenerCancelled, cancelChangeListener, producer);
}
 
Example #4
Source File: DiscoveryServiceTestBase.java    From twill with Apache License 2.0
@Test
public void manySameDiscoverable() throws Exception {
  Map.Entry<DiscoveryService, DiscoveryServiceClient> entry = create();
  try {
    DiscoveryService discoveryService = entry.getKey();
    DiscoveryServiceClient discoveryServiceClient = entry.getValue();

    List<Cancellable> cancellables = Lists.newArrayList();

    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 1));
    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 2));
    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 3));
    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 4));
    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 5));

    ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover("manyDiscoverable");
    Assert.assertTrue(waitTillExpected(5, serviceDiscovered));

    for (int i = 0; i < 5; i++) {
      cancellables.get(i).cancel();
      Assert.assertTrue(waitTillExpected(4 - i, serviceDiscovered));
    }
  } finally {
    closeServices(entry);
  }
}
 
Example #5
Source File: DefaultServiceDiscovered.java    From twill with Apache License 2.0
@Override
public Cancellable watchChanges(ChangeListener listener, Executor executor) {
  ListenerCaller caller = new ListenerCaller(listener, executor);

  // Add the new listener with a write lock.
  Lock writeLock = callerLock.writeLock();
  writeLock.lock();
  try {
    listenerCallers.add(caller);
  } finally {
    writeLock.unlock();
  }

  // Invoke the listener for the first time.
  // A race can happen between this method and the setDiscoverables() method, but that is fine, as the contract of
  // adding a new listener is that onChange will be called at least once. The actual changes are already
  // reflected by the atomic reference "discoverables", hence the result is consistent.
  caller.invoke();
  return caller;
}
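A typical caller of watchChanges() iterates the ServiceDiscovered inside the listener and cancels the watch when updates are no longer needed. A hedged usage sketch (the service name, discoveryServiceClient variable, and logging are illustrative):

ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover("web-app");
Cancellable watchCancellable = serviceDiscovered.watchChanges(new ServiceDiscovered.ChangeListener() {
  @Override
  public void onChange(ServiceDiscovered discovered) {
    for (Discoverable discoverable : discovered) {
      LOG.info("Instance available at {}", discoverable.getSocketAddress());
    }
  }
}, Threads.SAME_THREAD_EXECUTOR);

// ... later, stop receiving change notifications:
watchCancellable.cancel();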
 
Example #6
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Wraps a given MessageCallback with an executor so that calls are executed in the given executor.
 * By running the calls through the executor, it also blocks and waits for each task to complete, so that
 * the poller thread is throttled by the rate at which the callback can process messages.
 */
private MessageCallback wrapCallback(final MessageCallback callback,
                                     final ExecutorService executor, final Cancellable cancellable) {
  final AtomicBoolean stopped = new AtomicBoolean();
  return new MessageCallback() {
    @Override
    public long onReceived(final Iterator<FetchedMessage> messages) {
      if (stopped.get()) {
        return -1L;
      }
      return Futures.getUnchecked(executor.submit(new Callable<Long>() {
        @Override
        public Long call() {
          if (stopped.get()) {
            return -1L;
          }
          return callback.onReceived(messages);
        }
      }));
    }

    @Override
    public void finished() {
      // Make sure finished only gets called once.
      if (!stopped.compareAndSet(false, true)) {
        return;
      }
      Futures.getUnchecked(executor.submit(new Runnable() {
        @Override
        public void run() {
          // When finished is called, also cancel the consumption from all polling threads.
          callback.finished();
          cancellable.cancel();
        }
      }));
    }
  };
}
 
Example #7
Source File: ZKBrokerService.java    From twill with Apache License 2.0
@Override
public Cancellable addChangeListener(BrokerChangeListener listener, Executor executor) {
  final ListenerExecutor listenerExecutor = new ListenerExecutor(listener, executor);
  listeners.add(listenerExecutor);

  return new Cancellable() {
    @Override
    public void cancel() {
      listeners.remove(listenerExecutor);
    }
  };
}
 
Example #8
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Called to stop all consumers created. This method should only be
 * called by the KafkaClientService that owns this consumer.
 */
void stop() {
  LOG.info("Stopping Kafka consumer");
  List<Cancellable> cancels = Lists.newLinkedList();
  consumerCancels.drainTo(cancels);
  for (Cancellable cancel : cancels) {
    cancel.cancel();
  }
  consumers.invalidateAll();
  LOG.info("Kafka Consumer stopped");
}
 
Example #9
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
SimpleKafkaConsumer(BrokerService brokerService) {
  this.brokerService = brokerService;
  this.consumers = CacheBuilder.newBuilder()
                               .expireAfterAccess(CONSUMER_EXPIRE_MINUTES, TimeUnit.MINUTES)
                               .removalListener(createRemovalListener())
                               .build(createConsumerLoader());
  this.consumerCancels = new LinkedBlockingQueue<Cancellable>();
}
 
Example #10
Source File: SimpleKafkaPublisher.java    From twill with Apache License 2.0
private ProducerCancellable(ExecutorService executor, AtomicBoolean listenerCancelled,
                            Cancellable cancelChangeListener,
                            AtomicReference<Producer<Integer, ByteBuffer>> producer) {
  this.executor = executor;
  this.listenerCancelled = listenerCancelled;
  this.cancelChangeListener = cancelChangeListener;
  this.producer = producer;
}
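This constructor only stores the handles created in start() (Example #3); the cancel() method of ProducerCancellable is not part of this listing. A plausible sketch of what such a composite Cancellable might do on cancel(), purely as an illustration and not the actual Twill source, assuming it simply undoes what start() set up:

@Override
public void cancel() {
  // Illustrative only -- not the actual implementation.
  // Stop reacting to further broker list changes.
  listenerCancelled.set(true);
  cancelChangeListener.cancel();

  // Close the producer, if one was created, and release the listener executor.
  Producer<Integer, ByteBuffer> kafkaProducer = producer.getAndSet(null);
  if (kafkaProducer != null) {
    kafkaProducer.close();
  }
  executor.shutdownNow();
}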
 
Example #11
Source File: KafkaTest.java    From twill with Apache License 2.0
@Test
public void testKafkaClient() throws Exception {
  String topic = "testClient";

  Thread t1 = createPublishThread(kafkaClient, topic, Compression.GZIP, "GZIP Testing message", 10);
  Thread t2 = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing message", 10);

  t1.start();
  t2.start();

  Thread t3 = createPublishThread(kafkaClient, topic, Compression.SNAPPY, "Snappy Testing message", 10);
  t2.join();
  t3.start();

  final CountDownLatch latch = new CountDownLatch(30);
  final CountDownLatch stopLatch = new CountDownLatch(1);
  Cancellable cancel = kafkaClient.getConsumer().prepare().add(topic, 0, 0).consume(new KafkaConsumer
    .MessageCallback() {
    @Override
    public long onReceived(Iterator<FetchedMessage> messages) {
      long nextOffset = -1;
      while (messages.hasNext()) {
        FetchedMessage message = messages.next();
        nextOffset = message.getNextOffset();
        LOG.info(Charsets.UTF_8.decode(message.getPayload()).toString());
        latch.countDown();
      }
      return nextOffset;
    }

    @Override
    public void finished() {
      stopLatch.countDown();
    }
  });

  Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
  cancel.cancel();
  Assert.assertTrue(stopLatch.await(1, TimeUnit.SECONDS));
}
 
Example #12
Source File: UpdateStatisticsTool.java    From phoenix with Apache License 2.0
private void configureJob() throws Exception {
    job = Job.getInstance(getConf(),
            "UpdateStatistics-" + tableName + "-" + snapshotName);
    PhoenixMapReduceUtil.setInput(job, NullDBWritable.class,
            snapshotName, tableName, restoreDir);

    PhoenixConfigurationUtil.setMRJobType(job.getConfiguration(), MRJobType.UPDATE_STATS);

    // DO NOT allow mapper splits using statistics since it may result in many smaller chunks
    PhoenixConfigurationUtil.setSplitByStats(job.getConfiguration(), false);

    job.setJarByClass(UpdateStatisticsTool.class);
    job.setMapperClass(TableSnapshotMapper.class);
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    job.setPriority(this.jobPriority);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), PhoenixConnection.class, Chronology.class,
            CharStream.class, TransactionSystemClient.class, TransactionNotInProgressException.class,
            ZKClient.class, DiscoveryServiceClient.class, ZKDiscoveryService.class,
            Cancellable.class, TTransportException.class, SpanReceiver.class, TransactionProcessor.class, Gauge.class, MetricRegistriesImpl.class);
    LOGGER.info("UpdateStatisticsTool running for: " + tableName
            + " on snapshot: " + snapshotName + " with restore dir: " + restoreDir);
}
 
Example #13
Source File: ZKKafkaClientService.java    From twill with Apache License 2.0
@Override
protected void shutDown() throws Exception {
  LOG.info("Stopping KafkaClientService");
  scheduler.shutdownNow();
  for (Cancellable cancellable : publishers.values()) {
    cancellable.cancel();
  }
  consumer.stop();

  brokerService.stopAndWait();
  LOG.info("KafkaClientService stopped");
}
 
Example #14
Source File: ElectionRegistry.java    From twill with Apache License 2.0
/**
 * Creates a new {@link LeaderElection} for the given arguments, starts the service, and adds it to the registry.
 * @param name Name for the election.
 * @param handler Callback to handle leader and follower transitions.
 * @return An object to cancel the election participation.
 */
public Cancellable register(String name, ElectionHandler handler) {
  LeaderElection election = new LeaderElection(zkClient, name, handler);
  election.start();
  registry.put(name, election);
  return new CancellableElection(name, election);
}
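Callers use the returned Cancellable to withdraw from the election. A hedged usage sketch (the electionRegistry variable, the election name, and the handler bodies are illustrative; ElectionHandler is the Twill callback with leader() and follower() methods):

Cancellable participation = electionRegistry.register("master", new ElectionHandler() {
  @Override
  public void leader() {
    LOG.info("Became leader");
  }

  @Override
  public void follower() {
    LOG.info("Became follower");
  }
});

// ... later, leave the election:
participation.cancel();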
 
Example #15
Source File: SocketServer.java    From twill with Apache License 2.0
@Override
public void initialize(TwillContext context) {
  super.initialize(context);
  running = true;
  try {
    serverSocket = new ServerSocket(0);
    LOG.info("Server started: " + serverSocket.getLocalSocketAddress() +
             ", id: " + context.getInstanceId() +
             ", count: " + context.getInstanceCount());

    // Announce with service names as specified in app arguments and runnable arguments
    final List<Cancellable> cancellables = new ArrayList<>();
    for (String[] args : new String[][] {context.getApplicationArguments(), context.getArguments()}) {
      if (args.length > 0) {
        cancellables.add(context.announce(args[0], serverSocket.getLocalPort()));
      }
    }
    canceller = new Cancellable() {
      @Override
      public void cancel() {
        for (Cancellable c : cancellables) {
          c.cancel();
        }
      }
    };
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
 
Example #16
Source File: Hadoop21YarnNMClient.java    From twill with Apache License 2.0
@Override
public Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
  try {
    Container container = containerInfo.getContainer();
    nmClient.startContainer(container, launchContext.<ContainerLaunchContext>getLaunchContext());
    return new ContainerTerminator(container, nmClient);
  } catch (Exception e) {
    LOG.error("Error in launching process", e);
    throw Throwables.propagate(e);
  }

}
 
Example #17
Source File: DiscoveryModules.java    From phoenix-tephra with Apache License 2.0
@Provides
@Singleton
private DiscoveryService providesDiscoveryService(final ZKClientService zkClient,
                                                  final ZKDiscoveryService delegate) {
  return new DiscoveryService() {
    @Override
    public Cancellable register(Discoverable discoverable) {
      if (!zkClient.isRunning()) {
        zkClient.startAndWait();
      }
      return delegate.register(discoverable);
    }
  };
}
 
Example #18
Source File: YarnTwillRunnerService.java    From twill with Apache License 2.0
@Override
public Cancellable setSecureStoreRenewer(SecureStoreRenewer renewer, long initialDelay,
                                         long delay, long retryDelay, TimeUnit unit) {
  synchronized (this) {
    if (secureStoreScheduler != null) {
      // Shut down and block until the scheduler is stopped
      stopScheduler(secureStoreScheduler);
    }
    secureStoreScheduler = Executors.newSingleThreadScheduledExecutor(
      Threads.createDaemonThreadFactory("secure-store-renewer"));
  }

  final ScheduledExecutorService currentScheduler = secureStoreScheduler;
  secureStoreScheduler.scheduleWithFixedDelay(
    createSecureStoreUpdateRunnable(currentScheduler, renewer,
                                    ImmutableMultimap.<String, RunId>of(), retryDelay, unit),
    initialDelay, delay, unit);
  return new Cancellable() {
    @Override
    public void cancel() {
      synchronized (YarnTwillRunnerService.this) {
        // Only cancel if the active scheduler is the same as the scheduler bound to this cancellable
        if (currentScheduler == secureStoreScheduler) {
          secureStoreScheduler.shutdown();
          secureStoreScheduler = null;
        }
      }
    }
  };
}
 
Example #19
Source File: YarnTwillRunnerService.java    From twill with Apache License 2.0
@Override
public Cancellable scheduleSecureStoreUpdate(final SecureStoreUpdater updater,
                                             long initialDelay, long delay, TimeUnit unit) {
  synchronized (this) {
    if (secureStoreScheduler == null) {
      secureStoreScheduler = Executors.newSingleThreadScheduledExecutor(
        Threads.createDaemonThreadFactory("secure-store-renewer"));
    }
  }

  final ScheduledFuture<?> future = secureStoreScheduler.scheduleWithFixedDelay(new Runnable() {
    @Override
    public void run() {
      // Collects all live applications
      Table<String, RunId, YarnTwillController> liveApps;
      synchronized (YarnTwillRunnerService.this) {
        liveApps = HashBasedTable.create(controllers);
      }

      // Update the secure store with merging = true
      renewSecureStore(liveApps, new SecureStoreRenewer() {
        @Override
        public void renew(String application, RunId runId, SecureStoreWriter secureStoreWriter) throws IOException {
          secureStoreWriter.write(updater.update(application, runId));
        }
      }, true);
    }
  }, initialDelay, delay, unit);

  return new Cancellable() {
    @Override
    public void cancel() {
      future.cancel(false);
    }
  };
}
 
Example #20
Source File: RunnableProcessLauncher.java    From twill with Apache License 2.0
@Override
protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
  Map<String, String> env = Maps.newHashMap(launchContext.getEnvironment());

  // Set extra environment variables
  env.put(EnvKeys.YARN_CONTAINER_ID, containerInfo.getId());
  env.put(EnvKeys.YARN_CONTAINER_HOST, containerInfo.getHost().getHostName());
  env.put(EnvKeys.YARN_CONTAINER_PORT, Integer.toString(containerInfo.getPort()));
  env.put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(containerInfo.getMemoryMB()));
  env.put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(containerInfo.getVirtualCores()));

  launchContext.setEnvironment(env);

  LOG.info("Launching in container {} at {}:{}, {}",
           containerInfo.getId(), containerInfo.getHost().getHostName(),
           containerInfo.getPort(), launchContext.getCommands());
  final Cancellable cancellable = nmClient.start(containerInfo, launchContext);
  launched = true;

  return new ProcessController<R>() {
    @Override
    public void close() throws Exception {
      // no-op
    }

    @Override
    public R getReport() {
      // No reporting support for runnable launch yet.
      return null;

    }

    @Override
    public void cancel() {
      cancellable.cancel();
    }
  };
}
 
Example #21
Source File: ZKDiscoveryServiceTest.java    From twill with Apache License 2.0
@Test
public void testSessionExpires() throws Exception {
  Map.Entry<DiscoveryService, DiscoveryServiceClient> entry = create();
  try {
    DiscoveryService discoveryService = entry.getKey();
    DiscoveryServiceClient discoveryServiceClient = entry.getValue();

    Cancellable cancellable = register(discoveryService, "test_expires", "localhost", 54321);

    ServiceDiscovered discoverables = discoveryServiceClient.discover("test_expires");

    // Discover that registered host:port.
    Assert.assertTrue(waitTillExpected(1, discoverables));

    KillZKSession.kill(zkClient.getZooKeeperSupplier().get(), zkServer.getConnectionStr(), 10000);

    // Register one more endpoint to make sure state has been reflected after reconnection
    Cancellable cancellable2 = register(discoveryService, "test_expires", "localhost", 54322);

    // Reconnection would trigger re-registration.
    Assert.assertTrue(waitTillExpected(2, discoverables));

    cancellable.cancel();
    cancellable2.cancel();

    // Verify that both are now gone.
    Assert.assertTrue(waitTillExpected(0, discoverables));
  } finally {
    closeServices(entry);
  }
}
 
Example #22
Source File: DiscoveryServiceTestBase.java    From twill with Apache License 2.0
@Test
public void simpleDiscoverable() throws Exception {
  final byte[] payload = "data".getBytes(StandardCharsets.UTF_8);
  Map.Entry<DiscoveryService, DiscoveryServiceClient> entry = create();
  try {
    DiscoveryService discoveryService = entry.getKey();
    DiscoveryServiceClient discoveryServiceClient = entry.getValue();

    // Register one service running on one host:port
    Cancellable cancellable = register(discoveryService, "foo", "localhost", 8090, payload);

    // Discover that registered host:port.
    ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover("foo");
    Assert.assertTrue(waitTillExpected(1, serviceDiscovered));

    Discoverable discoverable = new Discoverable("foo", new InetSocketAddress("localhost", 8090), payload);

    // Check it exists.
    Assert.assertTrue(serviceDiscovered.contains(discoverable));

    // Remove the service
    cancellable.cancel();

    // There should be no service.
    Assert.assertTrue(waitTillExpected(0, serviceDiscovered));

    Assert.assertFalse(serviceDiscovered.contains(discoverable));
  } finally {
    closeServices(entry);
  }
}
 
Example #23
Source File: BasicTwillContext.java    From twill with Apache License 2.0
@Override
public Cancellable electLeader(String name, ElectionHandler participantHandler) {
  return elections.register("/leader/" + name, participantHandler);
}
 
Example #24
Source File: ZKDiscoveryService.java    From twill with Apache License 2.0
/**
 * Registers a {@link Discoverable} in zookeeper.
 * <p>
 *   Registering a {@link Discoverable} will create a node &lt;base&gt;/&lt;service-name&gt;
 *   in zookeeper as an ephemeral node. If the node already exists (e.g. due to the timeout associated with
 *   ephemeral node creation), then a runtime exception is thrown to make sure that a service intending to register
 *   is not started without registering.
 *   When a runtime exception is thrown, the expectation is that the process being started will fail and will be
 *   started again by the monitoring service.
 * </p>
 * @param discoverable Information of the service provider that could be discovered.
 * @return An instance of {@link Cancellable}
 */
@Override
public Cancellable register(final Discoverable discoverable) {
  if (closed.get()) {
    throw new IllegalStateException("Cannot register discoverable through a closed ZKDiscoveryService");
  }

  final SettableFuture<String> future = SettableFuture.create();
  final DiscoveryCancellable cancellable = new DiscoveryCancellable(discoverable);

  // Create the zk ephemeral node.
  Futures.addCallback(doRegister(discoverable), new FutureCallback<String>() {
    @Override
    public void onSuccess(String result) {
      // Set the sequence node path on the cancellable for future cancellation.
      cancellable.setPath(result);
      lock.lock();
      try {
        if (!closed.get()) {
          discoverables.put(discoverable, cancellable);
        } else {
          cancellable.asyncCancel();
        }
      } finally {
        lock.unlock();
      }
      LOG.debug("Service registered: {} {}", discoverable, result);
      future.set(result);
    }

    @Override
    public void onFailure(Throwable t) {
      if (t instanceof KeeperException.NodeExistsException) {
        handleRegisterFailure(discoverable, future, this, t);
      } else {
        LOG.warn("Failed to register: {}", discoverable, t);
        future.setException(t);
      }
    }
  }, Threads.SAME_THREAD_EXECUTOR);

  Futures.getUnchecked(future);
  return cancellable;
}
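A typical registration from the caller's point of view, mirroring the Discoverable construction shown in Example #22, might look like this (the discoveryService variable, service name, port, and payload are illustrative):

byte[] payload = "metadata".getBytes(StandardCharsets.UTF_8);
Discoverable discoverable = new Discoverable("web-app", new InetSocketAddress("localhost", 8080), payload);
Cancellable registration = discoveryService.register(discoverable);

// ... later, e.g. when the service shuts down, remove the registration:
registration.cancel();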
 
Example #25
Source File: KafkaTest.java    From twill with Apache License 2.0
@Test
public void testKafkaClientSkipNext() throws Exception {
  String topic = "testClientSkipNext";
  // Publish 30 messages with indices matching their offsets within the range 0 - 29
  Thread t1 = createPublishThread(kafkaClient, topic, Compression.GZIP, "GZIP Testing message", 10);
  t1.start();
  t1.join();
  Thread t2 = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing message", 10, 10);
  t2.start();
  t2.join();
  Thread t3 = createPublishThread(kafkaClient, topic, Compression.SNAPPY, "Snappy Testing message", 10, 20);
  t3.start();
  t3.join();

  final CountDownLatch stopLatch = new CountDownLatch(1);
  final BlockingQueue<Long> offsetQueue = new LinkedBlockingQueue<>();
  Cancellable cancel = kafkaClient.getConsumer().prepare().add(topic, 0, 0).consume(
    new KafkaConsumer.MessageCallback() {
    @Override
    public long onReceived(Iterator<FetchedMessage> messages) {
      long nextOffset = -1L;
      if (messages.hasNext()) {
        FetchedMessage message = messages.next();
        nextOffset = message.getNextOffset() + 1;
        offsetQueue.offer(message.getOffset());
        LOG.info(Charsets.UTF_8.decode(message.getPayload()).toString());
        return nextOffset;
      }
      return nextOffset;
    }

    @Override
    public void finished() {
      stopLatch.countDown();
    }
  });
  // 15 messages should be in the queue since onReceived returns `message.getNextOffset() + 1` as the next offset to read, skipping every other message
  for (long i = 0; i < 30; i += 2) {
    Assert.assertEquals(i, (long) offsetQueue.poll(60, TimeUnit.SECONDS));
  }
  Assert.assertNull(offsetQueue.poll(2, TimeUnit.SECONDS));
  cancel.cancel();
  Assert.assertTrue(stopLatch.await(1, TimeUnit.SECONDS));
}
 
Example #26
Source File: ZKDiscoveryService.java    From twill with Apache License 2.0
/**
 * Creates a CacheLoader that creates a live Iterable for watching instance changes of a given service.
 */
private CacheLoader<String, ServiceDiscoveredCacheEntry> createServiceLoader() {
  return new CacheLoader<String, ServiceDiscoveredCacheEntry>() {
    @Override
    public ServiceDiscoveredCacheEntry load(String service) throws Exception {
      final DefaultServiceDiscovered serviceDiscovered = new DefaultServiceDiscovered(service);
      final String pathBase = "/" + service;

      // Watch for children changes under the service node
      Cancellable cancellable = ZKOperations.watchChildren(zkClient, pathBase, new ZKOperations.ChildrenCallback() {
        @Override
        public void updated(NodeChildren nodeChildren) {
          // Fetch data of all child nodes in parallel.
          List<String> children = nodeChildren.getChildren();
          List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.size());
          for (String child : children) {
            dataFutures.add(zkClient.getData(pathBase + "/" + child));
          }

          // Update the service map when all fetches are done.
          final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
          fetchFuture.addListener(new Runnable() {
            @Override
            public void run() {
              ImmutableSet.Builder<Discoverable> builder = ImmutableSet.builder();
              for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
                // For successful fetch, decode the content.
                if (nodeData != null) {
                  Discoverable discoverable = DiscoverableAdapter.decode(nodeData.getData());
                  if (discoverable != null) {
                    builder.add(discoverable);
                  }
                }
              }
              serviceDiscovered.setDiscoverables(builder.build());
            }
          }, Threads.SAME_THREAD_EXECUTOR);
        }
      });
      return new ServiceDiscoveredCacheEntry(serviceDiscovered, cancellable);
    }
  };
}
 
Example #27
Source File: KafkaTest.java    From twill with Apache License 2.0
@Test
public void testKafkaClientReconnect() throws Exception {
  String topic = "backoff";
  Properties kafkaServerConfig = generateKafkaConfig(zkServer.getConnectionStr() + "/backoff");
  EmbeddedKafkaServer server = new EmbeddedKafkaServer(kafkaServerConfig);

  ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr() + "/backoff").build();
  zkClient.startAndWait();
  try {
    zkClient.create("/", null, CreateMode.PERSISTENT).get();

    ZKKafkaClientService kafkaClient = new ZKKafkaClientService(zkClient);
    kafkaClient.startAndWait();

    try {
      server.startAndWait();
      try {
        // Publish a message
        createPublishThread(kafkaClient, topic, Compression.NONE, "First message", 1).start();

        // Create a consumer
        final BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        Cancellable cancel = kafkaClient.getConsumer().prepare().add(topic, 0, 0)
          .consume(new KafkaConsumer.MessageCallback() {
            @Override
            public long onReceived(Iterator<FetchedMessage> messages) {
              long nextOffset = -1L;
              while (messages.hasNext()) {
                FetchedMessage message = messages.next();
                nextOffset = message.getNextOffset();
                queue.offer(Charsets.UTF_8.decode(message.getPayload()).toString());
              }
              return nextOffset;
            }

            @Override
            public void finished() {
            }
          });

        // Wait for the first message
        Assert.assertEquals("0 First message", queue.poll(60, TimeUnit.SECONDS));

        // Shutdown the server
        server.stopAndWait();

        // Start the server again.
        // A new instance with the same config is needed since a Guava service cannot be restarted
        server = new EmbeddedKafkaServer(kafkaServerConfig);
        server.startAndWait();

        // Wait a little while to make sure the change is reflected in the broker service
        TimeUnit.SECONDS.sleep(3);

        // Publish another message
        createPublishThread(kafkaClient, topic, Compression.NONE, "Second message", 1).start();

        // Should be able to get the second message
        Assert.assertEquals("0 Second message", queue.poll(60, TimeUnit.SECONDS));

        cancel.cancel();
      } finally {
        kafkaClient.stopAndWait();
      }
    } finally {
      server.stopAndWait();
    }
  } finally {
    zkClient.stopAndWait();
  }
}
 
Example #28
Source File: ZKDiscoveryService.java    From twill with Apache License 2.0
private ServiceDiscoveredCacheEntry(ServiceDiscovered serviceDiscovered, Cancellable cancellable) {
  this.serviceDiscovered = serviceDiscovered;
  this.cancellable = cancellable;
}
 
Example #29
Source File: ZKDiscoveryService.java    From twill with Apache License 2.0
@Override
public Cancellable watchChanges(ChangeListener listener, Executor executor) {
  return serviceDiscovered.watchChanges(listener, executor);
}
 
Example #30
Source File: ZKDiscoveryServiceTest.java    From twill with Apache License 2.0
@Test (timeout = 30000)
public void testDoubleRegister() throws Exception {
  Map.Entry<DiscoveryService, DiscoveryServiceClient> entry = create();
  try {
    DiscoveryService discoveryService = entry.getKey();
    DiscoveryServiceClient discoveryServiceClient = entry.getValue();

    // Register on the same host and port; it shouldn't fail.
    Cancellable cancellable = register(discoveryService, "test_double_reg", "localhost", 54321);
    Cancellable cancellable2 = register(discoveryService, "test_double_reg", "localhost", 54321);

    ServiceDiscovered discoverables = discoveryServiceClient.discover("test_double_reg");

    Assert.assertTrue(waitTillExpected(1, discoverables));

    cancellable.cancel();
    cancellable2.cancel();

    // Register again with two different clients, but kill the session of the first one.
    final ZKClientService zkClient2 = ZKClientServices.delegate(
      ZKClients.retryOnFailure(
        ZKClients.reWatchOnExpire(
          ZKClientService.Builder.of(zkServer.getConnectionStr()).build()),
        RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
    zkClient2.startAndWait();

    try (ZKDiscoveryService discoveryService2 = new ZKDiscoveryService(zkClient2)) {
      cancellable2 = register(discoveryService2, "test_multi_client", "localhost", 54321);

      // Schedule a thread to shut down zkClient2.
      new Thread() {
        @Override
        public void run() {
          try {
            TimeUnit.SECONDS.sleep(2);
            zkClient2.stopAndWait();
          } catch (InterruptedException e) {
            LOG.error(e.getMessage(), e);
          }
        }
      }.start();

      // This call blocks until zkClient2 is shut down.
      cancellable = register(discoveryService, "test_multi_client", "localhost", 54321);
      cancellable.cancel();
    } finally {
      zkClient2.stopAndWait();
    }
  } finally {
    closeServices(entry);
  }
}