Java Code Examples for com.google.common.util.concurrent.Uninterruptibles#sleepUninterruptibly()

The following examples show how to use com.google.common.util.concurrent.Uninterruptibles#sleepUninterruptibly(). Each example is taken from an open-source project; the source file and license are noted above the code.
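All of the examples share the same idea: pause the current thread for a fixed duration without having to handle InterruptedException, typically inside a retry or polling loop. The sketch below illustrates that pattern; RemoteService and fetchWithRetry are hypothetical names used only to keep the sketch self-contained, not part of Guava. If the thread is interrupted while sleeping, sleepUninterruptibly finishes the full sleep and then restores the thread's interrupt status.

import com.google.common.util.concurrent.Uninterruptibles;

import java.util.concurrent.TimeUnit;

public class RetryExample {

  /** Hypothetical dependency, used only to make the sketch self-contained. */
  interface RemoteService {
    String fetch() throws Exception;
  }

  /**
   * Calls the service until it answers or maxAttempts is exhausted, sleeping
   * briefly between attempts. sleepUninterruptibly never throws
   * InterruptedException; if the thread is interrupted during the sleep, the
   * interrupt flag is set again once the full duration has elapsed.
   */
  static String fetchWithRetry(RemoteService service, int maxAttempts) throws Exception {
    if (maxAttempts <= 0) {
      throw new IllegalArgumentException("maxAttempts must be positive");
    }
    Exception last = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        return service.fetch();
      } catch (Exception e) {
        last = e;
        Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
      }
    }
    throw last;
  }
}

The examples that follow apply the same call either as a retry delay while polling for a result, or, as in Examples 1 and 6, as a fixed pause after every request to act as simple client-side rate limiting.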
Example 1
Source File: Sprinklr.java    From streams with Apache License 2.0
@Override
public List<ProfileConversationsResponse> getProfileConversations(ProfileConversationsRequest request) {
  try {
    String requestJson = serializer.serialize(request);
    ObjectMap requestParams = new ObjectMap(requestJson);
    RestCall call = restClient
        .doGet(baseUrl() + "v1/profile/conversations")
        .accept("application/json")
        .contentType("application/json")
        .ignoreErrors()
        .queryIfNE(requestParams);
    String responseJson = call.getResponseAsString();
    List<ProfileConversationsResponse> result = parser.parse(responseJson, List.class, ProfileConversationsResponse.class);
    return result;
  } catch (Exception e) {
    LOGGER.error("Exception", e);
    return new ArrayList<>();
  } finally {
    Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
  }
}
 
Example 2
Source File: CheckUtil.java    From bgpcep with Eclipse Public License 1.0
private static <R, T extends DataObject> R readData(final DataBroker dataBroker, final LogicalDatastoreType ldt,
        final InstanceIdentifier<T> iid, final Function<T, R> function, final int timeout)
        throws InterruptedException, ExecutionException {
    AssertionError lastError = null;
    final Stopwatch sw = Stopwatch.createStarted();
    do {
        try (ReadTransaction tx = dataBroker.newReadOnlyTransaction()) {
            final Optional<T> data = tx.read(ldt, iid).get();
            if (data.isPresent()) {
                try {
                    return function.apply(data.get());
                } catch (final AssertionError e) {
                    lastError = e;
                    Uninterruptibles.sleepUninterruptibly(SLEEP_FOR, TimeUnit.MILLISECONDS);
                }
            }
        }
    } while (sw.elapsed(TimeUnit.SECONDS) <= timeout);
    throw lastError;
}
 
Example 3
Source File: Metric.java    From SeaCloudsPlatform with Apache License 2.0
@Override
public void run() {
    logger.info("Getting Sample...");
    long first = System.currentTimeMillis();
    boolean sampleRetrieved = false;

    do {
        try {
            send(getSample(applicationUrl, user, password), resource);
            sampleRetrieved = true;
            logger.info("Sample retrieved and sent in: "
                    + (System.currentTimeMillis() - first) + "ms");
        } catch (Exception ex) {
            logger.warn("Unable to get sample (ID:" + resource.getId()
                    + " - " + ex.getMessage());
            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
        }
    } while (!sampleRetrieved);

}
 
Example 4
Source File: LoopbackCommunicationAdapter.java    From openAGV with Apache License 2.0
/**
 * Simulates an operation.
 *
 * @param operation An operation
 */
private void simulateOperation(String operation) {
  requireNonNull(operation, "operation");

  if (isTerminated()) {
    return;
  }

  LOG.debug("Operating...");
  final int operatingTime = getProcessModel().getOperatingTime();
  getProcessModel().setVehicleState(Vehicle.State.EXECUTING);
  for (int timePassed = 0; timePassed < operatingTime && !isTerminated();
       timePassed += simAdvanceTime) {
    Uninterruptibles.sleepUninterruptibly(ADVANCE_TIME, TimeUnit.MILLISECONDS);
    getProcessModel().getVelocityController().advanceTime(simAdvanceTime);
  }
  if (operation.equals(getProcessModel().getLoadOperation())) {
    // Update load handling devices as defined by this operation
    getProcessModel().setVehicleLoadHandlingDevices(
        Arrays.asList(new LoadHandlingDevice(LHD_NAME, true)));
  }
  else if (operation.equals(getProcessModel().getUnloadOperation())) {
    getProcessModel().setVehicleLoadHandlingDevices(
        Arrays.asList(new LoadHandlingDevice(LHD_NAME, false)));
  }
}
 
Example 5
Source File: ExecuteWebhook.java    From jclouds-examples with Apache License 2.0
private void executeWebhook() {
   System.out.format("Execute Webhook%n");

   String policyId = Utils.getPolicyId(policyApi);

   Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
   boolean result = policyApi.execute(policyId);

   System.out.format("  %s%n", result);

   System.out.format("Execute Webhook - again, anonymously%n");

   Webhook webhook = webhookApi.list().first().get();
   try {
      result = AutoscaleUtils.execute(webhook.getAnonymousExecutionURI().get());
   } catch (IOException e) {
      e.printStackTrace();
   }

   System.out.format("  %s%n", result);
}
 
Example 6
Source File: TheDataGroup.java    From streams with Apache License 2.0
@Override
public LookupResponse lookupMobile(PhoneLookupRequest request) {
  try {
    RestCall call = restClient
            .doPost(baseUrl() + "sync/lookup/mobile")
            .body(request)
            .ignoreErrors();
    String responseJson = call.getResponseAsString();
    LookupResponse response = parser.parse(responseJson, LookupResponse.class);
    return response;
  } catch( Exception e ) {
    LOGGER.error("Exception", e);
    return new LookupResponse();
  } finally {
    Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
  }
}
 
Example 7
Source File: Hadoop21YarnNMClient.java    From twill with Apache License 2.0
@Override
public void cancel() {
  LOG.info("Request to stop container {}.", container.getId());

  try {
    nmClient.stopContainer(container.getId(), container.getNodeId());
    while (true) {
      ContainerStatus status = nmClient.getContainerStatus(container.getId(), container.getNodeId());
      LOG.trace("Container status: {} {}", status, status.getDiagnostics());
      if (status.getState() == ContainerState.COMPLETE) {
        break;
      }
      Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
    }
    LOG.info("Container {} stopped.", container.getId());
  } catch (Exception e) {
    LOG.error("Fail to stop container {}", container.getId(), e);
    throw Throwables.propagate(e);
  }
}
 
Example 8
Source File: BandwidthThrottle.java    From cyberduck with GNU General Public License v3.0
/**
 * Waits until bytes are available in the current throttling window.
 */
private void waitForBandwidth() {
    while(true) {
        long now = System.currentTimeMillis();
        updateWindow(now);
        if(availableBytes != 0) {
            break;
        }
        if(log.isInfoEnabled()) {
            log.info(String.format("Throttling bandwidth for %d milliseconds", nextTickTime - now));
        }
        Uninterruptibles.sleepUninterruptibly(nextTickTime - now, TimeUnit.MILLISECONDS);
    }
}
 
Example 9
Source File: CheckUtil.java    From bgpcep with Eclipse Public License 1.0
@VisibleForTesting
static void checkReceivedMessages(final ListenerCheck listener, final int numberOfMessages,
        final int timeout) {
    final Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= timeout) {
        if (listener.getListMessageSize() != numberOfMessages) {
            Uninterruptibles.sleepUninterruptibly(SLEEP_FOR, TimeUnit.MILLISECONDS);
        } else {
            return;
        }
    }
    throw new AssertionError("Expected " + numberOfMessages + " but received "
            + listener.getListMessageSize());
}
 
Example 10
Source File: RssStreamProvider.java    From streams with Apache License 2.0
/**
 * To use from command line:
 *
 * <p></p>
 * Supply configuration similar to src/test/resources/rss.conf
 *
 * <p></p>
 * Launch using:
 *
 * <p></p>
 * mvn exec:java -Dexec.mainClass=org.apache.streams.rss.provider.RssStreamProvider -Dexec.args="rss.conf articles.json"
 * @param args args
 * @throws Exception Exception
 */
public static void main(String[] args) throws Exception {

  Preconditions.checkArgument(args.length >= 2);

  String configfile = args[0];
  String outfile = args[1];

  Config reference = ConfigFactory.load();
  File file = new File(configfile);
  assert (file.exists());
  Config testResourceConfig = ConfigFactory.parseFileAnySyntax(file, ConfigParseOptions.defaults().setAllowMissing(false));

  Config typesafe  = testResourceConfig.withFallback(reference).resolve();

  StreamsConfiguration streamsConfiguration = StreamsConfigurator.detectConfiguration(typesafe);
  RssStreamConfiguration config = new ComponentConfigurator<>(RssStreamConfiguration.class).detectConfiguration(typesafe, "rss");
  RssStreamProvider provider = new RssStreamProvider(config);

  ObjectMapper mapper = StreamsJacksonMapper.getInstance();

  PrintStream outStream = new PrintStream(new BufferedOutputStream(new FileOutputStream(outfile)));
  provider.prepare(config);
  provider.startStream();
  do {
    Uninterruptibles.sleepUninterruptibly(streamsConfiguration.getBatchFrequencyMs(), TimeUnit.MILLISECONDS);
    for (StreamsDatum datum : provider.readCurrent()) {
      String json;
      try {
        json = mapper.writeValueAsString(datum.getDocument());
        outStream.println(json);
      } catch (JsonProcessingException ex) {
        System.err.println(ex.getMessage());
      }
    }
  } while (provider.isRunning());
  provider.cleanUp();
  outStream.flush();
}
 
Example 11
Source File: UserTask.java    From conductor with Apache License 2.0
@Override
public void start(Workflow workflow, Task task, WorkflowExecutor executor) {
	Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);

	Map<String, Map<String, List<Object>>> map = objectMapper.convertValue(task.getInputData(), mapStringListObjects);
	Map<String, Object> output = new HashMap<>();
	Map<String, List<Object>> defaultLargeInput = new HashMap<>();
	defaultLargeInput.put("TEST_SAMPLE", Collections.singletonList("testDefault"));
	output.put("size", map.getOrDefault("largeInput", defaultLargeInput).get("TEST_SAMPLE").size());
	task.setOutputData(output);
	task.setStatus(Status.COMPLETED);
}
 
Example 12
Source File: PlaySessionResource.java    From judgels with GNU General Public License v2.0
@GET
@Path("/client-login/{authCode}/{redirectUri}")
@UnitOfWork
public Response serviceLogIn(
        @Context UriInfo uriInfo,
        @PathParam("authCode") String authCode,
        @PathParam("redirectUri") String redirectUri) {

    Optional<Session> session = Optional.empty();

    // Hack: wait until the auth code actually got written to db
    for (int i = 0; i < 3; i++) {
        session = sessionStore.getSessionByAuthCode(authCode);
        if (session.isPresent()) {
            break;
        }
        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    if (!session.isPresent()) {
        throw new IllegalArgumentException();
    }

    sessionStore.deleteAuthCode(authCode);

    return Response.seeOther(URI.create(redirectUri))
            .cookie(new NewCookie(
                    COOKIE_NAME,
                    session.get().getToken(),
                    "/",
                    uriInfo.getBaseUri().getHost(),
                    null,
                    (int) Duration.ofDays(7).getSeconds(),
                    false,
                    true))
            .build();
}
 
Example 13
Source File: PinotHelixTaskResourceManager.java    From incubator-pinot with Apache License 2.0
/**
 * Submit a list of child tasks with same task type to the Minion instances with the given tag.
 *
 * @param pinotTaskConfigs List of child task configs to be submitted
 * @param minionInstanceTag Tag of the Minion instances to submit the task to
 * @param taskTimeoutMs Timeout in milliseconds for each task
 * @param numConcurrentTasksPerInstance Maximum number of concurrent tasks allowed per instance
 * @return Name of the submitted parent task
 */
public synchronized String submitTask(List<PinotTaskConfig> pinotTaskConfigs, String minionInstanceTag,
    long taskTimeoutMs, int numConcurrentTasksPerInstance) {
  int numChildTasks = pinotTaskConfigs.size();
  Preconditions.checkState(numChildTasks > 0);
  Preconditions.checkState(numConcurrentTasksPerInstance > 0);

  String taskType = pinotTaskConfigs.get(0).getTaskType();
  String parentTaskName = TASK_PREFIX + taskType + TASK_NAME_SEPARATOR + System.currentTimeMillis();
  LOGGER
      .info("Submitting parent task: {} of type: {} with {} child task configs: {} to Minion instances with tag: {}",
          parentTaskName, taskType, numChildTasks, pinotTaskConfigs, minionInstanceTag);
  List<TaskConfig> helixTaskConfigs = new ArrayList<>(numChildTasks);
  for (int i = 0; i < numChildTasks; i++) {
    PinotTaskConfig pinotTaskConfig = pinotTaskConfigs.get(i);
    Preconditions.checkState(pinotTaskConfig.getTaskType().equals(taskType));
    helixTaskConfigs.add(pinotTaskConfig.toHelixTaskConfig(parentTaskName + TASK_NAME_SEPARATOR + i));
  }

  // Run each task only once no matter whether it succeeds or not, and never fail the job
  // The reason for this is that: we put multiple independent tasks into one job to get them run in parallel, so we
  // don't want one task failure affects other tasks. Also, if one task failed, next time we will re-schedule it
  JobConfig.Builder jobBuilder =
      new JobConfig.Builder().addTaskConfigs(helixTaskConfigs).setInstanceGroupTag(minionInstanceTag)
          .setTimeoutPerTask(taskTimeoutMs).setNumConcurrentTasksPerInstance(numConcurrentTasksPerInstance)
          .setIgnoreDependentJobFailure(true).setMaxAttemptsPerTask(1).setFailureThreshold(Integer.MAX_VALUE);
  _taskDriver.enqueueJob(getHelixJobQueueName(taskType), parentTaskName, jobBuilder);

  // Wait until task state is available
  while (getTaskState(parentTaskName) == null) {
    Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  }

  return parentTaskName;
}
 
Example 14
Source File: IndefiniteWaitOneShotStartupCheckStrategy.java    From testcontainers-java with MIT License
@Override
public boolean waitUntilStartupSuccessful(DockerClient dockerClient, String containerId) {
    while (checkStartupState(dockerClient, containerId) == StartupStatus.NOT_YET_KNOWN) {
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }

    return checkStartupState(dockerClient, containerId) == StartupStatus.SUCCESSFUL;
}
 
Example 15
Source File: ITCassandraDependencies.java    From zipkin-dependencies with Apache License 2.0
static void blockWhileInFlight(CassandraStorage storage) {
  // Now, block until writes complete, notably so we can read them.
  Session.State state = storage.session().getState();
  refresh:
  while (true) {
    for (Host host : state.getConnectedHosts()) {
      if (state.getInFlightQueries(host) > 0) {
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        state = storage.session().getState();
        continue refresh;
      }
    }
    break;
  }
}
 
Example 16
Source File: NoOpStorage.java    From pravega with Apache License 2.0
private void noOp() {
    Uninterruptibles.sleepUninterruptibly(this.writeNoOpLatencyMills, TimeUnit.MILLISECONDS);
}
 
Example 17
Source File: WorkflowServiceTest.java    From conductor with Apache License 2.0
private void validateWorkflowWithInlineSubWorkflowExecution(String wfId) {
    Workflow workflow = workflowExecutionService.getExecutionStatus(wfId, true);
    assertNotNull(workflow);
    assertNotNull(workflow.getTasks());
    assertEquals(RUNNING, workflow.getStatus());

    // Simulating SystemTaskWorkerCoordinator to execute async system tasks
    String subWorkflowTaskId = workflow.getTaskByRefName("sw1").getTaskId();
    workflowExecutor.executeSystemTask(dummySubWorkflowSystemTask, subWorkflowTaskId, 1);
    Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);

    workflow = workflowExecutionService.getExecutionStatus(wfId, true);
    assertNotNull(workflow);
    assertNotNull(workflow.getTasks());

    Task subWorkflowTask = workflow.getTasks().stream().filter(t -> t.getTaskType().equals(SUB_WORKFLOW.name())).findAny().get();
    assertNotNull(subWorkflowTask);
    assertNotNull(subWorkflowTask.getOutputData());
    assertNotNull(subWorkflowTask.getInputData());
    assertNotNull("Output: " + subWorkflowTask.getSubWorkflowId() + ", status: " + subWorkflowTask.getStatus(), subWorkflowTask.getSubWorkflowId());
    assertTrue(subWorkflowTask.getInputData().containsKey("workflowInput"));
    assertEquals(3, ((Map<String, Object>) subWorkflowTask.getInputData().get("workflowInput")).get("param2"));
    assertEquals("inline_sw_1", subWorkflowTask.getInputData().get("subWorkflowName"));
    assertEquals(3, subWorkflowTask.getInputData().get("subWorkflowVersion"));
    assertEquals(IN_PROGRESS, subWorkflowTask.getStatus());

    String subWorkflowId = subWorkflowTask.getSubWorkflowId();
    workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true);
    assertNotNull(workflow);
    assertNotNull(workflow.getTasks());
    assertEquals(wfId, workflow.getParentWorkflowId());
    assertEquals(RUNNING, workflow.getStatus());

    Task simpleTask = workflowExecutionService.poll("junit_task_1", "test");
    String uuid = UUID.nameUUIDFromBytes("hello".getBytes()).toString();
    simpleTask.getOutputData().put("uuid", uuid);
    simpleTask.setStatus(COMPLETED);
    workflowExecutionService.updateTask(simpleTask);

    workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true);
    assertNotNull(workflow);
    assertEquals(Workflow.WorkflowStatus.COMPLETED, workflow.getStatus());
    assertEquals("inline_sw_1", workflow.getWorkflowName());
    assertNotNull(workflow.getOutput());
    assertTrue(workflow.getOutput().containsKey("o1"));
    assertTrue(workflow.getOutput().containsKey("o2"));
    assertEquals("sub workflow input param1", workflow.getOutput().get("o1"));
    assertEquals(uuid, workflow.getOutput().get("o2"));

    // Simulating SystemTaskWorkerCoordinator to execute async system tasks
    workflowExecutor.executeSystemTask(dummySubWorkflowSystemTask, subWorkflowTaskId, 1);

    workflow = workflowExecutionService.getExecutionStatus(wfId, true);
    assertNotNull(workflow);
    assertEquals(Workflow.WorkflowStatus.COMPLETED, workflow.getStatus());
    assertEquals("sub workflow input param1", workflow.getOutput().get("o1"));
    assertEquals(uuid, workflow.getOutput().get("o2"));

    subWorkflowTask = workflow.getTaskByRefName("sw1");
    assertEquals(COMPLETED, subWorkflowTask.getStatus());
    assertEquals("sub workflow input param1", subWorkflowTask.getOutputData().get("o1"));
    assertEquals(uuid, subWorkflowTask.getOutputData().get("o2"));
}
 
Example 18
Source File: AtomicLongMapTest.java    From pinpoint with Apache License 2.0
public void testRemove_thread_safety() throws InterruptedException {
    final AtomicLongMap<String> cache = AtomicLongMap.create();

    final int totalThread = 5;
    final ExecutorService executorService = Executors.newFixedThreadPool(totalThread);

    final AtomicLong totalCounter = new AtomicLong();
    final AtomicBoolean writerThread = new AtomicBoolean(true);
    final AtomicBoolean removeThread = new AtomicBoolean(true);

    final CountDownLatch writerLatch = new CountDownLatch(totalThread);

    for (int i = 0; i < totalThread; i++) {
        final int writerName = i;
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                while (writerThread.get()) {
                    cache.incrementAndGet("aa");
                    cache.incrementAndGet("cc");
                    cache.incrementAndGet("aa");
                    cache.incrementAndGet("bb");
                    cache.incrementAndGet("bb");
                    cache.incrementAndGet("bb");
                    cache.incrementAndGet("cc");
                    cache.incrementAndGet("d");
                    totalCounter.addAndGet(8);
                }
                writerLatch.countDown();
                logger.debug("shutdown {}", writerName);
            }
        });
    }

    final AtomicLong sumCounter = new AtomicLong();
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            while (removeThread.get()) {
                Map<String, Long> remove = AtomicLongMapUtils.remove(cache);
                sumCounter.addAndGet(sum(remove));
                logger.debug("sum:{}", remove);

                Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
            }
        }
    });

    Uninterruptibles.sleepUninterruptibly(5000, TimeUnit.MILLISECONDS);
    writerThread.set(false);
    writerLatch.await();


    Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    removeThread.set(false);
    Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);


    executorService.shutdown();
    logger.debug("total={} sum:{}", totalCounter.get(), sumCounter.get());
    Assert.assertEquals("concurrent remove and increment", totalCounter.get(), sumCounter.get());


}
 
Example 19
Source File: ReentrantDistributedLockTest.java    From twill with Apache License 2.0
@Test (timeout = 60000)
public void testLockRace() throws Exception {
  // Test for multiple clients race on the lock
  // This is for the case when a lock owner release the lock (node deleted)
  // while the other client tries to watch on the node
  ZKClientService zkClient1 = createZKClient();
  ZKClientService zkClient2 = createZKClient();
  try {
    final Lock[] locks = new Lock[] {
      new ReentrantDistributedLock(zkClient1, "/lockrace"),
      new ReentrantDistributedLock(zkClient2, "/lockrace")
    };

    // Have two clients fight for the lock
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final CountDownLatch lockLatch = new CountDownLatch(2);
    for (int i = 0; i < 2; i++) {
      final int threadId = i;
      Thread t = new Thread() {
        @Override
        public void run() {
          try {
            barrier.await();
            for (int i = 0; i < 100; i++) {
              Lock lock = locks[threadId];
              lock.lock();
              try {
                // A short sleep to make the race possible to happen
                Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
              } finally {
                lock.unlock();
              }
            }
            lockLatch.countDown();
          } catch (Exception e) {
            LOG.error("Exception", e);
          }
        }
      };
      t.start();
    }

    Assert.assertTrue(lockLatch.await(30, TimeUnit.SECONDS));

  } finally {
    zkClient1.stopAndWait();
    zkClient2.stopAndWait();
  }
}
 
Example 20
Source File: MonitoredClusteringBuilderState.java    From dsl-devkit with Eclipse Public License 1.0
/**
 * Polls the given {@link IProgressMonitor} for cancellation until a timeout of {@link #CANCELLATION_POLLING_TIMEOUT} ms is reached.
 * <p>
 * <em>Note</em>: Throws OperationCanceledException if monitor is cancelled within the given timeout.
 * </p>
 *
 * @param monitor
 *          the {@link IProgressMonitor} to check, must not be {@code null}
 */
private void pollForCancellation(final IProgressMonitor monitor) {
  final long endTime = System.currentTimeMillis() + CANCELLATION_POLLING_TIMEOUT;
  do {
    checkForCancellation(monitor);
    Uninterruptibles.sleepUninterruptibly(CANCELLATION_POLLING_DELAY, TimeUnit.MILLISECONDS);
  } while (System.currentTimeMillis() < endTime);
}