org.apache.beam.sdk.util.FluentBackoff Java Examples

The following examples show how to use org.apache.beam.sdk.util.FluentBackoff. The examples are taken from open-source projects; the source file and project for each are noted above the code.
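Before diving into the project snippets, a note on the pattern they all share: FluentBackoff is a fluent builder for BackOff instances. You configure the policy once (retries, initial/maximum/cumulative backoff), call backoff() to obtain a fresh BackOff for each sequence of attempts, and drive the retry loop with BackOffUtils.next(sleeper, backOff), which sleeps for the next interval and returns false once the retry budget is exhausted. The following is a minimal, self-contained sketch of that pattern; callSomeService() is a hypothetical flaky operation, not code from any of the projects below.

import java.io.IOException;

import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.BackOffUtils;
import org.apache.beam.sdk.util.FluentBackoff;
import org.apache.beam.sdk.util.Sleeper;
import org.joda.time.Duration;

public class FluentBackoffSketch {

  // Hypothetical flaky operation standing in for any retriable call.
  static void callSomeService() throws IOException {
    // ... may throw a transient IOException ...
  }

  public static void main(String[] args) throws IOException, InterruptedException {
    // Configure the policy once: up to 3 retries, starting at 1 second.
    BackOff backOff =
        FluentBackoff.DEFAULT
            .withMaxRetries(3)
            .withInitialBackoff(Duration.standardSeconds(1))
            .backoff();
    Sleeper sleeper = Sleeper.DEFAULT;

    while (true) {
      try {
        callSomeService();
        break; // Success: stop retrying.
      } catch (IOException e) {
        // next() sleeps for the next backoff interval and returns false
        // once the retry budget is exhausted.
        if (!BackOffUtils.next(sleeper, backOff)) {
          throw e;
        }
      }
    }
  }
}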
Example #1
Source File: BackOffExecutor.java    From feast with Apache License 2.0
private void execute(Retriable retriable, FluentBackoff backoff) throws Exception {
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backOff = backoff.backoff();
  while (true) {
    try {
      retriable.execute();
      break;
    } catch (Exception e) {
      if (retriable.isExceptionRetriable(e) && BackOffUtils.next(sleeper, backOff)) {
        retriable.cleanUpAfterFailure();
      } else {
        throw e;
      }
    }
  }
}
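Retriable here is feast's own callback interface; judging from the method above, it exposes execute(), isExceptionRetriable(Exception), and cleanUpAfterFailure(). A caller might supply one as in the sketch below. Note that the interface shape is inferred from this snippet rather than from feast's published API, and the values are illustrative.

Retriable retriable =
    new Retriable() {
      @Override
      public void execute() throws Exception {
        // Attempt the operation, e.g. write a batch to an external store.
      }

      @Override
      public boolean isExceptionRetriable(Exception e) {
        // For illustration: treat only I/O failures as transient.
        return e instanceof java.io.IOException;
      }

      @Override
      public void cleanUpAfterFailure() {
        // Discard partial state so the next attempt starts clean.
      }
    };

FluentBackoff backoff =
    FluentBackoff.DEFAULT
        .withMaxRetries(3) // illustrative retry budget
        .withInitialBackoff(Duration.standardSeconds(1));
// execute(retriable, backoff) then loops until success or the budget runs out.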
 
Example #2
Source File: SolrIO.java    From beam with Apache License 2.0
@Setup
public void setup() {
  solrClient = spec.getConnectionConfiguration().createClient();

  retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(0) // default to no retrying
          .withInitialBackoff(RETRY_INITIAL_BACKOFF);

  if (spec.getRetryConfiguration() != null) {
    // FluentBackoff counts retries excluding the original while we count attempts
    // to remove ambiguity (hence the -1)
    retryBackoff =
        retryBackoff
            .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
            .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
  }
}
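The -1 above is worth a worked example: withMaxRetries() counts retries after the first call, while the IO's RetryConfiguration counts total attempts. With made-up values:

// Hypothetical values for illustration only.
int maxAttempts = 3; // RetryConfiguration.getMaxAttempts(): 3 total calls
FluentBackoff backoff =
    FluentBackoff.DEFAULT
        .withInitialBackoff(Duration.standardSeconds(5))
        // 3 attempts = 1 original call + 2 retries, hence withMaxRetries(2).
        .withMaxRetries(maxAttempts - 1);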
 
Example #3
Source File: ElasticsearchIO.java    From beam with Apache License 2.0
@Setup
public void setup() throws IOException {
  ConnectionConfiguration connectionConfiguration = spec.getConnectionConfiguration();
  backendVersion = getBackendVersion(connectionConfiguration);
  restClient = connectionConfiguration.createClient();

  retryBackoff =
      FluentBackoff.DEFAULT.withMaxRetries(0).withInitialBackoff(RETRY_INITIAL_BACKOFF);

  if (spec.getRetryConfiguration() != null) {
    retryBackoff =
        FluentBackoff.DEFAULT
            .withInitialBackoff(RETRY_INITIAL_BACKOFF)
            .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
            .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
  }
  // configure a custom serializer for metadata to be able to change serialization based
  // on ES version
  SimpleModule module = new SimpleModule();
  module.addSerializer(DocumentMetadata.class, new DocumentMetadataSerializer());
  OBJECT_MAPPER.registerModule(module);
}
 
Example #4
Source File: SnsIO.java    From beam with Apache License 2.0
@Setup
public void setup() throws Exception {
  // Initialize SnsPublisher
  producer = spec.getSnsClientProvider().getSnsClient();

  retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(0) // default to no retrying
          .withInitialBackoff(RETRY_INITIAL_BACKOFF);
  if (spec.getRetryConfiguration() != null) {
    retryBackoff =
        retryBackoff
            .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
            .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
  }
}
 
Example #5
Source File: SnsIO.java    From beam with Apache License 2.0
@Setup
public void setup() throws Exception {
  // Initialize SnsPublisher
  producer = spec.getAWSClientsProvider().createSnsPublisher();
  checkArgument(
      topicExists(producer, spec.getTopicName()),
      "Topic %s does not exist",
      spec.getTopicName());

  retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(0) // default to no retrying
          .withInitialBackoff(RETRY_INITIAL_BACKOFF);
  if (spec.getRetryConfiguration() != null) {
    retryBackoff =
        retryBackoff
            .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
            .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
  }
}
 
Example #6
Source File: BigQueryToTableIT.java    From beam with Apache License 2.0
private List<TableRow> getTableRowsFromQuery(String query, int maxRetry) throws Exception {
  FluentBackoff backoffFactory =
      FluentBackoff.DEFAULT
          .withMaxRetries(maxRetry)
          .withInitialBackoff(Duration.standardSeconds(1L));
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backoff = BackOffAdapter.toGcpBackOff(backoffFactory.backoff());
  do {
    LOG.info("Starting querying {}", query);
    QueryResponse response = BQ_CLIENT.queryWithRetries(query, project);
    if (response.getRows() != null) {
      LOG.info("Got table content with query {}", query);
      return response.getRows();
    }
  } while (BackOffUtils.next(sleeper, backoff));
  LOG.info("Got empty table for query {} with retry {}", query, maxRetry);
  return Collections.emptyList();
}
 
Example #7
Source File: BigQueryServicesImplTest.java    From beam with Apache License 2.0
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl#startLoadJob} succeeds with a retry. */
@Test
public void testStartLoadJobRetry() throws IOException, InterruptedException {
  Job testJob = new Job();
  JobReference jobRef = new JobReference();
  jobRef.setJobId("jobId");
  jobRef.setProjectId("projectId");
  testJob.setJobReference(jobRef);

  // First response is 403 rate limited, second response has valid payload.
  when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
  when(response.getStatusCode()).thenReturn(403).thenReturn(200);
  when(response.getContent())
      .thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)))
      .thenReturn(toStream(testJob));

  Sleeper sleeper = new FastNanoClockAndSleeper();
  JobServiceImpl.startJob(
      testJob,
      new ApiErrorExtractor(),
      bigquery,
      sleeper,
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));

  verify(response, times(2)).getStatusCode();
  verify(response, times(2)).getContent();
  verify(response, times(2)).getContentType();
}
 
Example #8
Source File: BigQueryServicesImplTest.java    From beam with Apache License 2.0
/**
 * Tests that {@link BigQueryServicesImpl.JobServiceImpl#startLoadJob} succeeds with an already
 * exist job.
 */
@Test
public void testStartLoadJobSucceedsAlreadyExists() throws IOException, InterruptedException {
  Job testJob = new Job();
  JobReference jobRef = new JobReference();
  jobRef.setJobId("jobId");
  jobRef.setProjectId("projectId");
  testJob.setJobReference(jobRef);

  when(response.getStatusCode()).thenReturn(409); // 409 means already exists

  Sleeper sleeper = new FastNanoClockAndSleeper();
  JobServiceImpl.startJob(
      testJob,
      new ApiErrorExtractor(),
      bigquery,
      sleeper,
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));

  verify(response, times(1)).getStatusCode();
  verify(response, times(1)).getContent();
  verify(response, times(1)).getContentType();
  expectedLogs.verifyNotLogged("Started BigQuery job");
}
 
Example #9
Source File: BigQueryServicesImplTest.java    From beam with Apache License 2.0
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl#startLoadJob} succeeds. */
@Test
public void testStartLoadJobSucceeds() throws IOException, InterruptedException {
  Job testJob = new Job();
  JobReference jobRef = new JobReference();
  jobRef.setJobId("jobId");
  jobRef.setProjectId("projectId");
  testJob.setJobReference(jobRef);

  when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
  when(response.getStatusCode()).thenReturn(200);
  when(response.getContent()).thenReturn(toStream(testJob));

  Sleeper sleeper = new FastNanoClockAndSleeper();
  JobServiceImpl.startJob(
      testJob,
      new ApiErrorExtractor(),
      bigquery,
      sleeper,
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));

  verify(response, times(1)).getStatusCode();
  verify(response, times(1)).getContent();
  verify(response, times(1)).getContentType();
  expectedLogs.verifyInfo(String.format("Started BigQuery job: %s", jobRef));
}
 
Example #10
Source File: FakeJobService.java    From beam with Apache License 2.0
@Override
public Job pollJob(JobReference jobRef, int maxAttempts) throws InterruptedException {
  BackOff backoff =
      BackOffAdapter.toGcpBackOff(
          FluentBackoff.DEFAULT
              .withMaxRetries(maxAttempts)
              .withInitialBackoff(Duration.millis(10))
              .withMaxBackoff(Duration.standardSeconds(1))
              .backoff());
  Sleeper sleeper = Sleeper.DEFAULT;
  try {
    do {
      Job job = getJob(jobRef);
      if (job != null) {
        JobStatus status = job.getStatus();
        if (status != null
            && ("DONE".equals(status.getState()) || "FAILED".equals(status.getState()))) {
          return job;
        }
      }
    } while (BackOffUtils.next(sleeper, backoff));
  } catch (IOException e) {
    return null;
  }
  return null;
}
 
Example #11
Source File: GcsUtilTest.java    From beam with Apache License 2.0
@Test
public void testGetBucket() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
  Storage.Buckets.Get mockStorageGet = Mockito.mock(Storage.Buckets.Get.class);

  BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());

  when(mockStorage.buckets()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.get("testbucket")).thenReturn(mockStorageGet);
  when(mockStorageGet.execute())
      .thenThrow(new SocketTimeoutException("SocketException"))
      .thenReturn(new Bucket());

  assertNotNull(
      gcsUtil.getBucket(
          GcsPath.fromComponents("testbucket", "testobject"),
          mockBackOff,
          new FastNanoClockAndSleeper()));
}
 
Example #12
Source File: GcsUtilTest.java    From beam with Apache License 2.0
@Test
public void testBucketAccessible() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
  Storage.Buckets.Get mockStorageGet = Mockito.mock(Storage.Buckets.Get.class);

  BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());

  when(mockStorage.buckets()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.get("testbucket")).thenReturn(mockStorageGet);
  when(mockStorageGet.execute())
      .thenThrow(new SocketTimeoutException("SocketException"))
      .thenReturn(new Bucket());

  assertTrue(
      gcsUtil.bucketAccessible(
          GcsPath.fromComponents("testbucket", "testobject"),
          mockBackOff,
          new FastNanoClockAndSleeper()));
}
 
Example #13
Source File: GcsUtilTest.java    From beam with Apache License 2.0
@Test
public void testCreateBucket() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Buckets.Insert mockStorageInsert = Mockito.mock(Storage.Buckets.Insert.class);

  BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());

  when(mockStorage.buckets()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.insert(any(String.class), any(Bucket.class)))
      .thenReturn(mockStorageInsert);
  when(mockStorageInsert.execute())
      .thenThrow(new SocketTimeoutException("SocketException"))
      .thenReturn(new Bucket());

  gcsUtil.createBucket("a", new Bucket(), mockBackOff, new FastNanoClockAndSleeper());
}
 
Example #14
Source File: GcsUtilTest.java    From beam with Apache License 2.0
@Test
public void testBucketDoesNotExist() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
  Storage.Buckets.Get mockStorageGet = Mockito.mock(Storage.Buckets.Get.class);

  BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());

  when(mockStorage.buckets()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.get("testbucket")).thenReturn(mockStorageGet);
  when(mockStorageGet.execute())
      .thenThrow(
          googleJsonResponseException(
              HttpStatusCodes.STATUS_CODE_NOT_FOUND, "It don't exist", "Nothing here to see"));

  assertFalse(
      gcsUtil.bucketAccessible(
          GcsPath.fromComponents("testbucket", "testobject"),
          mockBackOff,
          new FastNanoClockAndSleeper()));
}
 
Example #15
Source File: V1TestUtil.java    From beam with Apache License 2.0
private void flushBatch() throws DatastoreException, IOException, InterruptedException {
  LOG.info("Writing batch of {} entities", entities.size());
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(MAX_RETRIES)
          .withInitialBackoff(INITIAL_BACKOFF)
          .backoff();

  while (true) {
    // Batch mutate entities.
    try {
      CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
      for (Entity entity : entities) {
        commitRequest.addMutations(mutationBuilder.apply(entity));
      }
      commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
      datastore.commit(commitRequest.build());
      // Break if the commit threw no exception.
      break;
    } catch (DatastoreException exception) {
      LOG.error(
          "Error writing to the Datastore ({}): {}",
          exception.getCode(),
          exception.getMessage());
      if (!BackOffUtils.next(sleeper, backoff)) {
        LOG.error("Aborting after {} retries.", MAX_RETRIES);
        throw exception;
      }
    }
  }
  LOG.info("Successfully wrote {} entities", entities.size());
  entities.clear();
}
 
Example #16
Source File: DataflowPipelineJob.java    From beam with Apache License 2.0
private static BackOff getMessagesBackoff(Duration duration) {
  FluentBackoff factory = MESSAGES_BACKOFF_FACTORY;

  if (!duration.isShorterThan(Duration.ZERO)) {
    factory = factory.withMaxCumulativeBackoff(duration);
  }

  return BackOffAdapter.toGcpBackOff(factory.backoff());
}
 
Example #17
Source File: MicrobatchSource.java    From beam with Apache License 2.0
private Reader(final UnboundedSource.UnboundedReader<T> unboundedReader) {
  this.unboundedReader = unboundedReader;
  backoffFactory =
      FluentBackoff.DEFAULT
          .withInitialBackoff(Duration.millis(10))
          .withMaxBackoff(maxReadTime.minus(1))
          .withMaxCumulativeBackoff(maxReadTime.minus(1));
}
 
Example #18
Source File: DataflowBatchWorkerHarness.java    From beam with Apache License 2.0
/** Helper for initializing the BackOff used for retries. */
private static BackOff createBackOff() {
  return FluentBackoff.DEFAULT
      .withInitialBackoff(Duration.millis(BACKOFF_INITIAL_INTERVAL_MILLIS))
      .withMaxBackoff(Duration.millis(BACKOFF_MAX_INTERVAL_MILLIS))
      .backoff();
}
 
Example #19
Source File: LocalSpannerIO.java    From DataflowTemplates with Apache License 2.0
@Setup
public void setup() throws Exception {
  // set up non-serializable values here.
  spannerAccessor = ExposedSpannerAccessor.create(spannerConfig);
  bundleWriteBackoff =
      FluentBackoff.DEFAULT
          .withMaxCumulativeBackoff(spannerConfig.getMaxCumulativeBackoff().get())
          .withInitialBackoff(spannerConfig.getMaxCumulativeBackoff().get().dividedBy(60));
}
 
Example #20
Source File: DynamoDBIO.java    From beam with Apache License 2.0
@Setup
public void setup() {
  client = spec.getDynamoDbClientProvider().getDynamoDbClient();
  retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(0) // default to no retrying
          .withInitialBackoff(RETRY_INITIAL_BACKOFF);
  if (spec.getRetryConfiguration() != null) {
    retryBackoff =
        retryBackoff
            .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
            .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
  }
}
 
Example #21
Source File: TrafficMaxLaneFlowIT.java    From beam with Apache License 2.0
@Test
public void testE2ETrafficMaxLaneFlow() throws Exception {
  this.options.setBigQuerySchema(FormatMaxesFn.getSchema());
  this.options.setProject(this.projectId);
  this.options.setBigQueryDataset(this.outputDatasetId);
  this.options.setBigQueryTable(this.outputTable);
  TrafficMaxLaneFlow.runTrafficMaxLaneFlow(this.options);
  FluentBackoff backoffFactory =
      FluentBackoff.DEFAULT.withMaxRetries(4).withInitialBackoff(Duration.standardSeconds(1L));
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backoff = BackOffAdapter.toGcpBackOff(backoffFactory.backoff());
  String res = "empty_result";
  do {
    QueryResponse response =
        this.bqClient.queryWithRetries(
            String.format(
                "SELECT count(*) as total FROM [%s:%s.%s]",
                this.projectId, this.outputDatasetId, this.outputTable),
            this.projectId);
    // Retry up to 4 times to ride out the latency between data being written to
    // BigQuery and becoming available to query.
    // Partial results may still be returned, making traversal of the nested result
    // object NPE-prone.
    try {
      res = response.getRows().get(0).getF().get(0).getV().toString();
      break;
    } catch (NullPointerException e) {
      // Ignore NullPointerException during retry.
    }
  } while (BackOffUtils.next(sleeper, backoff));
  assertEquals("9763", res);
}
 
Example #22
Source File: JdbcIO.java    From beam with Apache License 2.0
@Setup
public void setup() {
  dataSource = spec.getDataSourceProviderFn().apply(null);
  RetryConfiguration retryConfiguration = spec.getRetryConfiguration();

  retryBackOff =
      FluentBackoff.DEFAULT
          .withInitialBackoff(retryConfiguration.getInitialDuration())
          .withMaxCumulativeBackoff(retryConfiguration.getMaxDuration())
          .withMaxRetries(retryConfiguration.getMaxAttempts());
}
 
Example #23
Source File: DynamoDBIO.java    From beam with Apache License 2.0
@Setup
public void setup() {
  client = spec.getAwsClientsProvider().createDynamoDB();
  retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(0) // default to no retrying
          .withInitialBackoff(RETRY_INITIAL_BACKOFF);
  if (spec.getRetryConfiguration() != null) {
    retryBackoff =
        retryBackoff
            .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
            .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
  }
}
 
Example #24
Source File: TrafficRoutesIT.java    From beam with Apache License 2.0
@Test
public void testE2ETrafficRoutes() throws Exception {
  this.options.setBigQuerySchema(FormatStatsFn.getSchema());
  this.options.setProject(this.projectId);
  this.options.setBigQueryDataset(this.outputDatasetId);
  this.options.setBigQueryTable(this.outputTable);
  TrafficRoutes.runTrafficRoutes(options);
  FluentBackoff backoffFactory =
      FluentBackoff.DEFAULT.withMaxRetries(4).withInitialBackoff(Duration.standardSeconds(1L));
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backoff = BackOffAdapter.toGcpBackOff(backoffFactory.backoff());
  String res = "empty_result";
  do {
    QueryResponse response =
        this.bqClient.queryWithRetries(
            String.format(
                "SELECT count(*) as total FROM [%s:%s.%s]",
                this.projectId, this.outputDatasetId, this.outputTable),
            this.projectId);
    // Retry up to 4 times to ride out the latency between data being written to
    // BigQuery and becoming available to query.
    // Partial results may still be returned, making traversal of the nested result
    // object NPE-prone.
    try {
      res = response.getRows().get(0).getF().get(0).getV().toString();
      break;
    } catch (NullPointerException e) {
      // Ignore NullPointerException during retry.
    }
  } while (BackOffUtils.next(sleeper, backoff));

  assertEquals("27", res);
}
 
Example #25
Source File: SimplifiedKinesisClient.java    From beam with Apache License 2.0
public List<Shard> listShards(final String streamName) throws TransientKinesisException {
  return wrapExceptions(
      () -> {
        List<Shard> shards = Lists.newArrayList();
        String lastShardId = null;

        // DescribeStream has limits that can be hit fairly easily if we are attempting
        // to configure multiple KinesisIO inputs in the same account. Retry up to
        // LIST_SHARDS_DESCRIBE_STREAM_MAX_ATTEMPTS times if we end up hitting that limit.
        //
        // Only pass the wrapped exception up once that limit is reached. Use FluentBackoff
        // to implement the retry policy.
        FluentBackoff retryBackoff =
            FluentBackoff.DEFAULT
                .withMaxRetries(LIST_SHARDS_DESCRIBE_STREAM_MAX_ATTEMPTS)
                .withInitialBackoff(LIST_SHARDS_DESCRIBE_STREAM_INITIAL_BACKOFF);
        StreamDescription description = null;
        do {
          BackOff backoff = retryBackoff.backoff();
          Sleeper sleeper = Sleeper.DEFAULT;
          while (true) {
            try {
              description =
                  kinesis.describeStream(streamName, lastShardId).getStreamDescription();
              break;
            } catch (LimitExceededException exc) {
              if (!BackOffUtils.next(sleeper, backoff)) {
                throw exc;
              }
            }
          }

          shards.addAll(description.getShards());
          lastShardId = shards.get(shards.size() - 1).getShardId();
        } while (description.getHasMoreShards());

        return shards;
      });
}
 
Example #26
Source File: StreamingDataflowWorker.java    From beam with Apache License 2.0
private void getConfig(String computation) {
  BackOff backoff =
      FluentBackoff.DEFAULT
          .withInitialBackoff(Duration.millis(100))
          .withMaxBackoff(Duration.standardMinutes(1))
          .withMaxCumulativeBackoff(Duration.standardMinutes(5))
          .backoff();
  while (running.get()) {
    try {
      if (windmillServiceEnabled) {
        getConfigFromDataflowService(computation);
      } else {
        getConfigFromWindmill(computation);
      }
      return;
    } catch (IllegalArgumentException | IOException e) {
      LOG.warn("Error fetching config: ", e);
      try {
        if (!BackOffUtils.next(Sleeper.DEFAULT, backoff)) {
          return;
        }
      } catch (IOException ioe) {
        LOG.warn("Error backing off, will not retry: ", ioe);
        return;
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
 
Example #27
Source File: ClickHouseIO.java    From beam with Apache License 2.0
@Setup
public void setup() throws SQLException {
  connection = new ClickHouseDataSource(jdbcUrl(), properties()).getConnection();

  retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(maxRetries())
          .withMaxCumulativeBackoff(maxCumulativeBackoff())
          .withInitialBackoff(initialBackoff());
}
 
Example #28
Source File: GcsUtilTest.java    From beam with Apache License 2.0
@Test
public void testRetryFileSizeNonBatch() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Objects mockStorageObjects = Mockito.mock(Storage.Objects.class);
  Storage.Objects.Get mockStorageGet = Mockito.mock(Storage.Objects.Get.class);

  BackOff mockBackOff =
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.withMaxRetries(2).backoff());

  when(mockStorage.objects()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.get("testbucket", "testobject")).thenReturn(mockStorageGet);
  when(mockStorageGet.execute())
      .thenThrow(new SocketTimeoutException("SocketException"))
      .thenThrow(new SocketTimeoutException("SocketException"))
      .thenReturn(new StorageObject().setSize(BigInteger.valueOf(1000)));

  assertEquals(
      1000,
      gcsUtil
          .getObject(
              GcsPath.fromComponents("testbucket", "testobject"),
              mockBackOff,
              new FastNanoClockAndSleeper())
          .getSize()
          .longValue());
  assertEquals(BackOff.STOP, mockBackOff.nextBackOffMillis());
}
 
Example #29
Source File: GcsUtilTest.java    From beam with Apache License 2.0
@Test
public void testCreateBucketAccessErrors() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
  Storage.Buckets.Insert mockStorageInsert = Mockito.mock(Storage.Buckets.Insert.class);

  BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());
  GoogleJsonResponseException expectedException =
      googleJsonResponseException(
          HttpStatusCodes.STATUS_CODE_FORBIDDEN,
          "Waves hand mysteriously",
          "These aren't the buckets you're looking for");

  when(mockStorage.buckets()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.insert(any(String.class), any(Bucket.class)))
      .thenReturn(mockStorageInsert);
  when(mockStorageInsert.execute()).thenThrow(expectedException);

  thrown.expect(AccessDeniedException.class);

  gcsUtil.createBucket("a", new Bucket(), mockBackOff, new FastNanoClockAndSleeper());
}
 
Example #30
Source File: GcsUtilTest.java    From beam with Apache License 2.0 5 votes vote down vote up
@Test
public void testBucketDoesNotExistBecauseOfAccessError() throws IOException {
  GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
  GcsUtil gcsUtil = pipelineOptions.getGcsUtil();

  Storage mockStorage = Mockito.mock(Storage.class);
  gcsUtil.setStorageClient(mockStorage);

  Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
  Storage.Buckets.Get mockStorageGet = Mockito.mock(Storage.Buckets.Get.class);

  BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());
  GoogleJsonResponseException expectedException =
      googleJsonResponseException(
          HttpStatusCodes.STATUS_CODE_FORBIDDEN,
          "Waves hand mysteriously",
          "These aren't the buckets you're looking for");

  when(mockStorage.buckets()).thenReturn(mockStorageObjects);
  when(mockStorageObjects.get("testbucket")).thenReturn(mockStorageGet);
  when(mockStorageGet.execute()).thenThrow(expectedException);

  assertFalse(
      gcsUtil.bucketAccessible(
          GcsPath.fromComponents("testbucket", "testobject"),
          mockBackOff,
          new FastNanoClockAndSleeper()));
}