Java Code Examples for com.google.common.collect.Lists#partition()

The following examples show how to use com.google.common.collect.Lists#partition(). Each example is taken from a real open-source project; the source file, project, and license are noted above each snippet.
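
Before the project examples, here is a minimal, self-contained sketch of the method's behavior (the class and variable names below are illustrative, not taken from any of the projects that follow): Lists.partition() splits a source list into consecutive sublists of a given size, the final sublist may be shorter, and the returned sublists are views backed by the original list.

import com.google.common.collect.Lists;

import java.util.List;

public class PartitionSketch {

    public static void main(String[] args) {
        List<Integer> numbers = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7);

        // Split into consecutive chunks of 3; the last chunk holds the remainder.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);

        System.out.println(chunks);        // [[1, 2, 3], [4, 5, 6], [7]]
        System.out.println(chunks.size()); // 3

        // The sublists are views of the original list, so later changes show through
        // (Example 13 below demonstrates the same behavior in a unit test).
        numbers.add(8);
        System.out.println(chunks.get(2)); // [7, 8]
    }
}
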
Example 1
Source File: MysqlUserMetadataService.java    From metacat with Apache License 2.0
@Override
public void deleteDefinitionMetadata(
    @Nonnull final List<QualifiedName> names
) {
    try {
        final List<List<QualifiedName>> subLists =
            Lists.partition(names, config.getUserMetadataMaxInClauseItems());
        for (List<QualifiedName> subNames : subLists) {
            _deleteDefinitionMetadata(subNames);
        }
    } catch (Exception e) {
        final String message = String.format("Failed deleting the definition metadata for %s", names);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
}
 
Example 2
Source File: ElasticsearchQueryStore.java    From foxtrot with Apache License 2.0
private void deleteIndices(List<String> indicesToDelete) {
    logger.warn("Deleting Indexes - Indexes - {}", indicesToDelete);
    if (!indicesToDelete.isEmpty()) {
        List<List<String>> subLists = Lists.partition(indicesToDelete, 5);
        for (List<String> subList : subLists) {
            try {
                connection.getClient()
                        .admin()
                        .indices()
                        .prepareDelete(subList.toArray(new String[0]))
                        .execute()
                        .actionGet(TimeValue.timeValueMinutes(5));
                logger.warn("Deleted Indexes - Indexes - {}", subList);
            } catch (Exception e) {
                logger.error("Index deletion failed - Indexes - {}", subList, e);
            }
        }
    }
}
 
Example 3
Source File: ListDomainsAction.java    From nomulus with Apache License 2.0
@Override
public ImmutableSet<DomainBase> loadObjects() {
  checkArgument(!tlds.isEmpty(), "Must specify TLDs to query");
  assertTldsExist(tlds);
  DateTime now = clock.nowUtc();
  ImmutableList.Builder<DomainBase> domainsBuilder = new ImmutableList.Builder<>();
  for (List<String> tldsBatch : Lists.partition(tlds.asList(), maxNumSubqueries)) {
    domainsBuilder.addAll(
        ofy()
            .load()
            .type(DomainBase.class)
            .filter("tld in", tldsBatch)
            // Get the N most recently created domains (requires ordering in descending order).
            .order("-creationTime")
            .limit(limit)
            .list()
            .stream()
            .map(EppResourceUtils.transformAtTime(now))
            // Deleted entities must be filtered out post-query because queries don't allow
            // ordering with two filters.
            .filter(d -> d.getDeletionTime().isAfter(now))
            .collect(toImmutableList()));
  }
  // Combine the batches together by sorting all domains together with newest first, applying the
  // limit, and then reversing for display order.
  return ImmutableSet.copyOf(
      domainsBuilder
          .build()
          .stream()
          .sorted(comparing(EppResource::getCreationTime).reversed())
          .limit(limit)
          .collect(toImmutableList())
          .reverse());
}
 
Example 4
Source File: GaugesDao.java    From StatsAgg with Apache License 2.0
public boolean batchUpsert(List<Gauge> gauges) {
    
    if ((gauges == null) || gauges.isEmpty()) {
        return false;
    }

    try {
        if (DatabaseConfiguration.getType() == DatabaseConfiguration.MYSQL) {
            boolean wasAllUpsertSuccess = true;
            List<List<Gauge>> gaugesPartitions = Lists.partition(gauges, 1000);

            for (List<Gauge> gaugesPartition : gaugesPartitions) {
                List<Object> parameters = new ArrayList<>();

                for (Gauge gauge : gaugesPartition) {
                    parameters.add(gauge.getBucketSha1());
                    parameters.add(gauge.getBucket());
                    parameters.add(gauge.getMetricValue());
                    parameters.add(gauge.getLastModified());
                }

                boolean wasUpsertSuccess = genericDmlStatement(GaugesSql.generateBatchUpsert(gaugesPartition.size()), parameters);
                if (!wasUpsertSuccess) wasAllUpsertSuccess = false;
            }

            return wasAllUpsertSuccess;
        }
        else {
            return upsert(gauges, true);
        }
    }
    catch (Exception e) {
        logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e));
        return false;
    }
    
}
 
Example 5
Source File: Validator.java    From dhis2-core with BSD 3-Clause "New" or "Revised" License
/**
 * Evaluates validation rules for a collection of organisation units. This
 * method breaks the job down by organisation unit. It assigns the
 * evaluation for each organisation unit to a task that can be evaluated
 * independently in a multi-threaded environment.
 * <p/>
 * Return early with no results if there are no organisation units
 * or no validation rules.
 *
 * @return a collection of any validations that were found
 */
public static Collection<ValidationResult> validate( ValidationRunContext context,
    ApplicationContext applicationContext, AnalyticsService analyticsService )
{
    CategoryService categoryService = applicationContext.getBean( CategoryService.class );
            
    int threadPoolSize = getThreadPoolSize( context );

    if ( threadPoolSize == 0 || context.getPeriodTypeXs().isEmpty() )
    {
        return context.getValidationResults();
    }

    ExecutorService executor = Executors.newFixedThreadPool( threadPoolSize );

    List<List<OrganisationUnit>> orgUnitLists = Lists.partition( context.getOrgUnits(), ValidationRunContext.ORG_UNITS_PER_TASK );

    for ( List<OrganisationUnit> orgUnits : orgUnitLists )
    {
        ValidationTask task = (ValidationTask) applicationContext.getBean( DataValidationTask.NAME );
        task.init( orgUnits, context, analyticsService );

        executor.execute( task );
    }

    executor.shutdown();

    try
    {
        executor.awaitTermination( 6, TimeUnit.HOURS );
    }
    catch ( InterruptedException e )
    {
        executor.shutdownNow();
    }

    reloadAttributeOptionCombos( context.getValidationResults(), categoryService );

    return context.getValidationResults();
}
 
Example 6
Source File: AbstractEventService.java    From dhis2-core with BSD 3-Clause "New" or "Revised" License
@Transactional
@Override
public ImportSummaries addEvents( List<Event> events, ImportOptions importOptions, boolean clearSession )
{
    ImportSummaries importSummaries = new ImportSummaries();
    importOptions = updateImportOptions( importOptions );

    List<Event> validEvents = resolveImportableEvents( events, importSummaries );

    List<List<Event>> partitions = Lists.partition( validEvents, FLUSH_FREQUENCY );

    for ( List<Event> _events : partitions )
    {
        reloadUser( importOptions );
        prepareCaches( importOptions.getUser(), _events );

        for ( Event event : _events )
        {
            importSummaries.addImportSummary( addEvent( event, importOptions, true ) );
        }

        if ( clearSession && events.size() >= FLUSH_FREQUENCY )
        {
            clearSession( importOptions.getUser() );
        }
    }

    updateEntities( importOptions.getUser() );

    return importSummaries;
}
 
Example 7
Source File: MysqlUserMetadataService.java    From metacat with Apache License 2.0
@Override
public void deleteMetadata(final String userId, final List<HasMetadata> holders) {
    try {
        final List<List<HasMetadata>> subLists =
            Lists.partition(holders, config.getUserMetadataMaxInClauseItems());
        for (List<HasMetadata> hasMetadatas : subLists) {
            final List<QualifiedName> names = hasMetadatas.stream()
                .filter(m -> m instanceof HasDefinitionMetadata)
                .map(m -> ((HasDefinitionMetadata) m).getDefinitionName())
                .collect(Collectors.toList());
            if (!names.isEmpty()) {
                _deleteDefinitionMetadata(names);
            }
            if (config.canSoftDeleteDataMetadata()) {
                final List<String> uris = hasMetadatas.stream()
                    .filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
                    .map(m -> ((HasDataMetadata) m).getDataUri()).collect(Collectors.toList());
                if (!uris.isEmpty()) {
                    _softDeleteDataMetadata(userId, uris);
                }
            }
        }
    } catch (Exception e) {
        log.error("Failed deleting metadatas", e);
        throw new UserMetadataServiceException("Failed deleting metadatas", e);
    }
}
 
Example 8
Source File: ListsExample.java    From levelup-java-examples with Apache License 2.0
/**
 * Partition list
 */
@Test
public void partition_list() {

	List<String> myList = Lists.newArrayList("one", "two", "three");

	List<List<String>> myListBy1 = Lists.partition(myList, 1);

	assertThat(myListBy1.get(0), hasItems("one"));
	assertThat(myListBy1.get(1), hasItems("two"));
	assertThat(myListBy1.get(2), hasItems("three"));
}
 
Example 9
Source File: RunningExecutionPlanServiceImpl.java    From score with Apache License 2.0
@Override
@Transactional
public int deleteRunningExecutionPlans(Collection<String> executionIds) {
    int count = 0;
    List<List<String>> executionIdsPartitioned = Lists.partition(new ArrayList<>(executionIds), IN_CLAUSE_LIMIT);
    for (List<String> list : executionIdsPartitioned) {
        count += runningExecutionPlanRepository.deleteByExecutionIds(list);
        runningExecutionPlanRepository.flush();
    }
    return count;
}
 
Example 10
Source File: RedisPermissionsRepository.java    From fiat with Apache License 2.0
private Table<String, ResourceType, Response<Map<String, String>>> getAllFromRedis(
    Set<String> userIds) {
  if (userIds.size() == 0) {
    return HashBasedTable.create();
  }

  try {
    final Table<String, ResourceType, Response<Map<String, String>>> responseTable =
        ArrayTable.create(
            userIds,
            new ArrayIterator<>(
                resources.stream().map(Resource::getResourceType).toArray(ResourceType[]::new)));
    for (List<String> userIdSubset : Lists.partition(new ArrayList<>(userIds), 10)) {
      redisClientDelegate.withMultiKeyPipeline(
          p -> {
            for (String userId : userIdSubset) {
              resources.stream()
                  .map(Resource::getResourceType)
                  .forEach(
                      r -> {
                        responseTable.put(userId, r, p.hgetAll(userKey(userId, r)));
                      });
            }
            p.sync();
          });
    }
    return responseTable;
  } catch (Exception e) {
    log.error("Storage exception reading all entries.", e);
  }
  return null;
}
 
Example 11
Source File: CollectionGuavaPartitionUnitTest.java    From tutorials with MIT License
@Test
public final void givenList_whenParitioningIntoNSublists_thenCorrect() {
    final List<Integer> intList = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8);

    final List<List<Integer>> subSets = Lists.partition(intList, 3);

    // When
    final List<Integer> lastPartition = subSets.get(2);
    final List<Integer> expectedLastPartition = Lists.<Integer> newArrayList(7, 8);
    assertThat(subSets.size(), equalTo(3));
    assertThat(lastPartition, equalTo(expectedLastPartition));
}
 
Example 12
Source File: SalesforceSource.java    From incubator-gobblin with Apache License 2.0
/**
 *  Create work units from a bulkJobId.
 *  The work units won't contain a query in this case. Instead, they will contain a bulkJobId and a list of `batchId:resultId`
 *  pairs, so all the extractor has to do is fetch the result-set files.
 */
private List<WorkUnit> createWorkUnits(
    SourceEntity sourceEntity,
    SourceState state,
    SalesforceExtractor.ResultFileIdsStruct resultFileIdsStruct
) {
  String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
  Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
  String outputTableName = sourceEntity.getDestTableName();
  Extract extract = createExtract(tableType, nameSpaceName, outputTableName);

  List<WorkUnit> workUnits = Lists.newArrayList();
  int partitionNumber = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
  List<BatchIdAndResultId> batchResultIds = resultFileIdsStruct.getBatchIdAndResultIdList();
  int total = batchResultIds.size();

  // Size of each partition should be ceil(total / partitionNumber), computed here as (total + partitionNumber - 1) / partitionNumber.
  int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber;
  List<List<BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, sizeOfPartition);
  log.info("----partition strategy: max-parti={}, size={}, actual-parti={}, total={}", partitionNumber, sizeOfPartition, partitionedResultIds.size(), total);

  for (List<BatchIdAndResultId> resultIds : partitionedResultIds) {
    WorkUnit workunit = new WorkUnit(extract);
    String bulkJobId = resultFileIdsStruct.getJobId();
    workunit.setProp(PK_CHUNKING_JOB_ID, bulkJobId);
    String resultIdStr = resultIds.stream().map(x -> x.getBatchId() + ":" + x.getResultId()).collect(Collectors.joining(","));
    workunit.setProp(PK_CHUNKING_BATCH_RESULT_ID_PAIRS, resultIdStr);
    workunit.setProp(ConfigurationKeys.SOURCE_ENTITY, sourceEntity.getSourceEntityName());
    workunit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, sourceEntity.getDestTableName());
    workunit.setProp(WORK_UNIT_STATE_VERSION_KEY, CURRENT_WORK_UNIT_STATE_VERSION);
    addLineageSourceInfo(state, sourceEntity, workunit);
    workUnits.add(workunit);
  }
  return workUnits;
}
 
Example 13
Source File: CollectionGuavaPartitionUnitTest.java    From tutorials with MIT License
@Test
public final void givenListPartitioned_whenOriginalListIsModified_thenPartitionsChangeAsWell() {
    // Given
    final List<Integer> intList = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8);
    final List<List<Integer>> subSets = Lists.partition(intList, 3);

    // When
    intList.add(9);
    final List<Integer> lastPartition = subSets.get(2);
    final List<Integer> expectedLastPartition = Lists.<Integer> newArrayList(7, 8, 9);
    assertThat(lastPartition, equalTo(expectedLastPartition));
}
 
Example 14
Source File: JpaControllerManagement.java    From hawkbit with Eclipse Public License 1.0
private Void updateLastTargetQueries(final String tenant, final List<TargetPoll> polls) {
    LOG.debug("Persist {} targetqueries.", polls.size());

    final List<List<String>> pollChunks = Lists.partition(
            polls.stream().map(TargetPoll::getControllerId).collect(Collectors.toList()),
            Constants.MAX_ENTRIES_IN_STATEMENT);

    pollChunks.forEach(chunk -> {
        setLastTargetQuery(tenant, System.currentTimeMillis(), chunk);
        chunk.forEach(controllerId -> afterCommit.afterCommit(() -> eventPublisherHolder.getEventPublisher()
                .publishEvent(new TargetPollEvent(controllerId, tenant, eventPublisherHolder.getApplicationId()))));
    });

    return null;
}
 
Example 15
Source File: SmsMessageSender.java    From dhis2-core with BSD 3-Clause "New" or "Revised" License
private OutboundMessageResponse sendMessage( String subject, String text, Set<String> recipients,
    SmsGatewayConfig gatewayConfig )
{
    OutboundMessageResponse status = null;

    for ( SmsGateway smsGateway : smsGateways )
    {
        if ( smsGateway.accept( gatewayConfig ) )
        {
            List<String> temp = new ArrayList<>( recipients );

            List<List<String>> slices = Lists.partition( temp, MAX_RECIPIENTS_ALLOWED );

            for ( List<String> to: slices )
            {
                log.info( "Sending SMS to " + to );

                status = smsGateway.send( subject, text, new HashSet<>( to ), gatewayConfig );

                handleResponse( status );
            }

            return status;
        }
    }

    return new OutboundMessageResponse( NO_CONFIG, GatewayResponse.NO_GATEWAY_CONFIGURATION, false );
}
 
Example 16
Source File: PartitionList.java    From levelup-java-examples with Apache License 2.0
@Test
public void partition_list_guava() {

	List<List<String>> decisionsBy2 = Lists.partition(playerDecisions, 2);

	logger.info(decisionsBy2);

	assertThat(decisionsBy2.get(0), hasItems("Hit", "Stand"));
	assertThat(decisionsBy2.get(1), hasItems("Double down", "Split"));
	assertThat(decisionsBy2.get(2), hasItems("Surrender"));
}
 
Example 17
Source File: GetAllocationTokenCommand.java    From nomulus with Apache License 2.0
@Override
public void run() {
  ImmutableMap.Builder<String, AllocationToken> builder = new ImmutableMap.Builder<>();
  for (List<String> tokens : Lists.partition(mainParameters, BATCH_SIZE)) {
    ImmutableList<Key<AllocationToken>> tokenKeys =
        tokens.stream().map(t -> Key.create(AllocationToken.class, t)).collect(toImmutableList());
    ofy().load().keys(tokenKeys).forEach((k, v) -> builder.put(k.getName(), v));
  }
  ImmutableMap<String, AllocationToken> loadedTokens = builder.build();
  ImmutableMap<Key<DomainBase>, DomainBase> domains = loadRedeemedDomains(loadedTokens.values());

  for (String token : mainParameters) {
    if (loadedTokens.containsKey(token)) {
      AllocationToken loadedToken = loadedTokens.get(token);
      System.out.println(loadedToken.toString());
      if (loadedToken.getRedemptionHistoryEntry() == null) {
        System.out.printf("Token %s was not redeemed.\n", token);
      } else {
        DomainBase domain =
            domains.get(loadedToken.getRedemptionHistoryEntry().<DomainBase>getParent());
        if (domain == null) {
          System.out.printf("ERROR: Token %s was redeemed but domain can't be loaded.\n", token);
        } else {
          System.out.printf(
              "Token %s was redeemed to create domain %s at %s.\n",
              token, domain.getDomainName(), domain.getCreationTime());
        }
      }
    } else {
      System.out.printf("ERROR: Token %s does not exist.\n", token);
    }
    System.out.println();
  }
}
 
Example 18
Source File: AudioMessageManager.java    From JuniperBot with GNU General Public License v3.0
public void onQueue(PlaybackInstance instance, MessageChannel sourceChannel, BotContext context, int pageNum) {
    List<TrackRequest> requests = instance.getQueue();
    if (requests.isEmpty()) {
        onEmptyQueue(sourceChannel);
        return;
    }

    final int pageSize = 25;
    List<List<TrackRequest>> parts = Lists.partition(requests, pageSize);
    final int totalPages = parts.size();
    final int offset = (pageNum - 1) * pageSize + 1 + instance.getCursor();

    final long totalDuration = requests.stream()
            .filter(Objects::nonNull)
            .map(TrackRequest::getTrack)
            .filter(Objects::nonNull)
            .mapToLong(AudioTrack::getDuration).sum();

    if (pageNum > totalPages) {
        onQueueError(sourceChannel, "discord.command.audio.queue.list.totalPages", parts.size());
        return;
    }
    List<TrackRequest> pageRequests = parts.get(pageNum - 1);

    EmbedBuilder builder = getQueueMessage();
    if (instance.getCursor() > 0) {
        builder.setDescription(messageService.getMessage("discord.command.audio.queue.list.playlist.played",
                instance.getCursor(), commonProperties.getBranding().getWebsiteUrl(), instance.getPlaylistUuid()));
    } else {
        builder.setDescription(messageService.getMessage("discord.command.audio.queue.list.playlist",
                commonProperties.getBranding().getWebsiteUrl(), instance.getPlaylistUuid()));
    }

    addQueue(builder, instance, pageRequests, offset, false);

    String queueCommand = messageService.getMessageByLocale("discord.command.queue.key", context.getCommandLocale());

    builder.setFooter(totalPages > 1
            ? messageService.getMessage("discord.command.audio.queue.list.pageFooter",
            pageNum, totalPages, requests.size(), CommonUtils.formatDuration(totalDuration),
            context.getConfig().getPrefix(), queueCommand)
            : messageService.getMessage("discord.command.audio.queue.list.footer",
            requests.size(), CommonUtils.formatDuration(totalDuration)), null);
    messageService.sendMessageSilent(sourceChannel::sendMessage, builder.build());
}
 
Example 19
Source File: ClassifierTrainer.java    From EasySRL with Apache License 2.0
@Override
public IterationResult getValues(final double[] featureWeights) {
	final Collection<Callable<IterationResult>> tasks = new ArrayList<>();

	int totalCorrect = 0;
	final int batchSize = trainingData.size() / numThreads;
	for (final List<CachedTrainingExample<L>> batch : Lists.partition(trainingData, batchSize)) {
		tasks.add(new Callable<IterationResult>() {
			@Override
			public IterationResult call() throws Exception {
				return lossFunction.getValues(featureWeights, batch);
			}
		});
	}

	final ExecutorService executor = Executors.newFixedThreadPool(numThreads);
	List<Future<IterationResult>> results;
	try {
		results = executor.invokeAll(tasks);

		// FunctionValues total = new FunctionValues(0.0, new
		// double[featureWeights.length]);

		final double[] totalGradient = new double[featureWeights.length];
		double totalLoss = 0.0;

		for (final Future<IterationResult> result : results) {
			final IterationResult values = result.get();
			totalLoss += values.functionValue;
			Util.add(totalGradient, values.gradient);
			totalCorrect += values.correctPredictions;
		}
		executor.shutdown(); // always reclaim resources

		System.out.println();

		System.out.println("Training accuracy=" + Util.twoDP(100.0 * totalCorrect / trainingData.size()));
		System.out.println("Loss=" + Util.twoDP(totalLoss));
		return new IterationResult(totalCorrect, totalLoss, totalGradient);

	} catch (InterruptedException | ExecutionException e) {
		throw new RuntimeException(e);
	}
}