Python celery.chord() Examples
The following are 9 code examples of celery.chord().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module celery, or try the search function.
Example #1
Source File: tasks.py From Inboxen with GNU Affero General Public License v3.0 | 7 votes |
def delete_account(user_id):
    """Lock a user out and fan out deletion of all their inboxes.

    The account is disabled immediately (unusable password, inactive), then a
    chord of per-inbox ``disown_inbox`` tasks is dispatched with
    ``finish_delete_user`` as the callback.  If the user has no inboxes the
    finish task is dispatched directly.

    :param user_id: primary key of the user to delete
    """
    # first we need to make sure the user can't login
    user = get_user_model().objects.get(id=user_id)
    user.set_unusable_password()
    user.is_active = False
    user.save()

    # get ready to delete all inboxes
    inboxes = user.inbox_set.only('id')
    if inboxes:  # idiomatic truthiness instead of len(); still evaluates the queryset once
        # pull in all the data
        delete = chord([disown_inbox.s(inbox.id) for inbox in inboxes],
                       finish_delete_user.s(user_id))
        delete.apply_async()
    else:
        # no inboxes to disown — the leading None stands in for the chord
        # results that finish_delete_user would otherwise receive
        finish_delete_user.apply_async(args=[None, user_id])

    log.info("Deletion tasks for %s sent off", user.username)
Example #2
Source File: tasks.py From django-pushy with MIT License | 6 votes |
def create_push_notification_groups(notification):
    """Fan out a push notification to devices in batches via a chord.

    Devices matching the notification's filter are split into offset/limit
    batches (batch size from ``PUSHY_DEVICE_KEY_LIMIT``, default 1000); one
    ``send_push_notification_group`` task is queued per batch, with
    ``notify_push_notification_sent`` as the chord callback.  Afterwards the
    stored PushNotification row (if any) is marked as in progress.

    :param notification: dict-like payload; must expose an 'id' key
    """
    devices = get_filtered_devices_queryset(notification)

    date_started = timezone.now()
    # hoist the count: the original called devices.count() twice,
    # issuing two identical COUNT queries against the database
    count = devices.count()
    if count > 0:
        limit = getattr(settings, 'PUSHY_DEVICE_KEY_LIMIT', 1000)
        celery.chord(
            send_push_notification_group.s(notification, offset, limit)
            for offset in range(0, count, limit)
        )(notify_push_notification_sent.si(notification))

    # notifications without a stored id have no DB row to update
    if not notification['id']:
        return
    try:
        notification = PushNotification.objects.get(pk=notification['id'])
        notification.sent = PushNotification.PUSH_IN_PROGRESS
        notification.date_started = date_started
        notification.save()
    except PushNotification.DoesNotExist:
        # row vanished between dispatch and bookkeeping; nothing to record
        return
Example #3
Source File: util.py From SMART with MIT License | 5 votes |
def upload_data(form_data, project, queue=None, irr_queue=None, batch_size=30):
    """Perform data upload given validated form_data.

    1. Add data to database
    2. If new project then fill queue (only new project will pass queue object)
    3. Save the uploaded data file
    4. Create tf_idf file
    5. Check and Trigger model
    """
    new_df = add_data(project, form_data)
    if queue:
        fill_queue(queue=queue, irr_queue=irr_queue, orderby='random',
                   irr_percent=project.percentage_irr, batch_size=batch_size)

    # A user may upload labeled data, which lands in the current training set,
    # so the model must be re-checked and possibly retriggered.  Training needs
    # the tf-idf file first, so a chord guarantees the tf-idf creation task
    # completes before the check-and-trigger task runs.
    if len(new_df) > 0:
        save_data_file(new_df, project.pk)
        if project.classifier is not None:
            def _queue_model_refresh():
                # runs only once the surrounding transaction commits
                chord(
                    tasks.send_tfidf_creation_task.s(project.pk),
                    tasks.send_check_and_trigger_model_task.si(project.pk)
                ).apply_async()

            transaction.on_commit(_queue_model_refresh)
Example #4
Source File: mytaskflows.py From cumulus with Apache License 2.0 | 5 votes |
def task4(task, *args, **kwargs):
    """Pause briefly, then launch ten parallel task5 jobs closed by task6."""
    task.taskflow.logger.debug('task4')
    time.sleep(2)
    # chord header: ten task5 signatures run in parallel; task6 fires
    # once every one of them has completed
    chord([task5.s() for _ in range(10)])(task6.s())
Example #5
Source File: task.py From falsy with MIT License | 5 votes |
def loads(payload):
    """Build a Celery canvas (group, optionally a chord) from a payload dict.

    Each entry in ``payload['tasks']`` becomes a chain of loaded ops; all
    chains are wrapped in a group.  If ``payload['callback']`` is set, the
    group becomes the header of a chord whose body is the loaded callback.

    :param payload: dict with 'type', 'tasks', and optional 'callback' keys
    :raises Exception: when 'type' is not 'normal' (type kept for existing callers)
    """
    if payload.get('type') != 'normal':
        raise Exception('celery task loader only support normal mode')

    cts = []
    for task in payload.get('tasks', []):
        # only the first op of a chain receives the task args; later ops take
        # their input from the preceding op's result.
        # loop var renamed from `id`, which shadowed the builtin.
        ops = [
            load(task_id, task.get('args') if i == 0 else None, task.get('on_error'))
            for i, task_id in enumerate(task['ids'])
        ]
        cts.append(chain(ops))

    callback = payload.get('callback')
    if callback:
        return chord(header=group(cts), body=func.load(callback).s())
    return group(cts)
Example #6
Source File: worker.py From docassemble with MIT License | 5 votes |
def convert(obj):
    """Rehydrate a Celery result object from its tuple form, bound to workerapp."""
    result_tuple = obj.as_tuple()
    return result_from_tuple(result_tuple, app=workerapp)


# def async_ocr(*pargs, **kwargs):
#     sys.stderr.write("async_ocr started in worker\n")
#     if worker_controller is None:
#         initialize_db()
#     collector = ocr_finalize.s()
#     todo = list()
#     for item in worker_controller.ocr.ocr_page_tasks(*pargs, **kwargs):
#         todo.append(ocr_page.s(**item))
#     the_chord = chord(todo)(collector)
#     sys.stderr.write("async_ocr finished in worker\n")
#     return the_chord
Example #7
Source File: tasks.py From toptal-blog-celery-toy-ex with MIT License | 5 votes |
def produce_hot_repo_report(period, ref_date=None):
    """Fetch five pages of hot repos in parallel and build a report.

    :param period: period string passed to strf_date
    :param ref_date: optional reference date; defaults inside strf_date
    :return: result of build_report_task for the resolved date
    """
    # 1. parse date
    ref_date_str = strf_date(period, ref_date=ref_date)

    # 2. fetch and join — five pages of 100 repos each, fetched in parallel.
    # A comprehension replaces five copy-pasted signatures differing only in
    # the page number.
    fetch_jobs = group([
        fetch_hot_repos.s(ref_date_str, 100, page)
        for page in range(1, 6)
    ])

    # 3. group by language and
    # 4. create csv
    # NOTE(review): .get() blocks on the chord result; if this function itself
    # runs inside a Celery task that can deadlock the worker — confirm caller context.
    return chord(fetch_jobs)(build_report_task.s(ref_date_str)).get()
Example #8
Source File: tasks.py From figures with MIT License | 4 votes |
def experimental_populate_daily_metrics(date_for=None, force_update=False):
    '''Experimental task to populate daily metrics

    WARNING: In Ginkgo devstack, this task tends to gets stuck in the middle of
    processing course metrics. Not all the courses get processed and the site
    metrics doesn't get called. We're keeping it in the tasks so that we can
    continue to debug this. Enabling parallel course tasks will improve the
    pipeline performance

    :param date_for: optional date (or parseable value) to compute metrics for;
        defaults to today (UTC)
    :param force_update: passed through to the per-course and site metric tasks
    :return: the chord AsyncResult for the dispatched pipeline
    '''
    def include_course(course_overview, threshold=50):
        '''Skip over courses with many enrollments, speeding up testing.
        Do not use for production.
        '''
        count = CourseEnrollment.objects.filter(course_id=course_overview.id).count()
        # direct boolean expression replaces `False if count > threshold else True`
        return count <= threshold

    if date_for:
        date_for = as_date(date_for)
    else:
        date_for = datetime.datetime.utcnow().replace(tzinfo=utc).date()
    date_for = date_for.strftime("%Y-%m-%d")
    logger.info(
        'Starting task "figures.experimental_populate_daily_metrics" for date "{}"'.format(
            date_for))

    courses = CourseOverview.objects.all()
    cdm_tasks = [
        populate_single_cdm.s(
            course_id=unicode(course.id),  # noqa: F821
            date_for=date_for,
            force_update=force_update)
        for course in courses if include_course(course)
    ]
    # per-course metrics run in parallel; site metrics fire once all complete
    results = chord(cdm_tasks)(populate_site_daily_metrics.s(
        date_for=date_for, force_update=force_update))

    # TODO: Are we going to update the SDM for the day if
    # * course records were created, meaning there are data not added to the SDM
    # * the SDM record already exists
    # * force_update is not true
    logger.info(
        'Finished task "figures.experimental_populate_daily_metrics" for date "{}"'.format(
            date_for))

    return results


#
# Monthly Metrics
#
Example #9
Source File: tasks.py From Inboxen with GNU Affero General Public License v3.0 | 4 votes |
def liberate(user_id, options):
    """Get set for liberation, expects User object.

    Creates a private working directory and a maildir, records the eventual
    tarball name on the user's liberation status row, then dispatches either a
    chord of per-inbox export tasks (when the user has inboxes) or a plain
    chain that skips straight to info-gathering and tarballing.

    :param user_id: primary key of the user being exported
    :param options: mutable dict of export options; this function writes
        'user', 'path', 'tarname' and possibly 'noEmails' into it
    """
    options['user'] = user_id
    user = get_user_model().objects.get(id=user_id)
    lib_status = user.liberation

    tar_type = TAR_TYPES[options.get('compression_type', '0')]

    # random suffix keeps the working dir/tarball name unguessable and unique
    rstr = get_random_string(7, string.ascii_letters)
    username = user.username + rstr
    username = username.encode("utf-8")
    basename = "%s_%s_%s_%s" % (time.time(), os.getpid(), rstr,
                                hashlib.sha256(username).hexdigest()[:50])
    path = os.path.join(settings.SENDFILE_ROOT, basename)
    tarname = "%s.%s" % (basename, tar_type["ext"])

    # Is this safe enough?
    try:
        os.mkdir(path, 0o700)
    except (IOError, OSError) as error:
        log.info("Couldn't create dir at %s", path)
        raise liberate.retry(exc=error)

    try:
        lib_status.path = tarname
        lib_status.save()
    except IntegrityError:
        # couldn't record the export; remove the (still empty) dir and bail
        os.rmdir(path)
        raise

    options["path"] = path
    options["tarname"] = tarname

    mail_path = os.path.join(path, 'emails')
    # make maildir
    mailbox.Maildir(mail_path, factory=None)

    inbox_tasks = [liberate_inbox.s(mail_path, inbox.id) for inbox in
                   Inbox.objects.filter(user=user, deleted=False).only('id').iterator()]
    if inbox_tasks:  # idiomatic truthiness instead of len(...) > 0
        tasks = chord(
            inbox_tasks,
            liberate_collect_emails.s(mail_path, options)
        )
    else:
        # nothing to export per-inbox; seed the chain with an empty result set
        options["noEmails"] = True
        data = {"results": []}
        tasks = chain(
            liberate_fetch_info.s(data, options),
            liberate_tarball.s(options),
            liberation_finish.s(options)
        )

    async_result = tasks.apply_async()

    lib_status.async_result = async_result.id
    lib_status.save()