Python google.appengine.api.taskqueue.TaskAlreadyExistsError() Examples

The following code examples show how to use google.appengine.api.taskqueue.TaskAlreadyExistsError(). They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down the ones you don't.

Example 1
Project: mlab-ns   Author: m-lab   File: handlers.py    Apache License 2.0 6 votes vote down vote up
def schedule(cls, base_path, mapreduce_spec):
    """Schedule the finalize task for a completed mapreduce job.

    Args:
      base_path: mapreduce handlers url base path as string.
      mapreduce_spec: mapreduce specification as MapreduceSpec.
    """
    task_name = mapreduce_spec.mapreduce_id + "-finalize"
    finalize_task = taskqueue.Task(
        name=task_name,
        url=base_path + "/finalizejob_callback",
        params={"mapreduce_id": mapreduce_spec.mapreduce_id})
    # Run on the queue that dispatched the current request, if any.
    queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          finalize_task,
                          queue_name):
      try:
        finalize_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named tasks are deduplicated by the taskqueue service, so this
        # means finalization is already scheduled; log and continue.
        logging.warning("Task %r already exists. %s: %s",
                        task_name, e.__class__, e)
Example 2
Project: luci-py   Author: luci   File: handlers.py    Apache License 2.0 6 votes vote down vote up
def schedule(cls, mapreduce_spec):
    """Schedule the finalize task for a completed mapreduce job.

    Args:
      mapreduce_spec: mapreduce specification as MapreduceSpec.
    """
    task_name = mapreduce_spec.mapreduce_id + "-finalize"
    finalize_task = taskqueue.Task(
        name=task_name,
        url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
             mapreduce_spec.mapreduce_id),
        params={"mapreduce_id": mapreduce_spec.mapreduce_id},
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
    queue_name = util.get_queue_name(None)
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          finalize_task,
                          queue_name):
      try:
        finalize_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named tasks are deduplicated by the taskqueue service, so this
        # means finalization is already scheduled; log and continue.
        logging.warning("Task %r already exists. %s: %s",
                        task_name, e.__class__, e)
Example 3
Project: luci-py   Author: luci   File: handlers.py    Apache License 2.0 6 votes vote down vote up
def schedule(cls, mapreduce_spec):
    """Schedule the finalize task for a completed mapreduce job.

    Args:
      mapreduce_spec: mapreduce specification as MapreduceSpec.
    """
    task_name = mapreduce_spec.mapreduce_id + "-finalize"
    finalize_task = taskqueue.Task(
        name=task_name,
        url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
             mapreduce_spec.mapreduce_id),
        params={"mapreduce_id": mapreduce_spec.mapreduce_id},
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
    queue_name = util.get_queue_name(None)
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          finalize_task,
                          queue_name):
      try:
        finalize_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named tasks are deduplicated by the taskqueue service, so this
        # means finalization is already scheduled; log and continue.
        logging.warning("Task %r already exists. %s: %s",
                        task_name, e.__class__, e)
Example 4
Project: python-compat-runtime   Author: GoogleCloudPlatform   File: handlers.py    Apache License 2.0 6 votes vote down vote up
def schedule(cls, mapreduce_spec):
    """Schedule the finalize task for a completed mapreduce job.

    Args:
      mapreduce_spec: mapreduce specification as MapreduceSpec.
    """
    task_name = mapreduce_spec.mapreduce_id + "-finalize"
    finalize_task = taskqueue.Task(
        name=task_name,
        url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
             mapreduce_spec.mapreduce_id),
        params={"mapreduce_id": mapreduce_spec.mapreduce_id},
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
    queue_name = util.get_queue_name(None)
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          finalize_task,
                          queue_name,
                          transactional=False):
      try:
        finalize_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named tasks are deduplicated by the taskqueue service, so this
        # means finalization is already scheduled; log and continue.
        logging.warning("Task %r already exists. %s: %s",
                        task_name, e.__class__, e)
Example 5
Project: mlab-ns   Author: m-lab   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def reschedule(cls,
                 mapreduce_state,
                 base_path,
                 mapreduce_spec,
                 serial_id,
                 queue_name=None):
    """Schedule new update status callback task.

    Args:
      mapreduce_state: mapreduce state as model.MapreduceState
      base_path: mapreduce handlers url base path as string.
      mapreduce_spec: mapreduce specification as MapreduceSpec.
      serial_id: id of the invocation as int.
      queue_name: The queue to schedule this task on. Will use the current
        queue of execution if not supplied.
    """
    task_name = ControllerCallbackHandler.get_task_name(
        mapreduce_spec, serial_id)
    task_params = ControllerCallbackHandler.controller_parameters(
        mapreduce_spec, serial_id)
    # Default to the queue that dispatched the current request, if any.
    if not queue_name:
      queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")

    controller_callback_task = util.HugeTask(
        url=base_path + "/controller_callback",
        name=task_name, params=task_params,
        countdown=_CONTROLLER_PERIOD_SEC)

    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          controller_callback_task,
                          queue_name):
      try:
        controller_callback_task.add(queue_name, parent=mapreduce_state)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # A task with this name was already enqueued; the next controller
        # callback is in flight, so just log and continue.
        logging.warning("Task %r with params %r already exists. %s: %s",
                        task_name, task_params, e.__class__, e)
Example 6
Project: mlab-ns   Author: m-lab   File: common.py    Apache License 2.0 5 votes vote down vote up
def run(self, seconds=None):
    """Enqueue this pipeline's delayed callback task.

    Duplicate-name errors are ignored: the delay task is already in flight.
    """
    delay_task = self.get_callback_task(
        name='ae-pipeline-delay-' + self.pipeline_id,
        countdown=seconds)
    try:
      delay_task.add(self.queue_name)
    except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
      # Named task already enqueued (or recently executed) -- nothing to do.
      pass
Example 7
Project: mlab-ns   Author: m-lab   File: pipeline.py    Apache License 2.0 5 votes vote down vote up
def post(self):
    """Handle a fan-out task: enqueue one run task per child pipeline."""
    # Only App Engine's task queue service may invoke this handler.
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
      self.response.set_status(403)
      return

    context = _PipelineContext.from_environ(self.request.environ)

    # Stringified db.Keys of the children to run.
    all_pipeline_keys = set()

    # Old-style fan-out requests carry the child keys in the payload.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # New-style requests reference children by index on the parent entity,
    # working around the 10KB task payload limit.  The get() is
    # consistent-on-read, and the fan-out task was enqueued in the same
    # transaction that updated the parent, so fanned_out is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
      parent = db.get(db.Key(parent_key))
      for index in child_indexes:
        all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = [
        taskqueue.Task(
            url=context.pipeline_handler_path,
            params=dict(pipeline_key=pipeline_key),
            headers={'X-Ae-Pipeline-Key': pipeline_key},
            name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name())
        for pipeline_key in all_pipeline_keys]

    # The taskqueue bulk-add API accepts at most 100 tasks per call.
    batch_size = 100
    for start in xrange(0, len(all_tasks), batch_size):
      try:
        taskqueue.Queue(context.queue_name).add(
            all_tasks[start:start + batch_size])
      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        # Duplicate named tasks mean the children are already scheduled.
        pass
Example 8
Project: TimeSync   Author: DanForever   File: base.py    Apache License 2.0 5 votes vote down vote up
def UpdateSubscription( self, sub ):
    """Enqueue a POST task that refreshes one subscriber's agenda.

    Returns a (status code, response dict) pair: too_many_requests when an
    equivalent task was created within the last minute, ok otherwise.
    """
    pebbleToken = sub.key().name()
    logging.debug( "UpdateSubscription(): " + str( pebbleToken ) )

    # Grab the URL for updating the user's agenda.
    url = self.GetUrl(
        "mainhandler",
        {
            'handler': "tvshowtime",
            'branch': "agenda",
            'action': "update"
        },
        full=False,
        scheme=None )

    # A minute-resolution timestamp makes the task name vaguely unique,
    # assuming the task should execute at most once per minute.
    nameDate = datetime.utcnow().strftime( "Date%Y-%m-%dT%H-%M-" )

    config = {
        'name': "TVST-Sub-" + nameDate + str( pebbleToken ),
        'url': url,
        'method': "POST",
        'headers': {
            'X-User-Token': pebbleToken
        }
    }
    logging.debug( "Creating Task: " + str( config ) )

    try:
        taskqueue.add( **config )
    except ( taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError ) as e:
        # Name collision with an existing/recent task: rate-limited.
        logging.warning( "Couldn't create task due to name conflict with previously created task: " + str( e ) )
        return ( requests.codes.too_many_requests, { 'status' : "Too soon" } )
    return ( requests.codes.ok, { 'status' : "success" } )
Example 9
Project: luci-py   Author: luci   File: common.py    Apache License 2.0 5 votes vote down vote up
def run(self, seconds=None):
    """Enqueue this pipeline's delayed callback task.

    Duplicate-name errors are ignored: the delay task is already in flight.
    """
    delay_task = self.get_callback_task(
        name='ae-pipeline-delay-' + self.pipeline_id,
        countdown=seconds)
    try:
      delay_task.add(self.queue_name)
    except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
      # Named task already enqueued (or recently executed) -- nothing to do.
      pass
Example 10
Project: luci-py   Author: luci   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def _add_task(cls,
                worker_task,
                mapreduce_spec,
                queue_name):
    """Schedule slice scanning by adding it to the task queue.

    Args:
      worker_task: a model.HugeTask task for slice. This is NOT a taskqueue
        task.
      mapreduce_spec: an instance of model.MapreduceSpec.
      queue_name: Optional queue to run on; uses the current queue of
        execution or the default queue if unspecified.
    """
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_worker_task",
                          worker_task,
                          queue_name):
      try:
        # Not adding transactionally because worker_task has name.
        # Named task is not allowed for transactional add.
        worker_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named-task dedup: this slice is already scheduled, so log only.
        logging.warning("Task %r already exists. %s: %s",
                        worker_task.name,
                        e.__class__,
                        e)
Example 11
Project: luci-py   Author: luci   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def reschedule(cls,
                 mapreduce_state,
                 mapreduce_spec,
                 serial_id,
                 queue_name=None):
    """Schedule new update status callback task.

    Args:
      mapreduce_state: mapreduce state as model.MapreduceState
      mapreduce_spec: mapreduce specification as MapreduceSpec.
      serial_id: id of the invocation as int.
      queue_name: The queue to schedule this task on. Will use the current
        queue of execution if not supplied.
    """
    task_name = ControllerCallbackHandler.get_task_name(
        mapreduce_spec, serial_id)
    task_params = ControllerCallbackHandler.controller_parameters(
        mapreduce_spec, serial_id)
    # Default to the queue that dispatched the current request, if any.
    if not queue_name:
      queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")

    controller_callback_task = model.HugeTask(
        url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
             mapreduce_spec.mapreduce_id),
        name=task_name, params=task_params,
        countdown=parameters.config._CONTROLLER_PERIOD_SEC,
        parent=mapreduce_state,
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))

    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          controller_callback_task,
                          queue_name):
      try:
        controller_callback_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # A task with this name was already enqueued; the next controller
        # callback is in flight, so just log and continue.
        logging.warning("Task %r with params %r already exists. %s: %s",
                        task_name, task_params, e.__class__, e)
Example 12
Project: luci-py   Author: luci   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def _add_task(cls,
                worker_task,
                mapreduce_spec,
                queue_name):
    """Schedule slice scanning by adding it to the task queue.

    Args:
      worker_task: a model.HugeTask task for slice. This is NOT a taskqueue
        task.
      mapreduce_spec: an instance of model.MapreduceSpec.
      queue_name: Optional queue to run on; uses the current queue of
        execution or the default queue if unspecified.
    """
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_worker_task",
                          worker_task,
                          queue_name):
      try:
        # Not adding transactionally because worker_task has name.
        # Named task is not allowed for transactional add.
        worker_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named-task dedup: this slice is already scheduled, so log only.
        logging.warning("Task %r already exists. %s: %s",
                        worker_task.name,
                        e.__class__,
                        e)
Example 13
Project: luci-py   Author: luci   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def reschedule(cls,
                 mapreduce_state,
                 mapreduce_spec,
                 serial_id,
                 queue_name=None):
    """Schedule new update status callback task.

    Args:
      mapreduce_state: mapreduce state as model.MapreduceState
      mapreduce_spec: mapreduce specification as MapreduceSpec.
      serial_id: id of the invocation as int.
      queue_name: The queue to schedule this task on. Will use the current
        queue of execution if not supplied.
    """
    task_name = ControllerCallbackHandler.get_task_name(
        mapreduce_spec, serial_id)
    task_params = ControllerCallbackHandler.controller_parameters(
        mapreduce_spec, serial_id)
    # Default to the queue that dispatched the current request, if any.
    if not queue_name:
      queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")

    controller_callback_task = model.HugeTask(
        url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
             mapreduce_spec.mapreduce_id),
        name=task_name, params=task_params,
        countdown=parameters.config._CONTROLLER_PERIOD_SEC,
        parent=mapreduce_state,
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))

    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          controller_callback_task,
                          queue_name):
      try:
        controller_callback_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # A task with this name was already enqueued; the next controller
        # callback is in flight, so just log and continue.
        logging.warning("Task %r with params %r already exists. %s: %s",
                        task_name, task_params, e.__class__, e)
Example 14
Project: python-compat-runtime   Author: GoogleCloudPlatform   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def _add_task(cls,
                worker_task,
                mapreduce_spec,
                queue_name,
                transactional=False):
    """Schedule slice scanning by adding it to the task queue.

    Args:
      worker_task: a model.HugeTask task for slice. This is NOT a taskqueue
        task.
      mapreduce_spec: an instance of model.MapreduceSpec.
      queue_name: Optional queue to run on; uses the current queue of
        execution or the default queue if unspecified.
      transactional: If the task should be part of an existing transaction.
    """
    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_worker_task",
                          worker_task,
                          queue_name,
                          transactional=transactional):
      try:
        worker_task.add(queue_name, transactional=transactional)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named-task dedup: this slice is already scheduled, so log only.
        logging.warning("Task %r already exists. %s: %s",
                        worker_task.name,
                        e.__class__,
                        e)
Example 15
Project: python-compat-runtime   Author: GoogleCloudPlatform   File: handlers.py    Apache License 2.0 5 votes vote down vote up
def reschedule(cls,
                 mapreduce_state,
                 mapreduce_spec,
                 serial_id,
                 queue_name=None):
    """Schedule new update status callback task.

    Args:
      mapreduce_state: mapreduce state as model.MapreduceState
      mapreduce_spec: mapreduce specification as MapreduceSpec.
      serial_id: id of the invocation as int.
      queue_name: The queue to schedule this task on. Will use the current
        queue of execution if not supplied.
    """

    task_name = ControllerCallbackHandler.get_task_name(
        mapreduce_spec, serial_id)
    task_params = ControllerCallbackHandler.controller_parameters(
        mapreduce_spec, serial_id)
    # Default to the queue that dispatched the current request, if any.
    if not queue_name:
      queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")

    controller_callback_task = model.HugeTask(
        url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
             mapreduce_spec.mapreduce_id),
        name=task_name, params=task_params,
        countdown=parameters.config._CONTROLLER_PERIOD_SEC,
        parent=mapreduce_state,
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))

    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_controller_task",
                          controller_callback_task,
                          queue_name,
                          transactional=False):
      try:
        controller_callback_task.add(queue_name)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # A task with this name was already enqueued; the next controller
        # callback is in flight, so just log and continue.
        logging.warning("Task %r with params %r already exists. %s: %s",
                        task_name, task_params, e.__class__, e)
Example 16
Project: mlab-ns   Author: m-lab   File: handlers.py    Apache License 2.0 4 votes vote down vote up
def _schedule_slice(cls,
                      shard_state,
                      transient_shard_state,
                      queue_name=None,
                      eta=None,
                      countdown=None):
    """Schedule slice scanning by adding it to the task queue.

    Args:
      shard_state: An instance of ShardState.
      transient_shard_state: An instance of TransientShardState.
      queue_name: Optional queue to run on; uses the current queue of
        execution or the default queue if unspecified.
      eta: Absolute time when the MR should execute. May not be specified
        if 'countdown' is also supplied. This may be timezone-aware or
        timezone-naive.
      countdown: Time in seconds into the future that this MR should execute.
        Defaults to zero.
    """
    base_path = transient_shard_state.base_path
    mapreduce_spec = transient_shard_state.mapreduce_spec

    task_name = MapperWorkerCallbackHandler.get_task_name(
        transient_shard_state.shard_id,
        transient_shard_state.slice_id)
    # Default to the queue that dispatched the current request, if any.
    queue_name = queue_name or os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                                              "default")

    worker_task = util.HugeTask(url=base_path + "/worker_callback",
                                params=transient_shard_state.to_dict(),
                                name=task_name,
                                eta=eta,
                                countdown=countdown)

    if not _run_task_hook(mapreduce_spec.get_hooks(),
                          "enqueue_worker_task",
                          worker_task,
                          queue_name):
      try:
        worker_task.add(queue_name, parent=shard_state)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError) as e:
        # Named-task dedup: this slice is already scheduled, so log only.
        logging.warning("Task %r with params %r already exists. %s: %s",
                        task_name,
                        transient_shard_state.to_dict(),
                        e.__class__,
                        e)
Example 17
Project: naziscore   Author: rbanffy   File: scoring.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def get_score_by_screen_name(screen_name, depth):
    """Fetch the Score entity for a screen name, scheduling a refresh if stale.

    NOTE(review): yields async datastore/taskqueue ops and finishes with
    ndb.Return, so this is presumably decorated with @ndb.tasklet at the
    definition site -- confirm against the original module.

    Args:
        screen_name: Twitter screen name to look up.
        depth: crawl depth; 0 appears to mean a direct user request (it
            selects the 'scoring-direct' queue and the profile-pic task).

    Returns (via ndb.Return):
        The Score entity found in the datastore, or None when over quota.
        Note the returned score may be stale; the refresh happens async.
    """
    # Gets the most recently updated copy, if duplicated.  Names starting
    # with '__' are prefixed with '.' -- presumably because datastore key
    # names may not start and end with double underscores; confirm.
    key_name = (
        '.' + screen_name if screen_name.startswith('__') else screen_name)
    try:
        score = yield ndb.Key(Score, key_name).get_async()
    except OverQuotaError:
        logging.critical('We are over quota.')
        raise ndb.Return(None)
    if score is None or (
            score.last_updated < datetime.datetime.now()
            - datetime.timedelta(days=MAX_AGE_DAYS)):
            # If we don't have one, or if we have one that's too old, we need
            # to calculate one.
        if score is not None:
            logging.info('Refreshing {}'.format(screen_name))
        else:
            logging.info('Fetching {} for the first time'.format(screen_name))
        # Task name includes the deploy version so tombstoning does not
        # carry across deploys (see the TombstonedTaskError handler below).
        task_name = '{}_{}'.format(
                screen_name,
                os.environ['CURRENT_VERSION_ID'].split('.')[0])
        queue_name = 'scoring-direct' if depth == 0 else 'scoring-indirect'
        try:
            _ = yield taskqueue.Task(
                name=task_name,
                params={
                    'screen_name': screen_name,
                    'depth': depth
                }).add_async(queue_name)

            # If this is a direct query, schedule an analysis of the profile
            # picture.
            if depth == 0:
                _ = yield taskqueue.Task(
                    name=task_name,
                    params={
                        'screen_name': screen_name,
                    }).add_async('profile-pic')

            # If we add it to the scoring-direct queue, we should remove
            # the corresponding task from the scoring-indirect queue at this
            # point.
            if queue_name == 'scoring-direct':
                delete_from_scoring_indirect(task_name)

        except taskqueue.TaskAlreadyExistsError:
            # We already are going to check this person. There is nothing
            # to do here.
            logging.warning(
                'Fetch for {} already scheduled on queue {}'.format(
                    task_name, queue_name))

        except taskqueue.TombstonedTaskError:
            # This task is too recent. We shouldn't try again so
            # soon. Thombstoning won't happen across different deploys, as the
            # task name has the deploy timestamp on it.
            logging.warning('Fetch for {} tombstoned'.format(task_name))
    else:
        logging.info('No need to refresh {}'.format(screen_name))

    raise ndb.Return(score)
Example 18
Project: naziscore   Author: rbanffy   File: scoring.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def get_score_by_twitter_id(twitter_id, depth):
    """Fetch the Score entity for a Twitter id, scheduling a refresh if stale.

    NOTE(review): yields async datastore/taskqueue ops and finishes with
    ndb.Return, so this is presumably decorated with @ndb.tasklet at the
    definition site -- confirm against the original module.

    Args:
        twitter_id: numeric Twitter account id to look up.
        depth: crawl depth; 0 appears to mean a direct user request (it
            selects the 'scoring-direct' queue and the profile-pic task).

    Returns (via ndb.Return):
        The Score entity found in the datastore, or None when over quota.
        Note the returned score may be stale; the refresh happens async.
    """
    try:
        score = yield Score.query(Score.twitter_id == twitter_id).get_async()
    except OverQuotaError:
        logging.critical(
            'Over quota fetching {}'.format(twitter_id))
        raise ndb.Return(None)
    if score is None or (
            score.last_updated < datetime.datetime.now()
            - datetime.timedelta(days=MAX_AGE_DAYS)):
            # If we don't have one, or if we have one that's too old, we need
            # to calculate one.
        # Task name includes the deploy version so tombstoning does not
        # carry across deploys (see the TombstonedTaskError handler below).
        task_name = '{}_{}'.format(
                twitter_id,
                os.environ['CURRENT_VERSION_ID'].split('.')[0])
        queue_name = 'scoring-direct' if depth == 0 else 'scoring-indirect'
        try:
            _ = yield taskqueue.Task(
                name=task_name,
                params={
                    'twitter_id': twitter_id,
                    'depth': depth
                }).add_async(queue_name)

            # If this is a direct query, schedule an analysis of the profile
            # picture.
            if depth == 0:
                _ = yield taskqueue.Task(
                    name=task_name,
                    params={
                        'twitter_id': twitter_id,
                    }).add_async('profile-pic')

            # If we add it to the scoring-direct queue, we should remove
            # the corresponding task from the scoring-indirect queue at this
            # point.
            if queue_name == 'scoring-direct':
                delete_from_scoring_indirect(task_name)

        except taskqueue.TaskAlreadyExistsError:
            # We already are going to check this person. There is nothing
            # to do here.
            logging.warning(
                'Fetch for {} already scheduled on queue {}'.format(
                    task_name, queue_name))

        except taskqueue.TombstonedTaskError:
            # This task is too recent. We shouldn't try again so
            # soon. Thombstoning won't happen across different deploys, as the
            # task name has the deploy timestamp on it.
            logging.warning('Fetch for {} tombstoned'.format(task_name))
        raise ndb.Return(score)
    else:
        raise ndb.Return(score)
Example 19
Project: luci-py   Author: luci   File: pipeline.py    Apache License 2.0 4 votes vote down vote up
def post(self):
    """Handle a fan-out task: enqueue one run task per child pipeline."""
    # Only App Engine's task queue service may invoke this handler.
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
      self.response.set_status(403)
      return

    context = _PipelineContext.from_environ(self.request.environ)

    # Stringified db.Keys of the children to run.
    all_pipeline_keys = set()

    # Old-style fan-out requests carry the child keys in the payload.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # New-style requests reference children by index on the parent entity,
    # working around the 10KB task payload limit.  The get() is
    # consistent-on-read, and the fan-out task was enqueued in the same
    # transaction that updated the parent, so fanned_out is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
      parent = db.get(db.Key(parent_key))
      for index in child_indexes:
        all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = []
    all_pipelines = db.get(
        [db.Key(pipeline_key) for pipeline_key in all_pipeline_keys])
    for child_pipeline in all_pipelines:
      if child_pipeline is None:
        # Child record is gone; skip it rather than fail the whole batch.
        continue
      pipeline_key = str(child_pipeline.key())
      all_tasks.append(taskqueue.Task(
          url=context.pipeline_handler_path,
          params=dict(pipeline_key=pipeline_key),
          target=child_pipeline.params.get('target'),
          headers={'X-Ae-Pipeline-Key': pipeline_key},
          name='ae-pipeline-fan-out-' + child_pipeline.key().name()))

    # The taskqueue bulk-add API accepts at most 100 tasks per call.
    batch_size = 100
    for start in xrange(0, len(all_tasks), batch_size):
      try:
        taskqueue.Queue(context.queue_name).add(
            all_tasks[start:start + batch_size])
      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        # Duplicate named tasks mean the children are already scheduled.
        pass
Example 20
Project: luci-py   Author: luci   File: pipeline.py    Apache License 2.0 4 votes vote down vote up
def post(self):
    """Handle a fan-out task: enqueue one run task per child pipeline."""
    # Only App Engine's task queue service may invoke this handler.
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
      self.response.set_status(403)
      return

    context = _PipelineContext.from_environ(self.request.environ)

    # Stringified db.Keys of the children to run.
    all_pipeline_keys = set()

    # Old-style fan-out requests carry the child keys in the payload.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # New-style requests reference children by index on the parent entity,
    # working around the 10KB task payload limit.  The get() is
    # consistent-on-read, and the fan-out task was enqueued in the same
    # transaction that updated the parent, so fanned_out is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
      parent = db.get(db.Key(parent_key))
      for index in child_indexes:
        all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = []
    all_pipelines = db.get(
        [db.Key(pipeline_key) for pipeline_key in all_pipeline_keys])
    for child_pipeline in all_pipelines:
      if child_pipeline is None:
        # Child record is gone; skip it rather than fail the whole batch.
        continue
      pipeline_key = str(child_pipeline.key())
      all_tasks.append(taskqueue.Task(
          url=context.pipeline_handler_path,
          params=dict(pipeline_key=pipeline_key),
          target=child_pipeline.params.get('target'),
          headers={'X-Ae-Pipeline-Key': pipeline_key},
          name='ae-pipeline-fan-out-' + child_pipeline.key().name()))

    # The taskqueue bulk-add API accepts at most 100 tasks per call.
    batch_size = 100
    for start in xrange(0, len(all_tasks), batch_size):
      try:
        taskqueue.Queue(context.queue_name).add(
            all_tasks[start:start + batch_size])
      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        # Duplicate named tasks mean the children are already scheduled.
        pass