Python google.appengine.api.taskqueue.Queue() Examples

The following code examples show how to use google.appengine.api.taskqueue.Queue(). They are taken from open source Python projects.
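
For orientation, here is a minimal sketch of the two common ways a Queue object is used in the examples below. The queue names 'default' and 'pullq' and the handler URL '/work' are placeholders, not taken from any of the projects:

from google.appengine.api import taskqueue

# Push queue: enqueue a task that App Engine will POST back to a handler URL.
queue = taskqueue.Queue('default')
queue.add(taskqueue.Task(url='/work', params={'key': 'value'}))

# Pull queue: lease work items, process them, then delete them when done.
pull_queue = taskqueue.Queue('pullq')
tasks = pull_queue.lease_tasks(lease_seconds=60, max_tasks=100)
# ... process task.payload for each leased task ...
pull_queue.delete_tasks(tasks)

A pull queue such as 'pullq' must be declared with mode: pull in the application's queue.yaml before it can be leased from.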

Example 1
Project: mlab-ns   Author: m-lab   File: pipeline.py    Apache License 2.0
def cleanup(self):
    """Clean up this Pipeline and all Datastore records used for coordination.

    Only works when called on a root pipeline. Child pipelines will ignore
    calls to this method.

    After this method is called, Pipeline.from_id() and related status
    methods will return inconsistent or missing results. This method is
    fire-and-forget and asynchronous.
    """
    if self._root_pipeline_key is None:
      raise UnexpectedPipelineError(
          'Could not cleanup Pipeline with unknown root pipeline ID.')
    if not self.is_root:
      return
    task = taskqueue.Task(
        params=dict(root_pipeline_key=self._root_pipeline_key),
        url=self.base_path + '/cleanup',
        headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
    taskqueue.Queue(self.queue_name).add(task) 
Example 2
Project: stratego.io   Author: benletchford   File: api.py    MIT License
def post(self):
        if not _array_has_values(self.request.arguments(), ['board', 'socket_id']):
            self.response.set_status(STATUS_CODES.INTERNAL_ERROR)
            return

        board = self.request.get('board')
        socket_id = self.request.get('socket_id')

        params = {
            'setup': board,
            'socket_id': socket_id
        }

        q = taskqueue.Queue('pool')
        q.add(
            taskqueue.Task(url='/api/pool/process', params=params, method='post'))

        self.response.set_status(200) 
Example 3
Project: python-docs-samples   Author: GoogleCloudPlatform   File: main.py    Apache License 2.0
def get(self):
        """Indefinitely fetch tasks and update the datastore."""
        queue = taskqueue.Queue('pullq')
        while True:
            try:
                tasks = queue.lease_tasks_by_tag(3600, 1000, deadline=60)
            except (taskqueue.TransientError,
                    apiproxy_errors.DeadlineExceededError) as e:
                logging.exception(e)
                time.sleep(1)
                continue

            if tasks:
                key = tasks[0].tag

                try:
                    update_counter(key, tasks)
                except Exception as e:
                    logging.exception(e)
                    raise
                finally:
                    queue.delete_tasks(tasks)

            time.sleep(1) 
Example 4
Project: python-docs-samples   Author: GoogleCloudPlatform   File: application.py    Apache License 2.0
def post(self):
        amount = int(self.request.get('amount'))

        queue = taskqueue.Queue(name='default')
        task = taskqueue.Task(
            url='/update_counter',
            target='worker',
            params={'amount': amount})

        rpc = queue.add_async(task)

        # Wait for the rpc to complete and return the queued task.
        task = rpc.get_result()

        self.response.write(
            'Task {} enqueued, ETA {}.'.format(task.name, task.eta)) 
Example 5
Project: luci-py   Author: luci   File: pipeline.py    Apache License 2.0
def cleanup(self):
    """Clean up this Pipeline and all Datastore records used for coordination.

    Only works when called on a root pipeline. Child pipelines will ignore
    calls to this method.

    After this method is called, Pipeline.from_id() and related status
    methods will return inconsistent or missing results. This method is
    fire-and-forget and asynchronous.
    """
    if self._root_pipeline_key is None:
      raise UnexpectedPipelineError(
          'Could not cleanup Pipeline with unknown root pipeline ID.')
    if not self.is_root:
      return
    task = taskqueue.Task(
        params=dict(root_pipeline_key=self._root_pipeline_key),
        url=self.base_path + '/cleanup',
        headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
    taskqueue.Queue(self.queue_name).add(task) 
Example 6
Project: mlab-ns   Author: m-lab   File: pipeline.py    Apache License 2.0
def post(self):
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
      self.response.set_status(403)
      return

    context = _PipelineContext.from_environ(self.request.environ)

    # Set of stringified db.Keys of children to run.
    all_pipeline_keys = set()

    # For backwards compatibility with the old style of fan-out requests.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # Fetch the child pipelines from the parent. This works around the 10KB
    # task payload limit. This get() is consistent-on-read and the fan-out
    # task is enqueued in the transaction that updates the parent, so the
    # fanned_out property is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
      parent_key = db.Key(parent_key)
      parent = db.get(parent_key)
      for index in child_indexes:
        all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = []
    for pipeline_key in all_pipeline_keys:
      all_tasks.append(taskqueue.Task(
          url=context.pipeline_handler_path,
          params=dict(pipeline_key=pipeline_key),
          headers={'X-Ae-Pipeline-Key': pipeline_key},
          name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name()))

    batch_size = 100  # Limit of taskqueue API bulk add.
    for i in xrange(0, len(all_tasks), batch_size):
      batch = all_tasks[i:i+batch_size]
      try:
        taskqueue.Queue(context.queue_name).add(batch)
      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        pass 
Example 7
Project: roger-api   Author: rogertalk   File: cron.py    MIT License
def report_to_bigquery():
    """Flush all pending events of a certain type to BigQuery."""
    # Schedule multiple flush jobs per minute for some events.
    if request.method == 'GET':
        tasks = []
        for delay in xrange(0, 60, 5):
            tasks.append(taskqueue.Task(method='POST', url=request.path,
                                        countdown=delay,
                                        params={'event_name': 'content_vote_v1'}))
        tasks.append(taskqueue.Task(method='POST', url=request.path))
        taskqueue.Queue(config.BIGQUERY_CRON_QUEUE_NAME).add(tasks)
        return ''
    # Retrieve pending events from pull queue.
    try:
        q = taskqueue.Queue(config.BIGQUERY_QUEUE_NAME)
        tasks = q.lease_tasks_by_tag(config.BIGQUERY_LEASE_TIME.total_seconds(),
                                     config.BIGQUERY_LEASE_AMOUNT,
                                     tag=flask_extras.get_parameter('event_name'))
        logging.debug('Leased %d event(s) from %s', len(tasks), config.BIGQUERY_QUEUE_NAME)
    except taskqueue.TransientError:
        logging.warning('Could not lease events due to transient error')
        return '', 503
    if not tasks:
        return ''
    # Insert the events into BigQuery.
    table_id = tasks[0].tag
    rows = [json.loads(t.payload) for t in tasks]
    bigquery_client.insert_rows(table_id, rows)
    # Delete the tasks now that we're done with them.
    q.delete_tasks(tasks)
    return '' 
Example 8
Project: roger-api   Author: rogertalk   File: jobs.py    MIT License
def _add_task_list_async(tasks, queue_name=None):
    queue = taskqueue.Queue(queue_name) if queue_name else taskqueue.Queue()
    yield queue.add_async(tasks) 
Example 9
Project: roger-api   Author: rogertalk   File: admin.py    MIT License
def post_notify():
    params = {
        'app': request.form['app'],
        'env': request.form['env'],
        'text': request.form['text'],
    }
    title = request.form.get('title')
    if title:
        params['title'] = title
    tokens = re.split(r'\s+', request.files['tokens'].read().strip())
    tasks = []
    total_tasks = 0
    i, j = 0, 500
    while True:
        # Slide a 500-token window across the token list.
        batch = tokens[i:j]
        if not batch:
            break
        i, j = j, j + j - i
        params['token'] = batch
        tasks.append(taskqueue.Task(method='POST', url='/_ah/jobs/notify_batch',
                                    params=params))
        if len(tasks) == taskqueue.MAX_TASKS_PER_ADD:
            taskqueue.Queue().add(tasks)
            total_tasks += len(tasks)
            tasks = []
    if tasks:
        taskqueue.Queue().add(tasks)
        total_tasks += len(tasks)
    return 'Notifying %d token(s) in %d job(s) • <a href="/admin/notify">Back</a>' % (
            len(tokens),
            total_tasks) 
Example 10
Project: naziscore   Author: rbanffy   File: scoring.py    GNU Affero General Public License v3.0
def delete_from_scoring_indirect(task_name):
    logging.debug('Deleting task {} from scoring-indirect'.format(
        task_name))
    return taskqueue.Queue('scoring-indirect').delete_tasks(
        taskqueue.Task(name=task_name)) 
Example 11
Project: python-docs-samples   Author: GoogleCloudPlatform   File: main.py    Apache License 2.0
def post(self):
        key = self.request.get('key')
        if key:
            queue = taskqueue.Queue('pullq')
            queue.add(taskqueue.Task(payload='', method='PULL', tag=key))
        self.redirect('/')
    # [END adding_task] 
Example 12
Project: andris-projeto   Author: andris210296   File: gaeutil.py    MIT License
def set_up(self):
        # Enqueue the task asynchronously; passing an explicit RPC lets the
        # caller wait for the result later instead of blocking here.
        self._rpc = taskqueue.create_rpc()
        q = Queue(self._queue_name)
        q.add_async(self._task, rpc=self._rpc)
Example 13
Project: luci-py   Author: luci   File: external_scheduler.py    Apache License 2.0
def task_batch_handle_notifications():
  """Batches notifications from pull queue, and forwards to push queue."""

  # Number of seconds to lease the tasks. Once it expires, the
  # tasks will be available again for the next worker.
  LEASE_SEC = 60
  # The maximum number of tasks to lease from the pull queue.
  MAX_TASKS = 1000
  queue = taskqueue.Queue('es-notify-tasks-batch')
  tasks = queue.lease_tasks(LEASE_SEC, MAX_TASKS)
  if not tasks:
    return
  requests = {}
  tasks_per_scheduler = collections.defaultdict(list)
  for task in tasks:
    proto = plugin_pb2.NotifyTasksRequest()
    payload = json.loads(task.payload)
    json_format.Parse(payload['request_json'], proto)
    s_tuple = (proto.scheduler_id, payload['es_host'])
    tasks_per_scheduler[s_tuple].append(task)
    if s_tuple not in requests:
      requests[s_tuple] = proto
    else:
      requests[s_tuple].notifications.extend(proto.notifications)

  for s_id, address in requests:
    request_json = json_format.MessageToJson(requests[s_id, address])
    enqueued = utils.enqueue_task(
        '/internal/taskqueue/important/external_scheduler/notify-tasks',
        'es-notify-tasks',
        params={'es_host': address, 'request_json': request_json},
        transactional=ndb.in_transaction())
    if not enqueued:
      logging.warning('Failed to enqueue external scheduler task, skipping')
      continue
    queue.delete_tasks(tasks_per_scheduler[s_id, address]) 
Example 14
Project: remember-that-telegram-bot   Author: guiferviz   File: service.py    MIT License
def _process_delete_task(cls, chat_id, tokens):
        """
            Deletes all tasks of the default queue.
            TODO: delete only tasks of the chat_id user.
        """
        q = taskqueue.Queue('default')
        q.purge()
        main.BOT.sendMessage(chat_id, main.DELETE_ALL_TXT)
        return True 
Example 15
Project: crmint   Author: google   File: models_tests.py    Apache License 2.0
def test_stop_succeeds_with_outdated_tasks(self):
    pipeline = models.Pipeline.create()
    job1 = models.Job.create(pipeline_id=pipeline.id)
    self.assertTrue(pipeline.get_ready())
    task1 = job1.start()
    self.assertIsNotNone(task1)
    taskqueue.Queue().delete_tasks([taskqueue.Task(name=task1.name)])
    self.assertTrue(job1.stop())
    self.assertEqual(job1.status, models.Job.STATUS.STOPPING) 
Example 16
Project: crmint   Author: google   File: models.py    Apache License 2.0
def cancel_tasks(self):
    task_namespace = self._get_task_namespace()
    enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace)
    if enqueued_tasks:
      tasks = [taskqueue.Task(name=t.task_name) for t in enqueued_tasks]
      taskqueue.Queue().delete_tasks(tasks)
      TaskEnqueued.where(task_namespace=task_namespace).delete() 
Example 17
Project: upvote   Author: google   File: utils.py    Apache License 2.0
def QueueSize(queue=constants.TASK_QUEUE.DEFAULT, deadline=10):
  # fetch_statistics() returns a QueueStatistics object; its .tasks field is
  # the number of tasks currently in the queue.
  queue = taskqueue.Queue(name=queue)
  queue_stats = queue.fetch_statistics(deadline=deadline)
  return queue_stats.tasks
Example 18
Project: luci-py   Author: luci   File: pipeline.py    Apache License 2.0
def post(self):
    if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
      self.response.set_status(403)
      return

    context = _PipelineContext.from_environ(self.request.environ)

    # Set of stringified db.Keys of children to run.
    all_pipeline_keys = set()

    # For backwards compatibility with the old style of fan-out requests.
    all_pipeline_keys.update(self.request.get_all('pipeline_key'))

    # Fetch the child pipelines from the parent. This works around the 10KB
    # task payload limit. This get() is consistent-on-read and the fan-out
    # task is enqueued in the transaction that updates the parent, so the
    # fanned_out property is consistent here.
    parent_key = self.request.get('parent_key')
    child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
    if parent_key:
      parent_key = db.Key(parent_key)
      parent = db.get(parent_key)
      for index in child_indexes:
        all_pipeline_keys.add(str(parent.fanned_out[index]))

    all_tasks = []
    all_pipelines = db.get([db.Key(pipeline_key) for pipeline_key in all_pipeline_keys])
    for child_pipeline in all_pipelines:
      if child_pipeline is None:
        continue
      pipeline_key = str(child_pipeline.key())
      all_tasks.append(taskqueue.Task(
          url=context.pipeline_handler_path,
          params=dict(pipeline_key=pipeline_key),
          target=child_pipeline.params.get('target'),
          headers={'X-Ae-Pipeline-Key': pipeline_key},
          name='ae-pipeline-fan-out-' + child_pipeline.key().name()))

    batch_size = 100  # Limit of taskqueue API bulk add.
    for i in xrange(0, len(all_tasks), batch_size):
      batch = all_tasks[i:i+batch_size]
      try:
        taskqueue.Queue(context.queue_name).add(batch)
      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        pass 