Python google.appengine.ext.db.Timeout() Examples

The following are 12 code examples of google.appengine.ext.db.Timeout(), extracted from open source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module google.appengine.ext.db, or try the search function.
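db.Timeout indicates that a single datastore RPC ran out of time. It is usually safe to treat it as transient and retry, which is the pattern most of the examples below implement. As a minimal sketch (the function name and retry count are illustrative, not from any of the projects):

import logging

from google.appengine.ext import db

def put_with_retries(entity, retries=3):
  """Save an entity, retrying on transient datastore timeouts."""
  for attempt in range(retries):
    try:
      return entity.put()
    except db.Timeout:
      logging.warning('db.Timeout on attempt %d; retrying.', attempt + 1)
  raise db.Timeout()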
Example #1
Source File: context.py    From locality-sensitive-hashing with MIT License
def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout as e:
        # A datastore RPC timed out: log it, double the deadline, and
        # retry the whole batch.
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise 
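Note how flush() doubles the RPC deadline after each timeout, so the datastore gets progressively more time before the buffer gives up. Callers can set that per-RPC deadline themselves via db.create_rpc; a hedged sketch (Employee is a hypothetical db.Model subclass):

from google.appengine.ext import db

rpc = db.create_rpc(deadline=10)  # give this query up to 10s before db.Timeout
results = Employee.all().fetch(100, rpc=rpc)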
Example #2
Source File: fields.py    From graphene-gae with BSD 3-Clause "New" or "Revised" License
def generate_edges_page(ndb_iter, page_size, keys_only, edge_type):
    edges = []
    timeouts = 0
    while len(edges) < page_size:
        try:
            entity = ndb_iter.next()
        except StopIteration:
            break
        except Timeout:
            # A per-RPC timeout; tolerate a couple of these before giving
            # up on filling the rest of the page.
            timeouts += 1
            if timeouts > 2:
                break

            continue
        except DeadlineExceededError:
            # The overall request deadline is near; return what we have.
            break

        if keys_only:
            # entity is actually an ndb.Key and we need to create an empty entity to hold it
            entity = edge_type._meta.fields['node']._type._meta.model(key=entity)

        edges.append(edge_type(node=entity, cursor=ndb_iter.cursor_after().urlsafe()))

    return edges 
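The loop distinguishes two failure modes: db.Timeout is a per-RPC error that may well succeed on the next iterator step, so up to two of them are tolerated, whereas DeadlineExceededError means the whole request is out of time, so the function returns whatever edges it has collected.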
Example #3
Source File: context.py    From python-compat-runtime with Apache License 2.0
def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout as e:
        # A datastore RPC timed out: log it, double the deadline, and
        # retry the whole batch.
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise 
Example #4
Source File: context.py    From appengine-mapreduce with Apache License 2.0
def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout as e:
        # A datastore RPC timed out: log it, double the deadline, and
        # retry the whole batch.
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise 
Example #5
Source File: admin.py    From browserscope with Apache License 2.0
def DataDumpKeys(request):
  """This is used by bin/data_dump.py to get ResultParent keys."""
  bookmark = request.REQUEST.get('bookmark')
  model_name = request.REQUEST.get('model')
  count = int(request.REQUEST.get('count', 0))
  fetch_limit = int(request.REQUEST.get('fetch_limit', 999))
  created_str = request.REQUEST.get('created', 0)
  created = None
  if created_str:
    created = datetime.datetime.strptime(created_str, '%Y-%m-%d %H:%M:%S')
  models = {
      'UserAgent': UserAgent,
      'ResultParent': ResultParent,
      'ResultTime': ResultTime,
      }
  model = models.get(model_name, UserAgent)
  query = pager.PagerQuery(model, keys_only=True)
  if created:
    query.filter('created >=', created)
    query.order('created')
  try:
    prev_bookmark, results, next_bookmark = query.fetch(fetch_limit, bookmark)
  except db.Timeout:
    logging.warning('db.Timeout during initial fetch.')
    return http.HttpResponseServerError('db.Timeout during initial fetch.')
  response_params = {
      'bookmark': next_bookmark,
      'model': model_name,
      'count': count + len(results),
      'keys': [str(key) for key in results]
      }
  if created_str:
    response_params['created'] = created_str
  return http.HttpResponse(content=simplejson.dumps(response_params),
                           content_type='application/json') 
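On a timeout the handler returns HTTP 500 rather than a partial page; because every successful response carries the next bookmark, the bin/data_dump.py client can presumably reissue the request from the last bookmark it received.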
Example #6
Source File: result_stats.py    From browserscope with Apache License 2.0
def UpdateStatsCache(cls, category, browsers):
        """Update the memcache of stats for all the tests for each browser.

        This is also where the summary stats get updated.

        Args:
            category: a category string like 'network'
            browsers: a list of browsers like ['Firefox 3.6', 'IE 8.0']
        Returns:
            a list of browsers that were not processed due to a timeout.
        """
        test_set = all_test_sets.GetTestSet(category)
        test_keys = [t.key for t in test_set.VisibleTests()]
        ua_stats = {}
        unhandled_browsers = []
        is_timed_out = False
        for browser in browsers:
            try:
                medians, num_scores = test_set.GetMediansAndNumScores(browser)
            except db.Timeout:
                # Note: is_timed_out is never reset, so once one browser
                # times out, every remaining browser is reported as unhandled.
                is_timed_out = True
            if is_timed_out:
                logging.info('Timed out \'%s\' in UpdateStatsCache doing '
                             'GetMediansAndNumScores for %s', category, browser)
                unhandled_browsers.append(browser)
            else:
                stats = test_set.GetStats(test_keys, medians, num_scores)
                ua_stats[browser] = stats
        memcache.set_multi(ua_stats, **cls.MemcacheParams(category))
        if not is_timed_out:
            SummaryStatsManager.UpdateStats(category, ua_stats)
        return unhandled_browsers 
Example #7
Source File: rotmodel.py    From python-for-android with Apache License 2.0
def put(self):
        count = 0
        while count < 3:
            try:
                return db.Model.put(self)
            except db.Timeout:
                count += 1
        else:
            # The loop has no break, so this else clause runs exactly when
            # all three attempts timed out; surface the error to the caller.
            raise db.Timeout()
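A hedged usage sketch, assuming rotmodel exposes the class above as RotModel (the model and property names here are hypothetical):

import rotmodel

from google.appengine.ext import db

class Visit(rotmodel.RotModel):
    count = db.IntegerProperty(default=0)

Visit(count=1).put()  # retries up to three times on db.Timeout, then raises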
Example #8
Source File: context_test.py    From appengine-mapreduce with Apache License 2.0
def flush_function(self, items, options):
    # Test stub: record every call, raise db.Timeout for the first
    # `timeouts` calls, then start persisting items.
    FlushFunction.calls.append((items, dict(options)))
    if len(FlushFunction.calls) <= FlushFunction.timeouts:
      raise db.Timeout()
    FlushFunction.persistent_storage.extend(items)
Example #9
Source File: context_test.py    From appengine-mapreduce with Apache License 2.0
def testFlushTimeoutTooManyTimes(self):
    FlushFunction.timeouts = context._ItemList.DEFAULT_RETRIES + 1
    self.list.append('abc')
    self.assertRaises(db.Timeout, self.list.flush)
    self.assertEqual(context._ItemList.DEFAULT_RETRIES + 1,
                     len(FlushFunction.calls)) 
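This test drives the FlushFunction stub from Example #8: with one more timeout than _ItemList.DEFAULT_RETRIES allows, flush() must give up and re-raise db.Timeout, and the call count verifies that every retry actually reached the flush function.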
Example #10
Source File: admin_rankers.py    From browserscope with Apache License 2.0
def UploadRankers(request):
  """Rebuild rankers."""
  time_limit = int(request.REQUEST.get('time_limit', 8))
  category = request.REQUEST.get('category')
  params_str = request.REQUEST.get('params_str')
  test_key_browsers_json = request.REQUEST.get('test_key_browsers_json')
  ranker_values_json = request.REQUEST.get('ranker_values_json')

  if not category:
    return http.HttpResponseServerError('Must send "category".')
  if not test_key_browsers_json:
    return http.HttpResponseServerError('Must send "test_key_browsers_json".')
  if not ranker_values_json:
    return http.HttpResponseServerError('Must send "ranker_values_json".')

  test_key_browsers = simplejson.loads(test_key_browsers_json)
  ranker_values = simplejson.loads(ranker_values_json)
  start_time = time.clock()

  message = None
  test_set = all_test_sets.GetTestSet(category)
  test_browsers = [(test_set.GetTest(test_key), browser)
                   for test_key, browser in test_key_browsers]
  rankers = result_ranker.GetOrCreateRankers(test_browsers, params_str)

  for ranker, (median, num_scores, values_str) in zip(rankers, ranker_values):
    if time.clock() - start_time > time_limit:
      message = 'Over time limit'
      break
    if ranker.GetMedianAndNumScores() == (median, num_scores):
      logging.info('Skipping ranker with unchanged values: %s',
                   ranker.key().name())
      continue
    values = map(int, values_str.split('|'))
    try:
      ranker.SetValues(values, num_scores)
    except db.Timeout:
      message = 'db.Timeout'
      break
  response_params = {}
  if message:
    logging.info('message: %s', message)
    response_params['message'] = message
  return http.HttpResponse(simplejson.dumps(response_params)) 
Example #11
Source File: bulkloader.py    From browserscope with Apache License 2.0
def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: An AdaptiveThreadPool instance.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    status = adaptive_thread_pool.WorkItem.FAILURE
    instruction = adaptive_thread_pool.ThreadGate.DECREASE

    try:
      self.MarkAsTransferring()

      try:
        transfer_time = self._TransferItem(thread_pool)
        if transfer_time is None:
          status = adaptive_thread_pool.WorkItem.RETRY
          instruction = adaptive_thread_pool.ThreadGate.HOLD
        else:
          logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                       threading.currentThread().getName(), self, self.count,
                       transfer_time)
          sys.stdout.write('.')
          sys.stdout.flush()
          status = adaptive_thread_pool.WorkItem.SUCCESS
          if transfer_time <= MAXIMUM_INCREASE_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.INCREASE
          elif transfer_time <= MAXIMUM_HOLD_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.HOLD
      except (db.InternalError, db.NotSavedError, db.Timeout,
              db.TransactionFailedError,
              apiproxy_errors.OverQuotaError,
              apiproxy_errors.DeadlineExceededError,
              apiproxy_errors.ApplicationError) as e:
        # Transient datastore/API failures: hand the item back for a retry.
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal datastore error: %s', e)
      except urllib2.HTTPError as e:
        http_status = e.code
        if http_status >= 500 and http_status < 600:
          # Server-side (5xx) errors are presumed transient.
          status = adaptive_thread_pool.WorkItem.RETRY
          logger.exception('Retrying on non-fatal HTTP error: %d %s',
                           http_status, e.msg)
        else:
          self.SetError()
          status = adaptive_thread_pool.WorkItem.FAILURE
    finally:
      pass  # the original listing truncates here; remaining cleanup omitted
    return (status, instruction)
Example #12
Source File: bulkloader.py    From python-compat-runtime with Apache License 2.0
def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: An AdaptiveThreadPool instance.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    status = adaptive_thread_pool.WorkItem.FAILURE
    instruction = adaptive_thread_pool.ThreadGate.DECREASE

    try:
      self.MarkAsTransferring()

      try:
        transfer_time = self._TransferItem(thread_pool)
        if transfer_time is None:
          status = adaptive_thread_pool.WorkItem.RETRY
          instruction = adaptive_thread_pool.ThreadGate.HOLD
        else:
          logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                       threading.currentThread().getName(), self, self.count,
                       transfer_time)
          sys.stdout.write('.')
          sys.stdout.flush()
          status = adaptive_thread_pool.WorkItem.SUCCESS
          if transfer_time <= MAXIMUM_INCREASE_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.INCREASE
          elif transfer_time <= MAXIMUM_HOLD_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.HOLD
      except (db.InternalError, db.NotSavedError, db.Timeout,
              db.TransactionFailedError,
              apiproxy_errors.OverQuotaError,
              apiproxy_errors.DeadlineExceededError,
              apiproxy_errors.ApplicationError) as e:
        # Transient datastore/API failures: hand the item back for a retry.
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal datastore error: %s', e)
      except urllib2.HTTPError as e:
        http_status = e.code
        if http_status >= 500 and http_status < 600:
          # Server-side (5xx) errors are presumed transient.
          status = adaptive_thread_pool.WorkItem.RETRY
          logger.exception('Retrying on non-fatal HTTP error: %d %s',
                           http_status, e.msg)
        else:
          self.SetError()
          status = adaptive_thread_pool.WorkItem.FAILURE
    finally:
      pass  # the original listing truncates here; remaining cleanup omitted
    return (status, instruction)