Python google.appengine.runtime.apiproxy_errors.OverQuotaError() Examples

The following are code examples for showing how to use google.appengine.runtime.apiproxy_errors.OverQuotaError(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: loaner   Author: google   File: base_model.py    Apache License 2.0 6 votes vote down vote up
def add_docs_to_index(cls, documents):
    """Adds a list of documents to a particular index.

    A put that fails with a transient error is retried once; any other
    put failure (or a failed retry) is logged and the remaining documents
    are still indexed.

    Args:
      documents: a list of search.Documents to add to the class' index.
    """
    index = cls.get_index()
    for doc in documents:
      try:
        index.put(doc)
      except search.PutError as err:
        result = err.results[0]
        if result.code == search.OperationResult.TRANSIENT_ERROR:
          # Retry once. If the retry fails too, log and continue so the
          # remaining documents still get indexed (previously a second
          # failure escaped uncaught and aborted the whole loop).
          try:
            index.put(doc)
          except (search.Error, apiproxy_errors.OverQuotaError):
            logging.error(_PUT_DOC_ERR_MSG, doc, index)
      except (search.Error, apiproxy_errors.OverQuotaError):
        logging.error(_PUT_DOC_ERR_MSG, doc, index)
Example 2
Project: server   Author: viur-framework   File: session.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def save(self, req):
		"""
			Writes the session to the memcache/datastore.

			Does nothing, if the session hasn't been changed in the current request.
		"""
		if self.changed:
			serialized = base64.b64encode( pickle.dumps(self.session, protocol=pickle.HIGHEST_PROTOCOL ) )
			self.getSessionKey( req )
			# Get the current user id
			userid = None
			try:
				if "user" in dir( conf["viur.mainApp"] ): #Check for our custom user-api
					userid = conf["viur.mainApp"].user.getCurrentUser()["key"]
			except Exception:
				# Best-effort lookup: anonymous sessions are stored as "guest" below.
				# (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
				pass
			try:
				dbSession = db.Entity( self.kindName, name=self.key )
				dbSession["data"] = serialized
				dbSession["sslkey"] = self.sslKey
				dbSession["skey"] = self.sessionSecurityKey
				dbSession["lastseen"] = time()
				# Note: str(None) is the truthy string "None", so the original
				# `str(userid) or "guest"` could never fall back to "guest".
				dbSession["user"] = str(userid) if userid else "guest" #Store the userid inside the sessionobj, so we can kill specific sessions if needed
				dbSession.set_unindexed_properties( ["data","sslkey" ] )
				db.Put( dbSession )
			except (OverQuotaError, CapabilityDisabledError):
				# Persisting the session is best-effort; never fail the request over it.
				pass
			req.response.headers.add_header( "Set-Cookie", bytes( "%s=%s; Max-Age=99999; Path=/; HttpOnly" % ( self.plainCookieName, self.key ) ) )
			if req.isSSLConnection:
				req.response.headers.add_header( "Set-Cookie", bytes( "%s=%s; Max-Age=99999; Path=/; Secure; HttpOnly" % ( self.sslCookieName, self.sslKey ) ) )
Example 3
Project: python-compat-runtime   Author: GoogleCloudPlatform   File: search.py    Apache License 2.0 5 votes vote down vote up
def get_result(self):
    """Returns the put result, contextualizing any quota error.

    Raises:
      apiproxy_errors.OverQuotaError: re-raised with the index name (and
        namespace, if any) appended to the message so the failing index
        can be identified from logs.
    """
    try:
      return super(_PutOperationFuture, self).get_result()
    except apiproxy_errors.OverQuotaError as e:
      # `except X as e` is valid on Python 2.6+ and 3.x, unlike the original
      # `except X, e`; str(e) replaces the deprecated/py2-only e.message.
      message = str(e) + '; index = ' + self._index.name
      if self._index.namespace:
        message = message + ' in namespace ' + self._index.namespace
      raise apiproxy_errors.OverQuotaError(message)
Example 4
Project: kay-template   Author: yosukesuzuki   File: gae_bulkloader.py    MIT License 4 votes vote down vote up
def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: An AdaptiveThreadPool instance.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    # Pessimistic defaults: overwritten below on success or retryable failure.
    status = adaptive_thread_pool.WorkItem.FAILURE
    instruction = adaptive_thread_pool.ThreadGate.DECREASE

    # NOTE(review): Python 2-only `except E, e` syntax below. The outer try's
    # handlers/finally (and the final `return status, instruction`) are not
    # visible here -- this excerpt appears truncated; do not assume it is the
    # whole function.
    try:
      self.MarkAsTransferring()

      try:
        transfer_time = self._TransferItem(thread_pool)
        if transfer_time is None:
          # No timing info means the transfer didn't complete: retry the
          # item and hold the thread count steady.
          status = adaptive_thread_pool.WorkItem.RETRY
          instruction = adaptive_thread_pool.ThreadGate.HOLD
        else:
          logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                       threading.currentThread().getName(), self, self.count,
                       transfer_time)
          sys.stdout.write('.')
          sys.stdout.flush()
          status = adaptive_thread_pool.WorkItem.SUCCESS
          # Fast transfers invite more concurrency; slower ones hold it.
          if transfer_time <= MAXIMUM_INCREASE_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.INCREASE
          elif transfer_time <= MAXIMUM_HOLD_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.HOLD
      except (db.InternalError, db.NotSavedError, db.Timeout,
              db.TransactionFailedError,
              apiproxy_errors.OverQuotaError,
              apiproxy_errors.DeadlineExceededError,
              apiproxy_errors.ApplicationError), e:
        # Transient datastore/API failures are retried rather than fatal.
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal datastore error: %s', e)
      except urllib2.HTTPError, e:
        http_status = e.code
        # 403 and 5xx responses are treated as transient; anything else is
        # a permanent failure for this item.
        if http_status == 403 or (http_status >= 500 and http_status < 600):
          status = adaptive_thread_pool.WorkItem.RETRY
          logger.exception('Retrying on non-fatal HTTP error: %d %s',
                           http_status, e.msg)
        else:
          self.SetError()
          status = adaptive_thread_pool.WorkItem.FAILURE
Example 5
Project: naziscore   Author: rbanffy   File: scoring.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def get_score_by_screen_name(screen_name, depth):
    """Fetches a Score by screen name, scheduling a refresh if stale/missing.

    Generator-based async flow (yields on ndb/taskqueue futures); presumably
    decorated with @ndb.tasklet at the definition site -- decorator not
    visible in this excerpt, confirm before relying on it.

    Args:
      screen_name: the Twitter screen name to look up.
      depth: 0 for a direct query (also triggers profile-picture analysis),
        greater than 0 for an indirect one.

    Returns:
      Via ndb.Return: the Score entity (possibly stale), or None on quota
      exhaustion.
    """
    # Gets the most recently updated copy, if duplicated.
    # '.'-prefix presumably works around '__'-prefixed key names being
    # reserved in the datastore -- TODO confirm.
    key_name = (
        '.' + screen_name if screen_name.startswith('__') else screen_name)
    try:
        score = yield ndb.Key(Score, key_name).get_async()
    except OverQuotaError:
        logging.critical('We are over quota.')
        raise ndb.Return(None)
    if score is None or (
            score.last_updated < datetime.datetime.now()
            - datetime.timedelta(days=MAX_AGE_DAYS)):
            # If we don't have one, or if we have one that's too old, we need
            # to calculate one.
        if score is not None:
            logging.info('Refreshing {}'.format(screen_name))
        else:
            logging.info('Fetching {} for the first time'.format(screen_name))
        # Task name embeds the deploy version so tombstoning (below) does not
        # carry across deploys.
        task_name = '{}_{}'.format(
                screen_name,
                os.environ['CURRENT_VERSION_ID'].split('.')[0])
        queue_name = 'scoring-direct' if depth == 0 else 'scoring-indirect'
        try:
            _ = yield taskqueue.Task(
                name=task_name,
                params={
                    'screen_name': screen_name,
                    'depth': depth
                }).add_async(queue_name)

            # If this is a direct query, schedule an analysis of the profile
            # picture.
            if depth == 0:
                _ = yield taskqueue.Task(
                    name=task_name,
                    params={
                        'screen_name': screen_name,
                    }).add_async('profile-pic')

            # If we add it to the scoring-direct queue, we should remove
            # the corresponding task from the scoring-indirect queue at this
            # point.
            if queue_name == 'scoring-direct':
                delete_from_scoring_indirect(task_name)

        except taskqueue.TaskAlreadyExistsError:
            # We already are going to check this person. There is nothing
            # to do here.
            logging.warning(
                'Fetch for {} already scheduled on queue {}'.format(
                    task_name, queue_name))

        except taskqueue.TombstonedTaskError:
            # This task is too recent. We shouldn't try again so
            # soon. Tombstoning won't happen across different deploys, as the
            # task name has the deploy timestamp on it.
            logging.warning('Fetch for {} tombstoned'.format(task_name))
    else:
        logging.info('No need to refresh {}'.format(screen_name))

    # Returns the (possibly stale) score even when a refresh was scheduled.
    raise ndb.Return(score)
Example 6
Project: naziscore   Author: rbanffy   File: scoring.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def get_score_by_twitter_id(twitter_id, depth):
    """Fetches a Score by numeric Twitter id, scheduling a refresh if stale.

    Query-based counterpart of get_score_by_screen_name; same generator-based
    async flow (yields on ndb/taskqueue futures), presumably decorated with
    @ndb.tasklet at the definition site -- decorator not visible here.

    Args:
      twitter_id: the numeric Twitter account id to look up.
      depth: 0 for a direct query (also triggers profile-picture analysis),
        greater than 0 for an indirect one.

    Returns:
      Via ndb.Return: the Score entity (possibly stale), or None on quota
      exhaustion.
    """
    try:
        score = yield Score.query(Score.twitter_id == twitter_id).get_async()
    except OverQuotaError:
        logging.critical(
            'Over quota fetching {}'.format(twitter_id))
        raise ndb.Return(None)
    if score is None or (
            score.last_updated < datetime.datetime.now()
            - datetime.timedelta(days=MAX_AGE_DAYS)):
            # If we don't have one, or if we have one that's too old, we need
            # to calculate one.
        # Task name embeds the deploy version so tombstoning (below) does not
        # carry across deploys.
        task_name = '{}_{}'.format(
                twitter_id,
                os.environ['CURRENT_VERSION_ID'].split('.')[0])
        queue_name = 'scoring-direct' if depth == 0 else 'scoring-indirect'
        try:
            _ = yield taskqueue.Task(
                name=task_name,
                params={
                    'twitter_id': twitter_id,
                    'depth': depth
                }).add_async(queue_name)

            # If this is a direct query, schedule an analysis of the profile
            # picture.
            if depth == 0:
                _ = yield taskqueue.Task(
                    name=task_name,
                    params={
                        'twitter_id': twitter_id,
                    }).add_async('profile-pic')

            # If we add it to the scoring-direct queue, we should remove
            # the corresponding task from the scoring-indirect queue at this
            # point.
            if queue_name == 'scoring-direct':
                delete_from_scoring_indirect(task_name)

        except taskqueue.TaskAlreadyExistsError:
            # We already are going to check this person. There is nothing
            # to do here.
            logging.warning(
                'Fetch for {} already scheduled on queue {}'.format(
                    task_name, queue_name))

        except taskqueue.TombstonedTaskError:
            # This task is too recent. We shouldn't try again so
            # soon. Tombstoning won't happen across different deploys, as the
            # task name has the deploy timestamp on it.
            logging.warning('Fetch for {} tombstoned'.format(task_name))
        # NOTE(review): both branches return score; this mirrors the single
        # trailing return in get_score_by_screen_name.
        raise ndb.Return(score)
    else:
        raise ndb.Return(score)
Example 7
Project: python-compat-runtime   Author: GoogleCloudPlatform   File: apiproxy.py    Apache License 2.0 4 votes vote down vote up
def _MakeCallDone(self):
    """Translates the finished RPC's result dict into a response or exception.

    Moves the RPC to the FINISHING state, records CPU usage, and either
    sets self._exception (mapping the wire error code to the matching
    apiproxy_errors type) or parses the serialized response on success.
    Python 2-only `except Exception, e` syntax at the bottom.
    """
    self._state = RPC.FINISHING
    self.cpu_usage_mcycles = self._result_dict['cpu_usage_mcycles']
    if self._result_dict['error'] == APPLICATION_ERROR:
      appl_err = self._result_dict['application_error']
      if appl_err == MEMCACHE_UNAVAILABLE and self.package == 'memcache':


        # Memcache unavailability is surfaced as a capability error rather
        # than a generic application error.
        self._exception = apiproxy_errors.CapabilityDisabledError(
            'The memcache service is temporarily unavailable. %s'
            % self._result_dict['error_detail'])
      else:

        # Generic application-level error: wrap code and detail.
        self._exception = apiproxy_errors.ApplicationError(
            appl_err,
            self._result_dict['error_detail'])
    elif self._result_dict['error'] == CAPABILITY_DISABLED:

      # Prefer the server-provided detail; otherwise use a generic message.
      if self._result_dict['error_detail']:
        self._exception = apiproxy_errors.CapabilityDisabledError(
            self._result_dict['error_detail'])
      else:
        self._exception = apiproxy_errors.CapabilityDisabledError(
            "The API call %s.%s() is temporarily unavailable." % (
            self.package, self.call))
    elif self._result_dict['error'] == FEATURE_DISABLED:
      self._exception = apiproxy_errors.FeatureNotEnabledError(
            self._result_dict['error_detail'])
    elif self._result_dict['error'] == OVER_QUOTA:
      if self._result_dict['error_detail']:

        # Quota error with detail: build a specific OverQuotaError message.
        self._exception = apiproxy_errors.OverQuotaError(
            ('The API call %s.%s() required more quota than is available. %s' %
             (self.package, self.call, self._result_dict['error_detail'])))
      else:

        # No detail: fall back to the generic template from _ExceptionsMap.
        exception_entry = _ExceptionsMap[self._result_dict['error']]
        self._exception = exception_entry[0](
            exception_entry[1] % (self.package, self.call))
    elif self._result_dict['error'] in _ExceptionsMap:
      # Any other mapped error code: instantiate its (class, template) entry.
      exception_entry = _ExceptionsMap[self._result_dict['error']]
      self._exception = exception_entry[0](
          exception_entry[1] % (self.package, self.call))
    else:
      # No error reported: parse the serialized response; a parse failure
      # itself becomes the RPC's exception.
      try:
        self.response.ParseFromString(self._result_dict['result_string'])
      except Exception, e:
        self._exception = e
Example 8
Project: python-compat-runtime   Author: GoogleCloudPlatform   File: bulkloader.py    Apache License 2.0 4 votes vote down vote up
def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: An AdaptiveThreadPool instance.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    status = adaptive_thread_pool.WorkItem.FAILURE
    instruction = adaptive_thread_pool.ThreadGate.DECREASE


    try:
      self.MarkAsTransferring()



      try:
        transfer_time = self._TransferItem(thread_pool)
        if transfer_time is None:
          status = adaptive_thread_pool.WorkItem.RETRY
          instruction = adaptive_thread_pool.ThreadGate.HOLD
        else:
          logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                       threading.currentThread().getName(), self, self.count,
                       transfer_time)
          sys.stdout.write('.')
          sys.stdout.flush()
          status = adaptive_thread_pool.WorkItem.SUCCESS
          if transfer_time <= MAXIMUM_INCREASE_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.INCREASE
          elif transfer_time <= MAXIMUM_HOLD_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.HOLD
      except (db.InternalError, db.NotSavedError, db.Timeout,
              db.TransactionFailedError,
              apiproxy_errors.OverQuotaError,
              apiproxy_errors.DeadlineExceededError,
              apiproxy_errors.ApplicationError), e:

        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal datastore error: %s', e)
      except urllib2.HTTPError, e:
        http_status = e.code
        if http_status >= 500 and http_status < 600:

          status = adaptive_thread_pool.WorkItem.RETRY
          logger.exception('Retrying on non-fatal HTTP error: %d %s',
                           http_status, e.msg)
        else:
          self.SetError()
          status = adaptive_thread_pool.WorkItem.FAILURE