Python google.appengine.runtime.apiproxy_errors.RequestTooLargeError() Examples

The following are 15 code examples of google.appengine.runtime.apiproxy_errors.RequestTooLargeError(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module google.appengine.runtime.apiproxy_errors, or try the search function.
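RequestTooLargeError is raised by the App Engine API proxy whenever a single API request exceeds the size limit of the service handling it (for most stubs the default cap is 1 MB, as Example #2 below shows). Before the examples, here is a minimal sketch of the basic pattern, assuming a Python 2.7 App Engine runtime; LogRecord is a hypothetical db.Model used only for illustration:

from google.appengine.ext import db
from google.appengine.runtime import apiproxy_errors


class LogRecord(db.Model):
    # Hypothetical model; any entity with a large property will do.
    payload = db.TextProperty()


def save_payload(text):
    try:
        LogRecord(payload=text).put()
    except apiproxy_errors.RequestTooLargeError:
        # The serialized RPC exceeded the API size limit; shrink the
        # payload (truncate, split, or move it to blobstore) and retry.
        raise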
Example #1
Source File: context.py    From locality-sensitive-hashing with MIT License
def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout as e:
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise 
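Note the asymmetry in the handler above: db.Timeout is transient, so the flush retries with a doubled deadline on each attempt, but RequestTooLargeError is logged and re-raised immediately, since resending the same oversized batch can never succeed.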
Example #2
Source File: apiproxy_stub.py    From python-compat-runtime with Apache License 2.0
def __init__(self, service_name, max_request_size=MAX_REQUEST_SIZE,
               request_data=None):
    """Constructor.

    Args:
      service_name: Service name expected for all calls.
      max_request_size: int, maximum allowable size of the incoming request.  An
        apiproxy_errors.RequestTooLargeError will be raised if the inbound
        request exceeds this size.  Default is 1 MB.
      request_data: A request_info.RequestInfo instance used to look up state
        associated with the request that generated an API call.
    """
    self.__service_name = service_name
    self.__max_request_size = max_request_size
    self.request_data = request_data or request_info._local_request_info

    self._mutex = threading.RLock()
    self.__error = None
    self.__error_dict = {} 
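Subclasses inherit this size check automatically: MakeSyncCall (Example #7) compares each inbound request's ByteSize() against max_request_size and raises RequestTooLargeError before dispatching to the _Dynamic_<call> handler.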
Example #3
Source File: context.py    From python-compat-runtime with Apache License 2.0
def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout as e:
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise 
Example #4
Source File: context.py    From appengine-mapreduce with Apache License 2.0
def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout as e:
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise 
Example #5
Source File: context.py    From locality-sensitive-hashing with MIT License
def _log_largest_items(self):
    if not self.__repr_function:
      logging.error("Got RequestTooLargeError but can't interpret items in "
                    "_ItemList %s.", self)
      return

    sizes = [len(self.__repr_function(i)) for i in self.items]
    largest = heapq.nlargest(self._LARGEST_ITEMS_TO_LOG,
                             zip(sizes, self.items),
                             lambda t: t[0])
    # Set field for test only.
    self._largest = [(s, self.__repr_function(i)) for s, i in largest]
    logging.error("Got RequestTooLargeError. Largest items: %r", self._largest) 
Example #6
Source File: main.py    From cas-eval with Apache License 2.0
def save_page():
    @flask.after_this_request
    def add_headers(response):
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
    values = flask.request.values
    if values.get('type', '') == 'Serp':
        try:
            user_id = Session.get_user_id(values['url'])
        except Exception as e:
            app.logger.error(e)
            return 'Incorrect user_id used', 400
        try:
            query = Session.get_query(values['url'])
        except Exception as e:
            app.logger.error(e)
            return 'No query set?', 400
        for k in ['data', 'tab_id', 'time']:
            if k not in values:
                return 'Missing param: %s' % k, 400
        data = values['data']
        try:
            ts = Session.convert_time(values['time'])
        except Exception as e:
            app.logger.error(e)
            return 'Incorrect timestamp', 400
        session = Session(id=values['tab_id'], user_id=user_id, q=query,
                serp_html=data, start_ts=ts)
        n = len(data)
        while n > 1:
            session.serp_html = data[:n]
            try:
                session.put()
                break
            except apiproxy_errors.RequestTooLargeError as e:
                app.logger.error(e)
                n //= 2
        return 'Saved', 201
    return 'Only support saving SERPs using POST requests, sorry.', 403 
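The while loop halves the stored SERP HTML until the entity fits under the datastore request limit (note that if even the smallest slice never fits, the loop exits at n <= 1 and the handler still reports 'Saved'). A sketch of the same truncate-and-retry pattern as a standalone helper; the function and its arguments are illustrative, not part of the project:

from google.appengine.runtime import apiproxy_errors


def put_truncated(entity, field, data):
    # Store as much of `data` on `entity.<field>` as the datastore accepts.
    n = len(data)
    while n > 1:
        setattr(entity, field, data[:n])
        try:
            entity.put()
            return n  # number of characters actually stored
        except apiproxy_errors.RequestTooLargeError:
            n //= 2  # halve the payload and retry
    return 0  # even the smallest slice would not fit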
Example #7
Source File: apiproxy_stub.py    From python-compat-runtime with Apache License 2.0
def MakeSyncCall(self, service, call, request, response, request_id=None):
    """The main RPC entry point.

    Args:
      service: Must be name as provided to service_name of constructor.
      call: A string representing the rpc to make.  Must be part of
        the underlying service's methods and implemented by _Dynamic_<call>.
      request: A protocol buffer of the type corresponding to 'call'.
      response: A protocol buffer of the type corresponding to 'call'.
      request_id: A unique string identifying the request associated with the
          API call.
    """
    assert service == self.__service_name, ('Expected "%s" service name, '
                                            'was "%s"' % (self.__service_name,
                                                          service))
    if request.ByteSize() > self.__max_request_size:
      raise apiproxy_errors.RequestTooLargeError(
          'The request to API call %s.%s() was too large.' % (service, call))
    messages = []
    assert request.IsInitialized(messages), messages

    exception_type, frequency = self.__error_dict.get(call, (None, None))
    if exception_type and frequency:
      if random.random() <= frequency:
        raise exception_type

    if self.__error:
      if random.random() <= self.__error_rate:
        raise self.__error

    method = getattr(self, '_Dynamic_' + call)
    if self._ACCEPTS_REQUEST_ID:
      method(request, response, request_id)
    else:
      method(request, response) 
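Because the size check runs before the _Dynamic_<call> dispatch, a deliberately small max_request_size makes the error easy to trigger in tests. A sketch assuming the APIProxyStub shown in Example #2; EchoStub and FakeRequest are illustrative names, not part of the SDK:

from google.appengine.api import apiproxy_stub
from google.appengine.runtime import apiproxy_errors


class FakeRequest(object):
    def ByteSize(self):
        return 2048  # pretend the serialized request is 2 KB

    def IsInitialized(self, messages):
        return True


class EchoStub(apiproxy_stub.APIProxyStub):
    def __init__(self):
        # Cap inbound requests at 1 KB so FakeRequest is rejected.
        apiproxy_stub.APIProxyStub.__init__(self, 'echo',
                                            max_request_size=1024)

    def _Dynamic_Echo(self, request, response):
        pass  # never reached for an oversized request


stub = EchoStub()
try:
    stub.MakeSyncCall('echo', 'Echo', FakeRequest(), None)
except apiproxy_errors.RequestTooLargeError as err:
    print('rejected: %s' % err)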
Example #8
Source File: backup_handler.py    From python-compat-runtime with Apache License 2.0
def flush(self):
    """Save aggregated type information to the datastore if changed."""
    if self.__needs_save:

      def update_aggregation_tx():
        aggregation = SchemaAggregationResult.load(
            self.__backup_id, self.__kind, self.__shard_id)
        if aggregation:
          if aggregation.merge(self.__aggregation):
            aggregation.put(force_writes=True)
          self.__aggregation = aggregation
        else:
          self.__aggregation.put(force_writes=True)

      def mark_aggregation_as_partial_tx():
        aggregation = SchemaAggregationResult.load(
            self.__backup_id, self.__kind, self.__shard_id)
        if aggregation is None:
          aggregation = SchemaAggregationResult.create(
              self.__backup_id, self.__kind, self.__shard_id)
        aggregation.is_partial = True
        aggregation.put(force_writes=True)
        self.__aggregation = aggregation

      try:
        db.run_in_transaction(update_aggregation_tx)
      except apiproxy_errors.RequestTooLargeError:
        db.run_in_transaction(mark_aggregation_as_partial_tx)
      self.__needs_save = False 
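The fallback here deliberately degrades instead of failing: if the merged schema aggregation no longer fits in a single datastore request, the second transaction stores a much smaller record flagged is_partial, so the backup completes with partial schema information rather than aborting.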
Example #9
Source File: context.py    From appengine-mapreduce with Apache License 2.0
def _log_largest_items(self):
    if not self.__repr_function:
      logging.error("Got RequestTooLargeError but can't interpret items in "
                    "_ItemList %s.", self)
      return

    sizes = [len(self.__repr_function(i)) for i in self.items]
    largest = heapq.nlargest(self._LARGEST_ITEMS_TO_LOG,
                             zip(sizes, self.items),
                             lambda t: t[0])
    # Set field for test only.
    self._largest = [(s, self.__repr_function(i)) for s, i in largest]
    logging.error("Got RequestTooLargeError. Largest items: %r", self._largest) 
Example #10
Source File: context_test.py    From appengine-mapreduce with Apache License 2.0
def flush_function_too_large_error(self, *args, **kwds):
    raise apiproxy_errors.RequestTooLargeError() 
Example #11
Source File: context_test.py    From appengine-mapreduce with Apache License 2.0
def testFlushWithTooLargeRequestError(self):
    self.list = context._ItemList(
        self.max_entity_count,
        FlushFunction().flush_function_too_large_error,
        repr_function=lambda item: item)
    items = [(s, 'a'*s) for s in range(10, 1, -1)]
    items_copy = list(items)
    random.seed(1)
    random.shuffle(items_copy)
    for _, i in items_copy:
      self.list.append(i)
    self.assertRaises(apiproxy_errors.RequestTooLargeError,
                      self.list.flush)
    self.assertEqual(items[:context._ItemList._LARGEST_ITEMS_TO_LOG],
                     self.list._largest) 
Example #12
Source File: context_test.py    From appengine-mapreduce with Apache License 2.0
def testFlushLogLargestItems(self):
    self.pool = context._MutationPool(max_entity_count=3)
    self.pool.put(TestEntity(tag='a'*1024*1024))
    self.assertRaises(apiproxy_errors.RequestTooLargeError, self.pool.flush)
    self.assertTrue(self.pool.puts._largest)

    self.pool = context._MutationPool(max_entity_count=3)
    self.pool.ndb_put(NdbTestEntity(tag='a'*1024*1024))
    self.assertRaises(apiproxy_errors.RequestTooLargeError, self.pool.flush)
    self.assertTrue(self.pool.ndb_puts._largest) 
Example #13
Source File: photo.py    From personfinder with Apache License 2.0
def create_photo(image, repo, url_builder):
    """Creates a new Photo entity for the provided image of type images.Image
    after resizing it and converting to PNG.  It may throw a PhotoError on
    failure, which comes with a localized error message appropriate for
    display."""
    if image == False:  # False means it wasn't valid (see validate_image)
        raise FormatUnrecognizedError()

    if max(image.width, image.height) <= MAX_IMAGE_DIMENSION:
        # No resize needed.  Keep the same size but add a transformation to
        # force re-encoding.
        image.resize(image.width, image.height)
    elif image.width > image.height:
        image.resize(MAX_IMAGE_DIMENSION,
                     image.height * MAX_IMAGE_DIMENSION / image.width)
    else:
        image.resize(image.width * MAX_IMAGE_DIMENSION / image.height,
                     MAX_IMAGE_DIMENSION)

    try:
        image_data = image.execute_transforms(output_encoding=images.PNG)
    except RequestTooLargeError:
        raise SizeTooLargeError()
    except Exception:
        # There are various images.Error exceptions that can be raised, as well
        # as e.g. IOError if the image is corrupt.
        raise PhotoError()

    photo = model.Photo.create(repo, image_data=image_data)
    photo_url = get_photo_url(photo, repo, url_builder)
    return (photo, photo_url) 
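As a worked example of the resize arithmetic (the same logic appears in set_thumbnail below): with MAX_IMAGE_DIMENSION assumed to be 300, a 2000x1000 image takes the width > height branch and becomes 300x150, since 1000 * 300 / 2000 = 150; under Python 2 the / here is integer division, so fractional results are truncated.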
Example #14
Source File: photo.py    From personfinder with Apache License 2.0
def set_thumbnail(photo):
    """Sets thumbnail data for a photo.

    Args:
        photo: the Photo object to set the thumbnail for
    """
    image = images.Image(photo.image_data)
    if max(image.width, image.height) <= MAX_THUMBNAIL_DIMENSION:
        # Don't need a thumbnail, it's small enough already.
        return
    elif image.width > image.height:
        image.resize(MAX_THUMBNAIL_DIMENSION,
                     image.height * MAX_THUMBNAIL_DIMENSION / image.width)
    else:
        image.resize(image.width * MAX_THUMBNAIL_DIMENSION / image.height,
                     MAX_THUMBNAIL_DIMENSION)
    try:
        thumbnail_data = image.execute_transforms(output_encoding=images.PNG)
    except RequestTooLargeError:
        raise SizeTooLargeError()
    except Exception:
        # There are various images.Error exceptions that can be raised, as well
        # as e.g. IOError if the image is corrupt.
        raise PhotoError()

    photo.thumbnail_data = thumbnail_data
    photo.save() 
Example #15
Source File: urlfetch.py    From python-compat-runtime with Apache License 2.0
def _get_fetch_result(rpc):
  """Checks for success, handles exceptions, and returns a converted RPC result.

  This method waits for the RPC if it has not yet finished and calls the
  post-call hooks on the first invocation.

  Args:
    rpc: A UserRPC object.

  Raises:
    InvalidURLError: If the URL was invalid.
    DownloadError: If there was a problem fetching the URL.
    PayloadTooLargeError: If the request and its payload were larger than the
        allowed limit.
    ResponseTooLargeError: If the response was either truncated (and
        `allow_truncated=False` was passed to `make_fetch_call()`), or if it
        was too big for us to download.
    MalformedReplyError: If an invalid HTTP response was returned.
    TooManyRedirectsError: If the redirect limit was hit while `follow_redirects`
        was set to `True`.
    InternalTransientError: An internal error occurred. Wait a few minutes, then
        try again.
    ConnectionClosedError: If the target server prematurely closed the
        connection.
    DNSLookupFailedError: If the DNS lookup for the URL failed.
    DeadlineExceededError: If the deadline was exceeded; occurs when the
        client-supplied `deadline` is invalid or if the client did not specify a
        `deadline` and the system default value is invalid.
    SSLCertificateError: If an invalid server certificate was presented.
    AssertionError: If the `assert` statement fails.

  Returns:
    A `_URLFetchResult` object.
  """
  assert rpc.service == 'urlfetch', repr(rpc.service)
  assert rpc.method == 'Fetch', repr(rpc.method)

  url = rpc.request.url()

  try:
    rpc.check_success()
  except apiproxy_errors.RequestTooLargeError as err:
    raise InvalidURLError(
        'Request body too large fetching URL: ' + url)
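In this version of the runtime, callers of the public urlfetch API therefore never see RequestTooLargeError directly; an oversized request body surfaces as InvalidURLError (see the except clause above). A minimal caller-side sketch, assuming the synchronous urlfetch.fetch wrapper; the URL and payload size are illustrative:

from google.appengine.api import urlfetch

payload = 'x' * (11 * 1024 * 1024)  # larger than urlfetch permits
try:
    urlfetch.fetch('http://example.com/upload', payload=payload,
                   method=urlfetch.POST)
except urlfetch.InvalidURLError:
    # Raised for the oversized body, per _get_fetch_result above.
    pass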