Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/googleapiclient/http.py: 20%
672 statements
« prev ^ index » next coverage.py v7.3.2, created at 2023-12-08 06:51 +0000
« prev ^ index » next coverage.py v7.3.2, created at 2023-12-08 06:51 +0000
1# Copyright 2014 Google Inc. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
15"""Classes to encapsulate a single HTTP request.
17The classes implement a command pattern, with every
18object supporting an execute() method that does the
19actual HTTP request.
20"""
21from __future__ import absolute_import
23__author__ = "jcgregorio@google.com (Joe Gregorio)"
25import copy
26import http.client as http_client
27import io
28import json
29import logging
30import mimetypes
31import os
32import random
33import socket
34import time
35import urllib
36import uuid
38import httplib2
40# TODO(issue 221): Remove this conditional import.
41try:
42 import ssl
43except ImportError:
44 _ssl_SSLError = object()
45else:
46 _ssl_SSLError = ssl.SSLError
48from email.generator import Generator
49from email.mime.multipart import MIMEMultipart
50from email.mime.nonmultipart import MIMENonMultipart
51from email.parser import FeedParser
53from googleapiclient import _auth
54from googleapiclient import _helpers as util
55from googleapiclient.errors import (
56 BatchError,
57 HttpError,
58 InvalidChunkSizeError,
59 ResumableUploadError,
60 UnexpectedBodyError,
61 UnexpectedMethodError,
62)
63from googleapiclient.model import JsonModel
LOGGER = logging.getLogger(__name__)

# Default chunk size for media uploads/downloads: 100 MB.
DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024

# GET requests whose URI exceeds this length are converted into POSTs with an
# "x-http-method-override: GET" header (see HttpRequest.execute).
MAX_URI_LENGTH = 2048

# Maximum number of requests allowed in a single batch
# (presumably enforced by batching code outside this excerpt — verify).
MAX_BATCH_LIMIT = 1000

# HTTP 429 Too Many Requests; always treated as retriable by
# _should_retry_response.
_TOO_MANY_REQUESTS = 429

# Default HTTP timeout, in seconds
# (NOTE(review): not referenced in this excerpt — confirm usage at call sites).
DEFAULT_HTTP_TIMEOUT_SEC = 60

# Legacy global batch endpoint URI (used by batch code outside this excerpt).
_LEGACY_BATCH_URI = "https://www.googleapis.com/batch"
80def _should_retry_response(resp_status, content):
81 """Determines whether a response should be retried.
83 Args:
84 resp_status: The response status received.
85 content: The response content body.
87 Returns:
88 True if the response should be retried, otherwise False.
89 """
90 reason = None
92 # Retry on 5xx errors.
93 if resp_status >= 500:
94 return True
96 # Retry on 429 errors.
97 if resp_status == _TOO_MANY_REQUESTS:
98 return True
100 # For 403 errors, we have to check for the `reason` in the response to
101 # determine if we should retry.
102 if resp_status == http_client.FORBIDDEN:
103 # If there's no details about the 403 type, don't retry.
104 if not content:
105 return False
107 # Content is in JSON format.
108 try:
109 data = json.loads(content.decode("utf-8"))
110 if isinstance(data, dict):
111 # There are many variations of the error json so we need
112 # to determine the keyword which has the error detail. Make sure
113 # that the order of the keywords below isn't changed as it can
114 # break user code. If the "errors" key exists, we must use that
115 # first.
116 # See Issue #1243
117 # https://github.com/googleapis/google-api-python-client/issues/1243
118 error_detail_keyword = next(
119 (
120 kw
121 for kw in ["errors", "status", "message"]
122 if kw in data["error"]
123 ),
124 "",
125 )
127 if error_detail_keyword:
128 reason = data["error"][error_detail_keyword]
130 if isinstance(reason, list) and len(reason) > 0:
131 reason = reason[0]
132 if "reason" in reason:
133 reason = reason["reason"]
134 else:
135 reason = data[0]["error"]["errors"]["reason"]
136 except (UnicodeDecodeError, ValueError, KeyError):
137 LOGGER.warning("Invalid JSON content from response: %s", content)
138 return False
140 LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)
142 # Only retry on rate limit related failures.
143 if reason in ("userRateLimitExceeded", "rateLimitExceeded"):
144 return True
146 # Everything else is a success or non-retriable so break.
147 return False
def _retry_request(
    http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs
):
    """Retries an HTTP request multiple times while handling errors.

    If after all retries the request still fails, last error is either returned as
    return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).

    Args:
      http: Http object to be used to execute request.
      num_retries: Maximum number of retries.
      req_type: Type of the request (used for logging retries).
      sleep, rand: Functions to sleep for random time between retries.
      uri: URI to be requested.
      method: HTTP method to be used.
      args, kwargs: Additional arguments passed to http.request.

    Returns:
      resp, content - Response from the http request (may be HTTP 5xx).

    Raises:
      The last transport-level exception (SSL error, socket timeout,
      connection error, retriable OSError, or httplib2.ServerNotFoundError)
      if every attempt raised one.
    """
    resp = None
    content = None
    exception = None
    # One initial attempt plus num_retries retries.
    for retry_num in range(num_retries + 1):
        if retry_num > 0:
            # Sleep before retrying: randomized exponential backoff.
            sleep_time = rand() * 2**retry_num
            LOGGER.warning(
                "Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s",
                sleep_time,
                retry_num,
                num_retries,
                req_type,
                method,
                uri,
                resp.status if resp else exception,
            )
            sleep(sleep_time)

        try:
            # Reset so a stale exception from a prior attempt is not re-raised.
            exception = None
            resp, content = http.request(uri, method, *args, **kwargs)
        # Retry on SSL errors and socket timeout errors.
        # NOTE: the order of these handlers matters — the earlier ones catch
        # subclasses of OSError before the generic OSError handler below.
        except _ssl_SSLError as ssl_error:
            exception = ssl_error
        except socket.timeout as socket_timeout:
            # Needs to be before socket.error as it's a subclass of OSError
            # socket.timeout has no errorcode
            exception = socket_timeout
        except ConnectionError as connection_error:
            # Needs to be before socket.error as it's a subclass of OSError
            exception = connection_error
        except OSError as socket_error:
            # errno's contents differ by platform, so we have to match by name.
            # Some of these same errors may have been caught above, e.g. ECONNRESET *should* be
            # raised as a ConnectionError, but some libraries will raise it as a socket.error
            # with an errno corresponding to ECONNRESET
            if socket.errno.errorcode.get(socket_error.errno) not in {
                "WSAETIMEDOUT",
                "ETIMEDOUT",
                "EPIPE",
                "ECONNABORTED",
                "ECONNREFUSED",
                "ECONNRESET",
            }:
                # Not a known-transient errno: propagate immediately.
                raise
            exception = socket_error
        except httplib2.ServerNotFoundError as server_not_found_error:
            exception = server_not_found_error

        if exception:
            if retry_num == num_retries:
                # Out of retries: surface the last transport error.
                raise exception
            else:
                continue

        # Got a response; stop unless its status warrants another attempt.
        if not _should_retry_response(resp.status, content):
            break

    return resp, content
class MediaUploadProgress(object):
    """Status of a resumable upload."""

    def __init__(self, resumable_progress, total_size):
        """Constructor.

        Args:
          resumable_progress: int, bytes sent so far.
          total_size: int, total bytes in complete upload, or None if the total
            upload size isn't known ahead of time.
        """
        self.resumable_progress = resumable_progress
        self.total_size = total_size

    def progress(self):
        """Percent of upload completed, as a float.

        Returns:
          the percentage complete as a float, returning 0.0 if the total size of
          the upload is unknown.
        """
        # An unknown (None) or zero total size yields 0.0 rather than a
        # division error.
        if not self.total_size:
            return 0.0
        return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
    """Status of a resumable download."""

    def __init__(self, resumable_progress, total_size):
        """Constructor.

        Args:
          resumable_progress: int, bytes received so far.
          total_size: int, total bytes in complete download.
        """
        self.resumable_progress = resumable_progress
        self.total_size = total_size

    def progress(self):
        """Percent of download completed, as a float.

        Returns:
          the percentage complete as a float, returning 0.0 if the total size of
          the download is unknown.
        """
        total = self.total_size
        if total is None or total == 0:
            # Fraction is undefined without a known, non-zero total.
            return 0.0
        return float(self.resumable_progress) / float(total)
class MediaUpload(object):
    """Describes a media object to upload.

    Base class defining the interface that all MediaUpload subclasses share.

    Subclasses may let you control the chunksize used when uploading a media
    object. Keep the chunk as large as possible for efficient uploads, but be
    aware of environments that put a hard time limit on individual HTTP
    requests, such as certain classes of requests under Google App Engine.

    Streams are io.Base compatible objects that support seek(). A subclass
    advertises stream support by returning True from has_stream(); in that case
    stream() returns a seekable io.Base object, and on platforms whose httplib
    supports streaming the stream is handed to the http library directly,
    using less memory and possibly uploading faster.

    Sub-class MediaUpload if you need to upload media that none of the
    existing MediaUpload subclasses can handle.
    """

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        raise NotImplementedError()

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return "application/octet-stream"

    def size(self):
        """Size of upload.

        Returns:
          Size of the body, or None if the size is unknown.
        """
        return None

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return False

    def getbytes(self, begin, end):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          end: int, number of bytes to read, starting at begin.

        Returns:
          A string of bytes read. May be shorter than requested if EOF was
          reached first.
        """
        raise NotImplementedError()

    def has_stream(self):
        """Does the underlying upload support a streaming interface.

        Streaming means it is an io.IOBase subclass that supports seek, i.e.
        seekable() returns True.

        Returns:
          True if the call to stream() will return an instance of a seekable
          io.Base subclass.
        """
        return False

    def stream(self):
        """A stream interface to the data being uploaded.

        Returns:
          The returned value is an io.IOBase subclass that supports seek, i.e.
          seekable() returns True.
        """
        raise NotImplementedError()

    @util.positional(1)
    def _to_json(self, strip=None):
        """Utility function for creating a JSON representation of a MediaUpload.

        Args:
          strip: array, An array of names of members to not include in the JSON.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        klass = type(self)
        state = dict(self.__dict__)
        for member in strip or ():
            del state[member]
        # Record the concrete class so new_from_json() can find it again.
        state["_class"] = klass.__name__
        state["_module"] = klass.__module__
        return json.dumps(state)

    def to_json(self):
        """Create a JSON representation of an instance of MediaUpload.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        return self._to_json()

    @classmethod
    def new_from_json(cls, s):
        """Utility class method to instantiate a MediaUpload subclass from a JSON
        representation produced by to_json().

        Args:
          s: string, JSON from to_json().

        Returns:
          An instance of the subclass of MediaUpload that was serialized with
          to_json().
        """
        data = json.loads(s)
        # Import the module that defined the serialized class, then delegate
        # to that class's own from_json() factory.
        module_name = data["_module"]
        module = __import__(module_name, fromlist=module_name.split(".")[:-1])
        serialized_class = getattr(module, data["_class"])
        return getattr(serialized_class, "from_json")(s)
class MediaIoBaseUpload(MediaUpload):
    """A MediaUpload for a io.Base objects.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

      fh = BytesIO('...Some data to upload...')
      media = MediaIoBaseUpload(fh, mimetype='image/png',
        chunksize=1024*1024, resumable=True)
      farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media).execute()

    A chunksize of -1 uploads the entire file in a single request. On platforms
    with stream support this can be very efficient: it avoids multiple
    connections and does not load the whole file into memory before sending.
    Note that Google App Engine has a 5MB limit on request size, so there the
    chunksize should never exceed 5MB nor be set to -1.
    """

    @util.positional(3)
    def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
        """Constructor.

        Args:
          fd: io.Base or file object, The source of the bytes to upload. MUST be
            opened in blocking mode, do not use streams opened in non-blocking mode.
            The given stream must be seekable, that is, it must be able to call
            seek() on fd.
          mimetype: string, Mime-type of the file.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True. Pass in a value of -1 if the file is to be
            uploaded as a single chunk. Note that Google App Engine has a 5MB limit
            on request size, so you should never set your chunksize larger than 5MB,
            or to -1.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        super(MediaIoBaseUpload, self).__init__()
        self._fd = fd
        self._mimetype = mimetype
        # Only -1 (single request) or a positive chunk size is meaningful.
        if chunksize != -1 and chunksize <= 0:
            raise InvalidChunkSizeError()
        self._chunksize = chunksize
        self._resumable = resumable

        # The total size is the current end-of-stream position.
        self._fd.seek(0, os.SEEK_END)
        self._size = self._fd.tell()

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        return self._chunksize

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return self._mimetype

    def size(self):
        """Size of upload.

        Returns:
          Size of the body, or None if the size is unknown.
        """
        return self._size

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return self._resumable

    def getbytes(self, begin, length):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          length: int, number of bytes to read, starting at begin.

        Returns:
          A string of bytes read. May be shorter than length if EOF was reached
          first.
        """
        self._fd.seek(begin)
        return self._fd.read(length)

    def has_stream(self):
        """Does the underlying upload support a streaming interface.

        Streaming means it is an io.IOBase subclass that supports seek, i.e.
        seekable() returns True.

        Returns:
          True if the call to stream() will return an instance of a seekable
          io.Base subclass.
        """
        return True

    def stream(self):
        """A stream interface to the data being uploaded.

        Returns:
          The returned value is an io.IOBase subclass that supports seek, i.e.
          seekable() returns True.
        """
        return self._fd

    def to_json(self):
        """This upload type is not serializable."""
        raise NotImplementedError("MediaIoBaseUpload is not serializable.")
class MediaFileUpload(MediaIoBaseUpload):
    """A MediaUpload for a file.

    Construct a MediaFileUpload and pass as the media_body parameter of the
    method. For example, if we had a service that allowed uploading images:

      media = MediaFileUpload('cow.png', mimetype='image/png',
        chunksize=1024*1024, resumable=True)
      farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media).execute()

    A chunksize of -1 uploads the entire file in a single request. On platforms
    with stream support this can be very efficient: it avoids multiple
    connections and does not load the whole file into memory before sending.
    Note that Google App Engine has a 5MB limit on request size, so there the
    chunksize should never exceed 5MB nor be set to -1.
    """

    @util.positional(2)
    def __init__(
        self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False
    ):
        """Constructor.

        Args:
          filename: string, Name of the file.
          mimetype: string, Mime-type of the file. If None then a mime-type will be
            guessed from the file extension.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True. Pass in a value of -1 if the file is to be
            uploaded in a single chunk. Note that Google App Engine has a 5MB limit
            on request size, so you should never set your chunksize larger than 5MB,
            or to -1.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        # Initialize _fd first so __del__ stays safe even if open() raises.
        self._fd = None
        self._filename = filename
        self._fd = open(self._filename, "rb")
        if mimetype is None:
            # Guess from the filename extension, defaulting to octet-stream.
            guessed, _ = mimetypes.guess_type(filename)
            mimetype = guessed if guessed is not None else "application/octet-stream"
        super(MediaFileUpload, self).__init__(
            self._fd, mimetype, chunksize=chunksize, resumable=resumable
        )

    def __del__(self):
        # Close the file handle on garbage collection.
        if self._fd:
            self._fd.close()

    def to_json(self):
        """Creating a JSON representation of an instance of MediaFileUpload.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        # The open file handle cannot be serialized; drop it.
        return self._to_json(strip=["_fd"])

    @staticmethod
    def from_json(s):
        """Restore a MediaFileUpload serialized by to_json() (reopens the file)."""
        d = json.loads(s)
        return MediaFileUpload(
            d["_filename"],
            mimetype=d["_mimetype"],
            chunksize=d["_chunksize"],
            resumable=d["_resumable"],
        )
class MediaInMemoryUpload(MediaIoBaseUpload):
    """MediaUpload for a chunk of bytes.

    DEPRECATED: Use MediaIoBaseUpload with an io.BytesIO stream for the body
    bytes instead.
    """

    @util.positional(2)
    def __init__(
        self,
        body,
        mimetype="application/octet-stream",
        chunksize=DEFAULT_CHUNK_SIZE,
        resumable=False,
    ):
        """Create a new MediaInMemoryUpload.

        DEPRECATED: Use MediaIoBaseUpload with an io.BytesIO stream for the body
        bytes instead.

        Args:
          body: string, Bytes of body content.
          mimetype: string, Mime-type of the file or default of
            'application/octet-stream'.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        # Wrap the raw bytes in a seekable stream and delegate everything else
        # to MediaIoBaseUpload.
        super(MediaInMemoryUpload, self).__init__(
            io.BytesIO(body), mimetype, chunksize=chunksize, resumable=resumable
        )
class MediaIoBaseDownload(object):
    """Download media resources.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

    Example:
      request = farms.animals().get_media(id='cow')
      fh = io.FileIO('cow.png', mode='wb')
      downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

      done = False
      while done is False:
        status, done = downloader.next_chunk()
        if status:
          print "Download %d%%." % int(status.progress() * 100)
      print "Download Complete!"
    """

    @util.positional(3)
    def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
        """Constructor.

        Args:
          fd: io.Base or file object, The stream in which to write the downloaded
            bytes.
          request: googleapiclient.http.HttpRequest, the media request to perform in
            chunks.
          chunksize: int, File will be downloaded in chunks of this many bytes.
        """
        self._fd = fd
        self._request = request
        self._uri = request.uri
        self._chunksize = chunksize
        # Bytes written to fd so far.
        self._progress = 0
        # Total media size; unknown (None) until a response reveals it.
        self._total_size = None
        self._done = False

        # Stubs for testing.
        self._sleep = time.sleep
        self._rand = random.random

        self._headers = {}
        for k, v in request.headers.items():
            # allow users to supply custom headers by setting them on the request
            # but strip out the ones that are set by default on requests generated by
            # API methods like Drive's files().get(fileId=...)
            if not k.lower() in ("accept", "accept-encoding", "user-agent"):
                self._headers[k] = v

    @util.positional(1)
    def next_chunk(self, num_retries=0):
        """Get the next chunk of the download.

        Args:
          num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

        Returns:
          (status, done): (MediaDownloadProgress, boolean)
             The value of 'done' will be True when the media has been fully
             downloaded or the total size of the media is unknown.

        Raises:
          googleapiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        # Request only the byte window for the next chunk.
        headers = self._headers.copy()
        headers["range"] = "bytes=%d-%d" % (
            self._progress,
            self._progress + self._chunksize - 1,
        )
        http = self._request.http

        resp, content = _retry_request(
            http,
            num_retries,
            "media download",
            self._sleep,
            self._rand,
            self._uri,
            "GET",
            headers=headers,
        )

        if resp.status in [200, 206]:
            # Follow a server-provided relocation of the media URI.
            if "content-location" in resp and resp["content-location"] != self._uri:
                self._uri = resp["content-location"]
            self._progress += len(content)
            self._fd.write(content)

            # Learn the total size from content-range ("bytes a-b/total"),
            # falling back to content-length.
            if "content-range" in resp:
                content_range = resp["content-range"]
                length = content_range.rsplit("/", 1)[1]
                self._total_size = int(length)
            elif "content-length" in resp:
                self._total_size = int(resp["content-length"])

            if self._total_size is None or self._progress == self._total_size:
                self._done = True
            return MediaDownloadProgress(self._progress, self._total_size), self._done
        elif resp.status == 416:
            # 416 is Range Not Satisfiable
            # This typically occurs with a zero byte file
            content_range = resp["content-range"]
            length = content_range.rsplit("/", 1)[1]
            self._total_size = int(length)
            if self._total_size == 0:
                self._done = True
                return (
                    MediaDownloadProgress(self._progress, self._total_size),
                    self._done,
                )
        # Any other status is an error.
        raise HttpError(resp, content, uri=self._uri)
783class _StreamSlice(object):
784 """Truncated stream.
786 Takes a stream and presents a stream that is a slice of the original stream.
787 This is used when uploading media in chunks. In later versions of Python a
788 stream can be passed to httplib in place of the string of data to send. The
789 problem is that httplib just blindly reads to the end of the stream. This
790 wrapper presents a virtual stream that only reads to the end of the chunk.
791 """
793 def __init__(self, stream, begin, chunksize):
794 """Constructor.
796 Args:
797 stream: (io.Base, file object), the stream to wrap.
798 begin: int, the seek position the chunk begins at.
799 chunksize: int, the size of the chunk.
800 """
801 self._stream = stream
802 self._begin = begin
803 self._chunksize = chunksize
804 self._stream.seek(begin)
806 def read(self, n=-1):
807 """Read n bytes.
809 Args:
810 n, int, the number of bytes to read.
812 Returns:
813 A string of length 'n', or less if EOF is reached.
814 """
815 # The data left available to read sits in [cur, end)
816 cur = self._stream.tell()
817 end = self._begin + self._chunksize
818 if n == -1 or cur + n > end:
819 n = end - cur
820 return self._stream.read(n)
823class HttpRequest(object):
824 """Encapsulates a single HTTP request."""
826 @util.positional(4)
827 def __init__(
828 self,
829 http,
830 postproc,
831 uri,
832 method="GET",
833 body=None,
834 headers=None,
835 methodId=None,
836 resumable=None,
837 ):
838 """Constructor for an HttpRequest.
840 Args:
841 http: httplib2.Http, the transport object to use to make a request
842 postproc: callable, called on the HTTP response and content to transform
843 it into a data object before returning, or raising an exception
844 on an error.
845 uri: string, the absolute URI to send the request to
846 method: string, the HTTP method to use
847 body: string, the request body of the HTTP request,
848 headers: dict, the HTTP request headers
849 methodId: string, a unique identifier for the API method being called.
850 resumable: MediaUpload, None if this is not a resumbale request.
851 """
852 self.uri = uri
853 self.method = method
854 self.body = body
855 self.headers = headers or {}
856 self.methodId = methodId
857 self.http = http
858 self.postproc = postproc
859 self.resumable = resumable
860 self.response_callbacks = []
861 self._in_error_state = False
863 # The size of the non-media part of the request.
864 self.body_size = len(self.body or "")
866 # The resumable URI to send chunks to.
867 self.resumable_uri = None
869 # The bytes that have been uploaded.
870 self.resumable_progress = 0
872 # Stubs for testing.
873 self._rand = random.random
874 self._sleep = time.sleep
876 @util.positional(1)
877 def execute(self, http=None, num_retries=0):
878 """Execute the request.
880 Args:
881 http: httplib2.Http, an http object to be used in place of the
882 one the HttpRequest request object was constructed with.
883 num_retries: Integer, number of times to retry with randomized
884 exponential backoff. If all retries fail, the raised HttpError
885 represents the last request. If zero (default), we attempt the
886 request only once.
888 Returns:
889 A deserialized object model of the response body as determined
890 by the postproc.
892 Raises:
893 googleapiclient.errors.HttpError if the response was not a 2xx.
894 httplib2.HttpLib2Error if a transport error has occurred.
895 """
896 if http is None:
897 http = self.http
899 if self.resumable:
900 body = None
901 while body is None:
902 _, body = self.next_chunk(http=http, num_retries=num_retries)
903 return body
905 # Non-resumable case.
907 if "content-length" not in self.headers:
908 self.headers["content-length"] = str(self.body_size)
909 # If the request URI is too long then turn it into a POST request.
910 # Assume that a GET request never contains a request body.
911 if len(self.uri) > MAX_URI_LENGTH and self.method == "GET":
912 self.method = "POST"
913 self.headers["x-http-method-override"] = "GET"
914 self.headers["content-type"] = "application/x-www-form-urlencoded"
915 parsed = urllib.parse.urlparse(self.uri)
916 self.uri = urllib.parse.urlunparse(
917 (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None, None)
918 )
919 self.body = parsed.query
920 self.headers["content-length"] = str(len(self.body))
922 # Handle retries for server-side errors.
923 resp, content = _retry_request(
924 http,
925 num_retries,
926 "request",
927 self._sleep,
928 self._rand,
929 str(self.uri),
930 method=str(self.method),
931 body=self.body,
932 headers=self.headers,
933 )
935 for callback in self.response_callbacks:
936 callback(resp)
937 if resp.status >= 300:
938 raise HttpError(resp, content, uri=self.uri)
939 return self.postproc(resp, content)
    @util.positional(2)
    def add_response_callback(self, cb):
        """add_response_headers_callback

        Registers a callback that execute() will invoke with the
        httplib2.Response of each request it performs.

        Args:
          cb: Callback to be called on receiving the response headers, of signature:

            def cb(resp):
              # Where resp is an instance of httplib2.Response
        """
        self.response_callbacks.append(cb)
    @util.positional(1)
    def next_chunk(self, http=None, num_retries=0):
        """Execute the next step of a resumable upload.

        Can only be used if the method being executed supports media uploads and
        the MediaUpload object passed in was flagged as using resumable upload.

        Example:

          media = MediaFileUpload('cow.png', mimetype='image/png',
                                  chunksize=1000, resumable=True)
          request = farm.animals().insert(
              id='cow',
              name='cow.png',
              media_body=media)

          response = None
          while response is None:
            status, response = request.next_chunk()
            if status:
              print "Upload %d%% complete." % int(status.progress() * 100)


        Args:
          http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.
          num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

        Returns:
          (status, body): (ResumableMediaStatus, object)
             The body will be None until the resumable media is fully uploaded.

        Raises:
          googleapiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        if http is None:
            http = self.http

        # "*" tells the server the total size is not known in advance.
        if self.resumable.size() is None:
            size = "*"
        else:
            size = str(self.resumable.size())

        if self.resumable_uri is None:
            # First call: start the resumable session to obtain the upload URI.
            start_headers = copy.copy(self.headers)
            start_headers["X-Upload-Content-Type"] = self.resumable.mimetype()
            if size != "*":
                start_headers["X-Upload-Content-Length"] = size
            start_headers["content-length"] = str(self.body_size)

            resp, content = _retry_request(
                http,
                num_retries,
                "resumable URI request",
                self._sleep,
                self._rand,
                self.uri,
                method=self.method,
                body=self.body,
                headers=start_headers,
            )

            if resp.status == 200 and "location" in resp:
                self.resumable_uri = resp["location"]
            else:
                raise ResumableUploadError(resp, content)
        elif self._in_error_state:
            # If we are in an error state then query the server for current state of
            # the upload by sending an empty PUT and reading the 'range' header in
            # the response.
            headers = {"Content-Range": "bytes */%s" % size, "content-length": "0"}
            resp, content = http.request(self.resumable_uri, "PUT", headers=headers)
            status, body = self._process_response(resp, content)
            if body:
                # The upload was complete.
                return (status, body)

        if self.resumable.has_stream():
            data = self.resumable.stream()
            if self.resumable.chunksize() == -1:
                # Single-request upload: send everything from current progress.
                data.seek(self.resumable_progress)
                chunk_end = self.resumable.size() - self.resumable_progress - 1
            else:
                # Doing chunking with a stream, so wrap a slice of the stream.
                data = _StreamSlice(
                    data, self.resumable_progress, self.resumable.chunksize()
                )
                chunk_end = min(
                    self.resumable_progress + self.resumable.chunksize() - 1,
                    self.resumable.size() - 1,
                )
        else:
            # No stream support: materialize the next chunk as bytes.
            data = self.resumable.getbytes(
                self.resumable_progress, self.resumable.chunksize()
            )

            # A short read implies that we are at EOF, so finish the upload.
            if len(data) < self.resumable.chunksize():
                size = str(self.resumable_progress + len(data))

            chunk_end = self.resumable_progress + len(data) - 1

        headers = {
            # Must set the content-length header here because httplib can't
            # calculate the size when working with _StreamSlice.
            "Content-Length": str(chunk_end - self.resumable_progress + 1),
        }

        # An empty file results in chunk_end = -1 and size = 0
        # sending "bytes 0--1/0" results in an invalid request
        # Only add header "Content-Range" if chunk_end != -1
        if chunk_end != -1:
            headers["Content-Range"] = "bytes %d-%d/%s" % (
                self.resumable_progress,
                chunk_end,
                size,
            )

        for retry_num in range(num_retries + 1):
            if retry_num > 0:
                # Randomized exponential backoff between retries.
                self._sleep(self._rand() * 2**retry_num)
                LOGGER.warning(
                    "Retry #%d for media upload: %s %s, following status: %d"
                    % (retry_num, self.method, self.uri, resp.status)
                )

            try:
                resp, content = http.request(
                    self.resumable_uri, method="PUT", body=data, headers=headers
                )
            except:
                # Remember the failure so the next call re-queries upload state.
                self._in_error_state = True
                raise
            if not _should_retry_response(resp.status, content):
                break

        return self._process_response(resp, content)
1095 def _process_response(self, resp, content):
1096 """Process the response from a single chunk upload.
1098 Args:
1099 resp: httplib2.Response, the response object.
1100 content: string, the content of the response.
1102 Returns:
1103 (status, body): (ResumableMediaStatus, object)
1104 The body will be None until the resumable media is fully uploaded.
1106 Raises:
1107 googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
1108 """
1109 if resp.status in [200, 201]:
1110 self._in_error_state = False
1111 return None, self.postproc(resp, content)
1112 elif resp.status == 308:
1113 self._in_error_state = False
1114 # A "308 Resume Incomplete" indicates we are not done.
1115 try:
1116 self.resumable_progress = int(resp["range"].split("-")[1]) + 1
1117 except KeyError:
1118 # If resp doesn't contain range header, resumable progress is 0
1119 self.resumable_progress = 0
1120 if "location" in resp:
1121 self.resumable_uri = resp["location"]
1122 else:
1123 self._in_error_state = True
1124 raise HttpError(resp, content, uri=self.uri)
1126 return (
1127 MediaUploadProgress(self.resumable_progress, self.resumable.size()),
1128 None,
1129 )
1131 def to_json(self):
1132 """Returns a JSON representation of the HttpRequest."""
1133 d = copy.copy(self.__dict__)
1134 if d["resumable"] is not None:
1135 d["resumable"] = self.resumable.to_json()
1136 del d["http"]
1137 del d["postproc"]
1138 del d["_sleep"]
1139 del d["_rand"]
1141 return json.dumps(d)
1143 @staticmethod
1144 def from_json(s, http, postproc):
1145 """Returns an HttpRequest populated with info from a JSON object."""
1146 d = json.loads(s)
1147 if d["resumable"] is not None:
1148 d["resumable"] = MediaUpload.new_from_json(d["resumable"])
1149 return HttpRequest(
1150 http,
1151 postproc,
1152 uri=d["uri"],
1153 method=d["method"],
1154 body=d["body"],
1155 headers=d["headers"],
1156 methodId=d["methodId"],
1157 resumable=d["resumable"],
1158 )
1160 @staticmethod
1161 def null_postproc(resp, contents):
1162 return resp, contents
class BatchHttpRequest(object):
    """Batches multiple HttpRequest objects into a single HTTP request.

    Example:
      from googleapiclient.http import BatchHttpRequest

      def list_animals(request_id, response, exception):
        \"\"\"Do something with the animals list response.\"\"\"
        if exception is not None:
          # Do something with the exception.
          pass
        else:
          # Do something with the response.
          pass

      def list_farmers(request_id, response, exception):
        \"\"\"Do something with the farmers list response.\"\"\"
        if exception is not None:
          # Do something with the exception.
          pass
        else:
          # Do something with the response.
          pass

      service = build('farm', 'v2')

      batch = BatchHttpRequest()

      batch.add(service.animals().list(), list_animals)
      batch.add(service.farmers().list(), list_farmers)
      batch.execute(http=http)
    """

    @util.positional(1)
    def __init__(self, callback=None, batch_uri=None):
        """Constructor for a BatchHttpRequest.

        Args:
          callback: callable, A callback to be called for each response, of the
            form callback(id, response, exception). The first parameter is the
            request id, and the second is the deserialized response object. The
            third is an googleapiclient.errors.HttpError exception object if an HTTP error
            occurred while processing the request, or None if no error occurred.
          batch_uri: string, URI to send batch requests to.
        """
        if batch_uri is None:
            batch_uri = _LEGACY_BATCH_URI

        if batch_uri == _LEGACY_BATCH_URI:
            # NOTE(review): the concatenated warning text below is missing a
            # space between the first URL and the word "and".
            LOGGER.warning(
                "You have constructed a BatchHttpRequest using the legacy batch "
                "endpoint %s. This endpoint will be turned down on August 12, 2020. "
                "Please provide the API-specific endpoint or use "
                "service.new_batch_http_request(). For more details see "
                "https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html"
                "and https://developers.google.com/api-client-library/python/guide/batch.",
                _LEGACY_BATCH_URI,
            )
        self._batch_uri = batch_uri

        # Global callback to be called for each individual response in the batch.
        self._callback = callback

        # A map from id to request.
        self._requests = {}

        # A map from id to callback.
        self._callbacks = {}

        # List of request ids, in the order in which they were added.
        self._order = []

        # The last auto generated id.
        self._last_auto_id = 0

        # Unique ID on which to base the Content-ID headers.
        self._base_id = None

        # A map from request id to (httplib2.Response, content) response pairs
        self._responses = {}

        # A map of id(Credentials) that have been refreshed.
        self._refreshed_credentials = {}

    def _refresh_and_apply_credentials(self, request, http):
        """Refresh the credentials and apply to the request.

        Args:
          request: HttpRequest, the request.
          http: httplib2.Http, the global http object for the batch.
        """
        # For the credentials to refresh, but only once per refresh_token
        # If there is no http per the request then refresh the http passed in
        # via execute()
        creds = None
        request_credentials = False

        if request.http is not None:
            creds = _auth.get_credentials_from_http(request.http)
            request_credentials = True

        if creds is None and http is not None:
            creds = _auth.get_credentials_from_http(http)

        if creds is not None:
            # Refresh each distinct credentials object at most once per batch;
            # keyed by id() since credentials are not hashable in general.
            if id(creds) not in self._refreshed_credentials:
                _auth.refresh_credentials(creds)
                self._refreshed_credentials[id(creds)] = 1

        # Only apply the credentials if we are using the http object passed in,
        # otherwise apply() will get called during _serialize_request().
        if request.http is None or not request_credentials:
            _auth.apply_credentials(creds, request.headers)

    def _id_to_header(self, id_):
        """Convert an id to a Content-ID header value.

        Args:
          id_: string, identifier of individual request.

        Returns:
          A Content-ID header with the id_ encoded into it. A UUID is prepended to
          the value because Content-ID headers are supposed to be universally
          unique.
        """
        # Lazily pick one UUID per batch so all Content-IDs share a prefix.
        if self._base_id is None:
            self._base_id = uuid.uuid4()

        # NB: we intentionally leave whitespace between base/id and '+', so RFC2822
        # line folding works properly on Python 3; see
        # https://github.com/googleapis/google-api-python-client/issues/164
        return "<%s + %s>" % (self._base_id, urllib.parse.quote(id_))

    def _header_to_id(self, header):
        """Convert a Content-ID header value to an id.

        Presumes the Content-ID header conforms to the format that _id_to_header()
        returns.

        Args:
          header: string, Content-ID header value.

        Returns:
          The extracted id value.

        Raises:
          BatchError if the header is not in the expected format.
        """
        if header[0] != "<" or header[-1] != ">":
            raise BatchError("Invalid value for Content-ID: %s" % header)
        if "+" not in header:
            raise BatchError("Invalid value for Content-ID: %s" % header)
        # The UUID prefix before " + " is discarded; only the id part matters.
        base, id_ = header[1:-1].split(" + ", 1)

        return urllib.parse.unquote(id_)

    def _serialize_request(self, request):
        """Convert an HttpRequest object into a string.

        Args:
          request: HttpRequest, the request to serialize.

        Returns:
          The request as a string in application/http format.
        """
        # Construct status line
        parsed = urllib.parse.urlparse(request.uri)
        # Keep only the path/params/query — the batch endpoint supplies the host.
        request_line = urllib.parse.urlunparse(
            ("", "", parsed.path, parsed.params, parsed.query, "")
        )
        status_line = request.method + " " + request_line + " HTTP/1.1\n"
        major, minor = request.headers.get("content-type", "application/json").split(
            "/"
        )
        msg = MIMENonMultipart(major, minor)
        headers = request.headers.copy()

        if request.http is not None:
            credentials = _auth.get_credentials_from_http(request.http)
            if credentials is not None:
                _auth.apply_credentials(credentials, headers)

        # MIMENonMultipart adds its own Content-Type header.
        if "content-type" in headers:
            del headers["content-type"]

        for key, value in headers.items():
            msg[key] = value
        msg["Host"] = parsed.netloc
        msg.set_unixfrom(None)

        if request.body is not None:
            msg.set_payload(request.body)
            msg["content-length"] = str(len(request.body))

        # Serialize the mime message.
        fp = io.StringIO()
        # maxheaderlen=0 means don't line wrap headers.
        g = Generator(fp, maxheaderlen=0)
        g.flatten(msg, unixfrom=False)
        body = fp.getvalue()

        return status_line + body

    def _deserialize_response(self, payload):
        """Convert string into httplib2 response and content.

        Args:
          payload: string, headers and body as a string.

        Returns:
          A pair (resp, content), such as would be returned from httplib2.request.
        """
        # Strip off the status line
        status_line, payload = payload.split("\n", 1)
        protocol, status, reason = status_line.split(" ", 2)

        # Parse the rest of the response
        parser = FeedParser()
        parser.feed(payload)
        msg = parser.close()
        msg["status"] = status

        # Create httplib2.Response from the parsed headers.
        resp = httplib2.Response(msg)
        resp.reason = reason
        # e.g. "HTTP/1.1" -> 11, matching httplib2's version convention.
        resp.version = int(protocol.split("/", 1)[1].replace(".", ""))

        # The body follows the blank line that terminates the headers.
        content = payload.split("\r\n\r\n", 1)[1]

        return resp, content

    def _new_id(self):
        """Create a new id.

        Auto incrementing number that avoids conflicts with ids already used.

        Returns:
          string, a new unique id.
        """
        self._last_auto_id += 1
        while str(self._last_auto_id) in self._requests:
            self._last_auto_id += 1
        return str(self._last_auto_id)

    @util.positional(2)
    def add(self, request, callback=None, request_id=None):
        """Add a new request.

        Every callback added will be paired with a unique id, the request_id. That
        unique id will be passed back to the callback when the response comes back
        from the server. The default behavior is to have the library generate its
        own unique id. If the caller passes in a request_id then they must ensure
        uniqueness for each request_id, and if they are not an exception is
        raised. Callers should either supply all request_ids or never supply a
        request id, to avoid such an error.

        Args:
          request: HttpRequest, Request to add to the batch.
          callback: callable, A callback to be called for this response, of the
            form callback(id, response, exception). The first parameter is the
            request id, and the second is the deserialized response object. The
            third is an googleapiclient.errors.HttpError exception object if an HTTP error
            occurred while processing the request, or None if no errors occurred.
          request_id: string, A unique id for the request. The id will be passed
            to the callback with the response.

        Returns:
          None

        Raises:
          BatchError if a media request is added to a batch.
          KeyError if the request_id is not unique.
        """

        if len(self._order) >= MAX_BATCH_LIMIT:
            raise BatchError(
                "Exceeded the maximum calls(%d) in a single batch request."
                % MAX_BATCH_LIMIT
            )
        if request_id is None:
            request_id = self._new_id()
        if request.resumable is not None:
            raise BatchError("Media requests cannot be used in a batch request.")
        if request_id in self._requests:
            raise KeyError("A request with this ID already exists: %s" % request_id)
        self._requests[request_id] = request
        self._callbacks[request_id] = callback
        self._order.append(request_id)

    def _execute(self, http, order, requests):
        """Serialize batch request, send to server, process response.

        Args:
          http: httplib2.Http, an http object to be used to make the request with.
          order: list, list of request ids in the order they were added to the
            batch.
          requests: list, list of request objects to send.

        Raises:
          httplib2.HttpLib2Error if a transport error has occurred.
          googleapiclient.errors.BatchError if the response is the wrong format.
        """
        message = MIMEMultipart("mixed")
        # Message should not write out it's own headers.
        setattr(message, "_write_headers", lambda self: None)

        # Add all the individual requests.
        for request_id in order:
            request = requests[request_id]

            msg = MIMENonMultipart("application", "http")
            msg["Content-Transfer-Encoding"] = "binary"
            msg["Content-ID"] = self._id_to_header(request_id)

            body = self._serialize_request(request)
            msg.set_payload(body)
            message.attach(msg)

        # encode the body: note that we can't use `as_string`, because
        # it plays games with `From ` lines.
        fp = io.StringIO()
        g = Generator(fp, mangle_from_=False)
        g.flatten(message, unixfrom=False)
        body = fp.getvalue()

        headers = {}
        headers["content-type"] = (
            "multipart/mixed; " 'boundary="%s"'
        ) % message.get_boundary()

        resp, content = http.request(
            self._batch_uri, method="POST", body=body, headers=headers
        )

        if resp.status >= 300:
            raise HttpError(resp, content, uri=self._batch_uri)

        # Prepend with a content-type header so FeedParser can handle it.
        header = "content-type: %s\r\n\r\n" % resp["content-type"]
        # PY3's FeedParser only accepts unicode. So we should decode content
        # here, and encode each payload again.
        content = content.decode("utf-8")
        for_parser = header + content

        parser = FeedParser()
        parser.feed(for_parser)
        mime_response = parser.close()

        if not mime_response.is_multipart():
            raise BatchError(
                "Response not in multipart/mixed format.", resp=resp, content=content
            )

        # Each MIME part carries one response, matched back to its request by
        # the Content-ID header the request was sent with.
        for part in mime_response.get_payload():
            request_id = self._header_to_id(part["Content-ID"])
            response, content = self._deserialize_response(part.get_payload())
            # We encode content here to emulate normal http response.
            if isinstance(content, str):
                content = content.encode("utf-8")
            self._responses[request_id] = (response, content)

    @util.positional(1)
    def execute(self, http=None):
        """Execute all the requests as a single batched HTTP request.

        Args:
          http: httplib2.Http, an http object to be used in place of the one the
            HttpRequest request object was constructed with. If one isn't supplied
            then use a http object from the requests in this batch.

        Returns:
          None

        Raises:
          httplib2.HttpLib2Error if a transport error has occurred.
          googleapiclient.errors.BatchError if the response is the wrong format.
        """
        # If we have no requests return
        if len(self._order) == 0:
            return None

        # If http is not supplied use the first valid one given in the requests.
        if http is None:
            for request_id in self._order:
                request = self._requests[request_id]
                if request is not None:
                    http = request.http
                    break

        if http is None:
            raise ValueError("Missing a valid http object.")

        # Special case for OAuth2Credentials-style objects which have not yet been
        # refreshed with an initial access_token.
        creds = _auth.get_credentials_from_http(http)
        if creds is not None:
            if not _auth.is_valid(creds):
                LOGGER.info("Attempting refresh to obtain initial access_token")
                _auth.refresh_credentials(creds)

        self._execute(http, self._order, self._requests)

        # Loop over all the requests and check for 401s. For each 401 request the
        # credentials should be refreshed and then sent again in a separate batch.
        redo_requests = {}
        redo_order = []

        for request_id in self._order:
            resp, content = self._responses[request_id]
            if resp["status"] == "401":
                redo_order.append(request_id)
                request = self._requests[request_id]
                self._refresh_and_apply_credentials(request, http)
                redo_requests[request_id] = request

        if redo_requests:
            self._execute(http, redo_order, redo_requests)

        # Now process all callbacks that are erroring, and raise an exception for
        # ones that return a non-2xx response? Or add extra parameter to callback
        # that contains an HttpError?

        for request_id in self._order:
            resp, content = self._responses[request_id]

            request = self._requests[request_id]
            callback = self._callbacks[request_id]

            response = None
            exception = None
            try:
                if resp.status >= 300:
                    raise HttpError(resp, content, uri=request.uri)
                response = request.postproc(resp, content)
            except HttpError as e:
                exception = e

            # Invoke the per-request callback first, then the global one.
            if callback is not None:
                callback(request_id, response, exception)
            if self._callback is not None:
                self._callback(request_id, response, exception)
class HttpRequestMock(object):
    """Mock of HttpRequest.

    Do not construct directly, instead use RequestMockBuilder.
    """

    def __init__(self, resp, content, postproc):
        """Constructor for HttpRequestMock

        Args:
          resp: httplib2.Response, the response to emulate coming from the request
          content: string, the response body
          postproc: callable, the post processing function usually supplied by
                    the model class. See model.JsonModel.response() as an example.
        """
        # Fall back to a canned 200 OK when no response object was supplied.
        if resp is None:
            resp = httplib2.Response({"status": 200, "reason": "OK"})
        self.resp = resp
        self.content = content
        self.postproc = postproc
        # Mirror httplib2, which exposes the reason both as a key and attribute.
        if "reason" in self.resp:
            self.resp.reason = self.resp["reason"]

    def execute(self, http=None):
        """Execute the request.

        Same behavior as HttpRequest.execute(), but the response is
        mocked and not really from an HTTP request/response.
        """
        return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
    """A simple mock of HttpRequest

    Pass in a dictionary to the constructor that maps request methodIds to
    tuples of (httplib2.Response, content, opt_expected_body) that should be
    returned when that method is called. None may also be passed in for the
    httplib2.Response, in which case a 200 OK response will be generated.
    If an opt_expected_body (str or dict) is provided, it will be compared to
    the body and UnexpectedBodyError will be raised on inequality.

    Example:
      response = '{"data": {"id": "tag:google.c...'
      requestBuilder = RequestMockBuilder(
        {
          'plus.activities.get': (None, response),
        }
      )
      googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

    Methods that you do not supply a response for will return a
    200 OK with an empty string as the response content or raise an exception
    if check_unexpected is set to True. The methodId is taken from the rpcName
    in the discovery document.

    For more details see the project wiki.
    """

    def __init__(self, responses, check_unexpected=False):
        """Constructor for RequestMockBuilder

        The constructed object should be a callable object
        that can replace the class HttpResponse.

        responses - A dictionary that maps methodIds into tuples
                    of (httplib2.Response, content). The methodId
                    comes from the 'rpcName' field in the discovery
                    document.
        check_unexpected - A boolean setting whether or not UnexpectedMethodError
                           should be raised on unsupplied method.
        """
        self.responses = responses
        self.check_unexpected = check_unexpected

    def __call__(
        self,
        http,
        postproc,
        uri,
        method="GET",
        body=None,
        headers=None,
        methodId=None,
        resumable=None,
    ):
        """Implements the callable interface that discovery.build() expects
        of requestBuilder, which is to build an object compatible with
        HttpRequest.execute(). See that method for the description of the
        parameters and the expected response.
        """
        if methodId not in self.responses:
            # No canned answer was registered for this method.
            if self.check_unexpected:
                raise UnexpectedMethodError(methodId=methodId)
            model = JsonModel(False)
            return HttpRequestMock(None, "{}", model.response)

        canned = self.responses[methodId]
        resp, content = canned[:2]
        if len(canned) > 2:
            # Test the body against the supplied expected_body.
            expected_body = canned[2]
            if bool(expected_body) != bool(body):
                # Not expecting a body and provided one
                # or expecting a body and not provided one.
                raise UnexpectedBodyError(expected_body, body)
            if isinstance(expected_body, str):
                expected_body = json.loads(expected_body)
                body = json.loads(body)
            if body != expected_body:
                raise UnexpectedBodyError(expected_body, body)
        return HttpRequestMock(resp, content, postproc)
class HttpMock(object):
    """Mock of httplib2.Http that replays a canned response.

    Records the most recent request so a test can inspect it afterwards.
    """

    def __init__(self, filename=None, headers=None):
        """
        Args:
          filename: string, absolute filename to read response from
          headers: dict, header to return with response
        """
        if headers is None:
            headers = {"status": "200"}
        if filename:
            with open(filename, "rb") as f:
                self.data = f.read()
        else:
            self.data = None
        self.response_headers = headers
        # The following attributes record the most recent call to request().
        # (Fix: the original assigned self.headers = None twice.)
        self.uri = None
        self.method = None
        self.body = None
        self.headers = None

    def request(
        self,
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=1,
        connection_type=None,
    ):
        """Record the request arguments and return the canned response pair."""
        self.uri = uri
        self.method = method
        self.body = body
        self.headers = headers
        return httplib2.Response(self.response_headers), self.data

    def close(self):
        """No-op; provided for interface-compatibility with httplib2.Http."""
        return None
class HttpMockSequence(object):
    """Mock of httplib2.Http

    Mocks a sequence of calls to request returning different responses for each
    call. Create an instance initialized with the desired response headers
    and content and then use as if an httplib2.Http instance.

      http = HttpMockSequence([
        ({'status': '401'}, ''),
        ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
        ({'status': '200'}, 'echo_request_headers'),
      ])
      resp, content = http.request("http://examples.com")

    There are special values you can pass in for content to trigger
    behavours that are helpful in testing.

    'echo_request_headers' means return the request headers in the response body
    'echo_request_headers_as_json' means return the request headers in
      the response body
    'echo_request_body' means return the request body in the response body
    'echo_request_uri' means return the request uri in the response body
    """

    def __init__(self, iterable):
        """
        Args:
          iterable: iterable, a sequence of pairs of (headers, body)
        """
        self._iterable = iterable
        self.follow_redirects = True
        self.request_sequence = list()

    def request(
        self,
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=1,
        connection_type=None,
    ):
        # Remember the request so after the fact this mock can be examined
        self.request_sequence.append((uri, method, body, headers))
        response_headers, payload = self._iterable.pop(0)
        if isinstance(payload, str):
            payload = payload.encode("utf-8")

        # Sentinel payloads echo parts of the request back as the body.
        if payload == b"echo_request_headers":
            payload = headers
        elif payload == b"echo_request_headers_as_json":
            payload = json.dumps(headers)
        elif payload == b"echo_request_body":
            payload = body.read() if hasattr(body, "read") else body
        elif payload == b"echo_request_uri":
            payload = uri
        if isinstance(payload, str):
            payload = payload.encode("utf-8")
        return httplib2.Response(response_headers), payload
def set_user_agent(http, user_agent):
    """Set the user-agent on every request.

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.
      user_agent: string, the value for the user-agent header.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = set_user_agent(h, "my-app-name/6.0")

    Most of the time the user-agent will be set doing auth, this is for the rare
    cases where you are accessing an unauthenticated endpoint.
    """
    wrapped_request = http.request

    def user_agent_request(
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
        connection_type=None,
    ):
        """Add (or prepend to) the user-agent header, then delegate."""
        if headers is None:
            headers = {}
        if "user-agent" in headers:
            # Preserve any agent string the caller already set.
            headers["user-agent"] = user_agent + " " + headers["user-agent"]
        else:
            headers["user-agent"] = user_agent
        return wrapped_request(
            uri,
            method=method,
            body=body,
            headers=headers,
            redirections=redirections,
            connection_type=connection_type,
        )

    # Monkey-patch the instance so every future call goes through the wrapper.
    http.request = user_agent_request
    return http
def tunnel_patch(http):
    """Tunnel PATCH requests over POST.

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = tunnel_patch(h)

    Useful if you are running on a platform that doesn't support PATCH.
    Apply this last if you are using OAuth 1.0, as changing the method
    will result in a different signature.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
        connection_type=None,
    ):
        """Rewrite PATCH requests as POST with an override header."""
        if headers is None:
            headers = {}
        if method == "PATCH":
            if "oauth_token" in headers.get("authorization", ""):
                LOGGER.warning(
                    "OAuth 1.0 request made with Credentials after tunnel_patch."
                )
            # De-facto standard header telling the server the intended method.
            headers["x-http-method-override"] = "PATCH"
            method = "POST"
        resp, content = request_orig(
            uri,
            method=method,
            body=body,
            headers=headers,
            redirections=redirections,
            connection_type=connection_type,
        )
        return resp, content

    http.request = new_request
    return http
def build_http():
    """Builds httplib2.Http object

    Returns:
      A httplib2.Http object, which is used to make http requests, and which has timeout set by default.
      To override default timeout call

        socket.setdefaulttimeout(timeout_in_sec)

      before interacting with this method.
    """
    # Honor a globally configured socket timeout (including 0) if one is set.
    configured_timeout = socket.getdefaulttimeout()
    if configured_timeout is None:
        configured_timeout = DEFAULT_HTTP_TIMEOUT_SEC
    http = httplib2.Http(timeout=configured_timeout)
    # 308's are used by several Google APIs (Drive, YouTube)
    # for Resumable Uploads rather than Permanent Redirects.
    # This asks httplib2 to exclude 308s from the status codes
    # it treats as redirects
    try:
        http.redirect_codes = http.redirect_codes - {308}
    except AttributeError:
        # Apache Beam tests depend on this library and cannot
        # currently upgrade their httplib2 version
        # http.redirect_codes does not exist in previous versions
        # of httplib2, so pass
        pass

    return http