Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/botocore/response.py: 35%

86 statements  

coverage.py v7.3.2, created at 2023-12-08 06:51 +0000

# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import logging
from io import IOBase

from urllib3.exceptions import ProtocolError as URLLib3ProtocolError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError

from botocore import parsers
from botocore.compat import set_socket_timeout
from botocore.exceptions import (
    IncompleteReadError,
    ReadTimeoutError,
    ResponseStreamingError,
)

# Keep these imported. There's pre-existing code that uses them.
from botocore import ScalarTypes  # noqa
from botocore.compat import XMLParseError  # noqa
from botocore.hooks import first_non_none_response  # noqa


logger = logging.getLogger(__name__)


class StreamingBody(IOBase):
    """Wrapper class for an http response body.

    This provides a few additional conveniences that do not exist
    in the urllib3 model:

    * Set the timeout on the socket (i.e. read() timeouts)
    * Auto-validation of content length: if the number of bytes
      we read does not match the content length, an exception
      is raised.

    """

    _DEFAULT_CHUNK_SIZE = 1024

    def __init__(self, raw_stream, content_length):
        self._raw_stream = raw_stream
        self._content_length = content_length
        self._amount_read = 0

    def __del__(self):
        # Extending the destructor in order to preserve the underlying
        # raw_stream. The ability to add custom cleanup logic was introduced
        # in Python 3.4+.
        # https://www.python.org/dev/peps/pep-0442/
        pass

    def set_socket_timeout(self, timeout):
        """Set the timeout seconds on the socket."""
        # The problem we're trying to solve is to prevent .read() calls from
        # hanging. This can happen in rare cases. What we'd ideally like to
        # do is set a timeout on the .read() call so that callers can retry
        # the request.
        # Unfortunately, this isn't currently possible in requests.
        # See: https://github.com/kennethreitz/requests/issues/1803
        # So what we're going to do is reach into the guts of the stream and
        # grab the socket object, which we can set the timeout on. We're
        # putting in a check here so that if this interface goes away, we'll
        # know.
        try:
            set_socket_timeout(self._raw_stream, timeout)
        except AttributeError:
            logger.error(
                "Cannot access the socket object of "
                "a streaming response. It's possible "
                "the interface has changed.",
                exc_info=True,
            )
            raise

    def readable(self):
        try:
            return self._raw_stream.readable()
        except AttributeError:
            return False

    def read(self, amt=None):
        """Read at most amt bytes from the stream.

        If the amt argument is omitted, read all data.
        """
        try:
            chunk = self._raw_stream.read(amt)
        except URLLib3ReadTimeoutError as e:
            # TODO: the url will be None as urllib3 isn't setting it yet
            raise ReadTimeoutError(endpoint_url=e.url, error=e)
        except URLLib3ProtocolError as e:
            raise ResponseStreamingError(error=e)
        self._amount_read += len(chunk)
        if amt is None or (not chunk and amt > 0):
            # If the server sends empty contents or
            # we ask to read all of the contents, then we know
            # we need to verify the content length.
            self._verify_content_length()
        return chunk

    def readlines(self):
        return self._raw_stream.readlines()

    def __iter__(self):
        """Return an iterator to yield 1k chunks from the raw stream."""
        return self.iter_chunks(self._DEFAULT_CHUNK_SIZE)

    def __next__(self):
        """Return the next 1k chunk from the raw stream."""
        current_chunk = self.read(self._DEFAULT_CHUNK_SIZE)
        if current_chunk:
            return current_chunk
        raise StopIteration()

    def __enter__(self):
        return self._raw_stream

    def __exit__(self, type, value, traceback):
        self._raw_stream.close()

    next = __next__

    def iter_lines(self, chunk_size=_DEFAULT_CHUNK_SIZE, keepends=False):
        """Return an iterator to yield lines from the raw stream.

        This is achieved by reading chunks of bytes (of size chunk_size) at a
        time from the raw stream, and then yielding lines from there.
        """
        pending = b''
        for chunk in self.iter_chunks(chunk_size):
            lines = (pending + chunk).splitlines(True)
            for line in lines[:-1]:
                yield line.splitlines(keepends)[0]
            pending = lines[-1]
        if pending:
            yield pending.splitlines(keepends)[0]

    def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
        """Return an iterator to yield chunks of chunk_size bytes from the raw
        stream.
        """
        while True:
            current_chunk = self.read(chunk_size)
            if current_chunk == b"":
                break
            yield current_chunk

    def _verify_content_length(self):
        # See: https://github.com/kennethreitz/requests/issues/1855
        # Basically, our http library doesn't do this for us, so we have
        # to do this ourselves.
        if self._content_length is not None and self._amount_read != int(
            self._content_length
        ):
            raise IncompleteReadError(
                actual_bytes=self._amount_read,
                expected_bytes=int(self._content_length),
            )

    def tell(self):
        return self._raw_stream.tell()

    def close(self):
        """Close the underlying http response stream."""
        self._raw_stream.close()

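# --- Illustrative sketch (not part of botocore): a minimal, self-contained way
# to exercise StreamingBody, using io.BytesIO as a stand-in for the urllib3
# raw stream. It shows iter_lines() reassembling lines and
# _verify_content_length() raising IncompleteReadError on a length mismatch.
# The function name and payload are made up for this example.
def _streaming_body_sketch():
    from io import BytesIO

    payload = b"first line\nsecond line\n"
    body = StreamingBody(BytesIO(payload), content_length=len(payload))
    # iter_lines() buffers partial lines across chunk boundaries.
    assert list(body.iter_lines()) == [b"first line", b"second line"]

    # Advertising one byte more than the stream contains triggers the
    # content-length check once the stream is exhausted.
    short_body = StreamingBody(BytesIO(payload), content_length=len(payload) + 1)
    try:
        short_body.read()
    except IncompleteReadError:
        pass
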

def get_response(operation_model, http_response):
    protocol = operation_model.metadata['protocol']
    response_dict = {
        'headers': http_response.headers,
        'status_code': http_response.status_code,
    }
    # TODO: Unfortunately, we have to have error logic here.
    # If it looks like an error, in the streaming response case we
    # need to actually grab the contents.
    if response_dict['status_code'] >= 300:
        response_dict['body'] = http_response.content
    elif operation_model.has_streaming_output:
        response_dict['body'] = StreamingBody(
            http_response.raw, response_dict['headers'].get('content-length')
        )
    else:
        response_dict['body'] = http_response.content

    parser = parsers.create_parser(protocol)
    return http_response, parser.parse(
        response_dict, operation_model.output_shape
    )
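

# --- Illustrative sketch (not part of botocore): how a StreamingBody typically
# reaches user code. For operations with streaming output (for example S3
# GetObject), the response body is left as a StreamingBody rather than being
# read eagerly, mirroring the branching in get_response() above. Assumes boto3
# is installed, AWS credentials are configured, and that "my-bucket"/"my-key"
# are placeholders for a real bucket and key.
if __name__ == "__main__":
    import boto3

    s3 = boto3.client("s3")
    response = s3.get_object(Bucket="my-bucket", Key="my-key")
    body = response["Body"]  # a StreamingBody (or subclass)
    with open("downloaded-object", "wb") as f:
        # Stream the object to disk in fixed-size chunks instead of holding
        # the whole payload in memory.
        for chunk in body.iter_chunks(chunk_size=64 * 1024):
            f.write(chunk)
    body.close()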