/src/h2/src/proto/connection.rs
Line | Count | Source |
1 | | use crate::codec::UserError; |
2 | | use crate::frame::{Reason, StreamId}; |
3 | | use crate::{client, server}; |
4 | | |
5 | | use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; |
6 | | use crate::proto::*; |
7 | | |
8 | | use bytes::Bytes; |
9 | | use futures_core::Stream; |
10 | | use std::io; |
11 | | use std::marker::PhantomData; |
12 | | use std::pin::Pin; |
13 | | use std::task::{Context, Poll}; |
14 | | use std::time::Duration; |
15 | | use tokio::io::AsyncRead; |
16 | | |
/// An H2 connection
#[derive(Debug)]
pub(crate) struct Connection<T, P, B: Buf = Bytes>
where
    P: Peer,
{
    /// Read / write frame values
    codec: Codec<T, Prioritized<B>>,

    /// All connection state that does not depend on the I/O type `T`.
    inner: ConnectionInner<P, B>,
}
28 | | |
// Extracted part of `Connection` which does not depend on `T`. Reduces the amount of duplicated
// method instantiations.
#[derive(Debug)]
struct ConnectionInner<P, B: Buf = Bytes>
where
    P: Peer,
{
    /// Tracks the connection level state transitions.
    state: State,

    /// An error to report back once complete.
    ///
    /// This exists separately from State in order to support
    /// graceful shutdown.
    error: Option<frame::GoAway>,

    /// Pending GOAWAY frames to write.
    go_away: GoAway,

    /// Ping/pong handler
    ping_pong: PingPong,

    /// Connection settings
    settings: Settings,

    /// Stream state handler
    streams: Streams<B, P>,

    /// A `tracing` span tracking the lifetime of the connection.
    span: tracing::Span,

    /// Client or server
    _phantom: PhantomData<P>,
}
63 | | |
/// Mutable view over the `T`-independent parts of a connection, with the
/// stream state type-erased (`DynStreams`). Heavy logic implemented on this
/// type is monomorphized once instead of per I/O type.
struct DynConnection<'a, B: Buf = Bytes> {
    state: &'a mut State,

    go_away: &'a mut GoAway,

    streams: DynStreams<'a, B>,

    error: &'a mut Option<frame::GoAway>,

    ping_pong: &'a mut PingPong,
}
75 | | |
/// Initial configuration handed to `Connection::new`.
#[derive(Debug, Clone)]
pub(crate) struct Config {
    /// First stream ID this endpoint will use when initiating streams.
    pub next_stream_id: StreamId,
    /// Assumed max concurrent sendable streams before the peer's SETTINGS arrives.
    pub initial_max_send_streams: usize,
    /// Upper bound on the send buffer per stream.
    pub max_send_buffer_size: usize,
    /// How long locally-reset streams linger before being cleared.
    pub reset_stream_duration: Duration,
    /// Max number of locally-reset streams to track concurrently.
    pub reset_stream_max: usize,
    /// Max number of remotely-reset streams to track concurrently.
    pub remote_reset_stream_max: usize,
    /// Optional cap on streams reset due to local protocol errors.
    pub local_error_reset_streams_max: Option<usize>,
    /// SETTINGS frame to advertise to the peer.
    pub settings: frame::Settings,
}
87 | | |
/// Connection-level state machine.
#[derive(Debug)]
enum State {
    /// Currently open in a sane state
    Open,

    /// The codec must be flushed
    Closing(Reason, Initiator),

    /// In a closed state
    Closed(Reason, Initiator),
}
99 | | |
impl<T, P, B> Connection<T, P, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
    P: Peer,
    B: Buf,
{
    /// Builds a new connection around `codec` from the given `config`.
    pub fn new(codec: Codec<T, Prioritized<B>>, config: Config) -> Connection<T, P, B> {
        // Non-generic helper: keeps the `streams::Config` construction out of
        // the monomorphized body of `new`.
        fn streams_config(config: &Config) -> streams::Config {
            streams::Config {
                initial_max_send_streams: config.initial_max_send_streams,
                local_max_buffer_size: config.max_send_buffer_size,
                local_next_stream_id: config.next_stream_id,
                local_push_enabled: config.settings.is_push_enabled().unwrap_or(true),
                extended_connect_protocol_enabled: config
                    .settings
                    .is_extended_connect_protocol_enabled()
                    .unwrap_or(false),
                local_reset_duration: config.reset_stream_duration,
                local_reset_max: config.reset_stream_max,
                remote_reset_max: config.remote_reset_stream_max,
                // Until the peer's SETTINGS arrives, assume the spec default.
                remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
                remote_max_initiated: config
                    .settings
                    .max_concurrent_streams()
                    .map(|max| max as usize),
                local_max_error_reset_streams: config.local_error_reset_streams_max,
            }
        }
        let streams = Streams::new(streams_config(&config));
        Connection {
            codec,
            inner: ConnectionInner {
                state: State::Open,
                error: None,
                go_away: GoAway::new(),
                ping_pong: PingPong::new(),
                settings: Settings::new(config.settings),
                streams,
                span: tracing::debug_span!("Connection", peer = %P::NAME),
                _phantom: PhantomData,
            },
        }
    }

    /// connection flow control
    pub(crate) fn set_target_window_size(&mut self, size: WindowSize) {
        let _res = self.inner.streams.set_target_connection_window_size(size);
        // TODO: proper error handling
        debug_assert!(_res.is_ok());
    }

    /// Send a new SETTINGS frame with an updated initial window size.
    pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> {
        let mut settings = frame::Settings::default();
        settings.set_initial_window_size(Some(size));
        self.inner.settings.send_settings(settings)
    }

    /// Send a new SETTINGS frame with extended CONNECT protocol enabled.
    pub(crate) fn set_enable_connect_protocol(&mut self) -> Result<(), UserError> {
        let mut settings = frame::Settings::default();
        settings.set_enable_connect_protocol(Some(1));
        self.inner.settings.send_settings(settings)
    }

    /// Returns the maximum number of concurrent streams that may be initiated
    /// by this peer.
    pub(crate) fn max_send_streams(&self) -> usize {
        self.inner.streams.max_send_streams()
    }

    /// Returns the maximum number of concurrent streams that may be initiated
    /// by the remote peer.
    pub(crate) fn max_recv_streams(&self) -> usize {
        self.inner.streams.max_recv_streams()
    }

    #[cfg(feature = "unstable")]
    pub fn num_wired_streams(&self) -> usize {
        self.inner.streams.num_wired_streams()
    }

    /// Returns `Ready` when the connection is ready to receive a frame.
    ///
    /// Returns `Error` as this may raise errors that are caused by delayed
    /// processing of received frames.
    fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
        let _e = self.inner.span.enter();
        let span = tracing::trace_span!("poll_ready");
        let _e = span.enter();
        // The order of these calls don't really matter too much
        ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?;
        ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?;
        ready!(self
            .inner
            .settings
            .poll_send(cx, &mut self.codec, &mut self.inner.streams))?;
        ready!(self.inner.streams.send_pending_refusal(cx, &mut self.codec))?;

        Poll::Ready(Ok(()))
    }

    /// Send any pending GOAWAY frames.
    ///
    /// This will return `Some(reason)` if the connection should be closed
    /// afterwards. If this is a graceful shutdown, this returns `None`.
    fn poll_go_away(&mut self, cx: &mut Context) -> Poll<Option<io::Result<Reason>>> {
        self.inner.go_away.send_pending_go_away(cx, &mut self.codec)
    }

    /// Abruptly shut down on behalf of the user with the given reason.
    pub fn go_away_from_user(&mut self, e: Reason) {
        self.inner.as_dyn().go_away_from_user(e)
    }

    /// Combines our close reason with any GOAWAY received from the peer into
    /// the final result reported to the user.
    fn take_error(&mut self, ours: Reason, initiator: Initiator) -> Result<(), Error> {
        let (debug_data, theirs) = self
            .inner
            .error
            .take()
            .as_ref()
            .map_or((Bytes::new(), Reason::NO_ERROR), |frame| {
                (frame.debug_data().clone(), frame.reason())
            });

        match (ours, theirs) {
            (Reason::NO_ERROR, Reason::NO_ERROR) => Ok(()),
            (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)),
            // If both sides reported an error, give their
            // error back to the user. We assume our error
            // was a consequence of their error, and less
            // important.
            (_, theirs) => Err(Error::remote_go_away(debug_data, theirs)),
        }
    }

    /// Closes the connection by transitioning to a GOAWAY state
    /// iff there are no streams or references
    pub fn maybe_close_connection_if_no_streams(&mut self) {
        // If we poll() and realize that there are no streams or references
        // then we can close the connection by transitioning to GOAWAY
        if !self.inner.streams.has_streams_or_other_references() {
            self.inner.as_dyn().go_away_now(Reason::NO_ERROR);
        }
    }

    /// Checks if there are any streams
    pub fn has_streams(&self) -> bool {
        self.inner.streams.has_streams()
    }

    /// Checks if there are any streams or references left
    pub fn has_streams_or_other_references(&self) -> bool {
        // If we poll() and realize that there are no streams or references
        // then we can close the connection by transitioning to GOAWAY
        self.inner.streams.has_streams_or_other_references()
    }

    /// Hands the user-ping handle to the caller, if not already taken.
    pub(crate) fn take_user_pings(&mut self) -> Option<UserPings> {
        self.inner.ping_pong.take_user_pings()
    }

    /// Advances the internal state of the connection.
    pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
        // XXX(eliza): cloning the span is unfortunately necessary here in
        // order to placate the borrow checker — `self` is mutably borrowed by
        // `poll2`, which means that we can't borrow `self.span` to enter it.
        // The clone is just an atomic ref bump.
        let span = self.inner.span.clone();
        let _e = span.enter();
        let span = tracing::trace_span!("poll");
        let _e = span.enter();

        loop {
            tracing::trace!(connection.state = ?self.inner.state);
            // TODO: probably clean up this glob of code
            match self.inner.state {
                // When open, continue to poll a frame
                State::Open => {
                    let result = match self.poll2(cx) {
                        Poll::Ready(result) => result,
                        // The connection is not ready to make progress
                        Poll::Pending => {
                            // Ensure all window updates have been sent.
                            //
                            // This will also handle flushing `self.codec`
                            ready!(self.inner.streams.poll_complete(cx, &mut self.codec))?;

                            // If an error is pending (peer GOAWAY received) or
                            // a graceful shutdown is waiting for idle, and all
                            // streams have finished, close now.
                            if (self.inner.error.is_some()
                                || self.inner.go_away.should_close_on_idle())
                                && !self.inner.streams.has_streams()
                            {
                                self.inner.as_dyn().go_away_now(Reason::NO_ERROR);
                                continue;
                            }

                            return Poll::Pending;
                        }
                    };

                    self.inner.as_dyn().handle_poll2_result(result)?
                }
                State::Closing(reason, initiator) => {
                    tracing::trace!("connection closing after flush");
                    // Flush/shutdown the codec
                    ready!(self.codec.shutdown(cx))?;

                    // Transition the state to error
                    self.inner.state = State::Closed(reason, initiator);
                }
                State::Closed(reason, initiator) => {
                    return Poll::Ready(self.take_error(reason, initiator));
                }
            }
        }
    }

    /// Inner poll loop for the `State::Open` case: drives GOAWAY/readiness,
    /// then receives and dispatches frames until pending or done.
    fn poll2(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
        // This happens outside of the loop to prevent needing to do a clock
        // check and then comparison of the queue possibly multiple times a
        // second (and thus, the clock wouldn't have changed enough to matter).
        self.clear_expired_reset_streams();

        loop {
            // First, ensure that the `Connection` is able to receive a frame
            //
            // The order here matters:
            // - poll_go_away may buffer a graceful shutdown GOAWAY frame
            // - If it has, we've also added a PING to be sent in poll_ready
            if let Some(reason) = ready!(self.poll_go_away(cx)?) {
                if self.inner.go_away.should_close_now() {
                    if self.inner.go_away.is_user_initiated() {
                        // A user initiated abrupt shutdown shouldn't return
                        // the same error back to the user.
                        return Poll::Ready(Ok(()));
                    } else {
                        return Poll::Ready(Err(Error::library_go_away(reason)));
                    }
                }
                // Only NO_ERROR should be waiting for idle
                debug_assert_eq!(
                    reason,
                    Reason::NO_ERROR,
                    "graceful GOAWAY should be NO_ERROR"
                );
            }
            ready!(self.poll_ready(cx))?;

            match self
                .inner
                .as_dyn()
                .recv_frame(ready!(Pin::new(&mut self.codec).poll_next(cx)?))?
            {
                ReceivedFrame::Settings(frame) => {
                    // SETTINGS is handled here (not in `recv_frame`) because it
                    // needs mutable access to the codec.
                    self.inner.settings.recv_settings(
                        frame,
                        &mut self.codec,
                        &mut self.inner.streams,
                    )?;
                }
                ReceivedFrame::Continue => (),
                ReceivedFrame::Done => {
                    return Poll::Ready(Ok(()));
                }
            }
        }
    }

    /// Drops state for locally-reset streams whose linger period has expired.
    fn clear_expired_reset_streams(&mut self) {
        self.inner.streams.clear_expired_reset_streams();
    }
}
371 | | |
372 | | impl<P, B> ConnectionInner<P, B> |
373 | | where |
374 | | P: Peer, |
375 | | B: Buf, |
376 | | { |
377 | 3.07M | fn as_dyn(&mut self) -> DynConnection<'_, B> { |
378 | | let ConnectionInner { |
379 | 3.07M | state, |
380 | 3.07M | go_away, |
381 | 3.07M | streams, |
382 | 3.07M | error, |
383 | 3.07M | ping_pong, |
384 | | .. |
385 | 3.07M | } = self; |
386 | 3.07M | let streams = streams.as_dyn(); |
387 | 3.07M | DynConnection { |
388 | 3.07M | state, |
389 | 3.07M | go_away, |
390 | 3.07M | streams, |
391 | 3.07M | error, |
392 | 3.07M | ping_pong, |
393 | 3.07M | } |
394 | 3.07M | } |
395 | | } |
396 | | |
impl<B> DynConnection<'_, B>
where
    B: Buf,
{
    /// Buffers a GOAWAY with the given last-stream-id (graceful: streams keep
    /// processing until EOS).
    fn go_away(&mut self, id: StreamId, e: Reason) {
        let frame = frame::GoAway::new(id, e);
        self.streams.send_go_away(id);
        self.go_away.go_away(frame);
    }

    /// Buffers a GOAWAY that should be sent (and the connection closed) now.
    fn go_away_now(&mut self, e: Reason) {
        let last_processed_id = self.streams.last_processed_id();
        let frame = frame::GoAway::new(last_processed_id, e);
        self.go_away.go_away_now(frame);
    }

    /// Like `go_away_now`, but attaches opaque debug data to the GOAWAY frame.
    fn go_away_now_data(&mut self, e: Reason, data: Bytes) {
        let last_processed_id = self.streams.last_processed_id();
        let frame = frame::GoAway::with_debug_data(last_processed_id, e, data);
        self.go_away.go_away_now(frame);
    }

    /// User-initiated abrupt shutdown: buffers the GOAWAY and errors all streams.
    fn go_away_from_user(&mut self, e: Reason) {
        let last_processed_id = self.streams.last_processed_id();
        let frame = frame::GoAway::new(last_processed_id, e);
        self.go_away.go_away_from_user(frame);

        // Notify all streams of reason we're abruptly closing.
        self.streams.handle_error(Error::user_go_away(e));
    }

    /// Translates the outcome of `poll2` into connection state transitions.
    /// Returns `Err` only for errors that should surface to the caller.
    fn handle_poll2_result(&mut self, result: Result<(), Error>) -> Result<(), Error> {
        match result {
            // The connection has shutdown normally
            Ok(()) => {
                *self.state = State::Closing(Reason::NO_ERROR, Initiator::Library);
                Ok(())
            }
            // Attempting to read a frame resulted in a connection level
            // error. This is handled by setting a GOAWAY frame followed by
            // terminating the connection.
            Err(Error::GoAway(debug_data, reason, initiator)) => {
                self.handle_go_away(reason, debug_data, initiator);
                Ok(())
            }
            // Attempting to read a frame resulted in a stream level error.
            // This is handled by resetting the frame then trying to read
            // another frame.
            Err(Error::Reset(id, reason, initiator)) => {
                debug_assert_eq!(initiator, Initiator::Library);
                tracing::trace!(?id, ?reason, "stream error");
                match self.streams.send_reset(id, reason) {
                    Ok(()) => (),
                    // Sending the reset can itself escalate to a connection
                    // error (e.g. too many local error resets).
                    Err(crate::proto::error::GoAway { debug_data, reason }) => {
                        self.handle_go_away(reason, debug_data, Initiator::Library);
                    }
                }
                Ok(())
            }
            // Attempting to read a frame resulted in an I/O error. All
            // active streams must be reset.
            //
            // TODO: Are I/O errors recoverable?
            Err(Error::Io(kind, inner)) => {
                tracing::debug!(error = ?kind, "Connection::poll; IO error");
                let e = Error::Io(kind, inner);

                // Reset all active streams
                self.streams.handle_error(e.clone());

                // Some client implementations drop the connections without notifying its peer
                // Attempting to read after the client dropped the connection results in UnexpectedEof
                // If as a server, we don't have anything more to send, just close the connection
                // without error
                //
                // See https://github.com/hyperium/hyper/issues/3427
                if self.streams.is_buffer_empty()
                    && matches!(kind, io::ErrorKind::UnexpectedEof)
                    && (self.streams.is_server()
                        || self.error.as_ref().map(|f| f.reason() == Reason::NO_ERROR)
                            == Some(true))
                {
                    *self.state = State::Closed(Reason::NO_ERROR, Initiator::Library);
                    return Ok(());
                }

                // Return the error
                Err(e)
            }
        }
    }

    /// Handles a connection-level GOAWAY error: flushes an existing GOAWAY
    /// if one with the same reason is already pending, otherwise errors all
    /// streams and buffers a new GOAWAY carrying `debug_data`.
    fn handle_go_away(&mut self, reason: Reason, debug_data: Bytes, initiator: Initiator) {
        let e = Error::GoAway(debug_data.clone(), reason, initiator);
        tracing::debug!(error = ?e, "Connection::poll; connection error");

        // We may have already sent a GOAWAY for this error,
        // if so, don't send another, just flush and close up.
        if self
            .go_away
            .going_away()
            .map_or(false, |frame| frame.reason() == reason)
        {
            tracing::trace!(" -> already going away");
            *self.state = State::Closing(reason, initiator);
            return;
        }

        // Reset all active streams
        self.streams.handle_error(e);
        self.go_away_now_data(reason, debug_data);
    }

    /// Dispatches a single received frame (or codec EOF) to the appropriate
    /// handler. SETTINGS is bounced back to the caller because handling it
    /// requires the codec, which this type does not hold.
    fn recv_frame(&mut self, frame: Option<Frame>) -> Result<ReceivedFrame, Error> {
        use crate::frame::Frame::*;
        match frame {
            Some(Headers(frame)) => {
                tracing::trace!(?frame, "recv HEADERS");
                self.streams.recv_headers(frame)?;
            }
            Some(Data(frame)) => {
                tracing::trace!(?frame, "recv DATA");
                self.streams.recv_data(frame)?;
            }
            Some(Reset(frame)) => {
                tracing::trace!(?frame, "recv RST_STREAM");
                self.streams.recv_reset(frame)?;
            }
            Some(PushPromise(frame)) => {
                tracing::trace!(?frame, "recv PUSH_PROMISE");
                self.streams.recv_push_promise(frame)?;
            }
            Some(Settings(frame)) => {
                tracing::trace!(?frame, "recv SETTINGS");
                return Ok(ReceivedFrame::Settings(frame));
            }
            Some(GoAway(frame)) => {
                tracing::trace!(?frame, "recv GOAWAY");
                // This should prevent starting new streams,
                // but should allow continuing to process current streams
                // until they are all EOS. Once they are, State should
                // transition to GoAway.
                self.streams.recv_go_away(&frame)?;
                *self.error = Some(frame);
            }
            Some(Ping(frame)) => {
                tracing::trace!(?frame, "recv PING");
                let status = self.ping_pong.recv_ping(frame);
                if status.is_shutdown() {
                    // A shutdown pong is only expected after we initiated a
                    // graceful shutdown ping; anything else is a bug.
                    assert!(
                        self.go_away.is_going_away(),
                        "received unexpected shutdown ping"
                    );

                    let last_processed_id = self.streams.last_processed_id();
                    self.go_away(last_processed_id, Reason::NO_ERROR);
                }
            }
            Some(WindowUpdate(frame)) => {
                tracing::trace!(?frame, "recv WINDOW_UPDATE");
                self.streams.recv_window_update(frame)?;
            }
            Some(Priority(frame)) => {
                tracing::trace!(?frame, "recv PRIORITY");
                // TODO: handle
            }
            None => {
                tracing::trace!("codec closed");
                self.streams.recv_eof(false).expect("mutex poisoned");
                return Ok(ReceivedFrame::Done);
            }
        }
        Ok(ReceivedFrame::Continue)
    }
}
572 | | |
/// Result of `DynConnection::recv_frame`, telling the poll loop what to do next.
enum ReceivedFrame {
    /// A SETTINGS frame that the caller must apply (needs codec access).
    Settings(frame::Settings),
    /// Frame was fully handled; keep reading.
    Continue,
    /// The codec reached EOF; the connection is done.
    Done,
}
578 | | |
impl<T, B> Connection<T, client::Peer, B>
where
    T: AsyncRead + AsyncWrite,
    B: Buf,
{
    /// Shared access to the client-side stream state.
    pub(crate) fn streams(&self) -> &Streams<B, client::Peer> {
        &self.inner.streams
    }
}
588 | | |
impl<T, B> Connection<T, server::Peer, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
    B: Buf,
{
    /// Pops the next stream opened by the remote peer, if any.
    pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
        self.inner.streams.next_incoming()
    }

    // Graceful shutdown only makes sense for server peers.
    pub fn go_away_gracefully(&mut self) {
        if self.inner.go_away.is_going_away() {
            // No reason to start a new one.
            return;
        }

        // According to http://httpwg.org/specs/rfc7540.html#GOAWAY:
        //
        // > A server that is attempting to gracefully shut down a connection
        // > SHOULD send an initial GOAWAY frame with the last stream
        // > identifier set to 2^31-1 and a NO_ERROR code. This signals to the
        // > client that a shutdown is imminent and that initiating further
        // > requests is prohibited. After allowing time for any in-flight
        // > stream creation (at least one round-trip time), the server can
        // > send another GOAWAY frame with an updated last stream identifier.
        // > This ensures that a connection can be cleanly shut down without
        // > losing requests.
        self.inner.as_dyn().go_away(StreamId::MAX, Reason::NO_ERROR);

        // We take the advice of waiting 1 RTT literally, and wait
        // for a pong before proceeding.
        self.inner.ping_pong.ping_shutdown();
    }
}
623 | | |
impl<T, P, B> Drop for Connection<T, P, B>
where
    P: Peer,
    B: Buf,
{
    /// Notifies the shared stream state that the connection is gone so any
    /// outstanding stream handles observe EOF.
    fn drop(&mut self) {
        // Ignore errors as this indicates that the mutex is poisoned.
        let _ = self.inner.streams.recv_eof(true);
    }
}