/src/uWebSockets/fuzzing/EpollEchoServer.cpp
/* We rely on wrapped syscalls */
#include "libEpollFuzzer/epoll_fuzzer.h"

#include "App.h"

/* We keep this one for teardown later on */
struct us_listen_socket_t *listen_socket;

/* This test is run by libEpollFuzzer */
void test() {

    struct PerSocketData {
        int nothing;
        std::shared_ptr<bool> valid;
    };

    /* First byte determines what compressor to use */
    unsigned char compressorByte;
    if (consume_byte(&compressorByte)) {
        //uWS::Loop::get()->free();
        return;
    }
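    /* consume_byte() is provided by libEpollFuzzer; as used here it appears to pop one
     * byte off the fuzz input and return non-zero once the input is exhausted, which is
     * why we bail out of this iteration above when it fails. */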

    uWS::CompressOptions compressors[] = {
        uWS::DISABLED,
        uWS::SHARED_COMPRESSOR,
        uWS::DEDICATED_COMPRESSOR_3KB,
        uWS::DEDICATED_COMPRESSOR_4KB,
        uWS::DEDICATED_COMPRESSOR_8KB,
        uWS::DEDICATED_COMPRESSOR_16KB,
        uWS::DEDICATED_COMPRESSOR_32KB,
        uWS::DEDICATED_COMPRESSOR_64KB,
        uWS::DEDICATED_COMPRESSOR_128KB,
        uWS::DEDICATED_COMPRESSOR_256KB
    };

    uWS::CompressOptions compressor = compressors[compressorByte % 10];
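    /* The first fuzz byte picks one of the compressors above; the modulus matches the
     * ten entries in the array, so any input byte maps to a valid index. */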

    {
        auto app = uWS::App().ws<PerSocketData>("/broadcast", {
            /* Settings */
            .compression = compressor,
            /* We want this to be low so that we can hit it, yet bigger than 256 */
            .maxPayloadLength = 300,
            .idleTimeout = 12,
            /* Handlers */
            .open = [](auto *ws) {
                /* Subscribe to anything */
                ws->subscribe(/*req->getHeader(*/"topic"/*)*/);
            },
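            /* The message handler below treats the payload itself as the topic: 'C'
             * closes the socket, 'E' error-ends it, and anything else is published to
             * the topic named by the message (a leading 'U' additionally unsubscribes
             * from it afterwards). */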
            .message = [](auto *ws, std::string_view message, uWS::OpCode opCode) {
                if (message.length() && message[0] == 'C') {
                    ws->close();
                } else if (message.length() && message[0] == 'E') {
                    ws->end(1006);
                } else {
                    /* Publish to topic sent by message */
                    ws->publish(message, message, opCode, true);

                    if (message.length() && message[0] == 'U') {
                        ws->unsubscribe(message);
                    }
                }
            },
            .drain = [](auto *ws) {
                /* Check getBufferedAmount here */
            },
            .ping = [](auto *ws, std::string_view) {

            },
            .pong = [](auto *ws, std::string_view) {

            },
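            /* Calling close() again from inside the close handler reproduces a crash
             * that was reported against uWebSockets; presumably it is kept here as a
             * regression check for that path. */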
            .close = [](auto *ws, int code, std::string_view message) {
                /* Cause reported crash */
                ws->close();
            }
        }).ws<PerSocketData>("/*", {
            /* Settings */
            .compression = compressor,
            /* We want this to be low so that we can hit it, yet bigger than 256 */
            .maxPayloadLength = 300,
            .idleTimeout = 12,
            /* Handlers */
            .open = [](auto *ws) {

                ws->getUserData()->valid.reset(new bool{true});

                //if (req->getHeader("close_me").length()) {
                //    ws->close();
                //} else if (req->getHeader("end_me").length()) {
                //    ws->end(1006);
                //}
            },
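            /* The shared 'valid' flag allocated above lets deferred callbacks (see the
             * ping handler further down) check whether this socket has since been
             * closed, so they avoid calling send()/end() on a dead connection. */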
            .message = [](auto *ws, std::string_view message, uWS::OpCode opCode) {
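                /* maxPayloadLength is 300, so any longer message reaching this handler
                 * means the limit was not enforced; the bogus free() below deliberately
                 * makes the sanitizer abort loudly in that case. */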
                if (message.length() > 300) {
                    /* Inform the sanitizer of the fault */
                    fprintf(stderr, "Too long message passed\n");
                    free((void *) -1);
                }

                if (message.length() && message[0] == 'C') {
                    ws->close();
                } else if (message.length() && message[0] == 'E') {
                    ws->end(1006);
                } else {
                    ws->send(message, opCode, true);
                }
            },
            .drain = [](auto *ws) {
                /* Check getBufferedAmount here */
            },
            .ping = [](auto *ws, std::string_view) {
                /* Here we test send() and end() while uncorked, by calling them from a deferred callback */
                PerSocketData *psd = (PerSocketData *) ws->getUserData();

                uWS::Loop::get()->defer([ws, valid = psd->valid]() {
                    if (*valid.get()) {
                        /* We haven't been closed */
                        ws->send("Hello!", uWS::TEXT, false);
                        ws->end(1000);
                    }
                });
            },
            .pong = [](auto *ws, std::string_view) {

            },
            .close = [](auto *ws, int code, std::string_view message) {
                (*ws->getUserData()->valid.get()) = false;
            }
        }).listen(9001, [](us_listen_socket_t *listenSocket) {
            listen_socket = listenSocket;
        });
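        /* Port 9001 is nominal here, since libEpollFuzzer wraps the underlying
         * syscalls; the listen socket is only stashed so that teardown() can close it,
         * after which run() below can fall through once the remaining sockets have
         * been error-closed. */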

        app.run();
    }

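    /* Presumably this frees the thread-local loop so that the next fuzzer iteration
     * starts from a clean slate. */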
    uWS::Loop::get()->free();
}

/* This function should shut down the event loop and let the test fall through */
void teardown() {
    /* Being called twice would indicate a bug (it could potentially happen if not all
     * open sockets can be error-closed in one epoll_wait call), but we only allow 1k
     * FDs and epoll_wait has a buffer of 1024. */
    if (!listen_socket) {
        exit(-1);
    }

    /* We might still have open sockets, and these will be error-closed by epoll_wait */
    // us_socket_context_close - close all open sockets created with this socket context
    if (listen_socket) {
        us_listen_socket_close(0, listen_socket);
        listen_socket = NULL;
    }
}