/src/wireshark/epan/dissectors/packet-nvme-rdma.c
Line | Count | Source |
1 | | /* packet-nvme-rdma.c |
2 | | * Routines for NVM Express over Fabrics (RDMA) dissection |
3 | | * Copyright 2016 |
4 | | * Code by Parav Pandit |
5 | | * |
6 | | * Wireshark - Network traffic analyzer |
7 | | * By Gerald Combs <gerald@wireshark.org> |
8 | | * Copyright 1998 Gerald Combs |
9 | | * |
10 | | * SPDX-License-Identifier: GPL-2.0-or-later |
11 | | */ |
12 | | |
13 | | /* |
14 | | NVM Express is a high-speed interface for accessing solid-state drives. |
15 | | NVM Express specifications are maintained by the NVM Express industry |
16 | | association at http://www.nvmexpress.org. |
17 | | |
18 | | This file adds support for dissecting NVM Express over Fabrics packets |
19 | | carried over RDMA. It provides basic dissection of commands and |
20 | | completions. |
21 | | |
22 | | Current dissection supports |
23 | | (a) NVMe commands and CQEs |
24 | | (b) NVMe Fabrics commands and CQEs |
25 | | As part of this, it also calculates command completion latencies. |
26 | | |
27 | | This is similar to iSCSI and SCSI dissection, where iSCSI is the |
28 | | transport protocol carrying SCSI commands and responses. Similarly, |
29 | | the NVMe Fabrics RDMA transport protocol carries NVMe commands. |
30 | | |
31 | | +----------+ |
32 | | | NVMe | |
33 | | +------+---+ |
34 | | | |
35 | | +-----------+---------+ |
36 | | | NVMe Fabrics | |
37 | | +----+-----------+----+ |
38 | | | | |
39 | | +----+---+ +---+----+ |
40 | | | RDMA | | FC | |
41 | | +--------+ +--------+ |
42 | | |
43 | | References: |
44 | | The NVM Express over Fabrics specification is located at |
45 | | http://www.nvmexpress.org/wp-content/uploads/NVMe_over_Fabrics_1_0_Gold_20160605.pdf |
46 | | |
47 | | The NVM Express specification is located at |
48 | | http://www.nvmexpress.org/wp-content/uploads/NVM-Express-1_2a.pdf |
49 | | |
50 | | The NVM Express RDMA TCP port assigned by IANA, which maps to the |
51 | | RDMA IP service TCP port, can be found at |
52 | | http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=NVM+Express |
53 | | |
54 | | */ |
55 | | #include "config.h" |
56 | | |
57 | | #include <stdlib.h> |
58 | | |
59 | | #include <epan/packet.h> |
60 | | #include <epan/prefs.h> |
61 | | #include <epan/conversation.h> |
62 | | #include <epan/addr_resolv.h> |
63 | | |
64 | | #include "packet-infiniband.h" |
65 | | #include "packet-nvme.h" |
66 | | |
67 | 0 | #define SID_ULP_MASK 0x00000000FF000000 |
68 | 0 | #define SID_PROTO_MASK 0x0000000000FF0000 |
69 | 0 | #define SID_PORT_MASK 0x000000000000FFFF |
70 | | |
71 | 0 | #define SID_ULP 0x01 |
72 | 0 | #define SID_PROTO_TCP 0x06 |
73 | 14 | #define NVME_RDMA_TCP_PORT_RANGE "4420" /* IANA registered */ |
74 | | |
75 | 0 | #define SID_MASK (SID_ULP_MASK | SID_PROTO_MASK) |
76 | 0 | #define SID_ULP_TCP ((SID_ULP << 3 * 8) | (SID_PROTO_TCP << 2 * 8)) |
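| | /* Worked example (illustrative service id only): for an IB CM service |
| | * id of 0x0000000001061144, the ULP byte is 0x01 (IP), the protocol |
| | * byte is 0x06 (TCP), and the low 16 bits are the TCP port |
| | * (0x1144 == 4420). (service_id & SID_MASK) == SID_ULP_TCP then holds, |
| | * and the port is checked against the configured range (gPORT_RANGE). |
| | */ |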
77 | | |
78 | 14 | #define NVME_FABRICS_RDMA "NVMe Fabrics RDMA" |
79 | | |
80 | 0 | #define NVME_FABRIC_CMD_SIZE NVME_CMD_SIZE |
81 | 0 | #define NVME_FABRIC_CQE_SIZE NVME_CQE_SIZE |
82 | | |
83 | | struct nvme_rdma_cmd_ctx; |
84 | | |
85 | | /* The idea of RDMA context matching is as follows: |
86 | | * addresses, sizes, and keys are registered with nvme_add_data_request() |
87 | | * at an RDMA request, the packet is matched to its queue (this is already done) |
88 | | * at an RDMA request, we see the address, size, and key, and find the command with nvme_lookup_data_request() |
89 | | * we store the command context and the packet sequence number in the queue |
90 | | * a later RDMA transfer with the matching sequence number will then find the command via the queue |
91 | | * knowing the command context, we can decode the buffer |
92 | | * We expect all RDMA transfers to arrive in order, so storing the in-progress transfer in the queue context is OK |
93 | | */ |
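| | /* A minimal worked trace of the flow above, using hypothetical CID and |
| | * PSN values: |
| | * 1. command CID 7 registers its buffer {addr, key, size} via |
| | * nvme_add_data_request() |
| | * 2. an RDMA READ request whose RETH carries the matching {addr, key, |
| | * size} arrives with PSN 100; nvme_lookup_data_request() finds CID 7 |
| | * and the PSN is recorded with nvme_add_data_tr_pkt() |
| | * 3. the first READ response carries the same PSN 100, so |
| | * nvme_lookup_data_tr_pkt() matches it back to CID 7; we record |
| | * first_psn = psn = 100 in the queue context |
| | * 4. middle/last responses arrive with PSNs 101, 102, ...; each matches |
| | * rdma_ctx.psn + 1 and is accounted to CID 7 at offset tr_bytes |
| | */ |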
94 | | struct nvme_rdma_q_ctx { |
95 | | struct nvme_q_ctx n_q_ctx; |
96 | | struct { |
97 | | struct nvme_rdma_cmd_ctx *cmd_ctx; |
98 | | uint32_t first_psn; |
99 | | uint32_t psn; |
100 | | } rdma_ctx; |
101 | | }; |
102 | | |
103 | | struct nvme_rdma_cmd_ctx { |
104 | | struct nvme_cmd_ctx n_cmd_ctx; |
105 | | }; |
106 | | |
107 | | void proto_reg_handoff_nvme_rdma(void); |
108 | | void proto_register_nvme_rdma(void); |
109 | | |
110 | | static int proto_nvme_rdma; |
111 | | static dissector_handle_t ib_handler; |
112 | | static int proto_ib; |
113 | | |
114 | | /* NVMe Fabrics RDMA CM Private data */ |
115 | | static int hf_nvmeof_rdma_cm_req_recfmt; |
116 | | static int hf_nvmeof_rdma_cm_req_qid; |
117 | | static int hf_nvmeof_rdma_cm_req_hrqsize; |
118 | | static int hf_nvmeof_rdma_cm_req_hsqsize; |
119 | | static int hf_nvmeof_rdma_cm_req_cntlid; |
120 | | static int hf_nvmeof_rdma_cm_req_reserved; |
121 | | |
122 | | static int hf_nvmeof_rdma_cm_rsp_recfmt; |
123 | | static int hf_nvmeof_rdma_cm_rsp_crqsize; |
124 | | static int hf_nvmeof_rdma_cm_rsp_reserved; |
125 | | |
126 | | static int hf_nvmeof_rdma_cm_rej_recfmt; |
127 | | static int hf_nvmeof_rdma_cm_rej_status; |
128 | | |
129 | | /* Data Transfers */ |
130 | | static int hf_nvmeof_from_host_unknown_data; |
131 | | static int hf_nvmeof_read_to_host_req; |
132 | | static int hf_nvmeof_read_to_host_unmatched; |
133 | | static int hf_nvmeof_read_from_host_resp; |
134 | | static int hf_nvmeof_read_from_host_prev; |
135 | | static int hf_nvmeof_read_from_host_next; |
136 | | static int hf_nvmeof_read_from_host_unmatched; |
137 | | static int hf_nvmeof_write_to_host_req; |
138 | | static int hf_nvmeof_write_to_host_prev; |
139 | | static int hf_nvmeof_write_to_host_next; |
140 | | static int hf_nvmeof_write_to_host_unmatched; |
141 | | static int hf_nvmeof_to_host_unknown_data; |
142 | | |
143 | | /* Tracking commands, transfers and CQEs */ |
144 | | static int hf_nvmeof_data_resp; |
145 | | static int hf_nvmeof_cmd_qid; |
146 | | |
147 | | |
148 | | /* Initialize the subtree pointers */ |
149 | | static int ett_cm; |
150 | | static int ett_data; |
151 | | |
152 | | static range_t *gPORT_RANGE; |
153 | | |
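| | /* Recover the containing nvme_rdma_cmd_ctx from a pointer to its |
| | * embedded n_cmd_ctx member (the container_of idiom); using offsetof() |
| | * keeps this correct even if n_cmd_ctx is ever moved within the struct. |
| | */ |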
154 | | static struct nvme_rdma_cmd_ctx* nvme_cmd_to_nvme_rdma_cmd(struct nvme_cmd_ctx *nvme_cmd) |
155 | 0 | { |
156 | 0 | return (struct nvme_rdma_cmd_ctx*)(((char *)nvme_cmd) - offsetof(struct nvme_rdma_cmd_ctx, n_cmd_ctx)); |
157 | 0 | } |
158 | | |
159 | | static conversation_infiniband_data *get_conversion_data(conversation_t *conv) |
160 | 0 | { |
161 | 0 | conversation_infiniband_data *conv_data; |
162 | | |
163 | 0 | conv_data = (conversation_infiniband_data *)conversation_get_proto_data(conv, proto_ib); |
164 | 0 | if (!conv_data) |
165 | 0 | return NULL; |
166 | | |
167 | 0 | if ((conv_data->service_id & SID_MASK) != SID_ULP_TCP) |
168 | 0 | return NULL; /* the service id doesn't match that of TCP ULP - nothing for us to do here */ |
169 | | |
170 | 0 | if (!(value_is_in_range(gPORT_RANGE, (uint32_t)(conv_data->service_id & SID_PORT_MASK)))) |
171 | 0 | return NULL; /* the port doesn't match that of NVM Express Fabrics - nothing for us to do here */ |
172 | 0 | return conv_data; |
173 | 0 | } |
174 | | |
175 | | static conversation_t* |
176 | | find_ib_conversation(packet_info *pinfo, conversation_infiniband_data **uni_conv_data) |
177 | 322 | { |
178 | 322 | conversation_t *conv; |
179 | 322 | conversation_infiniband_data *conv_data; |
180 | | |
181 | 322 | conv = find_conversation(pinfo->num, &pinfo->dst, &pinfo->dst, |
182 | 322 | CONVERSATION_IBQP, pinfo->destport, pinfo->destport, |
183 | 322 | NO_ADDR_B|NO_PORT_B); |
184 | 322 | if (!conv) |
185 | 322 | return NULL; /* no conversation context - nothing for us to do here */ |
186 | | |
187 | 0 | conv_data = get_conversion_data(conv); |
188 | 0 | *uni_conv_data = conv_data; |
189 | 0 | if (!conv_data) |
190 | 0 | return NULL; |
191 | | |
192 | | /* now that we have found the unidirectional conversation, find the |
193 | | * bidirectional conversation so that we can relate it to the NVMe queue. |
194 | | */ |
195 | 0 | return find_conversation(pinfo->num, &pinfo->src, &pinfo->dst, |
196 | 0 | CONVERSATION_IBQP, pinfo->srcport, pinfo->destport, 0); |
197 | 0 | } |
198 | | |
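| | /* Look up the NVMe queue id that the infiniband dissector saved for the |
| | * conversation: the read at offset 178 of the stored MAD private data |
| | * below corresponds to the qid field of the NVMe-oF RDMA CM REQ payload |
| | * (i.e., the CM REQ private data starts at offset 176 there, and qid is |
| | * at offset 2 within it). |
| | */ |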
199 | | static uint16_t find_nvme_qid(packet_info *pinfo) |
200 | 0 | { |
201 | 0 | conversation_t *conv; |
202 | 0 | conversation_infiniband_data *conv_data; |
203 | 0 | uint16_t qid; |
204 | | |
205 | 0 | conv = find_conversation(pinfo->num, &pinfo->dst, &pinfo->dst, |
206 | 0 | CONVERSATION_IBQP, pinfo->destport, pinfo->destport, |
207 | 0 | NO_ADDR_B|NO_PORT_B); |
208 | 0 | if (!conv) |
209 | 0 | return 0; /* no conversation context - nothing for us to do here */ |
210 | | |
211 | 0 | conv_data = get_conversion_data(conv); |
212 | 0 | if (!conv_data) |
213 | 0 | return 0; |
214 | | |
215 | 0 | if (conv_data->client_to_server == false) { |
216 | 0 | memcpy(&qid, &conv_data->mad_private_data[178], 2); |
217 | 0 | return qid; |
218 | 0 | } |
219 | 0 | conv = find_conversation(pinfo->num, &pinfo->src, &pinfo->src, |
220 | 0 | CONVERSATION_IBQP, conv_data->src_qp, conv_data->src_qp, |
221 | 0 | NO_ADDR_B|NO_PORT_B); |
222 | 0 | if (!conv) |
223 | 0 | return 0; |
224 | 0 | conv_data = get_conversion_data(conv); |
225 | 0 | if (!conv_data) |
226 | 0 | return 0; |
227 | 0 | memcpy(&qid, &conv_data->mad_private_data[178], 2); |
228 | 0 | return qid; |
229 | 0 | } |
230 | | |
231 | | static struct nvme_rdma_q_ctx* |
232 | | find_add_q_ctx(packet_info *pinfo, conversation_t *conv) |
233 | 0 | { |
234 | 0 | struct nvme_rdma_q_ctx *q_ctx; |
235 | 0 | uint16_t qid; |
236 | | |
237 | 0 | q_ctx = (struct nvme_rdma_q_ctx*)conversation_get_proto_data(conv, proto_nvme_rdma); |
238 | 0 | if (!q_ctx) { |
239 | 0 | qid = find_nvme_qid(pinfo); |
240 | 0 | q_ctx = wmem_new0(wmem_file_scope(), struct nvme_rdma_q_ctx); |
241 | 0 | q_ctx->n_q_ctx.pending_cmds = wmem_tree_new(wmem_file_scope()); |
242 | 0 | q_ctx->n_q_ctx.done_cmds = wmem_tree_new(wmem_file_scope()); |
243 | 0 | q_ctx->n_q_ctx.data_requests = wmem_tree_new(wmem_file_scope()); |
244 | 0 | q_ctx->n_q_ctx.data_responses = wmem_tree_new(wmem_file_scope()); |
245 | 0 | q_ctx->n_q_ctx.data_offsets = wmem_tree_new(wmem_file_scope()); |
246 | 0 | q_ctx->n_q_ctx.qid = qid; |
247 | 0 | conversation_add_proto_data(conv, proto_nvme_rdma, q_ctx); |
248 | 0 | } |
249 | 0 | return q_ctx; |
250 | 0 | } |
251 | | |
252 | | static conversation_infiniband_data* |
253 | | find_ib_cm_conversation(packet_info *pinfo) |
254 | 0 | { |
255 | 0 | conversation_t *conv; |
256 | | |
257 | 0 | conv = find_conversation(pinfo->num, &pinfo->src, &pinfo->dst, |
258 | 0 | CONVERSATION_IBQP, pinfo->srcport, pinfo->destport, 0); |
259 | 0 | if (!conv) |
260 | 0 | return NULL; |
261 | | |
262 | 0 | return get_conversion_data(conv); |
263 | 0 | } |
264 | | |
265 | | static void add_rdma_cm_qid(char *result, uint32_t val) |
266 | 0 | { |
267 | 0 | snprintf(result, ITEM_LABEL_LENGTH, "%x (%s)", val, val ? "IOQ" : "AQ"); |
268 | 0 | } |
269 | | |
270 | | static void add_zero_base(char *result, uint32_t val) |
271 | 0 | { |
272 | 0 | snprintf(result, ITEM_LABEL_LENGTH, "%u", val+1); |
273 | 0 | } |
274 | | |
275 | | static void dissect_rdma_cm_req_packet(tvbuff_t *tvb, proto_tree *tree) |
276 | 0 | { |
277 | 0 | proto_tree *cm_tree; |
278 | 0 | proto_item *ti; |
279 | | /* NVMe-RDMA connect private data starts at offset 0 of the RDMA-CM |
280 | | * private data |
281 | | */ |
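| | /* Layout of the 32-byte CM REQ private data dissected below (multi-byte |
| | * fields are little-endian): |
| | * offset 0: recfmt (2 bytes) - record format |
| | * offset 2: qid (2 bytes) - queue id (0 == admin queue) |
| | * offset 4: hrqsize (2 bytes) - host receive queue size |
| | * offset 6: hsqsize (2 bytes) - host send queue size (zero-based) |
| | * offset 8: cntlid (2 bytes) - controller id |
| | * offset 10: reserved (22 bytes) |
| | */ |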
282 | | |
283 | | /* create display subtree for private data */ |
284 | 0 | ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 32, ENC_NA); |
285 | 0 | cm_tree = proto_item_add_subtree(ti, ett_cm); |
286 | | |
287 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_req_recfmt, tvb, |
288 | 0 | 0, 2, ENC_LITTLE_ENDIAN); |
289 | | |
290 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_req_qid, tvb, |
291 | 0 | 2, 2, ENC_LITTLE_ENDIAN); |
292 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_req_hrqsize, tvb, |
293 | 0 | 4, 2, ENC_LITTLE_ENDIAN); |
294 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_req_hsqsize, tvb, |
295 | 0 | 6, 2, ENC_LITTLE_ENDIAN); |
296 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_req_cntlid, tvb, |
297 | 0 | 8, 2, ENC_LITTLE_ENDIAN); |
298 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_req_reserved, tvb, |
299 | 0 | 10, 22, ENC_NA); |
300 | 0 | } |
301 | | |
302 | | static void dissect_rdma_cm_rsp_packet(tvbuff_t *tvb, proto_tree *tree) |
303 | 0 | { |
304 | 0 | proto_tree *cm_tree; |
305 | 0 | proto_item *ti; |
306 | | |
307 | | /* create display subtree for the private data that starts at offset 0 */ |
308 | 0 | ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 32, ENC_NA); |
309 | 0 | cm_tree = proto_item_add_subtree(ti, ett_cm); |
310 | | |
311 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_rsp_recfmt, tvb, |
312 | 0 | 0, 2, ENC_LITTLE_ENDIAN); |
313 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_rsp_crqsize, tvb, |
314 | 0 | 2, 2, ENC_LITTLE_ENDIAN); |
315 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_rsp_reserved, tvb, |
316 | 0 | 4, 28, ENC_NA); |
317 | 0 | } |
318 | | |
319 | | static void dissect_rdma_cm_rej_packet(tvbuff_t *tvb, proto_tree *tree) |
320 | 0 | { |
321 | 0 | proto_tree *cm_tree; |
322 | 0 | proto_item *ti; |
323 | | |
324 | | /* create display subtree for the private data that starts at offset 0 */ |
325 | 0 | ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 4, ENC_NA); |
326 | 0 | cm_tree = proto_item_add_subtree(ti, ett_cm); |
327 | | |
328 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_rej_recfmt, tvb, |
329 | 0 | 0, 2, ENC_LITTLE_ENDIAN); |
330 | 0 | proto_tree_add_item(cm_tree, hf_nvmeof_rdma_cm_rej_status, tvb, |
331 | 0 | 2, 2, ENC_LITTLE_ENDIAN); |
332 | 0 | } |
333 | | |
334 | | static bool dissect_rdma_cm_packet(tvbuff_t *tvb, proto_tree *tree, |
335 | | uint16_t cm_attribute_id) |
336 | 0 | { |
337 | 0 | switch (cm_attribute_id) { |
338 | 0 | case ATTR_CM_REQ: |
339 | 0 | dissect_rdma_cm_req_packet(tvb, tree); |
340 | 0 | break; |
341 | 0 | case ATTR_CM_REP: |
342 | 0 | dissect_rdma_cm_rsp_packet(tvb, tree); |
343 | 0 | break; |
344 | 0 | case ATTR_CM_REJ: |
345 | 0 | dissect_rdma_cm_rej_packet(tvb, tree); |
346 | 0 | break; |
347 | 0 | default: |
348 | 0 | break; |
349 | 0 | } |
350 | 0 | return true; |
351 | 0 | } |
352 | | |
353 | | static bool |
354 | | dissect_nvme_ib_cm(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, |
355 | | void *data) |
356 | 0 | { |
357 | | /* the infiniband dissector dissects the RDMA-CM header and passes the |
358 | | * RDMA-CM private data on for further decoding, so we start at the |
359 | | * RDMA-CM private data here |
360 | | */ |
361 | 0 | conversation_infiniband_data *conv_data = NULL; |
362 | 0 | struct infinibandinfo *info = (struct infinibandinfo *)data; |
363 | | |
364 | 0 | conv_data = find_ib_cm_conversation(pinfo); |
365 | 0 | if (!conv_data) |
366 | 0 | return false; |
367 | | |
368 | 0 | col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_RDMA); |
369 | 0 | return dissect_rdma_cm_packet(tvb, tree, info->cm_attribute_id); |
370 | 0 | } |
371 | | |
372 | | |
373 | | static struct nvme_rdma_cmd_ctx* |
374 | | bind_cmd_to_qctx(packet_info *pinfo, struct nvme_q_ctx *q_ctx, |
375 | | uint16_t cmd_id) |
376 | 0 | { |
377 | 0 | struct nvme_rdma_cmd_ctx *ctx; |
378 | | |
379 | 0 | if (!PINFO_FD_VISITED(pinfo)) { |
380 | 0 | ctx = wmem_new0(wmem_file_scope(), struct nvme_rdma_cmd_ctx); |
381 | | |
382 | 0 | nvme_add_cmd_to_pending_list(pinfo, q_ctx, |
383 | 0 | &ctx->n_cmd_ctx, (void*)ctx, cmd_id); |
384 | 0 | } else { |
385 | | /* Already visited this frame */ |
386 | 0 | ctx = (struct nvme_rdma_cmd_ctx*) |
387 | 0 | nvme_lookup_cmd_in_done_list(pinfo, q_ctx, cmd_id); |
388 | | /* if we have already visited this frame but have not found the |
389 | | * completion yet, the command will not be in the done queue, so |
390 | | * allocate a dummy ctx for the rest of the processing. |
391 | | */ |
392 | 0 | if (!ctx) |
393 | 0 | ctx = wmem_new0(wmem_file_scope(), struct nvme_rdma_cmd_ctx); |
394 | 0 | } |
395 | 0 | return ctx; |
396 | 0 | } |
397 | | |
398 | | static void |
399 | | dissect_nvme_rdma_cmd(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree, |
400 | | proto_tree *nvme_tree, struct nvme_rdma_q_ctx *q_ctx) |
401 | 0 | { |
402 | 0 | struct nvme_rdma_cmd_ctx *cmd_ctx; |
403 | 0 | uint16_t cmd_id; |
404 | 0 | uint8_t opcode; |
405 | | |
406 | 0 | opcode = tvb_get_uint8(nvme_tvb, 0); |
407 | 0 | cmd_id = tvb_get_uint16(nvme_tvb, 2, ENC_LITTLE_ENDIAN); |
408 | 0 | cmd_ctx = bind_cmd_to_qctx(pinfo, &q_ctx->n_q_ctx, cmd_id); |
409 | 0 | if (opcode == NVME_FABRIC_OPC) { |
410 | 0 | cmd_ctx->n_cmd_ctx.fabric = true; |
411 | 0 | dissect_nvmeof_fabric_cmd(nvme_tvb, pinfo, nvme_tree, &q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx, 0, true); |
412 | 0 | } else { |
413 | 0 | cmd_ctx->n_cmd_ctx.fabric = false; |
414 | 0 | dissect_nvme_cmd(nvme_tvb, pinfo, root_tree, &q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx); |
415 | 0 | } |
416 | 0 | } |
417 | | |
418 | | static void dissect_rdma_read_transfer(tvbuff_t *data_tvb, packet_info *pinfo, proto_tree *data_tree, |
419 | | struct nvme_rdma_q_ctx *q_ctx, struct nvme_rdma_cmd_ctx *rdma_cmd, unsigned len) |
420 | 0 | { |
421 | 0 | if (rdma_cmd->n_cmd_ctx.fabric == true) |
422 | 0 | dissect_nvmeof_cmd_data(data_tvb, pinfo, data_tree, 0, &q_ctx->n_q_ctx, &rdma_cmd->n_cmd_ctx, len); |
423 | 0 | else |
424 | 0 | dissect_nvme_data_response(data_tvb, pinfo, data_tree, &q_ctx->n_q_ctx, &rdma_cmd->n_cmd_ctx, len, false); |
425 | 0 | } |
426 | | |
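| | /* Host-to-controller direction: SENDs carry NVMe or fabrics commands, |
| | * and RDMA READ responses carry data the controller pulled from host |
| | * memory. |
| | */ |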
427 | | static void |
428 | | dissect_nvme_from_host(tvbuff_t *nvme_tvb, packet_info *pinfo, |
429 | | proto_tree *root_tree, proto_tree *nvme_tree, |
430 | | struct infinibandinfo *info, |
431 | | struct nvme_rdma_q_ctx *q_ctx, |
432 | | unsigned len) |
433 | | |
434 | 0 | { |
435 | 0 | switch (info->opCode) { |
436 | 0 | case RC_RDMA_READ_RESPONSE_FIRST: |
437 | 0 | case RC_RDMA_READ_RESPONSE_MIDDLE: |
438 | 0 | case RC_RDMA_READ_RESPONSE_LAST: |
439 | 0 | case RC_RDMA_READ_RESPONSE_ONLY: |
440 | 0 | { |
441 | 0 | struct nvme_cmd_ctx *cmd = NULL; |
442 | 0 | unsigned idx = 0; |
443 | 0 | if (info->opCode == RC_RDMA_READ_RESPONSE_FIRST || info->opCode == RC_RDMA_READ_RESPONSE_ONLY) { |
444 | 0 | cmd = nvme_lookup_data_tr_pkt(&q_ctx->n_q_ctx, 0, info->packet_seq_num); |
445 | 0 | if (cmd && !PINFO_FD_VISITED(pinfo)) { |
446 | 0 | q_ctx->rdma_ctx.cmd_ctx = nvme_cmd_to_nvme_rdma_cmd(cmd); |
447 | 0 | q_ctx->rdma_ctx.psn = q_ctx->rdma_ctx.first_psn = info->packet_seq_num; |
448 | 0 | cmd->tr_bytes = 0; |
449 | 0 | cmd->first_tr_psn = info->packet_seq_num; |
450 | 0 | cmd->data_tr_pkt_num[0] = pinfo->num; |
451 | 0 | } |
452 | 0 | } else { |
453 | 0 | if (!PINFO_FD_VISITED(pinfo)) { |
454 | 0 | if (q_ctx->rdma_ctx.cmd_ctx && (q_ctx->rdma_ctx.psn + 1) == info->packet_seq_num) { |
455 | 0 | idx = info->packet_seq_num - q_ctx->rdma_ctx.first_psn; |
456 | 0 | q_ctx->rdma_ctx.psn++; |
457 | 0 | cmd = &q_ctx->rdma_ctx.cmd_ctx->n_cmd_ctx; |
458 | 0 | if (idx < NVME_CMD_MAX_TRS) |
459 | 0 | cmd->data_tr_pkt_num[idx] = pinfo->num; |
460 | 0 | nvme_add_data_tr_pkt(&q_ctx->n_q_ctx, cmd, 0, info->packet_seq_num); |
461 | 0 | nvme_add_data_tr_off(&q_ctx->n_q_ctx, cmd->tr_bytes, pinfo->num); |
462 | 0 | } |
463 | 0 | } else { |
464 | 0 | cmd = nvme_lookup_data_tr_pkt(&q_ctx->n_q_ctx, 0, info->packet_seq_num); |
465 | 0 | if (cmd) |
466 | 0 | idx = info->packet_seq_num - cmd->first_tr_psn; |
467 | 0 | } |
468 | 0 | } |
469 | 0 | if (cmd) { |
470 | 0 | proto_item *ti = proto_tree_add_item(nvme_tree, hf_nvmeof_read_from_host_resp, nvme_tvb, 0, len, ENC_NA); |
471 | 0 | proto_tree *rdma_tree = proto_item_add_subtree(ti, ett_data); |
472 | 0 | nvme_publish_to_cmd_link(rdma_tree, nvme_tvb, hf_nvmeof_cmd_pkt, cmd); |
473 | 0 | nvme_publish_to_data_req_link(rdma_tree, nvme_tvb, hf_nvmeof_data_req, cmd); |
474 | 0 | if (idx && (idx-1) < NVME_CMD_MAX_TRS) |
475 | 0 | nvme_publish_link(rdma_tree, nvme_tvb, hf_nvmeof_read_from_host_prev , cmd->data_tr_pkt_num[idx-1], false); |
476 | 0 | if ((idx + 1) < NVME_CMD_MAX_TRS) |
477 | 0 | nvme_publish_link(rdma_tree, nvme_tvb, hf_nvmeof_read_from_host_next , cmd->data_tr_pkt_num[idx+1], false); |
478 | | |
479 | 0 | dissect_rdma_read_transfer(nvme_tvb, pinfo, rdma_tree, q_ctx, nvme_cmd_to_nvme_rdma_cmd(cmd), len); |
480 | 0 | if (!PINFO_FD_VISITED(pinfo)) |
481 | 0 | cmd->tr_bytes += len; |
482 | 0 | } else { |
483 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_read_from_host_unmatched, |
484 | 0 | nvme_tvb, 0, len, ENC_NA); |
485 | 0 | } |
486 | 0 | break; |
487 | 0 | } |
488 | 0 | case RC_SEND_ONLY: |
489 | 0 | if (len >= NVME_FABRIC_CMD_SIZE) |
490 | 0 | dissect_nvme_rdma_cmd(nvme_tvb, pinfo, root_tree, nvme_tree, q_ctx); |
491 | 0 | else |
492 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_from_host_unknown_data, |
493 | 0 | nvme_tvb, 0, len, ENC_NA); |
494 | 0 | break; |
495 | 0 | default: |
496 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_from_host_unknown_data, nvme_tvb, |
497 | 0 | 0, len, ENC_NA); |
498 | 0 | break; |
499 | 0 | } |
500 | 0 | } |
501 | | |
502 | | static void |
503 | | dissect_nvme_rdma_cqe(tvbuff_t *nvme_tvb, packet_info *pinfo, |
504 | | proto_tree *root_tree, proto_tree *nvme_tree, |
505 | | struct nvme_rdma_q_ctx *q_ctx) |
506 | 0 | { |
507 | 0 | struct nvme_rdma_cmd_ctx *cmd_ctx; |
508 | 0 | uint16_t cmd_id; |
509 | | |
510 | 0 | cmd_id = tvb_get_uint16(nvme_tvb, 12, ENC_LITTLE_ENDIAN); |
511 | | |
512 | 0 | if (!PINFO_FD_VISITED(pinfo)) { |
513 | | |
514 | 0 | cmd_ctx = (struct nvme_rdma_cmd_ctx*) |
515 | 0 | nvme_lookup_cmd_in_pending_list(&q_ctx->n_q_ctx, cmd_id); |
516 | 0 | if (!cmd_ctx) |
517 | 0 | goto not_found; |
518 | | |
519 | | /* we have already seen this cqe, or an identical one */ |
520 | 0 | if (cmd_ctx->n_cmd_ctx.cqe_pkt_num) |
521 | 0 | goto not_found; |
522 | | |
523 | 0 | cmd_ctx->n_cmd_ctx.cqe_pkt_num = pinfo->num; |
524 | 0 | nvme_add_cmd_cqe_to_done_list(&q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx, cmd_id); |
525 | 0 | } else { |
526 | | /* Already visited this frame */ |
527 | 0 | cmd_ctx = (struct nvme_rdma_cmd_ctx*) |
528 | 0 | nvme_lookup_cmd_in_done_list(pinfo, &q_ctx->n_q_ctx, cmd_id); |
529 | 0 | if (!cmd_ctx) |
530 | 0 | goto not_found; |
531 | 0 | } |
532 | | |
533 | 0 | nvme_update_cmd_end_info(pinfo, &cmd_ctx->n_cmd_ctx); |
534 | | |
535 | 0 | if (cmd_ctx->n_cmd_ctx.fabric) |
536 | 0 | dissect_nvmeof_fabric_cqe(nvme_tvb, pinfo, nvme_tree, &cmd_ctx->n_cmd_ctx, 0); |
537 | 0 | else |
538 | 0 | dissect_nvme_cqe(nvme_tvb, pinfo, root_tree, &q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx); |
539 | 0 | return; |
540 | | |
541 | 0 | not_found: |
542 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_to_host_unknown_data, nvme_tvb, |
543 | 0 | 0, NVME_FABRIC_CQE_SIZE, ENC_NA); |
544 | 0 | } |
545 | | |
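| | /* Controller-to-host direction: SENDs carry CQEs, RDMA READ requests |
| | * pull command data from host memory, and RDMA WRITEs push data into |
| | * host memory. |
| | */ |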
546 | | static void |
547 | | dissect_nvme_to_host(tvbuff_t *nvme_tvb, packet_info *pinfo, |
548 | | proto_tree *root_tree, proto_tree *nvme_tree, |
549 | | struct infinibandinfo *info, |
550 | | struct nvme_rdma_q_ctx *q_ctx, unsigned len) |
551 | 0 | { |
552 | 0 | switch (info->opCode) { |
553 | 0 | case RC_RDMA_READ_REQUEST: |
554 | 0 | { |
555 | 0 | struct keyed_data_req req = { |
556 | 0 | .addr = info->reth_remote_address, |
557 | 0 | .key = info->reth_remote_key, |
558 | 0 | .size = info->reth_dma_length |
559 | 0 | }; |
560 | 0 | struct nvme_cmd_ctx *cmd = NULL; |
561 | 0 | if (!PINFO_FD_VISITED(pinfo)) { |
562 | 0 | cmd = nvme_lookup_data_request(&q_ctx->n_q_ctx, &req); |
563 | 0 | if (cmd) |
564 | 0 | nvme_add_data_tr_pkt(&q_ctx->n_q_ctx, cmd, 0, info->packet_seq_num); |
565 | 0 | } else { |
566 | 0 | cmd = nvme_lookup_data_tr_pkt(&q_ctx->n_q_ctx, 0, info->packet_seq_num); |
567 | 0 | } |
568 | 0 | if (cmd) { |
569 | 0 | proto_item *ti = proto_tree_add_item(nvme_tree, |
570 | 0 | hf_nvmeof_read_to_host_req, nvme_tvb, 0, 0, ENC_NA); |
571 | 0 | proto_tree *rdma_tree = proto_item_add_subtree(ti, ett_data); |
572 | 0 | cmd->data_req_pkt_num = pinfo->num; |
573 | 0 | nvme_publish_to_data_resp_link(rdma_tree, nvme_tvb, |
574 | 0 | hf_nvmeof_data_resp, cmd); |
575 | 0 | nvme_publish_to_cmd_link(rdma_tree, nvme_tvb, |
576 | 0 | hf_nvmeof_cmd_pkt, cmd); |
577 | 0 | nvme_update_transfer_request(pinfo, cmd, &q_ctx->n_q_ctx); |
578 | 0 | } else { |
579 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_read_to_host_unmatched, |
580 | 0 | nvme_tvb, 0, len, ENC_NA); |
581 | 0 | } |
582 | 0 | break; |
583 | 0 | } |
584 | 0 | case RC_SEND_ONLY: |
585 | 0 | case RC_SEND_ONLY_INVAL: |
586 | 0 | if (len == NVME_FABRIC_CQE_SIZE) |
587 | 0 | dissect_nvme_rdma_cqe(nvme_tvb, pinfo, root_tree, nvme_tree, q_ctx); |
588 | 0 | else |
589 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_to_host_unknown_data, nvme_tvb, |
590 | 0 | 0, len, ENC_NA); |
591 | 0 | break; |
592 | 0 | case RC_RDMA_WRITE_ONLY: |
593 | 0 | case RC_RDMA_WRITE_FIRST: |
594 | 0 | case RC_RDMA_WRITE_LAST: |
595 | 0 | case RC_RDMA_WRITE_MIDDLE: |
596 | 0 | { |
597 | 0 | struct nvme_cmd_ctx *cmd = NULL; |
598 | 0 | unsigned idx = 0; |
599 | 0 | if (info->opCode == RC_RDMA_WRITE_ONLY || info->opCode == RC_RDMA_WRITE_FIRST) { |
600 | 0 | struct keyed_data_req req = { |
601 | 0 | .addr = info->reth_remote_address, |
602 | 0 | .key = info->reth_remote_key, |
603 | 0 | .size = info->reth_dma_length |
604 | 0 | }; |
605 | 0 | if (!PINFO_FD_VISITED(pinfo)) { |
606 | 0 | cmd = nvme_lookup_data_request(&q_ctx->n_q_ctx, &req); |
607 | 0 | if (cmd) { |
608 | 0 | nvme_add_data_tr_pkt(&q_ctx->n_q_ctx, cmd, 0, info->packet_seq_num); |
609 | 0 | cmd->first_tr_psn = info->packet_seq_num; |
610 | 0 | cmd->data_tr_pkt_num[0] = pinfo->num; |
611 | 0 | q_ctx->rdma_ctx.cmd_ctx = nvme_cmd_to_nvme_rdma_cmd(cmd); |
612 | 0 | q_ctx->rdma_ctx.first_psn = q_ctx->rdma_ctx.psn = info->packet_seq_num; |
613 | 0 | } |
614 | 0 | } else { |
615 | 0 | cmd = nvme_lookup_data_tr_pkt(&q_ctx->n_q_ctx, 0, info->packet_seq_num); |
616 | 0 | } |
617 | 0 | } else { |
618 | 0 | if (PINFO_FD_VISITED(pinfo)) { |
619 | 0 | cmd = nvme_lookup_data_tr_pkt(&q_ctx->n_q_ctx, 0, info->packet_seq_num); |
620 | 0 | if (cmd) |
621 | 0 | idx = info->packet_seq_num - cmd->first_tr_psn; |
622 | 0 | } else if (q_ctx->rdma_ctx.cmd_ctx && (q_ctx->rdma_ctx.psn + 1) == info->packet_seq_num) { |
623 | 0 | idx = info->packet_seq_num - q_ctx->rdma_ctx.first_psn; |
624 | 0 | q_ctx->rdma_ctx.psn++; |
625 | 0 | cmd = &q_ctx->rdma_ctx.cmd_ctx->n_cmd_ctx; |
626 | 0 | if (idx < NVME_CMD_MAX_TRS) |
627 | 0 | cmd->data_tr_pkt_num[idx] = pinfo->num; |
628 | 0 | nvme_add_data_tr_pkt(&q_ctx->n_q_ctx, cmd, 0, info->packet_seq_num); |
629 | 0 | nvme_add_data_tr_off(&q_ctx->n_q_ctx, cmd->tr_bytes, pinfo->num); |
630 | 0 | } |
631 | 0 | } |
632 | 0 | if (cmd) { |
633 | 0 | proto_item *ti = proto_tree_add_item(nvme_tree, hf_nvmeof_write_to_host_req, nvme_tvb, 0, 0, ENC_NA); |
634 | 0 | proto_tree *rdma_tree = proto_item_add_subtree(ti, ett_data); |
635 | 0 | nvme_publish_to_cmd_link(rdma_tree, nvme_tvb, hf_nvmeof_cmd_pkt, cmd); |
636 | 0 | if (idx && (idx-1) < NVME_CMD_MAX_TRS) |
637 | 0 | nvme_publish_link(rdma_tree, nvme_tvb, hf_nvmeof_write_to_host_prev , cmd->data_tr_pkt_num[idx-1], false); |
638 | 0 | if ((idx + 1) < NVME_CMD_MAX_TRS) |
639 | 0 | nvme_publish_link(rdma_tree, nvme_tvb, hf_nvmeof_write_to_host_next , cmd->data_tr_pkt_num[idx+1], false); |
640 | 0 | dissect_nvme_data_response(nvme_tvb, pinfo, root_tree, &q_ctx->n_q_ctx, cmd, len, false); |
641 | 0 | if (!PINFO_FD_VISITED(pinfo)) |
642 | 0 | cmd->tr_bytes += len; |
643 | 0 | } else { |
644 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_write_to_host_unmatched, nvme_tvb, 0, len, ENC_NA); |
645 | 0 | } |
646 | 0 | break; |
647 | 0 | } |
648 | 0 | default: |
649 | 0 | proto_tree_add_item(nvme_tree, hf_nvmeof_to_host_unknown_data, nvme_tvb, |
650 | 0 | 0, len, ENC_NA); |
651 | 0 | break; |
652 | 0 | } |
653 | 0 | } |
654 | | |
655 | | static bool |
656 | | dissect_nvme_ib(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data) |
657 | 322 | { |
658 | 322 | struct infinibandinfo *info = (struct infinibandinfo *)data; |
659 | 322 | conversation_infiniband_data *conv_data = NULL; |
660 | 322 | conversation_t *conv; |
661 | 322 | proto_tree *nvme_tree; |
662 | 322 | proto_item *ti; |
663 | 322 | struct nvme_rdma_q_ctx *q_ctx; |
664 | 322 | unsigned len = tvb_reported_length(tvb); |
665 | | |
666 | 322 | conv = find_ib_conversation(pinfo, &conv_data); |
667 | 322 | if (!conv) |
668 | 322 | return false; |
669 | | |
670 | 0 | q_ctx = find_add_q_ctx(pinfo, conv); |
671 | 0 | if (!q_ctx) |
672 | 0 | return false; |
673 | | |
674 | 0 | col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_RDMA); |
675 | | |
676 | 0 | ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, len, ENC_NA); |
677 | 0 | nvme_tree = proto_item_add_subtree(ti, ett_data); |
678 | | |
679 | 0 | nvme_publish_qid(nvme_tree, hf_nvmeof_cmd_qid, q_ctx->n_q_ctx.qid); |
680 | | |
681 | 0 | if (conv_data->client_to_server) |
682 | 0 | dissect_nvme_from_host(tvb, pinfo, tree, nvme_tree, info, q_ctx, len); |
683 | 0 | else |
684 | 0 | dissect_nvme_to_host(tvb, pinfo, tree, nvme_tree, info, q_ctx, len); |
685 | | |
686 | 0 | return true; |
687 | 0 | } |
688 | | |
689 | | void |
690 | | proto_register_nvme_rdma(void) |
691 | 14 | { |
692 | 14 | module_t *nvme_rdma_module; |
693 | 14 | static hf_register_info hf[] = { |
694 | | /* IB RDMA CM fields */ |
695 | 14 | { &hf_nvmeof_rdma_cm_req_recfmt, |
696 | 14 | { "Record Format", "nvme-rdma.cm.req.recfmt", |
697 | 14 | FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL} |
698 | 14 | }, |
699 | 14 | { &hf_nvmeof_rdma_cm_req_qid, |
700 | 14 | { "Queue Id", "nvme-rdma.cm.req.qid", |
701 | 14 | FT_UINT16, BASE_CUSTOM, CF_FUNC(add_rdma_cm_qid), 0x0, NULL, HFILL} |
702 | 14 | }, |
703 | 14 | { &hf_nvmeof_rdma_cm_req_hrqsize, |
704 | 14 | { "RDMA QP Host Receive Queue Size", "nvme-rdma.cm.req.hrqsize", |
705 | 14 | FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL} |
706 | 14 | }, |
707 | 14 | { &hf_nvmeof_rdma_cm_req_hsqsize, |
708 | 14 | { "RDMA QP Host Send Queue Size", "nvme-rdma.cm.req.hsqsize", |
709 | 14 | FT_UINT16, BASE_CUSTOM, CF_FUNC(add_zero_base), 0x0, NULL, HFILL} |
710 | 14 | }, |
711 | 14 | { &hf_nvmeof_rdma_cm_req_cntlid, |
712 | 14 | { "Controller ID", "nvme-rdma.cm.req.cntlid", |
713 | 14 | FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL} |
714 | 14 | }, |
715 | 14 | { &hf_nvmeof_rdma_cm_req_reserved, |
716 | 14 | { "Reserved", "nvme-rdma.cm.req.reserved", |
717 | 14 | FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL} |
718 | 14 | }, |
719 | 14 | { &hf_nvmeof_rdma_cm_rsp_recfmt, |
720 | 14 | { "Record Format", "nvme-rdma.cm.rsp.recfmt", |
721 | 14 | FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL} |
722 | 14 | }, |
723 | 14 | { &hf_nvmeof_rdma_cm_rsp_crqsize, |
724 | 14 | { "RDMA QP Controller Receive Queue Size", "nvme-rdma.cm.rsp.crqsize", |
725 | 14 | FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL} |
726 | 14 | }, |
727 | 14 | { &hf_nvmeof_rdma_cm_rsp_reserved, |
728 | 14 | { "Reserved", "nvme-rdma.cm.rsp.reserved", |
729 | 14 | FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL} |
730 | 14 | }, |
731 | 14 | { &hf_nvmeof_rdma_cm_rej_recfmt, |
732 | 14 | { "Record Format", "nvme-rdma.cm.rej.recfmt", |
733 | 14 | FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL} |
734 | 14 | }, |
735 | 14 | { &hf_nvmeof_rdma_cm_rej_status, |
736 | 14 | { "Status", "nvme-rdma.cm.rej.status", |
737 | 14 | FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL} |
738 | 14 | }, |
739 | 14 | { &hf_nvmeof_from_host_unknown_data, |
740 | 14 | { "Dissection unsupported", "nvme-rdma.unknown_data", |
741 | 14 | FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL} |
742 | 14 | }, |
743 | 14 | { &hf_nvmeof_read_to_host_req, |
744 | 14 | { "RDMA Read Request Sent to Host", "nvme-rdma.read_to_host_req", |
745 | 14 | FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL} |
746 | 14 | }, |
747 | 14 | { &hf_nvmeof_read_to_host_unmatched, |
748 | 14 | { "RDMA Read Request Sent to Host (no Command Match)", "nvme-rdma.read_to_host_req", |
749 | 14 | FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL} |
750 | 14 | }, |
751 | 14 | { &hf_nvmeof_read_from_host_resp, |
752 | 14 | { "RDMA Read Transfer Sent from Host", "nvme-rdma.read_from_host_resp", |
753 | 14 | FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL} |
754 | 14 | }, |
755 | 14 | { &hf_nvmeof_read_from_host_prev, |
756 | 14 | { "Previous Read Transfer", "nvme-rdma.read_from_host_prev", |
757 | 14 | FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Previous read transfer is in this frame", HFILL} |
758 | 14 | }, |
759 | 14 | { &hf_nvmeof_read_from_host_next, |
760 | 14 | { "Next Read Transfer", "nvme-rdma.read_from_host_next", |
761 | 14 | FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Next read transfer is in this frame", HFILL} |
762 | 14 | }, |
763 | 14 | { &hf_nvmeof_read_from_host_unmatched, |
764 | 14 | { "RDMA Read Transfer Sent from Host (no Command Match)", "nvme-rdma.read_from_host_resp", |
765 | 14 | FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL} |
766 | 14 | }, |
767 | 14 | { &hf_nvmeof_write_to_host_req, |
768 | 14 | { "RDMA Write Request Sent to Host", "nvme-rdma.write_to_host_req", |
769 | 14 | FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL} |
770 | 14 | }, |
771 | 14 | { &hf_nvmeof_write_to_host_prev, |
772 | 14 | { "Previous Write Transfer", "nvme-rdma.write_to_host_prev", |
773 | 14 | FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Previous write transfer is in this frame", HFILL} |
774 | 14 | }, |
775 | 14 | { &hf_nvmeof_write_to_host_next, |
776 | 14 | { "Next Write Transfer", "nvme-rdma.write_to_host_next", |
777 | 14 | FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Next write transfer is in this frame", HFILL} |
778 | 14 | }, |
779 | 14 | { &hf_nvmeof_write_to_host_unmatched, |
780 | 14 | { "RDMA Write Request Sent to Host (no Command Match)", "nvme-rdma.write_to_host_req", |
781 | 14 | FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL} |
782 | 14 | }, |
783 | 14 | { &hf_nvmeof_to_host_unknown_data, |
784 | 14 | { "Dissection unsupported", "nvme-rdma.unknown_data", |
785 | 14 | FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL} |
786 | 14 | }, |
787 | 14 | { &hf_nvmeof_data_resp, |
788 | 14 | { "DATA Transfer Response", "nvme-rdma.data_resp", |
789 | 14 | FT_FRAMENUM, BASE_NONE, NULL, 0, |
790 | 14 | "DATA transfer response for this transaction is in this frame", HFILL } |
791 | 14 | }, |
792 | 14 | { &hf_nvmeof_cmd_qid, |
793 | 14 | { "Cmd Qid", "nvme-rdma.cmd.qid", |
794 | 14 | FT_UINT16, BASE_HEX, NULL, 0x0, |
795 | 14 | "Qid on which command is issued", HFILL } |
796 | 14 | }, |
797 | 14 | }; |
798 | 14 | static int *ett[] = { |
799 | 14 | &ett_cm, |
800 | 14 | &ett_data, |
801 | 14 | }; |
802 | | |
803 | 14 | proto_nvme_rdma = proto_register_protocol("NVM Express Fabrics RDMA", |
804 | 14 | NVME_FABRICS_RDMA, "nvme-rdma"); |
805 | | |
806 | 14 | proto_register_field_array(proto_nvme_rdma, hf, array_length(hf)); |
807 | 14 | proto_register_subtree_array(ett, array_length(ett)); |
808 | | |
809 | | /* Register preferences */ |
810 | 14 | nvme_rdma_module = prefs_register_protocol(proto_nvme_rdma, NULL); |
811 | | |
812 | 14 | range_convert_str(wmem_epan_scope(), &gPORT_RANGE, NVME_RDMA_TCP_PORT_RANGE, MAX_TCP_PORT); |
813 | 14 | prefs_register_range_preference(nvme_rdma_module, |
814 | 14 | "subsystem_ports", |
815 | 14 | "Subsystem Ports Range", |
816 | 14 | "Range of NVMe Subsystem ports " |
817 | 14 | "(default " NVME_RDMA_TCP_PORT_RANGE ")", |
818 | 14 | &gPORT_RANGE, MAX_TCP_PORT); |
819 | 14 | } |
820 | | |
821 | | void |
822 | | proto_reg_handoff_nvme_rdma(void) |
823 | 14 | { |
824 | 14 | heur_dissector_add("infiniband.mad.cm.private", dissect_nvme_ib_cm, |
825 | 14 | "NVMe Fabrics RDMA CM packets", |
826 | 14 | "nvme_rdma_cm_private", proto_nvme_rdma, HEURISTIC_ENABLE); |
827 | 14 | heur_dissector_add("infiniband.payload", dissect_nvme_ib, |
828 | 14 | "NVMe Fabrics RDMA packets", |
829 | 14 | "nvme_rdma", proto_nvme_rdma, HEURISTIC_ENABLE); |
830 | 14 | ib_handler = find_dissector_add_dependency("infiniband", proto_nvme_rdma); |
831 | 14 | proto_ib = dissector_handle_get_protocol_index(ib_handler); |
832 | 14 | } |
833 | | |
834 | | /* |
835 | | * Editor modelines - https://www.wireshark.org/tools/modelines.html |
836 | | * |
837 | | * Local variables: |
838 | | * c-basic-offset: 4 |
839 | | * tab-width: 8 |
840 | | * indent-tabs-mode: nil |
841 | | * End: |
842 | | * |
843 | | * vi: set shiftwidth=4 tabstop=8 expandtab: |
844 | | * :indentSize=4:tabSize=8:noTabs=true: |
845 | | */ |