Coverage Report

Created: 2025-08-04 07:15

/src/wireshark/epan/dissectors/packet-nvme-tcp.c
Line
Count
Source (jump to first uncovered line)
1
/* packet-nvme-tcp.c
2
 * Routines for NVM Express over Fabrics(TCP) dissection
3
 * Code by Solganik Alexander <solganik@gmail.com>
4
 *
5
 * Wireshark - Network traffic analyzer
6
 * By Gerald Combs <gerald@wireshark.org>
7
 * Copyright 1998 Gerald Combs
8
 *
9
 * SPDX-License-Identifier: GPL-2.0-or-later
10
 */
11
12
/*
13
 * Copyright (C) 2019 Lightbits Labs Ltd. - All Rights Reserved
14
*/
15
16
/*
17
 NVM Express is high speed interface for accessing solid state drives.
18
 NVM Express specifications are maintained by NVM Express industry
19
 association at http://www.nvmexpress.org.
20
21
 This file adds support to dissect NVM Express over fabrics packets
22
 for TCP. This adds very basic support for dissecting commands
23
 completions.
24
25
 Current dissection supports dissection of
26
 (a) NVMe cmd and cqe
27
 (b) NVMe Fabric command and cqe
28
 As part of it, it also calculates cmd completion latencies.
29
30
 NVM Express TCP TCP port assigned by IANA that maps to NVMe-oF service
31
 TCP port can be found at
32
 http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=NVM+Express
33
34
 */
35
36
#include "config.h"
37
#include <epan/packet.h>
38
#include <epan/prefs.h>
39
#include <epan/conversation.h>
40
#include <epan/crc32-tvb.h>
41
#include <epan/tfs.h>
42
#include <wsutil/array.h>
43
#include "packet-tcp.h"
44
#include "packet-nvme.h"
45
46
#include "packet-tls.h"
47
48
static int proto_nvme_tcp;
49
static dissector_handle_t nvmet_tcp_handle;
50
static dissector_handle_t nvmet_tls_handle;
51
52
14
#define NVME_TCP_PORT_RANGE    "4420,8009" /* IANA registered */
53
54
42
#define NVME_FABRICS_TCP "NVMe/TCP"
55
95
#define NVME_TCP_HEADER_SIZE 8
56
14
#define PDU_LEN_OFFSET_FROM_HEADER 4
57
static range_t *gPORT_RANGE;
58
static bool nvme_tcp_check_hdgst;
59
static bool nvme_tcp_check_ddgst;
60
5
#define NVME_TCP_DATA_PDU_SIZE 24
61
62
/* PDU types carried in the first byte of the NVMe/TCP common header.
 * Note: 0x8 is unassigned by the spec, hence the gap before R2T. */
enum nvme_tcp_pdu_type {
    nvme_tcp_icreq = 0x0,
    nvme_tcp_icresp = 0x1,
    nvme_tcp_h2c_term = 0x2,
    nvme_tcp_c2h_term = 0x3,
    nvme_tcp_cmd = 0x4,
    nvme_tcp_rsp = 0x5,
    nvme_tcp_h2c_data = 0x6,
    nvme_tcp_c2h_data = 0x7,
    nvme_tcp_r2t = 0x9,
    nvme_tcp_kdreq = 0xa,
    nvme_tcp_kdresp = 0xb,
    NVMET_MAX_PDU_TYPE = nvme_tcp_kdresp
};
76
77
/* Human-readable names for the PDU types above (used by hf_nvme_tcp_type). */
static const value_string nvme_tcp_pdu_type_vals[] = {
    { nvme_tcp_icreq, "ICReq" },
    { nvme_tcp_icresp, "ICResp" },
    { nvme_tcp_h2c_term, "H2CTerm" },
    { nvme_tcp_c2h_term, "C2HTerm" },
    { nvme_tcp_cmd, "CapsuleCommand" },
    { nvme_tcp_rsp, "CapsuleResponse" },
    { nvme_tcp_h2c_data, "H2CData" },
    { nvme_tcp_c2h_data, "C2HData" },
    { nvme_tcp_r2t, "Ready To Transfer" },
    { nvme_tcp_kdreq, "Kickstart Discovery Request" },
    { nvme_tcp_kdresp, "Kickstart Discovery Response" },
    { 0, NULL }
};
91
92
/* Fatal Error Status (FES) names for C2HTerm/H2CTerm PDUs. */
static const value_string nvme_tcp_termreq_fes[] = {
    {0x0, "Reserved"                        },
    {0x1, "Invalid PDU Header Field"        },
    {0x2, "PDU Sequence Error"              },
    {0x3, "Header Digest Error"             },
    {0x4, "Data Transfer Out of Range"      },
    {0x5, "R2T Limit Exceeded"              },
    {0x6, "Unsupported Parameter"           },
    {0,   NULL                              },
};
102
103
/* Fatal Error Status codes used when dissecting termination requests.
 * R2T_LIMIT_EXCEEDED and DATA_LIMIT_EXCEEDED deliberately share 0x05
 * (same layout as the Linux kernel's nvme-tcp header). */
enum nvme_tcp_fatal_error_status
{
    NVME_TCP_FES_INVALID_PDU_HDR =      0x01,
    NVME_TCP_FES_PDU_SEQ_ERR =          0x02,
    NVME_TCP_FES_HDR_DIGEST_ERR =       0x03,
    NVME_TCP_FES_DATA_OUT_OF_RANGE =    0x04,
    NVME_TCP_FES_R2T_LIMIT_EXCEEDED =   0x05,
    NVME_TCP_FES_DATA_LIMIT_EXCEEDED =  0x05,
    NVME_TCP_FES_UNSUPPORTED_PARAM =    0x06,
};
113
114
/* FLAGS bits of the common PDU header (byte 1). */
enum nvme_tcp_pdu_flags {
    NVME_TCP_F_HDGST         = (1 << 0),  /* header digest present */
    NVME_TCP_F_DDGST         = (1 << 1),  /* data digest present */
    NVME_TCP_F_DATA_LAST     = (1 << 2),  /* last data PDU of the transfer */
    NVME_TCP_F_DATA_SUCCESS  = (1 << 3),  /* response implied; no CQE follows */
};
120
121
122
/* Digest-enable bits negotiated in ICReq/ICResp (DGST field). */
enum nvme_tcp_digest_option {
    NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
    NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
};
126
127
128
0
#define NVME_FABRIC_CMD_SIZE NVME_CMD_SIZE
129
1
#define NVME_FABRIC_CQE_SIZE NVME_CQE_SIZE
130
5
#define NVME_TCP_DIGEST_LENGTH  4
131
132
/* Per-TCP-connection (queue) state; wraps the transport-generic NVMe
 * queue context from packet-nvme.h. */
struct nvme_tcp_q_ctx {
    struct nvme_q_ctx n_q_ctx;
};

/* Per-command tracking state; wraps the transport-generic NVMe
 * command context used to match commands with their completions. */
struct nvme_tcp_cmd_ctx {
    struct nvme_cmd_ctx n_cmd_ctx;
};
139
140
void proto_reg_handoff_nvme_tcp(void);
141
void proto_register_nvme_tcp(void);
142
143
144
static int hf_nvme_tcp_type;
145
static int hf_nvme_tcp_flags;
146
static int hf_pdu_flags_hdgst;
147
static int hf_pdu_flags_ddgst;
148
static int hf_pdu_flags_data_last;
149
static int hf_pdu_flags_data_success;
150
151
/* Bitmask sub-fields of the PDU FLAGS byte, for
 * proto_tree_add_bitmask-style dissection; NULL-terminated. */
static int * const nvme_tcp_pdu_flags[] = {
    &hf_pdu_flags_hdgst,
    &hf_pdu_flags_ddgst,
    &hf_pdu_flags_data_last,
    &hf_pdu_flags_data_success,
    NULL
};
158
159
static int hf_nvme_tcp_hdgst;
160
static int hf_nvme_tcp_ddgst;
161
static int hf_nvme_tcp_hlen;
162
static int hf_nvme_tcp_pdo;
163
static int hf_nvme_tcp_plen;
164
static int hf_nvme_tcp_hdgst_status;
165
static int hf_nvme_tcp_ddgst_status;
166
167
/* NVMe tcp icreq/icresp fields */
168
static int hf_nvme_tcp_icreq;
169
static int hf_nvme_tcp_icreq_pfv;
170
static int hf_nvme_tcp_icreq_maxr2t;
171
static int hf_nvme_tcp_icreq_hpda;
172
static int hf_nvme_tcp_icreq_digest;
173
static int hf_nvme_tcp_icresp;
174
static int hf_nvme_tcp_icresp_pfv;
175
static int hf_nvme_tcp_icresp_cpda;
176
static int hf_nvme_tcp_icresp_digest;
177
static int hf_nvme_tcp_icresp_maxdata;
178
179
/* NVMe tcp c2h/h2c termreq fields */
180
static int hf_nvme_tcp_c2htermreq;
181
static int hf_nvme_tcp_c2htermreq_fes;
182
static int hf_nvme_tcp_c2htermreq_phfo;
183
static int hf_nvme_tcp_c2htermreq_phd;
184
static int hf_nvme_tcp_c2htermreq_upfo;
185
static int hf_nvme_tcp_c2htermreq_reserved;
186
static int hf_nvme_tcp_c2htermreq_data;
187
static int hf_nvme_tcp_h2ctermreq;
188
static int hf_nvme_tcp_h2ctermreq_fes;
189
static int hf_nvme_tcp_h2ctermreq_phfo;
190
static int hf_nvme_tcp_h2ctermreq_phd;
191
static int hf_nvme_tcp_h2ctermreq_upfo;
192
static int hf_nvme_tcp_h2ctermreq_reserved;
193
static int hf_nvme_tcp_h2ctermreq_data;
194
195
/* NVMe fabrics command */
196
static int hf_nvme_fabrics_cmd_cid;
197
198
/* NVMe fabrics command data*/
199
static int hf_nvme_fabrics_cmd_data;
200
static int hf_nvme_tcp_unknown_data;
201
202
static int hf_nvme_tcp_r2t_pdu;
203
static int hf_nvme_tcp_r2t_offset;
204
static int hf_nvme_tcp_r2t_length;
205
static int hf_nvme_tcp_r2t_resvd;
206
207
/* tracking Cmd and its respective CQE */
208
static int hf_nvme_tcp_cmd_pkt;
209
static int hf_nvme_fabrics_cmd_qid;
210
211
/* Data response fields */
212
static int hf_nvme_tcp_data_pdu;
213
static int hf_nvme_tcp_pdu_ttag;
214
static int hf_nvme_tcp_data_pdu_data_offset;
215
static int hf_nvme_tcp_data_pdu_data_length;
216
static int hf_nvme_tcp_data_pdu_data_resvd;
217
218
static int ett_nvme_tcp;
219
220
static unsigned
221
get_nvme_tcp_pdu_len(packet_info *pinfo _U_,
222
                     tvbuff_t *tvb,
223
                     int offset,
224
                     void* data _U_)
225
14
{
226
14
    return tvb_get_letohl(tvb, offset + PDU_LEN_OFFSET_FROM_HEADER);
227
14
}
228
229
static void
230
dissect_nvme_tcp_icreq(tvbuff_t *tvb,
231
                       packet_info *pinfo,
232
                       int offset,
233
                       proto_tree *tree)
234
1
{
235
1
    proto_item *tf;
236
1
    proto_item *icreq_tree;
237
238
1
    col_set_str(pinfo->cinfo, COL_INFO, "Initialize Connection Request");
239
1
    tf = proto_tree_add_item(tree, hf_nvme_tcp_icreq, tvb, offset, 8, ENC_NA);
240
1
    icreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);
241
242
1
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_pfv, tvb, offset, 2,
243
1
            ENC_LITTLE_ENDIAN);
244
1
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_hpda, tvb, offset + 2, 1,
245
1
            ENC_NA);
246
1
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_digest, tvb, offset + 3,
247
1
            1, ENC_NA);
248
1
    proto_tree_add_item(icreq_tree, hf_nvme_tcp_icreq_maxr2t, tvb, offset + 4,
249
1
            4, ENC_LITTLE_ENDIAN);
250
1
}
251
252
static void
253
dissect_nvme_tcp_icresp(tvbuff_t *tvb,
254
                        packet_info *pinfo,
255
                        int offset,
256
                        proto_tree *tree)
257
2
{
258
2
    proto_item *tf;
259
2
    proto_item *icresp_tree;
260
261
2
    col_set_str(pinfo->cinfo, COL_INFO, "Initialize Connection Response");
262
2
    tf = proto_tree_add_item(tree, hf_nvme_tcp_icresp, tvb, offset, 8, ENC_NA);
263
2
    icresp_tree = proto_item_add_subtree(tf, ett_nvme_tcp);
264
265
2
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_pfv, tvb, offset, 2,
266
2
            ENC_LITTLE_ENDIAN);
267
2
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_cpda, tvb, offset + 2,
268
2
            1, ENC_NA);
269
2
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_digest, tvb, offset + 3,
270
2
            1, ENC_NA);
271
2
    proto_tree_add_item(icresp_tree, hf_nvme_tcp_icresp_maxdata, tvb,
272
2
            offset + 4, 4, ENC_LITTLE_ENDIAN);
273
2
}
274
275
static struct nvme_tcp_cmd_ctx*
276
bind_cmd_to_qctx(packet_info *pinfo,
277
                 struct nvme_q_ctx *q_ctx,
278
                 uint16_t cmd_id)
279
6
{
280
6
    struct nvme_tcp_cmd_ctx *ctx;
281
282
    /* wireshark will dissect same packet multiple times
283
     * when display is refreshed*/
284
6
    if (!PINFO_FD_VISITED(pinfo)) {
285
6
        ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_cmd_ctx);
286
6
        nvme_add_cmd_to_pending_list(pinfo, q_ctx, &ctx->n_cmd_ctx, (void*) ctx,
287
6
                cmd_id);
288
6
    } else {
289
        /* Already visited this frame */
290
0
        ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_done_list(pinfo,
291
0
                q_ctx, cmd_id);
292
        /* if we have already visited frame but haven't found completion yet,
293
         * we won't find cmd in done q, so allocate a dummy ctx for doing
294
         * rest of the processing.
295
         */
296
0
        if (!ctx)
297
0
            ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_cmd_ctx);
298
0
    }
299
300
6
    return ctx;
301
6
}
302
303
static void
304
dissect_nvme_tcp_command(tvbuff_t *tvb,
305
                         packet_info *pinfo,
306
                         proto_tree *root_tree,
307
                         proto_tree *nvme_tcp_tree,
308
                         proto_item *nvme_tcp_ti,
309
                         struct nvme_tcp_q_ctx *queue, int offset,
310
                         uint32_t incapsuled_data_size,
311
                         uint32_t data_offset)
312
6
{
313
6
    struct nvme_tcp_cmd_ctx *cmd_ctx;
314
6
    uint16_t cmd_id;
315
6
    uint8_t opcode;
316
6
    const char *cmd_string;
317
318
6
    opcode = tvb_get_uint8(tvb, offset);
319
6
    cmd_id = tvb_get_uint16(tvb, offset + 2, ENC_LITTLE_ENDIAN);
320
6
    cmd_ctx = bind_cmd_to_qctx(pinfo, &queue->n_q_ctx, cmd_id);
321
322
    /* if record did not contain connect command we wont know qid,
323
     * so lets guess if this is an admin queue */
324
6
    if ((queue->n_q_ctx.qid == UINT16_MAX) && !nvme_is_io_queue_opcode(opcode))
325
5
        queue->n_q_ctx.qid = 0;
326
327
6
    if (opcode == NVME_FABRIC_OPC) {
328
0
        cmd_ctx->n_cmd_ctx.fabric = true;
329
0
        dissect_nvmeof_fabric_cmd(tvb, pinfo, nvme_tcp_tree, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, offset, false);
330
0
        if (cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype == NVME_FCTYPE_CONNECT)
331
0
            queue->n_q_ctx.qid = cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.cnct.qid;
332
0
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
333
0
        proto_item_append_text(nvme_tcp_ti,
334
0
                ", Fabrics Type: %s (0x%02x) Cmd ID: 0x%04x", cmd_string,
335
0
                cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id);
336
0
        if (incapsuled_data_size > 0) {
337
0
            proto_tree *data_tree;
338
0
            proto_item *ti;
339
340
0
            ti = proto_tree_add_item(nvme_tcp_tree, hf_nvme_fabrics_cmd_data, tvb, offset, incapsuled_data_size, ENC_NA);
341
0
            data_tree = proto_item_add_subtree(ti, ett_nvme_tcp);
342
0
            dissect_nvmeof_cmd_data(tvb, pinfo, data_tree, offset + NVME_FABRIC_CMD_SIZE + data_offset, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, incapsuled_data_size);
343
0
        }
344
0
        return;
345
0
    }
346
347
    /* In case of incapsuled nvme command tcp length is only a header */
348
6
    proto_item_set_len(nvme_tcp_ti, NVME_TCP_HEADER_SIZE);
349
6
    tvbuff_t *nvme_tvbuff;
350
6
    cmd_ctx->n_cmd_ctx.fabric = false;
351
6
    nvme_tvbuff = tvb_new_subset_remaining(tvb, NVME_TCP_HEADER_SIZE);
352
6
    cmd_string = nvme_get_opcode_string(opcode, queue->n_q_ctx.qid);
353
6
    dissect_nvme_cmd(nvme_tvbuff, pinfo, root_tree, &queue->n_q_ctx,
354
6
            &cmd_ctx->n_cmd_ctx);
355
6
    proto_item_append_text(nvme_tcp_ti,
356
6
            ", NVMe Opcode: %s (0x%02x) Cmd ID: 0x%04x", cmd_string, opcode,
357
6
            cmd_id);
358
359
    /* This is an inline write */
360
6
    if (incapsuled_data_size > 0) {
361
6
        tvbuff_t *nvme_data;
362
363
6
        nvme_data = tvb_new_subset_remaining(tvb, offset +
364
6
                NVME_CMD_SIZE + data_offset);
365
6
        dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
366
6
                &cmd_ctx->n_cmd_ctx, incapsuled_data_size, true);
367
6
    }
368
6
}
369
370
static uint32_t
371
dissect_nvme_tcp_data_pdu(tvbuff_t *tvb,
372
                          packet_info *pinfo,
373
                          int offset,
374
2
                          proto_tree *tree) {
375
2
    uint32_t data_length;
376
2
    proto_item *tf;
377
2
    proto_item *data_tree;
378
379
2
    col_set_str(pinfo->cinfo, COL_PROTOCOL, "NVMe");
380
381
2
    tf = proto_tree_add_item(tree, hf_nvme_tcp_data_pdu, tvb, offset,
382
2
            NVME_TCP_DATA_PDU_SIZE - NVME_TCP_HEADER_SIZE, ENC_NA);
383
2
    data_tree = proto_item_add_subtree(tf, ett_nvme_tcp);
384
385
2
    proto_tree_add_item(data_tree, hf_nvme_fabrics_cmd_cid, tvb, offset, 2,
386
2
            ENC_LITTLE_ENDIAN);
387
388
2
    proto_tree_add_item(data_tree, hf_nvme_tcp_pdu_ttag, tvb, offset + 2, 2,
389
2
            ENC_LITTLE_ENDIAN);
390
391
2
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_offset, tvb,
392
2
            offset + 4, 4, ENC_LITTLE_ENDIAN);
393
394
2
    data_length = tvb_get_uint32(tvb, offset + 8, ENC_LITTLE_ENDIAN);
395
2
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_length, tvb,
396
2
            offset + 8, 4, ENC_LITTLE_ENDIAN);
397
398
2
    proto_tree_add_item(data_tree, hf_nvme_tcp_data_pdu_data_resvd, tvb,
399
2
            offset + 12, 4, ENC_NA);
400
401
2
    return data_length;
402
2
}
403
404
static void
405
dissect_nvme_tcp_c2h_data(tvbuff_t *tvb,
406
                          packet_info *pinfo,
407
                          proto_tree *root_tree,
408
                          proto_tree *nvme_tcp_tree,
409
                          proto_item *nvme_tcp_ti,
410
                          struct nvme_tcp_q_ctx *queue,
411
                          int offset,
412
                          uint32_t data_offset)
413
1
{
414
1
    struct nvme_tcp_cmd_ctx *cmd_ctx;
415
1
    uint32_t cmd_id;
416
1
    uint32_t data_length;
417
1
    tvbuff_t *nvme_data;
418
1
    const char *cmd_string;
419
420
1
    cmd_id = tvb_get_uint16(tvb, offset, ENC_LITTLE_ENDIAN);
421
1
    data_length = dissect_nvme_tcp_data_pdu(tvb, pinfo, offset, nvme_tcp_tree);
422
423
    /* This can identify our packet uniquely  */
424
1
    if (!PINFO_FD_VISITED(pinfo)) {
425
1
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
426
1
                &queue->n_q_ctx, cmd_id);
427
1
        if (!cmd_ctx) {
428
1
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
429
1
                                data_length, ENC_NA);
430
1
            return;
431
1
        }
432
433
        /* In order to later lookup for command context lets add this command
434
         * to data responses */
435
0
        cmd_ctx->n_cmd_ctx.data_tr_pkt_num[0] = pinfo->num;
436
0
        nvme_add_data_tr_pkt(&queue->n_q_ctx, &cmd_ctx->n_cmd_ctx, cmd_id, pinfo->num);
437
0
    } else {
438
0
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_data_tr_pkt(&queue->n_q_ctx,
439
0
                                cmd_id, pinfo->num);
440
0
        if (!cmd_ctx) {
441
0
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
442
0
                                data_length, ENC_NA);
443
0
            return;
444
0
        }
445
0
    }
446
447
0
    nvme_publish_to_cmd_link(nvme_tcp_tree, tvb,
448
0
            hf_nvme_tcp_cmd_pkt, &cmd_ctx->n_cmd_ctx);
449
450
0
    if (cmd_ctx->n_cmd_ctx.fabric) {
451
0
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
452
0
        proto_item_append_text(nvme_tcp_ti,
453
0
                ", C2HData Fabrics Type: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
454
0
                cmd_string, cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id, data_length);
455
0
    } else {
456
0
        cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
457
0
                queue->n_q_ctx.qid);
458
0
        proto_item_append_text(nvme_tcp_ti,
459
0
                ", C2HData Opcode: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
460
0
                cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id, data_length);
461
0
    }
462
463
0
    nvme_data = tvb_new_subset_remaining(tvb, NVME_TCP_DATA_PDU_SIZE + data_offset);
464
465
0
    dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
466
0
            &cmd_ctx->n_cmd_ctx, data_length, false);
467
468
0
}
469
470
/* Build the two-part wmem tree key {frame number, command id} used to
 * index the per-queue data-request map; key[2] is the NULL terminator. */
static void nvme_tcp_build_cmd_key(uint32_t *frame_num, uint32_t *cmd_id, wmem_tree_key_t *key)
{
    key[0].length = 1;
    key[0].key = frame_num;
    key[1].length = 1;
    key[1].key = cmd_id;
    key[2].length = 0;
    key[2].key = NULL;
}
479
480
/* Remember that the current frame carries the data request for `cmd_id`,
 * keyed by (frame number, cmd id) so it can be found on redissection. */
static void nvme_tcp_add_data_request(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
        struct nvme_tcp_cmd_ctx *cmd_ctx, uint16_t cmd_id)
{
    uint32_t cmd_id_key = cmd_id;
    wmem_tree_key_t cmd_key[3];

    nvme_tcp_build_cmd_key(&pinfo->num, &cmd_id_key, cmd_key);
    cmd_ctx->n_cmd_ctx.data_req_pkt_num = pinfo->num;
    cmd_ctx->n_cmd_ctx.data_tr_pkt_num[0] = 0;
    wmem_tree_insert32_array(q_ctx->data_requests, cmd_key, (void *)cmd_ctx);
}
491
492
static struct nvme_tcp_cmd_ctx* nvme_tcp_lookup_data_request(packet_info *pinfo,
493
        struct nvme_q_ctx *q_ctx,
494
        uint16_t cmd_id)
495
0
{
496
0
    wmem_tree_key_t cmd_key[3];
497
0
    uint32_t cmd_id_key = cmd_id;
498
499
0
    nvme_tcp_build_cmd_key(&pinfo->num, &cmd_id_key, cmd_key);
500
0
    return (struct nvme_tcp_cmd_ctx*)wmem_tree_lookup32_array(q_ctx->data_requests, cmd_key);
501
0
}
502
503
static void
504
dissect_nvme_tcp_h2c_data(tvbuff_t *tvb,
505
                          packet_info *pinfo,
506
                          proto_tree *root_tree,
507
                          proto_tree *nvme_tcp_tree,
508
                          proto_item *nvme_tcp_ti,
509
                          struct nvme_tcp_q_ctx *queue,
510
                          int offset,
511
                          uint32_t data_offset)
512
1
{
513
1
    struct nvme_tcp_cmd_ctx *cmd_ctx;
514
1
    uint16_t cmd_id;
515
1
    uint32_t data_length;
516
1
    tvbuff_t *nvme_data;
517
1
    const char *cmd_string;
518
519
1
    cmd_id = tvb_get_uint16(tvb, offset, ENC_LITTLE_ENDIAN);
520
1
    data_length = dissect_nvme_tcp_data_pdu(tvb, pinfo, offset, nvme_tcp_tree);
521
522
1
    if (!PINFO_FD_VISITED(pinfo)) {
523
0
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
524
0
                &queue->n_q_ctx, cmd_id);
525
0
        if (!cmd_ctx) {
526
0
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
527
0
                        data_length, ENC_NA);
528
0
            return;
529
0
        }
530
531
        /* Fill this for "adding data request call,
532
         * this will be the key to fetch data request later */
533
0
        nvme_tcp_add_data_request(pinfo, &queue->n_q_ctx, cmd_ctx, cmd_id);
534
1
    } else {
535
1
        cmd_ctx = nvme_tcp_lookup_data_request(pinfo, &queue->n_q_ctx, cmd_id);
536
1
        if (!cmd_ctx) {
537
0
            proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
538
0
                        data_length, ENC_NA);
539
0
            return;
540
0
        }
541
1
    }
542
543
1
    nvme_publish_to_cmd_link(nvme_tcp_tree, tvb,
544
1
                hf_nvme_tcp_cmd_pkt, &cmd_ctx->n_cmd_ctx);
545
546
    /* fabrics commands should not have h2cdata*/
547
1
    if (cmd_ctx->n_cmd_ctx.fabric) {
548
0
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
549
0
        proto_item_append_text(nvme_tcp_ti,
550
0
                ", H2CData Fabrics Type: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
551
0
                cmd_string, cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype, cmd_id, data_length);
552
0
        proto_tree_add_item(root_tree, hf_nvme_tcp_unknown_data, tvb, offset + 16,
553
0
                    data_length, ENC_NA);
554
0
        return;
555
0
    }
556
557
1
    cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
558
1
            queue->n_q_ctx.qid);
559
1
    proto_item_append_text(nvme_tcp_ti,
560
1
            ", H2CData Opcode: %s (0x%02x), Cmd ID: 0x%04x, Len: %u",
561
1
            cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id, data_length);
562
563
1
    nvme_data = tvb_new_subset_remaining(tvb, NVME_TCP_DATA_PDU_SIZE + data_offset);
564
1
    dissect_nvme_data_response(nvme_data, pinfo, root_tree, &queue->n_q_ctx,
565
1
            &cmd_ctx->n_cmd_ctx, data_length, false);
566
1
}
567
568
static void
569
dissect_nvme_tcp_h2ctermreq(tvbuff_t *tvb, packet_info *pinfo,
570
                            proto_tree *tree, uint32_t packet_len, int offset)
571
0
{
572
0
    proto_item *tf;
573
0
    proto_item *h2ctermreq_tree;
574
0
    uint16_t fes;
575
576
0
    col_set_str(pinfo->cinfo, COL_INFO,
577
0
                "Host to Controller Termination Request");
578
0
    tf = proto_tree_add_item(tree, hf_nvme_tcp_h2ctermreq,
579
0
                             tvb, offset, 8, ENC_NA);
580
0
    h2ctermreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);
581
582
0
    proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_fes,
583
0
                        tvb, offset + 8, 2, ENC_LITTLE_ENDIAN);
584
0
    fes = tvb_get_uint16(tvb, offset + 8, ENC_LITTLE_ENDIAN);
585
0
    switch (fes) {
586
0
    case NVME_TCP_FES_INVALID_PDU_HDR:
587
0
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_phfo,
588
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
589
0
        break;
590
0
    case NVME_TCP_FES_HDR_DIGEST_ERR:
591
0
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_phd,
592
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
593
0
        break;
594
0
    case NVME_TCP_FES_UNSUPPORTED_PARAM:
595
0
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_upfo,
596
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
597
0
        break;
598
0
    default:
599
0
        proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_reserved,
600
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
601
0
        break;
602
0
    }
603
0
    proto_tree_add_item(h2ctermreq_tree, hf_nvme_tcp_h2ctermreq_data,
604
0
                        tvb, offset + 24, packet_len - 24, ENC_NA);
605
0
}
606
607
static void
608
dissect_nvme_tcp_c2htermreq(tvbuff_t *tvb, packet_info *pinfo,
609
                            proto_tree *tree, uint32_t packet_len, int offset)
610
0
{
611
0
    proto_item *tf;
612
0
    proto_item *c2htermreq_tree;
613
0
    uint16_t fes;
614
615
0
    col_set_str(pinfo->cinfo, COL_INFO,
616
0
                "Controller to Host Termination Request");
617
0
    tf = proto_tree_add_item(tree, hf_nvme_tcp_c2htermreq,
618
0
                             tvb, offset, 8, ENC_NA);
619
0
    c2htermreq_tree = proto_item_add_subtree(tf, ett_nvme_tcp);
620
621
0
    proto_tree_add_item(tree, hf_nvme_tcp_c2htermreq_fes, tvb, offset + 8, 2,
622
0
                        ENC_LITTLE_ENDIAN);
623
0
    fes = tvb_get_uint16(tvb, offset + 8, ENC_LITTLE_ENDIAN);
624
0
    switch (fes) {
625
0
    case NVME_TCP_FES_INVALID_PDU_HDR:
626
0
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_phfo,
627
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
628
0
        break;
629
0
    case NVME_TCP_FES_HDR_DIGEST_ERR:
630
0
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_phd,
631
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
632
0
        break;
633
0
    case NVME_TCP_FES_UNSUPPORTED_PARAM:
634
0
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_upfo,
635
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
636
0
        break;
637
0
    default:
638
0
        proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_reserved,
639
0
                            tvb, offset + 10, 4, ENC_LITTLE_ENDIAN);
640
0
        break;
641
0
    }
642
0
    proto_tree_add_item(c2htermreq_tree, hf_nvme_tcp_c2htermreq_data,
643
0
                        tvb, offset + 24, packet_len - 24, ENC_NA);
644
0
}
645
646
static void
647
dissect_nvme_tcp_cqe(tvbuff_t *tvb,
648
                     packet_info *pinfo,
649
                     proto_tree *root_tree,
650
                     proto_tree *nvme_tree,
651
                     proto_item *ti,
652
                     struct nvme_tcp_q_ctx *queue,
653
                     int offset)
654
1
{
655
1
    struct nvme_tcp_cmd_ctx *cmd_ctx;
656
1
    uint16_t cmd_id;
657
1
    const char *cmd_string;
658
659
1
    cmd_id = tvb_get_uint16(tvb, offset + 12, ENC_LITTLE_ENDIAN);
660
661
    /* wireshark will dissect packet several times when display is refreshed
662
     * we need to track state changes only once */
663
1
    if (!PINFO_FD_VISITED(pinfo)) {
664
1
        cmd_ctx = (struct nvme_tcp_cmd_ctx*) nvme_lookup_cmd_in_pending_list(
665
1
                &queue->n_q_ctx, cmd_id);
666
1
        if (!cmd_ctx || cmd_ctx->n_cmd_ctx.cqe_pkt_num) {
667
1
            proto_tree_add_item(nvme_tree, hf_nvme_tcp_unknown_data, tvb, offset,
668
1
                                NVME_FABRIC_CQE_SIZE, ENC_NA);
669
1
            return;
670
1
        }
671
672
0
        cmd_ctx->n_cmd_ctx.cqe_pkt_num = pinfo->num;
673
0
        nvme_add_cmd_cqe_to_done_list(&queue->n_q_ctx, &cmd_ctx->n_cmd_ctx,
674
0
                cmd_id);
675
676
0
    } else {
677
0
        cmd_ctx = (struct nvme_tcp_cmd_ctx *) nvme_lookup_cmd_in_done_list(pinfo,
678
0
                                                                           &queue->n_q_ctx, cmd_id);
679
0
        if (!cmd_ctx) {
680
0
            proto_tree_add_item(nvme_tree, hf_nvme_tcp_unknown_data, tvb, offset,
681
0
                                NVME_FABRIC_CQE_SIZE, ENC_NA);
682
0
            return;
683
0
        }
684
0
    }
685
686
0
    nvme_update_cmd_end_info(pinfo, &cmd_ctx->n_cmd_ctx);
687
688
0
    if (cmd_ctx->n_cmd_ctx.fabric) {
689
0
        cmd_string = get_nvmeof_cmd_string(cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype);
690
0
        proto_item_append_text(ti,
691
0
                ", Cqe Fabrics Cmd: %s (0x%02x) Cmd ID: 0x%04x", cmd_string,
692
0
               cmd_ctx->n_cmd_ctx.cmd_ctx.fabric_cmd.fctype , cmd_id);
693
694
0
        dissect_nvmeof_fabric_cqe(tvb, pinfo, nvme_tree, &cmd_ctx->n_cmd_ctx, offset);
695
0
    } else {
696
0
        tvbuff_t *nvme_tvb;
697
0
        proto_item_set_len(ti, NVME_TCP_HEADER_SIZE);
698
0
        cmd_string = nvme_get_opcode_string(cmd_ctx->n_cmd_ctx.opcode,
699
0
                queue->n_q_ctx.qid);
700
701
0
        proto_item_append_text(ti, ", Cqe NVMe Cmd: %s (0x%02x) Cmd ID: 0x%04x",
702
0
                cmd_string, cmd_ctx->n_cmd_ctx.opcode, cmd_id);
703
        /* get incapsuled nvme command */
704
0
        nvme_tvb = tvb_new_subset_remaining(tvb, NVME_TCP_HEADER_SIZE);
705
0
        dissect_nvme_cqe(nvme_tvb, pinfo, root_tree, &queue->n_q_ctx, &cmd_ctx->n_cmd_ctx);
706
0
    }
707
0
}
708
709
static void
710
dissect_nvme_tcp_r2t(tvbuff_t *tvb,
711
                     packet_info *pinfo,
712
                     int offset,
713
                     proto_tree *tree)
714
0
{
715
0
    proto_item *tf;
716
0
    proto_item *r2t_tree;
717
718
0
    tf = proto_tree_add_item(tree, hf_nvme_tcp_r2t_pdu, tvb, offset, -1,
719
0
            ENC_NA);
720
0
    r2t_tree = proto_item_add_subtree(tf, ett_nvme_tcp);
721
722
0
    col_append_sep_fstr(pinfo->cinfo, COL_INFO, " | ", "Ready To Transfer");
723
724
0
    proto_tree_add_item(r2t_tree, hf_nvme_fabrics_cmd_cid, tvb, offset, 2,
725
0
            ENC_LITTLE_ENDIAN);
726
0
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_pdu_ttag, tvb, offset + 2, 2,
727
0
            ENC_LITTLE_ENDIAN);
728
0
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_offset, tvb, offset + 4, 4,
729
0
            ENC_LITTLE_ENDIAN);
730
0
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_length, tvb, offset + 8, 4,
731
0
            ENC_LITTLE_ENDIAN);
732
0
    proto_tree_add_item(r2t_tree, hf_nvme_tcp_r2t_resvd, tvb, offset + 12, 4,
733
0
            ENC_NA);
734
0
}
735
736
/* Dissect one complete NVMe/TCP PDU (called by tcp_dissect_pdus once
 * reassembly has produced a full PDU).
 *
 * Parses the 8-byte common header (type, flags, hlen, pdo, plen), handles
 * optional header/data digests, then dispatches on the PDU type to the
 * per-type dissector. Per-conversation queue state (struct nvme_tcp_q_ctx)
 * is created on first sight and stored on the conversation.
 *
 * Returns tvb_reported_length(tvb) in all paths, including malformed PDUs.
 */
static int
dissect_nvme_tcp_pdu(tvbuff_t *tvb,
                     packet_info *pinfo,
                     proto_tree *tree,
                     void* data _U_)
{
    conversation_t *conversation;
    struct nvme_tcp_q_ctx *q_ctx;
    proto_item *ti;
    int offset = 0;
    int nvme_tcp_pdu_offset;
    proto_tree *nvme_tcp_tree;
    unsigned packet_type;
    uint8_t hlen, pdo;
    uint8_t pdu_flags;
    uint32_t plen;
    uint32_t incapsuled_data_size;
    uint32_t pdu_data_offset = 0;   /* extra bytes before data: header digest, if present */

    conversation = find_or_create_conversation(pinfo);
    q_ctx = (struct nvme_tcp_q_ctx *)
            conversation_get_proto_data(conversation, proto_nvme_tcp);

    /* First PDU on this conversation: allocate file-scoped queue context. */
    if (!q_ctx) {
        q_ctx = wmem_new0(wmem_file_scope(), struct nvme_tcp_q_ctx);
        q_ctx->n_q_ctx.pending_cmds = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.done_cmds = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.data_requests = wmem_tree_new(wmem_file_scope());
        q_ctx->n_q_ctx.data_responses = wmem_tree_new(wmem_file_scope());
        /* Initially set to non-0 so that by default queues are io queues
         * this is required to be able to dissect correctly even
         * if we miss connect command*/
        q_ctx->n_q_ctx.qid = UINT16_MAX;
        conversation_add_proto_data(conversation, proto_nvme_tcp, q_ctx);
    }

    ti = proto_tree_add_item(tree, proto_nvme_tcp, tvb, 0, -1, ENC_NA);
    nvme_tcp_tree = proto_item_add_subtree(ti, ett_nvme_tcp);

    /* UINT16_MAX means "qid unknown" (connect command not seen). */
    if (q_ctx->n_q_ctx.qid != UINT16_MAX)
        nvme_publish_qid(nvme_tcp_tree, hf_nvme_fabrics_cmd_qid,
                q_ctx->n_q_ctx.qid);

    /* Common header: byte 0 = PDU type. */
    packet_type = tvb_get_uint8(tvb, offset);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_type, tvb, offset, 1,
            ENC_NA);

    /* Byte 1 = PDU-specific flags (HDGST/DDGST/LAST/SUCCESS bits). */
    pdu_flags = tvb_get_uint8(tvb, offset + 1);
    proto_tree_add_bitmask_value(nvme_tcp_tree, tvb, offset + 1, hf_nvme_tcp_flags,
            ett_nvme_tcp, nvme_tcp_pdu_flags, (uint64_t)pdu_flags);

    /* Byte 2 = header length. NOTE(review): fetched with tvb_get_int8 even
     * though the field is unsigned — harmless for spec-valid values (< 128)
     * but worth confirming. */
    hlen = tvb_get_int8(tvb, offset + 2);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_hlen, tvb, offset + 2, 1,
            ENC_NA);

    /* Byte 3 = PDU data offset (pdo); bytes 4-7 = total PDU length (plen). */
    pdo = tvb_get_int8(tvb, offset + 3);
    proto_tree_add_uint(nvme_tcp_tree, hf_nvme_tcp_pdo, tvb, offset + 3, 1,
            pdo);
    plen = tvb_get_letohl(tvb, offset + 4);
    proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_plen, tvb, offset + 4, 4,
            ENC_LITTLE_ENDIAN);
    col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_TCP);

    /* Optional header digest: CRC32C over the header, stored right after it. */
    if (pdu_flags & NVME_TCP_F_HDGST) {
        unsigned hdgst_flags = PROTO_CHECKSUM_NO_FLAGS;
        uint32_t crc = 0;

        if (nvme_tcp_check_hdgst) {
            hdgst_flags = PROTO_CHECKSUM_VERIFY;
            crc = ~crc32c_tvb_offset_calculate(tvb, 0, hlen, ~0);
        }
        proto_tree_add_checksum(nvme_tcp_tree, tvb, hlen, hf_nvme_tcp_hdgst,
                    hf_nvme_tcp_hdgst_status, NULL, pinfo,
                    crc, ENC_NA, hdgst_flags);
        pdu_data_offset = NVME_TCP_DIGEST_LENGTH;
    }

    nvme_tcp_pdu_offset = offset + NVME_TCP_HEADER_SIZE;
    /* Payload size = total length minus header and (optional) header digest. */
    incapsuled_data_size = plen - hlen - pdu_data_offset;

    /* check for overflow (invalid packet)*/
    if (incapsuled_data_size > tvb_reported_length(tvb)) {
        proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data,
                               tvb, NVME_TCP_HEADER_SIZE, -1, ENC_NA);
        return tvb_reported_length(tvb);
    }

    /* Optional data digest: CRC32C over the payload, stored at the PDU tail. */
    if (pdu_flags & NVME_TCP_F_DDGST) {
        unsigned ddgst_flags = PROTO_CHECKSUM_NO_FLAGS;
        uint32_t crc = 0;

        /* Check that data has enough space (invalid packet) */
        if (incapsuled_data_size <= NVME_TCP_DIGEST_LENGTH) {
            proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data,
                                           tvb, NVME_TCP_HEADER_SIZE, -1, ENC_NA);
            return tvb_reported_length(tvb);
        }

        incapsuled_data_size -= NVME_TCP_DIGEST_LENGTH;
        if (nvme_tcp_check_ddgst) {
            ddgst_flags = PROTO_CHECKSUM_VERIFY;
            /* NOTE(review): the CRC is computed starting at 'pdo'; presumably
             * pdo points at the payload start here — confirm against captures
             * where pdo is 0 (reserved for most PDU types). */
            crc = ~crc32c_tvb_offset_calculate(tvb, pdo,
                                               incapsuled_data_size, ~0);
        }
        proto_tree_add_checksum(nvme_tcp_tree, tvb,
                         plen - NVME_TCP_DIGEST_LENGTH, hf_nvme_tcp_ddgst,
                         hf_nvme_tcp_ddgst_status, NULL, pinfo,
                         crc, ENC_NA, ddgst_flags);
    }

    /* Dispatch on the PDU type parsed above. */
    switch (packet_type) {
    case nvme_tcp_icreq:
        dissect_nvme_tcp_icreq(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        proto_item_set_len(ti, hlen);
        break;
    case nvme_tcp_icresp:
        dissect_nvme_tcp_icresp(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        proto_item_set_len(ti, hlen);
        break;
    case nvme_tcp_cmd:
        dissect_nvme_tcp_command(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset, incapsuled_data_size, pdu_data_offset);
        break;
    case nvme_tcp_rsp:
        dissect_nvme_tcp_cqe(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset);
        proto_item_set_len(ti, NVME_TCP_HEADER_SIZE);
        break;
    case nvme_tcp_c2h_data:
        dissect_nvme_tcp_c2h_data(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset, pdu_data_offset);
        proto_item_set_len(ti, NVME_TCP_DATA_PDU_SIZE);
        break;
    case nvme_tcp_h2c_data:
        dissect_nvme_tcp_h2c_data(tvb, pinfo, tree, nvme_tcp_tree, ti, q_ctx,
                nvme_tcp_pdu_offset, pdu_data_offset);
        proto_item_set_len(ti, NVME_TCP_DATA_PDU_SIZE);
        break;
    case nvme_tcp_r2t:
        dissect_nvme_tcp_r2t(tvb, pinfo, nvme_tcp_pdu_offset, nvme_tcp_tree);
        break;
    case nvme_tcp_h2c_term:
        dissect_nvme_tcp_h2ctermreq(tvb, pinfo, tree, plen, offset);
        break;
    case nvme_tcp_c2h_term:
        dissect_nvme_tcp_c2htermreq(tvb, pinfo, tree, plen, offset);
        break;
    default:
        // TODO: nvme_tcp_kdreq, nvme_tcp_kdresp
        proto_tree_add_item(nvme_tcp_tree, hf_nvme_tcp_unknown_data, tvb,
                offset, plen, ENC_NA);
        break;
    }

    return tvb_reported_length(tvb);
}
892
893
static int
894
dissect_nvme_tcp(tvbuff_t *tvb,
895
                 packet_info *pinfo,
896
                 proto_tree *tree,
897
                 void *data)
898
14
{
899
14
    col_clear(pinfo->cinfo, COL_INFO);
900
14
    col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_TCP);
901
14
    tcp_dissect_pdus(tvb, pinfo, tree, true, NVME_TCP_HEADER_SIZE,
902
14
            get_nvme_tcp_pdu_len, dissect_nvme_tcp_pdu, data);
903
904
14
    return tvb_reported_length(tvb);
905
14
}
906
907
static bool
908
test_nvme(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_)
909
23
{
910
    /* This is not the strongest heuristic, but the port is IANA assigned,
911
     * so this is not a normal heuristic dissector but simply to distinguish
912
     * between NVMe/TCP and NVMe/TLS/TCP, and also to detect PDU starts.
913
     */
914
23
    if (tvb_captured_length_remaining(tvb, offset) < NVME_TCP_HEADER_SIZE) {
915
2
        return false;
916
2
    }
917
918
21
    if (tvb_get_uint8(tvb, offset) > NVMET_MAX_PDU_TYPE) {
919
5
        return false;
920
5
    }
921
922
16
    offset += 2;
923
16
    if (tvb_get_uint8(tvb, offset) < NVME_TCP_HEADER_SIZE) {
924
        // Header length - we could strengthen by using the PDU type.
925
3
        return false;
926
3
    }
927
928
    // Next byte is PDU Data Offset. Reserved in most types. (Does that
929
    // mean zero? That would strengthen the heuristic.)
930
931
13
    offset += 2;
932
13
    if (tvb_get_uint32(tvb, offset, ENC_LITTLE_ENDIAN) < NVME_TCP_HEADER_SIZE) {
933
        // PDU Length (inc. header) - could strengthen by using the PDU type.
934
1
        return false;
935
1
    }
936
937
12
    return true;
938
13
}
939
940
static int
941
dissect_nvme_tcp_heur(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
942
23
{
943
    /* NVMe/TCP allows PDUs to span TCP segments (see Figure 5 of the NVMe/TCP
944
     * Transport Specification.) Also, some connections are over TLS.
945
     * Luckily, the PDU types for NVMe/TCP occupy the first byte, same as
946
     * the Content Type for TLS Records, and while these PDU types go to 11,
947
     * TLS Content Types start at 20 (and won't change, to enable multiplexing,
948
     * see RFC 9443.)
949
     *
950
     * So if this doesn't look like the start of a NVMe/TCP PDU, reject it.
951
     * It might be TLS, or it might be the middle of a PDU.
952
     */
953
23
    if (!test_nvme(pinfo, tvb, 0, data)) {
954
11
        return 0;
955
        /* The TLS heuristic dissector should catch the TLS version. */
956
11
    }
957
958
    /* The start of a PDU. Set the other handle for this connection.
959
     * We can call tcp_dissect_pdus safely starting from here.
960
     */
961
12
    conversation_t *conversation = find_or_create_conversation(pinfo);
962
12
    conversation_set_dissector_from_frame_number(conversation, pinfo->num, nvmet_tls_handle);
963
964
12
    return dissect_nvme_tcp(tvb, pinfo, tree, data);
965
23
}
966
967
14
/* Register the NVMe/TCP protocol: all display-filter header fields, the
 * subtree array, and the two dissector handles — "nvme-tcp" (heuristic,
 * plain TCP) and "nvme-tls" (non-heuristic, used over decrypted TLS). */
void proto_register_nvme_tcp(void) {

    static hf_register_info hf[] = {
       /* Common PDU header fields */
       { &hf_nvme_tcp_type,
           { "Pdu Type", "nvme-tcp.type",
             FT_UINT8, BASE_DEC, VALS(nvme_tcp_pdu_type_vals),
             0x0, NULL, HFILL } },
       { &hf_nvme_tcp_flags,
           { "Pdu Specific Flags", "nvme-tcp.flags",
             FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL } },
       { &hf_pdu_flags_hdgst,
           { "PDU Header Digest", "nvme-tcp.flags.pdu.hdgst",
             FT_BOOLEAN, 8, TFS(&tfs_set_notset),
             NVME_TCP_F_HDGST, NULL, HFILL} },
       { &hf_pdu_flags_ddgst,
           { "PDU Data Digest", "nvme-tcp.flags.pdu.ddgst",
             FT_BOOLEAN, 8, TFS(&tfs_set_notset),
             NVME_TCP_F_DDGST, NULL, HFILL} },
       { &hf_pdu_flags_data_last,
           { "PDU Data Last", "nvme-tcp.flags.pdu.data_last",
              FT_BOOLEAN, 8, TFS(&tfs_set_notset),
              NVME_TCP_F_DATA_LAST, NULL, HFILL} },
       { &hf_pdu_flags_data_success,
          { "PDU Data Success", "nvme-tcp.flags.pdu.data_success",
            FT_BOOLEAN, 8, TFS(&tfs_set_notset),
            NVME_TCP_F_DATA_SUCCESS, NULL, HFILL} },
       /* Header/data digest values and their verification status */
       { &hf_nvme_tcp_hdgst,
           { "PDU Header Digest", "nvme-tcp.hdgst",
            FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_ddgst,
           { "PDU Data Digest", "nvme-tcp.ddgst",
            FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } },
        { &hf_nvme_tcp_hdgst_status,
          { "Header Digest Status",    "nvme-tcp.hdgst.status",
            FT_UINT8, BASE_NONE, VALS(proto_checksum_vals),
            0x0, NULL, HFILL }},
        { &hf_nvme_tcp_ddgst_status,
          { "Data Digest Status",    "nvme-tcp.ddgst.status",
            FT_UINT8, BASE_NONE, VALS(proto_checksum_vals),
            0x0, NULL, HFILL }},
       { &hf_nvme_tcp_hlen,
           { "Pdu Header Length", "nvme-tcp.hlen",
             FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_pdo,
           { "Pdu Data Offset", "nvme-tcp.pdo",
             FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_plen,
           { "Packet Length", "nvme-tcp.plen",
            FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       /* Initialize Connection Request (ICReq) fields */
       { &hf_nvme_tcp_icreq,
           { "ICReq", "nvme-tcp.icreq",
             FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icreq_pfv,
           { "Pdu Version Format", "nvme-tcp.icreq.pfv",
            FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icreq_maxr2t,
           { "Maximum r2ts per request", "nvme-tcp.icreq.maxr2t",
             FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icreq_hpda,
           { "Host Pdu data alignment", "nvme-tcp.icreq.hpda",
             FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icreq_digest,
           { "Digest Types Enabled", "nvme-tcp.icreq.digest",
             FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       /* Initialize Connection Response (ICResp) fields */
       { &hf_nvme_tcp_icresp,
           { "ICResp", "nvme-tcp.icresp",
             FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icresp_pfv,
           { "Pdu Version Format", "nvme-tcp.icresp.pfv",
             FT_UINT16, BASE_DEC, NULL, 0x0,
             NULL, HFILL } },
       { &hf_nvme_tcp_icresp_cpda,
           { "Controller Pdu data alignment", "nvme-tcp.icresp.cpda",
             FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icresp_digest,
           { "Digest types enabled", "nvme-tcp.icresp.digest",
             FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_icresp_maxdata,
           { "Maximum data capsules per r2t supported", "nvme-tcp.icresp.maxdata",
             FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
       /* NVMe tcp c2h/h2c termreq fields */
       { &hf_nvme_tcp_c2htermreq,
           { "C2HTermReq", "nvme-tcp.c2htermreq",
             FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_c2htermreq_fes,
           { "Fatal error status", "nvme-tcp.c2htermreq.fes",
             FT_UINT16, BASE_HEX, VALS(nvme_tcp_termreq_fes),
             0x0, NULL, HFILL } },
       { &hf_nvme_tcp_c2htermreq_phfo,
           { "PDU header field offset", "nvme-tcp.c2htermreq.phfo",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_c2htermreq_phd,
           { "PDU header digest", "nvme-tcp.c2htermreq.phd",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_c2htermreq_upfo,
           { "Unsupported parameter field offset", "nvme-tcp.c2htermreq.upfo",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_c2htermreq_reserved,
           { "Reserved", "nvme-tcp.c2htermreq.reserved",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_c2htermreq_data,
           { "Terminated PDU header", "nvme-tcp.c2htermreq.data",
             FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq,
           { "H2CTermReq", "nvme-tcp.h2ctermreq",
             FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq_fes,
           { "Fatal error status", "nvme-tcp.h2ctermreq.fes",
             FT_UINT16, BASE_HEX, VALS(nvme_tcp_termreq_fes),
             0x0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq_phfo,
           { "PDU header field offset", "nvme-tcp.h2ctermreq.phfo",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq_phd,
           { "PDU header digest", "nvme-tcp.h2ctermreq.phd",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq_upfo,
           { "Unsupported parameter field offset", "nvme-tcp.h2ctermreq.upfo",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq_reserved,
           { "Reserved", "nvme-tcp.h2ctermreq.reserved",
             FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
       { &hf_nvme_tcp_h2ctermreq_data,
           { "Terminated PDU header", "nvme-tcp.h2ctermreq.data",
             FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL } },
       { &hf_nvme_fabrics_cmd_cid,
           { "Command ID", "nvme-tcp.cmd.cid",
             FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_unknown_data,
           { "Unknown Data", "nvme-tcp.unknown_data",
             FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } },
       /* NVMe command data */
       { &hf_nvme_fabrics_cmd_data,
           { "Data", "nvme-tcp.cmd.data",
             FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
       { &hf_nvme_tcp_cmd_pkt,
            { "Cmd in", "nvme-tcp.cmd_pkt",
              FT_FRAMENUM, BASE_NONE, NULL, 0,
              "The Cmd for this transaction is in this frame", HFILL } },
       { &hf_nvme_fabrics_cmd_qid,
           { "Cmd Qid", "nvme-tcp.cmd.qid",
             FT_UINT16, BASE_HEX, NULL, 0x0,
             "Qid on which command is issued", HFILL } },
      /* NVMe TCP data response */
      { &hf_nvme_tcp_data_pdu,
           { "NVMe/TCP Data PDU", "nvme-tcp.data",
             FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
      { &hf_nvme_tcp_pdu_ttag,
           { "Transfer Tag", "nvme-tcp.ttag",
             FT_UINT16, BASE_HEX, NULL, 0x0,
             "Transfer tag (controller generated)", HFILL } },
      { &hf_nvme_tcp_data_pdu_data_offset,
           { "Data Offset", "nvme-tcp.data.offset",
             FT_UINT32, BASE_DEC, NULL, 0x0,
             "Offset from the start of the command data", HFILL } },
      { &hf_nvme_tcp_data_pdu_data_length,
           { "Data Length", "nvme-tcp.data.length",
             FT_UINT32, BASE_DEC, NULL, 0x0,
             "Length of the data stream", HFILL } },
      { &hf_nvme_tcp_data_pdu_data_resvd,
           { "Reserved", "nvme-tcp.data.rsvd",
             FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } },
      /* NVMEe TCP R2T pdu */
      { &hf_nvme_tcp_r2t_pdu,
           { "R2T", "nvme-tcp.r2t",
              FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
      { &hf_nvme_tcp_r2t_offset,
           { "R2T Offset", "nvme-tcp.r2t.offset",
             FT_UINT32, BASE_DEC, NULL, 0x0,
             "Offset from the start of the command data", HFILL } },
      { &hf_nvme_tcp_r2t_length,
           { "R2T Length", "nvme-tcp.r2t.length",
             FT_UINT32, BASE_DEC, NULL, 0x0,
             "Length of the data stream", HFILL } },
      { &hf_nvme_tcp_r2t_resvd,
           { "Reserved", "nvme-tcp.r2t.rsvd",
             FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }
    };

    static int *ett[] = {
        &ett_nvme_tcp
    };

    proto_nvme_tcp = proto_register_protocol("NVM Express Fabrics TCP",
            NVME_FABRICS_TCP, "nvme-tcp");

    proto_register_field_array(proto_nvme_tcp, hf, array_length(hf));
    proto_register_subtree_array(ett, array_length(ett));

    /* These names actually work for their purpose. Note if we're already
     * over TLS we don't need to do heuristics (it can't be more TLS instead
     * instead, and since we managed to decrypt the TLS we shouldn't have
     * missing frames and thus aren't in the middle of a PDU.)
     */
    nvmet_tcp_handle = register_dissector("nvme-tcp", dissect_nvme_tcp_heur,
            proto_nvme_tcp);
    nvmet_tls_handle = register_dissector_with_description("nvme-tls",
            "NVMe-over-TCP with TLS", dissect_nvme_tcp, proto_nvme_tcp);
}
1166
1167
14
void proto_reg_handoff_nvme_tcp(void) {
1168
14
    module_t *nvme_tcp_module;
1169
14
    nvme_tcp_module = prefs_register_protocol(proto_nvme_tcp, NULL);
1170
14
    range_convert_str(wmem_epan_scope(), &gPORT_RANGE, NVME_TCP_PORT_RANGE,
1171
14
            MAX_TCP_PORT);
1172
14
    prefs_register_range_preference(nvme_tcp_module,
1173
14
                                    "subsystem_ports",
1174
14
                                    "Subsystem Ports Range",
1175
14
                                    "Range of NVMe Subsystem ports"
1176
14
                                    "(default " NVME_TCP_PORT_RANGE ")",
1177
14
                                    &gPORT_RANGE,
1178
14
                                    MAX_TCP_PORT);
1179
14
    prefs_register_bool_preference(nvme_tcp_module, "check_hdgst",
1180
14
        "Validate PDU header digest",
1181
14
        "Whether to validate the PDU header digest or not.",
1182
14
        &nvme_tcp_check_hdgst);
1183
14
    prefs_register_bool_preference(nvme_tcp_module, "check_ddgst",
1184
14
            "Validate PDU data digest",
1185
14
            "Whether to validate the PDU data digest or not.",
1186
14
            &nvme_tcp_check_ddgst);
1187
14
    ssl_dissector_add(0, nvmet_tls_handle);
1188
14
    dissector_add_uint_range("tcp.port", gPORT_RANGE, nvmet_tcp_handle);
1189
14
    dissector_add_uint_range("tls.port", gPORT_RANGE, nvmet_tls_handle);
1190
14
}
1191
1192
/*
1193
 * Editor modelines  -  https://www.wireshark.org/tools/modelines.html
1194
 *
1195
 * Local variables:
1196
 * c-basic-offset: 4
1197
 * tab-width: 8
1198
 * indent-tabs-mode: nil
1199
 * End:
1200
 *
1201
 * vi: set shiftwidth=4 tabstop=8 expandtab:
1202
 * :indentSize=4:tabSize=8:noTabs=true:
1203
 */