Total coverage: 269767 (18%) of 1575583
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-radio-common.c - common radio rx/tx support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/videodev2.h>

#include "vivid-core.h"
#include "vivid-ctrls.h"
#include "vivid-radio-common.h"
#include "vivid-rds-gen.h"

/*
 * These functions are shared between the vivid receiver and transmitter
 * since both use the same frequency bands.
 */

const struct v4l2_frequency_band vivid_radio_bands[TOT_BANDS] = {
	/* Band FM */
	{
		.type = V4L2_TUNER_RADIO,
		.index = 0,
		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
			      V4L2_TUNER_CAP_FREQ_BANDS,
		.rangelow   = FM_FREQ_RANGE_LOW,
		.rangehigh  = FM_FREQ_RANGE_HIGH,
		.modulation = V4L2_BAND_MODULATION_FM,
	},
	/* Band AM */
	{
		.type = V4L2_TUNER_RADIO,
		.index = 1,
		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
		.rangelow   = AM_FREQ_RANGE_LOW,
		.rangehigh  = AM_FREQ_RANGE_HIGH,
		.modulation = V4L2_BAND_MODULATION_AM,
	},
	/* Band SW */
	{
		.type = V4L2_TUNER_RADIO,
		.index = 2,
		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
		.rangelow   = SW_FREQ_RANGE_LOW,
		.rangehigh  = SW_FREQ_RANGE_HIGH,
		.modulation = V4L2_BAND_MODULATION_AM,
	},
};

/*
 * Initialize the RDS generator. If we can loop, then the RDS generator
 * is set up with the values from the RDS TX controls, otherwise it
 * will fill in standard values using one of two alternates.
 */
void vivid_radio_rds_init(struct vivid_dev *dev)
{
	struct vivid_rds_gen *rds = &dev->rds_gen;
	bool alt = dev->radio_rx_rds_use_alternates;

	/* Do nothing, blocks will be filled by the transmitter */
	if (dev->radio_rds_loop && !dev->radio_tx_rds_controls)
		return;

	if (dev->radio_rds_loop) {
		v4l2_ctrl_lock(dev->radio_tx_rds_pi);
		rds->picode = dev->radio_tx_rds_pi->cur.val;
		rds->pty = dev->radio_tx_rds_pty->cur.val;
		rds->mono_stereo = dev->radio_tx_rds_mono_stereo->cur.val;
		rds->art_head = dev->radio_tx_rds_art_head->cur.val;
		rds->compressed = dev->radio_tx_rds_compressed->cur.val;
		rds->dyn_pty = dev->radio_tx_rds_dyn_pty->cur.val;
		rds->ta = dev->radio_tx_rds_ta->cur.val;
		rds->tp = dev->radio_tx_rds_tp->cur.val;
		rds->ms = dev->radio_tx_rds_ms->cur.val;
		strscpy(rds->psname,
			dev->radio_tx_rds_psname->p_cur.p_char,
			sizeof(rds->psname));
		strscpy(rds->radiotext,
			dev->radio_tx_rds_radiotext->p_cur.p_char + alt * 64,
			sizeof(rds->radiotext));
		v4l2_ctrl_unlock(dev->radio_tx_rds_pi);
	} else {
		vivid_rds_gen_fill(rds, dev->radio_rx_freq, alt);
	}
	if (dev->radio_rx_rds_controls) {
		v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, rds->pty);
		v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, rds->ta);
		v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, rds->tp);
		v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, rds->ms);
		v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, rds->psname);
		v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, rds->radiotext);
		if (!dev->radio_rds_loop)
			dev->radio_rx_rds_use_alternates = !dev->radio_rx_rds_use_alternates;
	}

	vivid_rds_generate(rds);
}

/*
 * Calculate the emulated signal quality taking into account the frequency
 * the transmitter is using.
 */
static void vivid_radio_calc_sig_qual(struct vivid_dev *dev)
{
	int mod = 16000;
	int delta = 800;
	int sig_qual, sig_qual_tx = mod;

	/*
	 * For SW and FM there is a channel every 1000 kHz, for AM there is one
	 * every 100 kHz.
	 */
	if (dev->radio_rx_freq <= AM_FREQ_RANGE_HIGH) {
		mod /= 10;
		delta /= 10;
	}
	sig_qual = (dev->radio_rx_freq + delta) % mod - delta;
	if (dev->has_radio_tx)
		sig_qual_tx = dev->radio_rx_freq - dev->radio_tx_freq;
	if (abs(sig_qual_tx) <= abs(sig_qual)) {
		sig_qual = sig_qual_tx;
		/*
		 * Zero the internal rds buffer if we are going to loop
		 * rds blocks.
		 */
		if (!dev->radio_rds_loop && !dev->radio_tx_rds_controls)
			memset(dev->rds_gen.data, 0, sizeof(dev->rds_gen.data));
		dev->radio_rds_loop = dev->radio_rx_freq >= FM_FREQ_RANGE_LOW;
	} else {
		dev->radio_rds_loop = false;
	}
	if (dev->radio_rx_freq <= AM_FREQ_RANGE_HIGH)
		sig_qual *= 10;
	dev->radio_rx_sig_qual = sig_qual;
}

int vivid_radio_g_frequency(struct file *file, const unsigned *pfreq,
			    struct v4l2_frequency *vf)
{
	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = *pfreq;
	return 0;
}

int vivid_radio_s_frequency(struct file *file, unsigned *pfreq,
			    const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned freq;
	unsigned band;

	if (vf->tuner != 0)
		return -EINVAL;

	if (vf->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) / 2)
		band = BAND_FM;
	else if (vf->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) / 2)
		band = BAND_AM;
	else
		band = BAND_SW;

	freq = clamp_t(u32, vf->frequency, vivid_radio_bands[band].rangelow,
					   vivid_radio_bands[band].rangehigh);
	*pfreq = freq;

	/*
	 * For both receiver and transmitter recalculate the signal quality
	 * (since that depends on both frequencies) and re-init the rds
	 * generator.
	 */
	vivid_radio_calc_sig_qual(dev);
	vivid_radio_rds_init(dev);
	return 0;
}
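The band-selection logic in vivid_radio_s_frequency() above picks a band by comparing the requested frequency against the midpoints of the gaps between adjacent bands, then clamps into the chosen band. A minimal userspace sketch of that logic follows; the range constants are hypothetical stand-ins (the driver's real FM/AM/SW ranges come from vivid-radio-common.h), as is set_frequency().

/*
 * Standalone sketch of the band-midpoint selection used by
 * vivid_radio_s_frequency(). The ranges below are invented values,
 * NOT the driver's real constants.
 */
#include <stdio.h>

enum band { BAND_AM, BAND_SW, BAND_FM };

/* hypothetical ranges, low to high: AM < SW < FM */
static const unsigned am_low = 100, am_high = 200;
static const unsigned sw_low = 300, sw_high = 400;
static const unsigned fm_low = 500, fm_high = 600;

static unsigned clamp_u(unsigned v, unsigned lo, unsigned hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/*
 * Pick the band whose range is nearest, using the midpoints of the
 * gaps between bands, then clamp into that band -- mirroring the
 * driver's logic.
 */
static unsigned set_frequency(unsigned freq, enum band *band)
{
	if (freq >= (fm_low + sw_high) / 2)
		*band = BAND_FM;
	else if (freq <= (am_high + sw_low) / 2)
		*band = BAND_AM;
	else
		*band = BAND_SW;

	switch (*band) {
	case BAND_FM: return clamp_u(freq, fm_low, fm_high);
	case BAND_AM: return clamp_u(freq, am_low, am_high);
	default:      return clamp_u(freq, sw_low, sw_high);
	}
}

int main(void)
{
	enum band b;
	unsigned f = set_frequency(450, &b); /* falls in the SW..FM gap */

	printf("band=%d freq=%u\n", b, f);   /* band=2 (FM), freq=500 */
	return 0;
}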
// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
 * Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
 * Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
 * Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
 * Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
 * Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
 */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 *   - Packets are classified on flows.
 *   - This is a Stochastic model (as we use a hash, several flows might
 *     be hashed to the same slot)
 *   - Each flow has a PIE managed queue.
 *   - Flows are linked onto two (Round Robin) lists,
 *     so that new flows have priority on old ones.
 *   - For a given flow, packets are not reordered.
 *   - Drops during enqueue only.
 *   - ECN capability is off by default.
 *   - ECN threshold (if ECN is enabled) is at 10% by default.
 *   - Uses timestamps to calculate queue delay by default.
 */

/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow
 * @tail: last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 flows_cursor;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	reason = SKB_DROP_REASON_QDISC_CONGESTED;

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than the parameter ecn_prob, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	qdisc_drop_reason(skb, sch, to_free, reason);
	return NET_XMIT_CN;
}

static const struct netlink_range_validation fq_pie_q_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM]		= NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}

static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		WRITE_ONCE(q->p_params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65536]");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->p_params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		WRITE_ONCE(q->p_params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));

	if (tb[TCA_FQ_PIE_ALPHA])
		WRITE_ONCE(q->p_params.alpha, nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));

	if (tb[TCA_FQ_PIE_BETA])
		WRITE_ONCE(q->p_params.beta, nla_get_u32(tb[TCA_FQ_PIE_BETA]));

	if (tb[TCA_FQ_PIE_QUANTUM])
		WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));

	if (tb[TCA_FQ_PIE_ECN_PROB])
		WRITE_ONCE(q->ecn_prob,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));

	if (tb[TCA_FQ_PIE_ECN])
		WRITE_ONCE(q->p_params.ecn,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN]));

	if (tb[TCA_FQ_PIE_BYTEMODE])
		WRITE_ONCE(q->p_params.bytemode,
			   nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->p_params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}

static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	int max_cnt, i;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Limit this expensive loop to 2048 flows per round. */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}

static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT,
			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE,
			READ_ONCE(q->p_params.bytemode)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->p_params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.overmemory	= q->overmemory,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage	= q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}

static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_pie");

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");
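The dequeue path above implements deficit round robin across the new_flows/old_flows pair: new flows are served first, and a flow that runs out of deficit is recharged with one quantum and rotated onto the old list. Below is a minimal userspace sketch of that rotation, under simplifying assumptions: fixed-size packets, a NULL-terminated singly linked run list, and empty flows simply removed (the qdisc instead rotates an empty new flow to the old list to prevent starvation). All names here are hypothetical, not kernel API.

/*
 * Userspace sketch of the two-list deficit round robin used by
 * fq_pie_qdisc_dequeue(). Simplified stand-in, not the kernel code.
 */
#include <stdio.h>

#define QUANTUM 1514	/* byte credits granted per rotation */
#define PKT_LEN 1000	/* every packet is this long in the sketch */

struct flow {
	int deficit;		/* remaining byte credits */
	int backlog_pkts;	/* packets waiting in this flow */
	struct flow *next;	/* run list link, NULL-terminated */
};

/*
 * Pop one packet according to the DRR rules; returns the serving flow,
 * or NULL when both lists are empty.
 */
static struct flow *drr_dequeue(struct flow **new_flows, struct flow **old_flows)
{
	for (;;) {
		/* new flows have priority over old ones */
		struct flow **head = *new_flows ? new_flows : old_flows;
		struct flow *f = *head;

		if (!f)
			return NULL;	/* nothing queued at all */

		if (f->deficit <= 0) {
			/* out of credits: recharge and rotate to old list */
			*head = f->next;
			f->next = NULL;
			f->deficit += QUANTUM;

			struct flow **t = old_flows;
			while (*t)	/* append at the tail */
				t = &(*t)->next;
			*t = f;
			continue;
		}
		if (!f->backlog_pkts) {
			/* empty flow: drop it from the schedule (simplified) */
			*head = f->next;
			f->next = NULL;
			continue;
		}
		f->backlog_pkts--;
		f->deficit -= PKT_LEN;
		return f;
	}
}

int main(void)
{
	struct flow a = { QUANTUM, 3, NULL }, b = { QUANTUM, 3, &a };
	struct flow *new_flows = &b, *old_flows = NULL;
	int served = 0;

	while (drr_dequeue(&new_flows, &old_flows))
		served++;
	printf("served %d packets\n", served);	/* prints: served 6 packets */
	return 0;
}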
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DCACHE_H
#define __LINUX_DCACHE_H

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/math.h>
#include <linux/rculist.h>
#include <linux/rculist_bl.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/lockref.h>
#include <linux/stringhash.h>
#include <linux/wait.h>

struct path;
struct file;
struct vfsmount;

/*
 * linux/include/linux/dcache.h
 *
 * Dirent cache data structures
 *
 * (C) Copyright 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

#define IS_ROOT(x) ((x) == (x)->d_parent)

/* The hash is always the low bits of hash_len */
#ifdef __LITTLE_ENDIAN
 #define HASH_LEN_DECLARE u32 hash; u32 len
 #define bytemask_from_count(cnt)	(~(~0ul << (cnt)*8))
#else
 #define HASH_LEN_DECLARE u32 len; u32 hash
 #define bytemask_from_count(cnt)	(~(~0ul >> (cnt)*8))
#endif

/*
 * "quick string" -- eases parameter passing, but more importantly
 * saves "metadata" about the string (ie length and the hash).
 *
 * hash comes first so it snuggles against d_parent in the
 * dentry.
 */
struct qstr {
	union {
		struct {
			HASH_LEN_DECLARE;
		};
		u64 hash_len;
	};
	const unsigned char *name;
};

#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n))

extern const struct qstr empty_name;
extern const struct qstr slash_name;
extern const struct qstr dotdot_name;

/*
 * Try to keep struct dentry aligned on 64 byte cachelines (this will
 * give reasonable cacheline footprint with larger lines without the
 * large memory footprint increase).
 */
#ifdef CONFIG_64BIT
# define DNAME_INLINE_WORDS 5 /* 192 bytes */
#else
# ifdef CONFIG_SMP
#  define DNAME_INLINE_WORDS 9 /* 128 bytes */
# else
#  define DNAME_INLINE_WORDS 11 /* 128 bytes */
# endif
#endif

#define DNAME_INLINE_LEN (DNAME_INLINE_WORDS*sizeof(unsigned long))

union shortname_store {
	unsigned char string[DNAME_INLINE_LEN];
	unsigned long words[DNAME_INLINE_WORDS];
};

#define d_lock	d_lockref.lock
#define d_iname d_shortname.string

struct dentry {
	/* RCU lookup touched fields */
	unsigned int d_flags;		/* protected by d_lock */
	seqcount_spinlock_t d_seq;	/* per dentry seqlock */
	struct hlist_bl_node d_hash;	/* lookup hash list */
	struct dentry *d_parent;	/* parent directory */
	struct qstr d_name;
	struct inode *d_inode;		/* Where the name belongs to - NULL is
					 * negative */
	union shortname_store d_shortname;
	/* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */

	/* Ref lookup also touches following */
	const struct dentry_operations *d_op;
	struct super_block *d_sb;	/* The root of the dentry tree */
	unsigned long d_time;		/* used by d_revalidate */
	void *d_fsdata;			/* fs-specific data */
	/* --- cacheline 2 boundary (128 bytes) --- */
	struct lockref d_lockref;	/* per-dentry lock and refcount
					 * keep separate from RCU lookup area if
					 * possible!
					 */

	union {
		struct list_head d_lru;		/* LRU list */
		wait_queue_head_t *d_wait;	/* in-lookup ones only */
	};
	struct hlist_node d_sib;	/* child of parent list */
	struct hlist_head d_children;	/* our children */
	/*
	 * d_alias and d_rcu can share memory
	 */
	union {
		struct hlist_node d_alias;	/* inode alias list */
		struct hlist_bl_node d_in_lookup_hash;	/* only for in-lookup ones */
		struct rcu_head d_rcu;
	} d_u;
};

/*
 * dentry->d_lock spinlock nesting subclasses:
 *
 * 0: normal
 * 1: nested
 */
enum dentry_d_lock_class {
	DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
	DENTRY_D_LOCK_NESTED
};

enum d_real_type {
	D_REAL_DATA,
	D_REAL_METADATA,
};

struct dentry_operations {
	int (*d_revalidate)(struct inode *, const struct qstr *,
			    struct dentry *, unsigned int);
	int (*d_weak_revalidate)(struct dentry *, unsigned int);
	int (*d_hash)(const struct dentry *, struct qstr *);
	int (*d_compare)(const struct dentry *,
			 unsigned int, const char *, const struct qstr *);
	int (*d_delete)(const struct dentry *);
	int (*d_init)(struct dentry *);
	void (*d_release)(struct dentry *);
	void (*d_prune)(struct dentry *);
	void (*d_iput)(struct dentry *, struct inode *);
	char *(*d_dname)(struct dentry *, char *, int);
	struct vfsmount *(*d_automount)(struct path *);
	int (*d_manage)(const struct path *, bool);
	struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
	bool (*d_unalias_trylock)(const struct dentry *);
	void (*d_unalias_unlock)(const struct dentry *);
} ____cacheline_aligned;

/*
 * Locking rules for dentry_operations callbacks are to be found in
 * Documentation/filesystems/locking.rst. Keep it updated!
 *
 * Further descriptions are found in Documentation/filesystems/vfs.rst.
 * Keep it updated too!
 */

/* d_flags entries */
#define DCACHE_OP_HASH			BIT(0)
#define DCACHE_OP_COMPARE		BIT(1)
#define DCACHE_OP_REVALIDATE		BIT(2)
#define DCACHE_OP_DELETE		BIT(3)
#define DCACHE_OP_PRUNE			BIT(4)

#define DCACHE_DISCONNECTED		BIT(5)
     /* This dentry is possibly not currently connected to the dcache tree, in
      * which case its parent will either be itself, or will have this flag as
      * well. nfsd will not use a dentry with this bit set, but will first
      * endeavour to clear the bit either by discovering that it is connected,
      * or by performing lookup operations. Any filesystem which supports
      * nfsd_operations MUST have a lookup function which, if it finds a
      * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
      * dentry into place and return that dentry rather than the passed one,
      * typically using d_splice_alias. */

#define DCACHE_REFERENCED		BIT(6) /* Recently used, don't discard. */

#define DCACHE_DONTCACHE		BIT(7) /* Purge from memory on final dput() */

#define DCACHE_CANT_MOUNT		BIT(8)
#define DCACHE_GENOCIDE			BIT(9)
#define DCACHE_SHRINK_LIST		BIT(10)

#define DCACHE_OP_WEAK_REVALIDATE	BIT(11)

#define DCACHE_NFSFS_RENAMED		BIT(12)
     /* this dentry has been "silly renamed" and has to be deleted on the last
      * dput() */
#define DCACHE_FSNOTIFY_PARENT_WATCHED	BIT(14)
     /* Parent inode is watched by some fsnotify listener */

#define DCACHE_DENTRY_KILLED		BIT(15)

#define DCACHE_MOUNTED			BIT(16) /* is a mountpoint */
#define DCACHE_NEED_AUTOMOUNT		BIT(17) /* handle automount on this dir */
#define DCACHE_MANAGE_TRANSIT		BIT(18) /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
	(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)

#define DCACHE_LRU_LIST			BIT(19)

#define DCACHE_ENTRY_TYPE		(7 << 20) /* bits 20..22 are for storing type: */
#define DCACHE_MISS_TYPE		(0 << 20) /* Negative dentry */
#define DCACHE_WHITEOUT_TYPE		(1 << 20) /* Whiteout dentry (stop pathwalk) */
#define DCACHE_DIRECTORY_TYPE		(2 << 20) /* Normal directory */
#define DCACHE_AUTODIR_TYPE		(3 << 20) /* Lookupless directory (presumed automount) */
#define DCACHE_REGULAR_TYPE		(4 << 20) /* Regular file type */
#define DCACHE_SPECIAL_TYPE		(5 << 20) /* Other file type */
#define DCACHE_SYMLINK_TYPE		(6 << 20) /* Symlink */

#define DCACHE_NOKEY_NAME		BIT(25) /* Encrypted name encoded without key */
#define DCACHE_OP_REAL			BIT(26)

#define DCACHE_PAR_LOOKUP		BIT(28) /* being looked up (with parent locked shared) */
#define DCACHE_DENTRY_CURSOR		BIT(29)
#define DCACHE_NORCU			BIT(30) /* No RCU delay for freeing */

extern seqlock_t rename_lock;

/*
 * These are the low-level FS interfaces to the dcache..
 */
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);

/* allocate/de-allocate */
extern struct dentry *d_alloc(struct dentry *, const struct qstr *);
extern struct dentry *d_alloc_anon(struct super_block *);
extern struct dentry *d_alloc_parallel(struct dentry *, const struct qstr *,
				       wait_queue_head_t *);
extern struct dentry *d_splice_alias(struct inode *, struct dentry *);
extern struct dentry *d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
			const struct qstr *name);
extern struct dentry *d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry *d_obtain_alias(struct inode *);
extern struct dentry *d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void d_invalidate(struct dentry *);

/* only used at mount-time */
extern struct dentry *d_make_root(struct inode *);

extern void d_mark_tmpfile(struct file *, struct inode *);
extern void d_tmpfile(struct file *, struct inode *);

extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);

extern struct dentry *d_find_alias_rcu(struct inode *);

/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);

/*
 * This adds the entry to the hash queues.
 */
extern void d_rehash(struct dentry *);

extern void d_add(struct dentry *, struct inode *);

/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);

extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);

static inline unsigned d_count(const struct dentry *dentry)
{
	return dentry->d_lockref.count;
}

ino_t d_parent_ino(struct dentry *dentry);

/*
 * helper function for dentry_operations.d_dname() members
 */
extern __printf(3, 4)
char *dynamic_dname(char *, int, const char *, ...);

extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *dentry_path_raw(const struct dentry *, char *, int);
extern char *dentry_path(const struct dentry *, char *, int);

/* Allocation counts.. */

/**
 * dget_dlock - get a reference to a dentry
 * @dentry: dentry to get a reference to
 *
 * Given a live dentry, increment the reference count and return the dentry.
 * Caller must hold @dentry->d_lock. Making sure that dentry is alive is
 * caller's responsibility. There are many conditions sufficient to guarantee
 * that; e.g. anything with non-negative refcount is alive, so's anything
 * hashed, anything positive, anyone's parent, etc.
 */
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
	return dentry;
}

/**
 * dget - get a reference to a dentry
 * @dentry: dentry to get a reference to
 *
 * Given a dentry or %NULL pointer increment the reference count
 * if appropriate and return the dentry. A dentry will not be
 * destroyed when it has references. Conversely, a dentry with
 * no references can disappear for any number of reasons, starting
 * with memory pressure. In other words, that primitive is
 * used to clone an existing reference; using it on something with
 * zero refcount is a bug.
 *
 * NOTE: it will spin if @dentry->d_lock is held. From the deadlock
 * avoidance point of view it is equivalent to spin_lock()/increment
 * refcount/spin_unlock(), so calling it under @dentry->d_lock is
 * always a bug; so's calling it under ->d_lock on any of its descendants.
 */
static inline struct dentry *dget(struct dentry *dentry)
{
	if (dentry)
		lockref_get(&dentry->d_lockref);
	return dentry;
}

extern struct dentry *dget_parent(struct dentry *dentry);

/**
 * d_unhashed - is dentry hashed
 * @dentry: entry to check
 *
 * Returns true if the dentry passed is not currently hashed.
 */
static inline int d_unhashed(const struct dentry *dentry)
{
	return hlist_bl_unhashed(&dentry->d_hash);
}

static inline int d_unlinked(const struct dentry *dentry)
{
	return d_unhashed(dentry) && !IS_ROOT(dentry);
}

static inline int cant_mount(const struct dentry *dentry)
{
	return (dentry->d_flags & DCACHE_CANT_MOUNT);
}

static inline void dont_mount(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_CANT_MOUNT;
	spin_unlock(&dentry->d_lock);
}

extern void __d_lookup_unhash_wake(struct dentry *dentry);

static inline int d_in_lookup(const struct dentry *dentry)
{
	return dentry->d_flags & DCACHE_PAR_LOOKUP;
}

static inline void d_lookup_done(struct dentry *dentry)
{
	if (unlikely(d_in_lookup(dentry)))
		__d_lookup_unhash_wake(dentry);
}

extern void dput(struct dentry *);

static inline bool d_managed(const struct dentry *dentry)
{
	return dentry->d_flags & DCACHE_MANAGED_DENTRY;
}

static inline bool d_mountpoint(const struct dentry *dentry)
{
	return dentry->d_flags & DCACHE_MOUNTED;
}

/*
 * Directory cache entry type accessor functions.
 */
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
	return dentry->d_flags & DCACHE_ENTRY_TYPE;
}

static inline bool d_is_miss(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
}

static inline bool d_is_whiteout(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
}

static inline bool d_can_lookup(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
}

static inline bool d_is_autodir(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
}

static inline bool d_is_dir(const struct dentry *dentry)
{
	return d_can_lookup(dentry) || d_is_autodir(dentry);
}

static inline bool d_is_symlink(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
}

static inline bool d_is_reg(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
}

static inline bool d_is_special(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
}

static inline bool d_is_file(const struct dentry *dentry)
{
	return d_is_reg(dentry) || d_is_special(dentry);
}

static inline bool d_is_negative(const struct dentry *dentry)
{
	// TODO: check d_is_whiteout(dentry) also.
	return d_is_miss(dentry);
}

static inline bool d_flags_negative(unsigned flags)
{
	return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE;
}

static inline bool d_is_positive(const struct dentry *dentry)
{
	return !d_is_negative(dentry);
}

/**
 * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
 * @dentry: The dentry in question
 *
 * Returns true if the dentry represents either an absent name or a name that
 * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent
 * a true miss, a whiteout that isn't represented by a 0,0 chardev or a
 * fallthrough marker in an opaque directory.
 *
 * Note! (1) This should be used *only* by a filesystem to examine its own
 * dentries. It should not be used to look at some other filesystem's
 * dentries. (2) It should also be used in combination with d_inode() to get
 * the inode. (3) The dentry may have something attached to ->d_lower and the
 * type field of the flags may be set to something other than miss or whiteout.
 */
static inline bool d_really_is_negative(const struct dentry *dentry)
{
	return dentry->d_inode == NULL;
}

/**
 * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
 * @dentry: The dentry in question
 *
 * Returns true if the dentry represents a name that maps to an inode
 * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if
 * that is represented on medium as a 0,0 chardev.
 *
 * Note! (1) This should be used *only* by a filesystem to examine its own
 * dentries. It should not be used to look at some other filesystem's
 * dentries. (2) It should also be used in combination with d_inode() to get
 * the inode.
 */
static inline bool d_really_is_positive(const struct dentry *dentry)
{
	return dentry->d_inode != NULL;
}

static inline int simple_positive(const struct dentry *dentry)
{
	return d_really_is_positive(dentry) && !d_unhashed(dentry);
}

extern int sysctl_vfs_cache_pressure;

static inline unsigned long vfs_pressure_ratio(unsigned long val)
{
	return mult_frac(val, sysctl_vfs_cache_pressure, 100);
}

/**
 * d_inode - Get the actual inode of this dentry
 * @dentry: The dentry to query
 *
 * This is the helper normal filesystems should use to get at their own inodes
 * in their own dentries and ignore the layering superimposed upon them.
 */
static inline struct inode *d_inode(const struct dentry *dentry)
{
	return dentry->d_inode;
}

/**
 * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
 * @dentry: The dentry to query
 *
 * This is the helper normal filesystems should use to get at their own inodes
 * in their own dentries and ignore the layering superimposed upon them.
 */
static inline struct inode *d_inode_rcu(const struct dentry *dentry)
{
	return READ_ONCE(dentry->d_inode);
}

/**
 * d_backing_inode - Get upper or lower inode we should be using
 * @upper: The upper layer
 *
 * This is the helper that should be used to get at the inode that will be used
 * if this dentry were to be opened as a file. The inode may be on the upper
 * dentry or it may be on a lower dentry pinned by the upper.
 *
 * Normal filesystems should not use this to access their own inodes.
 */
static inline struct inode *d_backing_inode(const struct dentry *upper)
{
	struct inode *inode = upper->d_inode;

	return inode;
}

/**
 * d_real - Return the real dentry
 * @dentry: the dentry to query
 * @type: the type of real dentry (data or metadata)
 *
 * If dentry is on a union/overlay, then return the underlying, real dentry.
 * Otherwise return the dentry itself.
 *
 * See also: Documentation/filesystems/vfs.rst
 */
static inline struct dentry *d_real(struct dentry *dentry,
				    enum d_real_type type)
{
	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
		return dentry->d_op->d_real(dentry, type);
	else
		return dentry;
}

/**
 * d_real_inode - Return the real inode hosting the data
 * @dentry: The dentry to query
 *
 * If dentry is on a union/overlay, then return the underlying, real inode.
 * Otherwise return d_inode().
 */
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
	/* This usage of d_real() results in const dentry */
	return d_inode(d_real((struct dentry *) dentry, D_REAL_DATA));
}

struct name_snapshot {
	struct qstr name;
	union shortname_store inline_name;
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);

static inline struct dentry *d_first_child(const struct dentry *dentry)
{
	return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
}

static inline struct dentry *d_next_sibling(const struct dentry *dentry)
{
	return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
}

#endif /* __LINUX_DCACHE_H */
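As the comment above HASH_LEN_DECLARE notes, the hash always occupies the low bits of hash_len, so a name's hash and length can be stored, compared, and passed around as a single 64-bit word. A small userspace sketch of that packing follows, assuming a little-endian host and using a stand-in struct rather than the kernel's <linux/stringhash.h> helpers.

/*
 * Userspace sketch of the qstr hash/len packing: on little-endian the
 * hash sits in the low 32 bits of hash_len, the length in the high 32
 * bits. Stand-in type, not the kernel's struct qstr.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct qstr_demo {
	union {
		struct {
			uint32_t hash;	/* low bits on little-endian */
			uint32_t len;	/* high bits */
		};
		uint64_t hash_len;
	};
	const char *name;
};

/* pack a (hash, len) pair into one 64-bit word */
static uint64_t hashlen_create(uint32_t hash, uint32_t len)
{
	return (uint64_t)len << 32 | hash;
}

int main(void)
{
	struct qstr_demo q = { .name = "etc" };

	q.hash_len = hashlen_create(0xdeadbeef, (uint32_t)strlen(q.name));
	/* both fields are recovered from the single 64-bit store */
	printf("hash=%#x len=%u\n", q.hash, q.len); /* hash=0xdeadbeef len=3 */
	return 0;
}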
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2010 LG Electronics
 * Chan Jeong <chan.jeong@lge.com>
 *
 * lzo_wrapper.c
 */

#include <linux/mutex.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/lzo.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

struct squashfs_lzo {
	void	*input;
	void	*output;
};

static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
{
	int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);

	struct squashfs_lzo *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (stream == NULL)
		goto failed;
	stream->input = vmalloc(block_size);
	if (stream->input == NULL)
		goto failed;
	stream->output = vmalloc(block_size);
	if (stream->output == NULL)
		goto failed2;

	return stream;

failed2:
	vfree(stream->input);
failed:
	ERROR("Failed to allocate lzo workspace\n");
	kfree(stream);
	return ERR_PTR(-ENOMEM);
}

static void lzo_free(void *strm)
{
	struct squashfs_lzo *stream = strm;

	if (stream) {
		vfree(stream->input);
		vfree(stream->output);
	}
	kfree(stream);
}

static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
	struct bio *bio, int offset, int length,
	struct squashfs_page_actor *output)
{
	struct bvec_iter_all iter_all = {};
	struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
	struct squashfs_lzo *stream = strm;
	void *buff = stream->input, *data;
	int bytes = length, res;
	size_t out_len = output->length;

	while (bio_next_segment(bio, &iter_all)) {
		int avail = min(bytes, ((int)bvec->bv_len) - offset);

		data = bvec_virt(bvec);
		memcpy(buff, data + offset, avail);
		buff += avail;
		bytes -= avail;
		offset = 0;
	}

	res = lzo1x_decompress_safe(stream->input, (size_t)length,
					stream->output, &out_len);
	if (res != LZO_E_OK)
		goto failed;

	res = bytes = (int)out_len;
	data = squashfs_first_page(output);
	buff = stream->output;
	while (data) {
		if (bytes <= PAGE_SIZE) {
			if (!IS_ERR(data))
				memcpy(data, buff, bytes);
			break;
		} else {
			if (!IS_ERR(data))
				memcpy(data, buff, PAGE_SIZE);
			buff += PAGE_SIZE;
			bytes -= PAGE_SIZE;
			data = squashfs_next_page(output);
		}
	}
	squashfs_finish_page(output);

	return res;

failed:
	return -EIO;
}

const struct squashfs_decompressor squashfs_lzo_comp_ops = {
	.init = lzo_init,
	.free = lzo_free,
	.decompress = lzo_uncompress,
	.id = LZO_COMPRESSION,
	.name = "lzo",
	.alloc_buffer = 0,
	.supported = 1
};
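The tail of lzo_uncompress() above scatters the contiguous decompressed buffer into the page actor one PAGE_SIZE chunk at a time, with a short final copy for the last partial page. A standalone sketch of that copy-out loop follows, with a tiny stand-in page size and a plain 2-D array in place of the squashfs page actor.

/*
 * Userspace sketch of the copy-out loop in lzo_uncompress(). PAGE_SZ,
 * copy_out() and the destination array are stand-ins, not kernel API.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4	/* tiny page size to make the example visible */

static void copy_out(const char *src, int bytes, char dst[][PAGE_SZ], int npages)
{
	int page = 0;

	while (page < npages) {
		if (bytes <= PAGE_SZ) {			/* last (partial) page */
			memcpy(dst[page], src, bytes);
			break;
		}
		memcpy(dst[page], src, PAGE_SZ);	/* full page */
		src += PAGE_SZ;
		bytes -= PAGE_SZ;
		page++;
	}
}

int main(void)
{
	char out[3][PAGE_SZ];
	const char data[] = "0123456789";	/* 10 bytes + NUL */

	copy_out(data, sizeof(data), out, 3);
	printf("%.4s|%.4s|%.3s\n", out[0], out[1], out[2]); /* 0123|4567|89 */
	return 0;
}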
26 28 28 26 26 26 26 26 26 28 28 26 26 26 26 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "btree_cache.h" #include "disk_groups.h" #include "error.h" #include "opts.h" #include "replicas.h" #include "sb-members.h" #include "super-io.h" void bch2_dev_missing(struct bch_fs *c, unsigned dev) { if (dev != BCH_SB_MEMBER_INVALID) bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev); } void bch2_dev_bucket_missing(struct bch_fs *c, struct bpos bucket) { bch2_fs_inconsistent(c, "pointer to nonexistent bucket %llu:%llu", bucket.inode, bucket.offset); } #define x(t, n, ...) 
[n] = #t, static const char * const bch2_iops_measurements[] = { BCH_IOPS_MEASUREMENTS() NULL }; char * const bch2_member_error_strs[] = { BCH_MEMBER_ERROR_TYPES() NULL }; #undef x /* Code for bch_sb_field_members_v1: */ struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i) { return __bch2_members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i); } static struct bch_member members_v2_get(struct bch_sb_field_members_v2 *mi, int i) { struct bch_member ret, *p = __bch2_members_v2_get_mut(mi, i); memset(&ret, 0, sizeof(ret)); memcpy(&ret, p, min_t(size_t, le16_to_cpu(mi->member_bytes), sizeof(ret))); return ret; } static struct bch_member *members_v1_get_mut(struct bch_sb_field_members_v1 *mi, int i) { return (void *) mi->_members + (i * BCH_MEMBER_V1_BYTES); } static struct bch_member members_v1_get(struct bch_sb_field_members_v1 *mi, int i) { struct bch_member ret, *p = members_v1_get_mut(mi, i); memset(&ret, 0, sizeof(ret)); memcpy(&ret, p, min_t(size_t, BCH_MEMBER_V1_BYTES, sizeof(ret))); return ret; } struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i) { struct bch_sb_field_members_v2 *mi2 = bch2_sb_field_get(sb, members_v2); if (mi2) return members_v2_get(mi2, i); struct bch_sb_field_members_v1 *mi1 = bch2_sb_field_get(sb, members_v1); return members_v1_get(mi1, i); } static int sb_members_v2_resize_entries(struct bch_fs *c) { struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2); if (le16_to_cpu(mi->member_bytes) < sizeof(struct bch_member)) { unsigned u64s = DIV_ROUND_UP((sizeof(*mi) + sizeof(mi->_members[0]) * c->disk_sb.sb->nr_devices), 8); mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s); if (!mi) return -BCH_ERR_ENOSPC_sb_members_v2; for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) { void *dst = (void *) mi->_members + (i * sizeof(struct bch_member)); memmove(dst, __bch2_members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes)); memset(dst + le16_to_cpu(mi->member_bytes), 0, (sizeof(struct bch_member) - le16_to_cpu(mi->member_bytes))); } mi->member_bytes = cpu_to_le16(sizeof(struct bch_member)); } return 0; } int bch2_sb_members_v2_init(struct bch_fs *c) { struct bch_sb_field_members_v1 *mi1; struct bch_sb_field_members_v2 *mi2; if (!bch2_sb_field_get(c->disk_sb.sb, members_v2)) { mi2 = bch2_sb_field_resize(&c->disk_sb, members_v2, DIV_ROUND_UP(sizeof(*mi2) + sizeof(struct bch_member) * c->sb.nr_devices, sizeof(u64))); mi1 = bch2_sb_field_get(c->disk_sb.sb, members_v1); memcpy(&mi2->_members[0], &mi1->_members[0], BCH_MEMBER_V1_BYTES * c->sb.nr_devices); memset(&mi2->pad[0], 0, sizeof(mi2->pad)); mi2->member_bytes = cpu_to_le16(BCH_MEMBER_V1_BYTES); } return sb_members_v2_resize_entries(c); } int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb) { struct bch_sb_field_members_v1 *mi1; struct bch_sb_field_members_v2 *mi2; mi1 = bch2_sb_field_resize(disk_sb, members_v1, DIV_ROUND_UP(sizeof(*mi1) + BCH_MEMBER_V1_BYTES * disk_sb->sb->nr_devices, sizeof(u64))); if (!mi1) return -BCH_ERR_ENOSPC_sb_members; mi2 = bch2_sb_field_get(disk_sb->sb, members_v2); for (unsigned i = 0; i < disk_sb->sb->nr_devices; i++) memcpy(members_v1_get_mut(mi1, i), __bch2_members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES); return 0; } static int validate_member(struct printbuf *err, struct bch_member m, struct bch_sb *sb, int i) { if (le64_to_cpu(m.nbuckets) > BCH_MEMBER_NBUCKETS_MAX) { prt_printf(err, "device %u: too many buckets (got %llu, max %u)", i, le64_to_cpu(m.nbuckets), BCH_MEMBER_NBUCKETS_MAX); return 
-BCH_ERR_invalid_sb_members;
	}

	if (le64_to_cpu(m.nbuckets) - le16_to_cpu(m.first_bucket) < BCH_MIN_NR_NBUCKETS) {
		prt_printf(err, "device %u: not enough buckets (got %llu, min %u)",
			   i, le64_to_cpu(m.nbuckets), BCH_MIN_NR_NBUCKETS);
		return -BCH_ERR_invalid_sb_members;
	}

	if (le16_to_cpu(m.bucket_size) < le16_to_cpu(sb->block_size)) {
		prt_printf(err, "device %u: bucket size %u smaller than block size %u",
			   i, le16_to_cpu(m.bucket_size), le16_to_cpu(sb->block_size));
		return -BCH_ERR_invalid_sb_members;
	}

	if (le16_to_cpu(m.bucket_size) < BCH_SB_BTREE_NODE_SIZE(sb)) {
		prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
			   i, le16_to_cpu(m.bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
		return -BCH_ERR_invalid_sb_members;
	}

	if (m.btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX) {
		prt_printf(err, "device %u: invalid btree_bitmap_shift %u",
			   i, m.btree_bitmap_shift);
		return -BCH_ERR_invalid_sb_members;
	}

	return 0;
}

static void member_to_text(struct printbuf *out,
			   struct bch_member m,
			   struct bch_sb_field_disk_groups *gi,
			   struct bch_sb *sb,
			   int i)
{
	unsigned data_have = bch2_sb_dev_has_data(sb, i);
	u64 bucket_size = le16_to_cpu(m.bucket_size);
	u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;

	if (!bch2_member_alive(&m))
		return;

	prt_printf(out, "Device:\t%u\n", i);

	printbuf_indent_add(out, 2);

	prt_printf(out, "Label:\t");
	if (BCH_MEMBER_GROUP(&m)) {
		unsigned idx = BCH_MEMBER_GROUP(&m) - 1;

		if (idx < disk_groups_nr(gi))
			prt_printf(out, "%s (%u)", gi->entries[idx].label, idx);
		else
			prt_printf(out, "(bad disk labels section)");
	} else {
		prt_printf(out, "(none)");
	}
	prt_newline(out);

	prt_printf(out, "UUID:\t");
	pr_uuid(out, m.uuid.b);
	prt_newline(out);

	prt_printf(out, "Size:\t");
	prt_units_u64(out, device_size << 9);
	prt_newline(out);

	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
		prt_printf(out, "%s errors:\t%llu\n", bch2_member_error_strs[i], le64_to_cpu(m.errors[i]));

	for (unsigned i = 0; i < BCH_IOPS_NR; i++)
		prt_printf(out, "%s iops:\t%u\n", bch2_iops_measurements[i], le32_to_cpu(m.iops[i]));

	prt_printf(out, "Bucket size:\t");
	prt_units_u64(out, bucket_size << 9);
	prt_newline(out);

	prt_printf(out, "First bucket:\t%u\n", le16_to_cpu(m.first_bucket));
	prt_printf(out, "Buckets:\t%llu\n", le64_to_cpu(m.nbuckets));

	prt_printf(out, "Last mount:\t");
	if (m.last_mount)
		bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
	else
		prt_printf(out, "(never)");
	prt_newline(out);

	prt_printf(out, "Last superblock write:\t%llu\n", le64_to_cpu(m.seq));

	prt_printf(out, "State:\t%s\n",
		   BCH_MEMBER_STATE(&m) < BCH_MEMBER_STATE_NR
		   ? bch2_member_states[BCH_MEMBER_STATE(&m)]
		   : "unknown");

	prt_printf(out, "Data allowed:\t");
	if (BCH_MEMBER_DATA_ALLOWED(&m))
		prt_bitflags(out, __bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
	else
		prt_printf(out, "(none)");
	prt_newline(out);

	prt_printf(out, "Has data:\t");
	if (data_have)
		prt_bitflags(out, __bch2_data_types, data_have);
	else
		prt_printf(out, "(none)");
	prt_newline(out);

	prt_printf(out, "Btree allocated bitmap blocksize:\t");
	if (m.btree_bitmap_shift < 64)
		prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
	else
		prt_printf(out, "(invalid shift %u)", m.btree_bitmap_shift);
	prt_newline(out);

	prt_printf(out, "Btree allocated bitmap:\t");
	bch2_prt_u64_base2_nbits(out, le64_to_cpu(m.btree_allocated_bitmap), 64);
	prt_newline(out);

	prt_printf(out, "Durability:\t%llu\n", BCH_MEMBER_DURABILITY(&m) ? BCH_MEMBER_DURABILITY(&m) - 1 : 1);

	prt_printf(out, "Discard:\t%llu\n", BCH_MEMBER_DISCARD(&m));
	prt_printf(out, "Freespace initialized:\t%llu\n", BCH_MEMBER_FREESPACE_INITIALIZED(&m));

	printbuf_indent_sub(out, 2);
}

static int bch2_sb_members_v1_validate(struct bch_sb *sb, struct bch_sb_field *f,
				       enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
	unsigned i;

	if ((void *) members_v1_get_mut(mi, sb->nr_devices) > vstruct_end(&mi->field)) {
		prt_printf(err, "too many devices for section size");
		return -BCH_ERR_invalid_sb_members;
	}

	for (i = 0; i < sb->nr_devices; i++) {
		struct bch_member m = members_v1_get(mi, i);

		int ret = validate_member(err, m, sb, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb,
				       struct bch_sb_field *f)
{
	struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
	struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
	unsigned i;

	for (i = 0; i < sb->nr_devices; i++)
		member_to_text(out, members_v1_get(mi, i), gi, sb, i);
}

const struct bch_sb_field_ops bch_sb_field_ops_members_v1 = {
	.validate	= bch2_sb_members_v1_validate,
	.to_text	= bch2_sb_members_v1_to_text,
};

static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
				       struct bch_sb_field *f)
{
	struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
	struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
	unsigned i;

	for (i = 0; i < sb->nr_devices; i++)
		member_to_text(out, members_v2_get(mi, i), gi, sb, i);
}

static int bch2_sb_members_v2_validate(struct bch_sb *sb, struct bch_sb_field *f,
				       enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
	size_t mi_bytes = (void *) __bch2_members_v2_get_mut(mi, sb->nr_devices) -
		(void *) mi;

	if (mi_bytes > vstruct_bytes(&mi->field)) {
		prt_printf(err, "section too small (%zu > %zu)",
			   mi_bytes, vstruct_bytes(&mi->field));
		return -BCH_ERR_invalid_sb_members;
	}

	for (unsigned i = 0; i < sb->nr_devices; i++) {
		int ret = validate_member(err, members_v2_get(mi, i), sb, i);
		if (ret)
			return ret;
	}

	return 0;
}

const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
	.validate	= bch2_sb_members_v2_validate,
	.to_text	= bch2_sb_members_v2_to_text,
};

void bch2_sb_members_from_cpu(struct bch_fs *c)
{
	struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL) {
		struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx);

		for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++)
			m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
	}
	rcu_read_unlock();
}

void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_member m;

	mutex_lock(&ca->fs->sb_lock);
	m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&ca->fs->sb_lock);

	printbuf_tabstop_push(out, 12);

	prt_str(out, "IO errors since filesystem creation");
	prt_newline(out);
	printbuf_indent_add(out, 2);
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
		prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i], atomic64_read(&ca->errors[i]));
	printbuf_indent_sub(out, 2);

	prt_str(out, "IO errors since ");
	bch2_pr_time_units(out, (ktime_get_real_seconds() - le64_to_cpu(m.errors_reset_time)) * NSEC_PER_SEC);
	prt_str(out, " ago");
	prt_newline(out);
	printbuf_indent_add(out, 2);
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
		prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i],
			   atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
	printbuf_indent_sub(out, 2);
}

void bch2_dev_errors_reset(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_member *m;

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++)
		m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i]));
	m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds());

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);
}

/*
 * Per member "range has btree nodes" bitmap:
 *
 * This is so that if we ever have to run the btree node scan to repair we
 * don't have to scan full devices:
 */

bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
{
	bool ret = true;
	rcu_read_lock();
	bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
		struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
		if (!ca)
			continue;

		if (!bch2_dev_btree_bitmap_marked_sectors(ca, ptr->offset, btree_sectors(c))) {
			ret = false;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, unsigned dev,
					 u64 start, unsigned sectors)
{
	struct bch_member *m = __bch2_members_v2_get_mut(mi, dev);
	u64 bitmap = le64_to_cpu(m->btree_allocated_bitmap);

	u64 end = start + sectors;

	int resize = ilog2(roundup_pow_of_two(end)) - (m->btree_bitmap_shift + 6);
	if (resize > 0) {
		u64 new_bitmap = 0;

		for (unsigned i = 0; i < 64; i++)
			if (bitmap & BIT_ULL(i))
				new_bitmap |= BIT_ULL(i >> resize);
		bitmap = new_bitmap;
		m->btree_bitmap_shift += resize;
	}

	BUG_ON(m->btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX);
	BUG_ON(end > 64ULL << m->btree_bitmap_shift);

	for (unsigned bit = start >> m->btree_bitmap_shift;
	     (u64) bit << m->btree_bitmap_shift < end;
	     bit++)
		bitmap |= BIT_ULL(bit);

	m->btree_allocated_bitmap = cpu_to_le64(bitmap);
}

void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
{
	lockdep_assert_held(&c->sb_lock);

	struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);

	bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
		if (!bch2_member_exists(c->disk_sb.sb, ptr->dev))
			continue;

		__bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
	}
}

unsigned bch2_sb_nr_devices(const struct bch_sb *sb)
{
	unsigned nr = 0;

	for (unsigned i = 0; i < sb->nr_devices; i++)
		nr += bch2_member_exists((struct bch_sb *) sb, i);
	return nr;
}

int bch2_sb_member_alloc(struct bch_fs *c)
{
	unsigned dev_idx = c->sb.nr_devices;
	struct bch_sb_field_members_v2 *mi;
	unsigned nr_devices;
	unsigned u64s;
	int best = -1;
	u64 best_last_mount = 0;

	if (dev_idx < BCH_SB_MEMBERS_MAX)
		goto have_slot;

	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
		/* eventually BCH_SB_MEMBERS_MAX will be raised */
		if (dev_idx == BCH_SB_MEMBER_INVALID)
			continue;

		struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
		if (bch2_member_alive(&m))
			continue;

		u64 last_mount = le64_to_cpu(m.last_mount);
		if (best < 0 || last_mount < best_last_mount) {
			best = dev_idx;
			best_last_mount = last_mount;
		}
	}
	if (best >= 0) {
		dev_idx = best;
		goto have_slot;
	}

	return -BCH_ERR_ENOSPC_sb_members;
have_slot:
	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);

	mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
	u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
			    le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));

	mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
	if (!mi)
		return -BCH_ERR_ENOSPC_sb_members;

	c->disk_sb.sb->nr_devices = nr_devices;
	return dev_idx;
}
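The resize step in __bch2_dev_btree_bitmap_mark() above is the subtle part: each set bit covers (1 << shift) sectors, and when an extent ends beyond what 64 bits can describe, the granularity is doubled and existing bits are folded down. The following is a minimal userspace sketch of that folding logic, assuming nothing from the kernel tree; bitmap_mark(), the starting shift of 6, and the values in main() are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

static void bitmap_mark(uint64_t *bitmap, unsigned *shift,
			uint64_t start, uint64_t sectors)
{
	uint64_t end = start + sectors;

	/* Double the per-bit granularity until 64 bits cover [0, end). */
	while (end > 64ULL << *shift) {
		uint64_t folded = 0;

		for (unsigned i = 0; i < 64; i++)
			if (*bitmap & BIT_ULL(i))
				folded |= BIT_ULL(i >> 1);
		*bitmap = folded;
		(*shift)++;
	}

	/* Mark every bit whose range intersects [start, end). */
	for (uint64_t bit = start >> *shift;
	     bit << *shift < end;
	     bit++)
		*bitmap |= BIT_ULL(bit);
}

int main(void)
{
	uint64_t bitmap = 0;
	unsigned shift = 6;	/* 64 sectors per bit to start */

	bitmap_mark(&bitmap, &shift, 128, 512);	 /* fits at shift 6 */
	bitmap_mark(&bitmap, &shift, 8000, 512); /* forces a resize */
	printf("shift %u bitmap %016llx\n", shift,
	       (unsigned long long)bitmap);
	return 0;
}

Folding one doubling at a time gives the same result as the kernel's single shift by `resize`, since halving bit indices repeatedly is equivalent to one right shift by the total number of doublings.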
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NFNETLINK_H
#define _NFNETLINK_H

#include <linux/netlink.h>
#include <linux/capability.h>
#include <net/netlink.h>
#include <uapi/linux/netfilter/nfnetlink.h>

struct nfnl_info {
	struct net		*net;
	struct sock		*sk;
	const struct nlmsghdr	*nlh;
	const struct nfgenmsg	*nfmsg;
	struct netlink_ext_ack	*extack;
};

enum nfnl_callback_type {
	NFNL_CB_UNSPEC	= 0,
	NFNL_CB_MUTEX,
	NFNL_CB_RCU,
	NFNL_CB_BATCH,
};

struct nfnl_callback {
	int (*call)(struct sk_buff *skb, const struct nfnl_info *info,
		    const struct nlattr * const cda[]);
	const struct nla_policy	*policy;
	enum nfnl_callback_type	type;
	__u16			attr_count;
};

enum nfnl_abort_action {
	NFNL_ABORT_NONE		= 0,
	NFNL_ABORT_AUTOLOAD,
	NFNL_ABORT_VALIDATE,
};

struct nfnetlink_subsystem {
	const char *name;
	__u8 subsys_id;			/* nfnetlink subsystem ID */
	__u8 cb_count;			/* number of callbacks */
	const struct nfnl_callback *cb;	/* callback for individual types */
	struct module *owner;
	int (*commit)(struct net *net, struct sk_buff *skb);
	int (*abort)(struct net *net, struct sk_buff *skb,
		     enum nfnl_abort_action action);
	bool (*valid_genid)(struct net *net, u32 genid);
};

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);

int nfnetlink_has_listeners(struct net *net, unsigned int group);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
			 __u32 group, gfp_t allocation);

static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
	return subsys << 8 | msg_type;
}

static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version,
				 __be16 res_id)
{
	struct nfgenmsg *nfmsg;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = family;
	nfmsg->version = version;
	nfmsg->res_id = res_id;
}

static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid,
					    u32 seq, int type, int flags,
					    u8 family, u8 version, __be16 res_id)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
	if (!nlh)
		return NULL;

	nfnl_fill_hdr(nlh, family, version, res_id);

	return nlh;
}

void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(__u8 subsys_id);
#else
static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
{
	return true;
}
#endif /* CONFIG_PROVE_LOCKING */

#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
	MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))

#endif	/* _NFNETLINK_H */
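For context, nfnl_msg_type() packs the subsystem ID into the high byte of the netlink message type, and nfnl_msg_put() reserves both the nlmsghdr and the struct nfgenmsg payload that every nfnetlink message carries. A hedged sketch of how a subsystem could build such a message follows; example_build_msg() is an invented name, and the nftables constants merely stand in for any subsystem's uapi values.

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>

/* Illustrative only: allocate an skb and emit the nfnetlink header. */
static struct sk_buff *example_build_msg(u32 portid, u32 seq)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWTABLE);

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Fills both the nlmsghdr and the struct nfgenmsg payload. */
	nlh = nfnl_msg_put(skb, portid, seq, type, NLM_F_CREATE,
			   AF_UNSPEC, NFNETLINK_V0, 0);
	if (!nlh) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}

A real handler would go on to append attributes with nla_put_*() and close the message with nlmsg_end(skb, nlh) before sending it.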
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (c) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * Error mapping routines from Samba libsmb/errormap.c * Copyright (C) Andrew Tridgell 2001 */ #include <linux/net.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ctype.h> #include <linux/fs.h> #include <asm/div64.h> #include <asm/byteorder.h> #include <linux/inet.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "smberr.h" #include "cifs_debug.h" #include "nterr.h" struct smb_to_posix_error { __u16 smb_err; int posix_code; }; static const struct smb_to_posix_error mapping_table_ERRDOS[] = { {ERRbadfunc, -EINVAL}, {ERRbadfile, -ENOENT}, {ERRbadpath, -ENOTDIR}, {ERRnofids, -EMFILE}, {ERRnoaccess, -EACCES}, {ERRbadfid, -EBADF}, {ERRbadmcb, -EIO}, {ERRnomem, -EREMOTEIO}, {ERRbadmem, -EFAULT}, {ERRbadenv, -EFAULT}, {ERRbadformat, -EINVAL}, {ERRbadaccess, -EACCES}, {ERRbaddata, -EIO}, {ERRbaddrive, -ENXIO}, {ERRremcd, -EACCES}, {ERRdiffdevice, -EXDEV}, {ERRnofiles, -ENOENT}, {ERRwriteprot, -EROFS}, {ERRbadshare, -EBUSY}, {ERRlock, -EACCES}, {ERRunsup, -EINVAL}, {ERRnosuchshare, -ENXIO}, {ERRfilexists, -EEXIST}, {ERRinvparm, -EINVAL}, {ERRdiskfull, -ENOSPC}, {ERRinvname, -ENOENT}, {ERRinvlevel, -EOPNOTSUPP}, {ERRdirnotempty, -ENOTEMPTY}, {ERRnotlocked, -ENOLCK}, {ERRcancelviolation, -ENOLCK}, {ERRalreadyexists, -EEXIST}, {ERRmoredata, -EOVERFLOW}, {ERReasnotsupported, -EOPNOTSUPP}, {ErrQuota, -EDQUOT}, {ErrNotALink, -ENOLINK}, {ERRnetlogonNotStarted, -ENOPROTOOPT}, {ERRsymlink, -EOPNOTSUPP}, {ErrTooManyLinks, -EMLINK}, {0, 0} }; static const struct smb_to_posix_error mapping_table_ERRSRV[] = { {ERRerror, -EIO}, {ERRbadpw, -EACCES}, /* was EPERM */ {ERRbadtype, -EREMOTE}, {ERRaccess, -EACCES}, {ERRinvtid, -ENXIO}, {ERRinvnetname, -ENXIO}, {ERRinvdevice, -ENXIO}, {ERRqfull, -ENOSPC}, {ERRqtoobig, -ENOSPC}, {ERRqeof, -EIO}, {ERRinvpfid, -EBADF}, {ERRsmbcmd, -EBADRQC}, {ERRsrverror, -EIO}, {ERRbadBID, -EIO}, {ERRfilespecs, -EINVAL}, {ERRbadLink, -EIO}, {ERRbadpermits, -EINVAL}, {ERRbadPID, -ESRCH}, {ERRsetattrmode, -EINVAL}, {ERRpaused, -EHOSTDOWN}, {ERRmsgoff, -EHOSTDOWN}, {ERRnoroom, -ENOSPC}, {ERRrmuns, -EUSERS}, {ERRtimeout, -ETIME}, {ERRnoresource, -EREMOTEIO}, {ERRtoomanyuids, -EUSERS}, {ERRbaduid, -EACCES}, {ERRusempx, -EIO}, {ERRusestd, -EIO}, {ERR_NOTIFY_ENUM_DIR, -ENOBUFS}, {ERRnoSuchUser, -EACCES}, /* {ERRaccountexpired, -EACCES}, {ERRbadclient, -EACCES}, {ERRbadLogonTime, -EACCES}, {ERRpasswordExpired, -EACCES},*/ {ERRaccountexpired, -EKEYEXPIRED}, {ERRbadclient, -EACCES}, {ERRbadLogonTime, -EACCES}, {ERRpasswordExpired, -EKEYEXPIRED}, {ERRnosupport, -EINVAL}, {0, 0} }; /* * Convert a string containing text IPv4 or IPv6 address to binary form. * * Returns 0 on failure.
*/ static int cifs_inet_pton(const int address_family, const char *cp, int len, void *dst) { int ret = 0; /* calculate length by finding first slash or NULL */ if (address_family == AF_INET) ret = in4_pton(cp, len, dst, '\\', NULL); else if (address_family == AF_INET6) ret = in6_pton(cp, len, dst , '\\', NULL); cifs_dbg(NOISY, "address conversion returned %d for %*.*s\n", ret, len, len, cp); if (ret > 0) ret = 1; return ret; } /* * Try to convert a string to an IPv4 address and then attempt to convert * it to an IPv6 address if that fails. Set the family field if either * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to * treat the part following it as a numeric sin6_scope_id. * * Returns 0 on failure. */ int cifs_convert_address(struct sockaddr *dst, const char *src, int len) { int rc, alen, slen; const char *pct; char scope_id[13]; struct sockaddr_in *s4 = (struct sockaddr_in *) dst; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; /* IPv4 address */ if (cifs_inet_pton(AF_INET, src, len, &s4->sin_addr.s_addr)) { s4->sin_family = AF_INET; return 1; } /* attempt to exclude the scope ID from the address part */ pct = memchr(src, '%', len); alen = pct ? pct - src : len; rc = cifs_inet_pton(AF_INET6, src, alen, &s6->sin6_addr.s6_addr); if (!rc) return rc; s6->sin6_family = AF_INET6; if (pct) { /* grab the scope ID */ slen = len - (alen + 1); if (slen <= 0 || slen > 12) return 0; memcpy(scope_id, pct + 1, slen); scope_id[slen] = '\0'; rc = kstrtouint(scope_id, 0, &s6->sin6_scope_id); rc = (rc == 0) ? 1 : 0; } return rc; } void cifs_set_port(struct sockaddr *addr, const unsigned short int port) { switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *)addr)->sin_port = htons(port); break; case AF_INET6: ((struct sockaddr_in6 *)addr)->sin6_port = htons(port); break; } } /***************************************************************************** convert a NT status code to a dos class/code *****************************************************************************/ /* NT status -> dos error map */ static const struct { __u8 dos_class; __u16 dos_code; __u32 ntstatus; } ntstatus_to_dos_map[] = { { ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, { ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, { ERRDOS, ERRinvlevel, NT_STATUS_INVALID_INFO_CLASS}, { ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, { ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, { ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, { ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, { ERRDOS, 87, NT_STATUS_INVALID_CID}, { ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, { ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, { ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, { ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, { ERRDOS, 38, NT_STATUS_END_OF_FILE}, { ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, { ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, { ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, { ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR}, /* { This NT error code was 'sqashed' from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK during the session setup } */ { ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, { ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, { ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, { ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, { ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, { ERRDOS, 2142, 
NT_STATUS_INVALID_SYSTEM_SERVICE}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, { ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, { ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, { ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, { ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED}, /* { This NT error code was 'sqashed' from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, { ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, { ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, { ERRHRD, ERRgeneral, NT_STATUS_UNWIND}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, { ERRDOS, 158, NT_STATUS_NOT_LOCKED}, { ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, { ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, { ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, { ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, { /* mapping changed since shell does lookup on * expects FileNotFound */ ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, { ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, { ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, { ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, { ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, { ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, { ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, { ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, { ERRDOS, 23, NT_STATUS_DATA_ERROR}, { ERRDOS, 23, NT_STATUS_CRC_ERROR}, { ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, { ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, { ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, { ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, { ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, { ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, { ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, { ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, { ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, { ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, { ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, { ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, { ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, { ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, { ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, { ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, { ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, { ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, { ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, { ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, { ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, { ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, { ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, { ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, { ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, { ERRDOS, 2215, 
NT_STATUS_NO_LOGON_SERVERS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, { ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS}, /* { This NT error code was 'sqashed' from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */ ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN}, /* { This NT error code was 'sqashed' from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, { ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, { ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, { ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, { ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, { ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, { ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, { ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, { ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, { ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, { ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, { ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, { ERRDOS, 112, NT_STATUS_DISK_FULL}, { ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, { ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, { ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, { ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ID_AUTHORITY}, { ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, { ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, { ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, { ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, { ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, { ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, { ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED}, /* { This NT error code was 'sqashed' from NT_STATUS_INSUFFICIENT_RESOURCES to NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */ { ERRDOS, ERRnoresource, NT_STATUS_INSUFFICIENT_RESOURCES}, { ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, { ERRDOS, 23, 
NT_STATUS_DEVICE_DATA_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, { ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, { ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, { ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, { ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, { ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, { ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, { ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, { ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, { ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, { ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, { ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, { ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, { ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, { ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, { ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, { ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, { ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, { ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, { ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, { ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, { ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, { ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, { ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, { ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, { ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, { ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, { ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, { ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, { ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, { ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, { ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, { ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, { ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, { ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, { ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, { ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, { ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, { ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, { ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, { ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, { ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, { ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, { ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, { ERRDOS, 72, NT_STATUS_REDIRECTOR_PAUSED}, { ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, { ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, { ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, { ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, { ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, { ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, { ERRHRD, 
ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_2}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_10}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, { ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, { ERRDOS, 203, 0xc0000100}, { ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, { ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, { ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, { ERRDOS, 2401, NT_STATUS_FILES_OPEN}, { ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, { ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, { ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, { ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, { ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, { ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, { ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, { ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, { ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, { ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, { ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, { ERRHRD, ERRgeneral, 
NT_STATUS_TOO_MANY_THREADS}, { ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, { ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, { ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, { ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, { ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_OPEN_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, { ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, { ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, { ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, { ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, { ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, { ERRDOS, 59, NT_STATUS_LINK_FAILED}, { ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, { ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, { ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, { ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, { ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, { ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_PAGEFILE}, { ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, { ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, { ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, { ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, { ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, { ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, { ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, { ERRHRD, ERRgeneral, NT_STATUS_FT_ORPHANING}, { ERRHRD, ERRgeneral, 0xc000016e}, { ERRHRD, 
ERRgeneral, 0xc000016f}, { ERRHRD, ERRgeneral, 0xc0000170}, { ERRHRD, ERRgeneral, 0xc0000171}, { ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, { ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, { ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, { ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, { ERRHRD, ERRgeneral, 0xc0000179}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_LOG_SPACE}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, { ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, { ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, { ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, { ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, { ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, { ERRDOS, 19, NT_STATUS_TOO_LATE}, { ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET}, /* { This NT error code was 'sqashed' from NT_STATUS_NO_TRUST_SAM_ACCOUNT to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, { ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, { ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, { ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, { ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, { ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT}, /* { This NT error code was 'sqashed' from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, { ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, { ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, { ERRDOS, ERRnoresource, NT_STATUS_INSUFF_SERVER_RESOURCES}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, { ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, { ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, { ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, { ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, { ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, { ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, { ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, { ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, { ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, { ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, { 
ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, { ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, { ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, { ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, { ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_SYSTEM_PROCESS_TERMINATED}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, { ERRHRD, ERRgeneral, NT_STATUS_REPLY_MESSAGE_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, { ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, { ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, { ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_MUST_CHANGE}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, { ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, { ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, { ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, { ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, { ERRHRD, ERRgeneral, NT_STATUS_RETRY}, { ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, { ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, { ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, { ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, { ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, { ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, { ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, { ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, { ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_PORT_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, { ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, { ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, { ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, { ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, { ERRHRD, ERRgeneral, 0xc000024a}, { ERRHRD, ERRgeneral, 0xc000024b}, { ERRHRD, ERRgeneral, 0xc000024c}, { ERRHRD, ERRgeneral, 0xc000024d}, { ERRHRD, ERRgeneral, 0xc000024e}, { ERRHRD, ERRgeneral, 0xc000024f}, { ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, { ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, { ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, { ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, { ERRSRV, 3, 
NT_STATUS_PATH_NOT_COVERED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, { ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, { ERRHRD, ERRgeneral, 0xc000025d}, { ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, { ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, { ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, { ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, { ERRDOS, 288, NT_STATUS_RESOURCE_NOT_OWNED}, { ERRDOS, ErrTooManyLinks, NT_STATUS_TOO_MANY_LINKS}, { ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, { ERRDOS, 21, 0xc000026e}, { ERRDOS, 161, 0xc0000281}, { ERRDOS, ERRnoaccess, 0xc000028a}, { ERRDOS, ERRnoaccess, 0xc000028b}, { ERRHRD, ERRgeneral, 0xc000028c}, { ERRDOS, ERRnoaccess, 0xc000028d}, { ERRDOS, ERRnoaccess, 0xc000028e}, { ERRDOS, ERRnoaccess, 0xc000028f}, { ERRDOS, ERRnoaccess, 0xc0000290}, { ERRDOS, ERRbadfunc, 0xc000029c}, { ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, { ERRDOS, ERRinvlevel, 0x007c0001}, { 0, 0, 0 } }; /***************************************************************************** Print an error message from the status code *****************************************************************************/ static void cifs_print_status(__u32 status_code) { int idx = 0; while (nt_errs[idx].nt_errstr != NULL) { if (nt_errs[idx].nt_errcode == status_code) { pr_notice("Status code returned 0x%08x %s\n", status_code, nt_errs[idx].nt_errstr); return; } idx++; } return; } static void ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode) { int i; if (ntstatus == 0) { *eclass = 0; *ecode = 0; return; } for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) { if (ntstatus == ntstatus_to_dos_map[i].ntstatus) { *eclass = ntstatus_to_dos_map[i].dos_class; *ecode = ntstatus_to_dos_map[i].dos_code; return; } } *eclass = ERRHRD; *ecode = ERRgeneral; } int map_smb_to_linux_error(char *buf, bool logErr) { struct smb_hdr *smb = (struct smb_hdr *)buf; unsigned int i; int rc = -EIO; /* if transport error smb error may not be set */ __u8 smberrclass; __u16 smberrcode; /* BB if NT Status codes - map NT BB */ /* old style smb error codes */ if (smb->Status.CifsError == 0) return 0; if (smb->Flags2 & SMBFLG2_ERR_STATUS) { /* translate the newer STATUS codes to old style SMB errors * and then to POSIX errors */ __u32 err = le32_to_cpu(smb->Status.CifsError); if (logErr && (err != (NT_STATUS_MORE_PROCESSING_REQUIRED))) cifs_print_status(err); else if (cifsFYI & CIFS_RC) cifs_print_status(err); ntstatus_to_dos(err, &smberrclass, &smberrcode); } else { smberrclass = smb->Status.DosError.ErrorClass; smberrcode = le16_to_cpu(smb->Status.DosError.Error); } /* old style errors */ /* DOS class smb error codes - map DOS */ if (smberrclass == ERRDOS) { /* 1 byte field no need to byte reverse */ for (i = 0; i < sizeof(mapping_table_ERRDOS) / sizeof(struct smb_to_posix_error); i++) { if (mapping_table_ERRDOS[i].smb_err == 0) break; else if (mapping_table_ERRDOS[i].smb_err == smberrcode) { rc = mapping_table_ERRDOS[i].posix_code; break; } /* else try next error mapping one to see if match */ } } else if (smberrclass == ERRSRV) { /* server class of error codes */ for (i = 0; i < sizeof(mapping_table_ERRSRV) / sizeof(struct smb_to_posix_error); 
i++) { if (mapping_table_ERRSRV[i].smb_err == 0) break; else if (mapping_table_ERRSRV[i].smb_err == smberrcode) { rc = mapping_table_ERRSRV[i].posix_code; break; } /* else try next error mapping to see if match */ } } /* else ERRHRD class errors or junk - return EIO */ /* special cases for NT status codes which cannot be translated to DOS codes */ if (smb->Flags2 & SMBFLG2_ERR_STATUS) { __u32 err = le32_to_cpu(smb->Status.CifsError); if (err == (NT_STATUS_NOT_A_REPARSE_POINT)) rc = -ENODATA; else if (err == (NT_STATUS_PRIVILEGE_NOT_HELD)) rc = -EPERM; } cifs_dbg(FYI, "Mapping smb error code 0x%x to POSIX err %d\n", le32_to_cpu(smb->Status.CifsError), rc); /* generic corrective action e.g. reconnect SMB session on * ERRbaduid could be added */ return rc; } int map_and_check_smb_error(struct mid_q_entry *mid, bool logErr) { int rc; struct smb_hdr *smb = (struct smb_hdr *)mid->resp_buf; rc = map_smb_to_linux_error((char *)smb, logErr); if (rc == -EACCES && !(smb->Flags2 & SMBFLG2_ERR_STATUS)) { /* possible ERRBaduid */ __u8 class = smb->Status.DosError.ErrorClass; __u16 code = le16_to_cpu(smb->Status.DosError.Error); /* switch can be used to handle different errors */ if (class == ERRSRV && code == ERRbaduid) { cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n", code); cifs_signal_cifsd_for_reconnect(mid->server, false); } } return rc; } /* * calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message */ unsigned int smbCalcSize(void *buf) { struct smb_hdr *ptr = buf; return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + 2 /* size of the bcc field */ + get_bcc(ptr)); } /* The following are taken from fs/ntfs/util.c */ #define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000) /* * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units) * into Unix UTC (based 1970-01-01, in seconds). */ struct timespec64 cifs_NTtimeToUnix(__le64 ntutc) { struct timespec64 ts; /* BB what about the timezone? BB */ /* Subtract the NTFS time offset, then convert to 1s intervals. */ s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET; u64 abs_t; /* * Unfortunately can not use normal 64 bit division on 32 bit arch, but * the alternative, do_div, does not work with negative numbers so have * to special case them */ if (t < 0) { abs_t = -t; ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100); ts.tv_nsec = -ts.tv_nsec; ts.tv_sec = -abs_t; } else { abs_t = t; ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100; ts.tv_sec = abs_t; } return ts; } /* Convert the Unix UTC into NT UTC. */ u64 cifs_UnixTimeToNT(struct timespec64 t) { /* Convert to 100ns intervals and then add the NTFS time offset. 
*/ return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET; } static const int total_days_of_prev_months[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset) { struct timespec64 ts; time64_t sec, days; int min, day, month, year; u16 date = le16_to_cpu(le_date); u16 time = le16_to_cpu(le_time); SMB_TIME *st = (SMB_TIME *)&time; SMB_DATE *sd = (SMB_DATE *)&date; cifs_dbg(FYI, "date %d time %d\n", date, time); sec = 2 * st->TwoSeconds; min = st->Minutes; if ((sec > 59) || (min > 59)) cifs_dbg(VFS, "Invalid time min %d sec %lld\n", min, sec); sec += (min * 60); sec += 60 * 60 * st->Hours; if (st->Hours > 23) cifs_dbg(VFS, "Invalid hours %d\n", st->Hours); day = sd->Day; month = sd->Month; if (day < 1 || day > 31 || month < 1 || month > 12) { cifs_dbg(VFS, "Invalid date, month %d day: %d\n", month, day); day = clamp(day, 1, 31); month = clamp(month, 1, 12); } month -= 1; days = day + total_days_of_prev_months[month]; days += 3652; /* account for difference in days between 1980 and 1970 */ year = sd->Year; days += year * 365; days += (year/4); /* leap year */ /* generalized leap year calculation is more complex, i.e. no leap year for years divisible by 100 except for years divisible by 400, but since the maximum DOS year is 2**7, the last year is 1980+127, which means we need only consider 2 special case years, the years 2000 and 2100, and only adjust for the lack of leap year for the year 2100, as 2000 was a leap year (divisible by 400) */ if (year >= 120) /* the year 2100 */ days = days - 1; /* do not count leap year for the year 2100 */ /* adjust for leap year where we are still before leap day */ if (year != 120) days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0); sec += 24 * 60 * 60 * days; ts.tv_sec = sec + offset; /* cifs_dbg(FYI, "sec after cnvrt dos to unix time %d\n",sec); */ ts.tv_nsec = 0; return ts; }
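The arithmetic in cifs_NTtimeToUnix() is easier to check in isolation: NT times count 100ns ticks from 1601-01-01, so subtracting the 1601-to-1970 offset and dividing by 10^7 yields Unix seconds, with the remainder scaled to nanoseconds. Below is a self-contained userspace sketch, assuming native 64-bit division is available (the kernel code above uses do_div() precisely because 32-bit arches lack it); nt_to_unix() and the sample timestamp are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Days from 1601-01-01 to 1970-01-01, in 100ns ticks (as in the file). */
#define NTFS_TIME_OFFSET ((uint64_t)(369 * 365 + 89) * 24 * 3600 * 10000000ULL)

static void nt_to_unix(int64_t ntutc, int64_t *sec, long *nsec)
{
	int64_t t = ntutc - (int64_t)NTFS_TIME_OFFSET;

	*sec = t / 10000000;           /* whole seconds */
	*nsec = (t % 10000000) * 100;  /* leftover 100ns ticks -> ns */
}

int main(void)
{
	int64_t sec;
	long nsec;

	/* NT timestamp for Unix time 1234567890 (2009-02-13 23:31:30 UTC) */
	nt_to_unix((int64_t)(NTFS_TIME_OFFSET + 1234567890ULL * 10000000),
		   &sec, &nsec);
	printf("%lld.%09ld\n", (long long)sec, nsec);
	return 0;
}

C division truncates toward zero, so a pre-1970 input yields a negative second count and negative nanoseconds, matching the sign handling the kernel routine implements by hand around do_div().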
2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Driver for Option High Speed Mobile Devices.
 *
 * Copyright (C) 2008 Option International
 *                    Filip Aben <f.aben@option.com>
 *                    Denis Joseph Barrow <d.barow@option.com>
 *                    Jan Dumon <j.dumon@option.com>
 * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd)
 *                    <ajb@spheresystems.co.uk>
 * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (C) 2008 Novell, Inc.
 *
 *****************************************************************************/

/******************************************************************************
 *
 * Description of the device:
 *
 * Interface 0:	Contains the IP network interface on the bulk endpoints.
 *		The multiplexed serial ports use the interrupt and
 *		control endpoints.
 *		The interrupt transfer carries a bitmap telling which
 *		multiplexed serial port needs servicing.
 *
 * Interface 1:	Diagnostics port; uses bulk only. Do not submit URBs until
 *		the port is opened, as doing so has a huge impact on the
 *		network port throughput.
 *
 * Interface 2:	Standard modem interface - circuit switched. This can be
 *		used to make a standard PPP connection; however, it should
 *		not be used in conjunction with the IP network interface,
 *		for USB performance reasons: if using this, ideally set
 *		disable_net=1.
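 *
 * (Illustrative usage sketch, assuming disable_net is exported as a
 * module parameter and the Modem port received minor 0:
 *
 *	modprobe hso disable_net=1
 *	pppd /dev/ttyHS0 ...
 *
 * i.e. interface 2 carries the circuit-switched PPP session while the
 * IP network interface stays out of the way.)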
* *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/kmod.h> #include <linux/rfkill.h> #include <linux/ip.h> #include <linux/uaccess.h> #include <linux/usb/cdc.h> #include <net/arp.h> #include <asm/byteorder.h> #include <linux/serial_core.h> #include <linux/serial.h> #define MOD_AUTHOR "Option Wireless" #define MOD_DESCRIPTION "USB High Speed Option driver" #define HSO_MAX_NET_DEVICES 10 #define HSO__MAX_MTU 2048 #define DEFAULT_MTU 1500 #define DEFAULT_MRU 1500 #define CTRL_URB_RX_SIZE 1024 #define CTRL_URB_TX_SIZE 64 #define BULK_URB_RX_SIZE 4096 #define BULK_URB_TX_SIZE 8192 #define MUX_BULK_RX_BUF_SIZE HSO__MAX_MTU #define MUX_BULK_TX_BUF_SIZE HSO__MAX_MTU #define MUX_BULK_RX_BUF_COUNT 4 #define USB_TYPE_OPTION_VENDOR 0x20 /* These definitions are used with the struct hso_net flags element */ /* - use *_bit operations on it. (bit indices not values.) */ #define HSO_NET_RUNNING 0 #define HSO_NET_TX_TIMEOUT (HZ*10) #define HSO_SERIAL_MAGIC 0x48534f31 /* Number of ttys to handle */ #define HSO_SERIAL_TTY_MINORS 256 #define MAX_RX_URBS 2 /*****************************************************************************/ /* Debugging functions */ /*****************************************************************************/ #define hso_dbg(lvl, fmt, ...) \ do { \ if ((lvl) & debug) \ pr_info("[%d:%s] " fmt, \ __LINE__, __func__, ##__VA_ARGS__); \ } while (0) /*****************************************************************************/ /* Enumerators */ /*****************************************************************************/ enum pkt_parse_state { WAIT_IP, WAIT_DATA, WAIT_SYNC }; /*****************************************************************************/ /* Structs */ /*****************************************************************************/ struct hso_shared_int { struct usb_endpoint_descriptor *intr_endp; void *shared_intr_buf; struct urb *shared_intr_urb; struct usb_device *usb; int use_count; int ref_count; struct mutex shared_int_lock; }; struct hso_net { struct hso_device *parent; struct net_device *net; struct rfkill *rfkill; char name[24]; struct usb_endpoint_descriptor *in_endp; struct usb_endpoint_descriptor *out_endp; struct urb *mux_bulk_rx_urb_pool[MUX_BULK_RX_BUF_COUNT]; struct urb *mux_bulk_tx_urb; void *mux_bulk_rx_buf_pool[MUX_BULK_RX_BUF_COUNT]; void *mux_bulk_tx_buf; struct sk_buff *skb_rx_buf; struct sk_buff *skb_tx_buf; enum pkt_parse_state rx_parse_state; spinlock_t net_lock; unsigned short rx_buf_size; unsigned short rx_buf_missing; struct iphdr rx_ip_hdr; unsigned long flags; }; enum rx_ctrl_state{ RX_IDLE, RX_SENT, RX_PENDING }; #define BM_REQUEST_TYPE (0xa1) #define B_NOTIFICATION (0x20) #define W_VALUE (0x0) #define W_LENGTH (0x2) #define B_OVERRUN (0x1<<6) #define B_PARITY (0x1<<5) #define B_FRAMING (0x1<<4) #define B_RING_SIGNAL (0x1<<3) #define B_BREAK (0x1<<2) #define B_TX_CARRIER (0x1<<1) #define B_RX_CARRIER (0x1<<0) struct hso_serial_state_notification { u8 bmRequestType; u8 bNotification; u16 wValue; u16 wIndex; u16 wLength; u16 UART_state_bitmap; } __packed; struct hso_tiocmget { struct mutex mutex; wait_queue_head_t waitq; int intr_completed; struct 
usb_endpoint_descriptor *endp; struct urb *urb; struct hso_serial_state_notification *serial_state_notification; u16 prev_UART_state_bitmap; struct uart_icount icount; }; struct hso_serial { struct hso_device *parent; int magic; u8 minor; struct hso_shared_int *shared_int; /* rx/tx urb could be either a bulk urb or a control urb depending on which serial port it is used on. */ struct urb *rx_urb[MAX_RX_URBS]; u8 num_rx_urbs; u8 *rx_data[MAX_RX_URBS]; u16 rx_data_length; /* should contain allocated length */ struct urb *tx_urb; u8 *tx_data; u8 *tx_buffer; u16 tx_data_length; /* should contain allocated length */ u16 tx_data_count; u16 tx_buffer_count; struct usb_ctrlrequest ctrl_req_tx; struct usb_ctrlrequest ctrl_req_rx; struct usb_endpoint_descriptor *in_endp; struct usb_endpoint_descriptor *out_endp; enum rx_ctrl_state rx_state; u8 rts_state; u8 dtr_state; unsigned tx_urb_used:1; struct tty_port port; /* from usb_serial_port */ spinlock_t serial_lock; int (*write_data) (struct hso_serial *serial); struct hso_tiocmget *tiocmget; /* Hacks required to get flow control * working on the serial receive buffers * so as not to drop characters on the floor. */ int curr_rx_urb_idx; u8 rx_urb_filled[MAX_RX_URBS]; struct tasklet_struct unthrottle_tasklet; }; struct hso_device { union { struct hso_serial *dev_serial; struct hso_net *dev_net; } port_data; u32 port_spec; u8 is_active; u8 usb_gone; struct work_struct async_get_intf; struct work_struct async_put_intf; struct usb_device *usb; struct usb_interface *interface; struct device *dev; struct kref ref; struct mutex mutex; }; /* Type of interface */ #define HSO_INTF_MASK 0xFF00 #define HSO_INTF_MUX 0x0100 #define HSO_INTF_BULK 0x0200 /* Type of port */ #define HSO_PORT_MASK 0xFF #define HSO_PORT_NO_PORT 0x0 #define HSO_PORT_CONTROL 0x1 #define HSO_PORT_APP 0x2 #define HSO_PORT_GPS 0x3 #define HSO_PORT_PCSC 0x4 #define HSO_PORT_APP2 0x5 #define HSO_PORT_GPS_CONTROL 0x6 #define HSO_PORT_MSD 0x7 #define HSO_PORT_VOICE 0x8 #define HSO_PORT_DIAG2 0x9 #define HSO_PORT_DIAG 0x10 #define HSO_PORT_MODEM 0x11 #define HSO_PORT_NETWORK 0x12 /* Additional device info */ #define HSO_INFO_MASK 0xFF000000 #define HSO_INFO_CRC_BUG 0x01000000 /*****************************************************************************/ /* Prototypes */ /*****************************************************************************/ /* Serial driver functions */ static int hso_serial_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void ctrl_callback(struct urb *urb); static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial); static void hso_kick_transmit(struct hso_serial *serial); /* Helper functions */ static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, struct usb_device *usb, gfp_t gfp); static void handle_usb_error(int status, const char *function, struct hso_device *hso_dev); static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, int type, int dir); static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports); static void hso_free_interface(struct usb_interface *intf); static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags); static int hso_stop_serial_device(struct hso_device *hso_dev); static int hso_start_net_device(struct hso_device *hso_dev); static void hso_free_shared_int(struct hso_shared_int *shared_int); static int hso_stop_net_device(struct hso_device *hso_dev); static void hso_serial_ref_free(struct kref *ref); static void 
hso_std_serial_read_bulk_callback(struct urb *urb); static int hso_mux_serial_read(struct hso_serial *serial); static void async_get_intf(struct work_struct *data); static void async_put_intf(struct work_struct *data); static int hso_put_activity(struct hso_device *hso_dev); static int hso_get_activity(struct hso_device *hso_dev); static void tiocmget_intr_callback(struct urb *urb); /*****************************************************************************/ /* Helping functions */ /*****************************************************************************/ /* #define DEBUG */ static inline struct hso_net *dev2net(struct hso_device *hso_dev) { return hso_dev->port_data.dev_net; } static inline struct hso_serial *dev2ser(struct hso_device *hso_dev) { return hso_dev->port_data.dev_serial; } /* Debugging functions */ #ifdef DEBUG static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, unsigned int len) { static char name[255]; sprintf(name, "hso[%d:%s]", line_count, func_name); print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len); } #define DUMP(buf_, len_) \ dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_) #define DUMP1(buf_, len_) \ do { \ if (0x01 & debug) \ DUMP(buf_, len_); \ } while (0) #else #define DUMP(buf_, len_) #define DUMP1(buf_, len_) #endif /* module parameters */ static int debug; static int tty_major; static int disable_net; /* driver info */ static const char driver_name[] = "hso"; static const char tty_filename[] = "ttyHS"; /* the usb driver itself (registered in hso_init) */ static struct usb_driver hso_driver; /* serial structures */ static struct tty_driver *tty_drv; static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS]; static struct hso_device *network_table[HSO_MAX_NET_DEVICES]; static DEFINE_SPINLOCK(serial_table_lock); static const s32 default_port_spec[] = { HSO_INTF_MUX | HSO_PORT_NETWORK, HSO_INTF_BULK | HSO_PORT_DIAG, HSO_INTF_BULK | HSO_PORT_MODEM, 0 }; static const s32 icon321_port_spec[] = { HSO_INTF_MUX | HSO_PORT_NETWORK, HSO_INTF_BULK | HSO_PORT_DIAG2, HSO_INTF_BULK | HSO_PORT_MODEM, HSO_INTF_BULK | HSO_PORT_DIAG, 0 }; #define default_port_device(vendor, product) \ USB_DEVICE(vendor, product), \ .driver_info = (kernel_ulong_t)default_port_spec #define icon321_port_device(vendor, product) \ USB_DEVICE(vendor, product), \ .driver_info = (kernel_ulong_t)icon321_port_spec /* list of devices we support */ static const struct usb_device_id hso_ids[] = { {default_port_device(0x0af0, 0x6711)}, {default_port_device(0x0af0, 0x6731)}, {default_port_device(0x0af0, 0x6751)}, {default_port_device(0x0af0, 0x6771)}, {default_port_device(0x0af0, 0x6791)}, {default_port_device(0x0af0, 0x6811)}, {default_port_device(0x0af0, 0x6911)}, {default_port_device(0x0af0, 0x6951)}, {default_port_device(0x0af0, 0x6971)}, {default_port_device(0x0af0, 0x7011)}, {default_port_device(0x0af0, 0x7031)}, {default_port_device(0x0af0, 0x7051)}, {default_port_device(0x0af0, 0x7071)}, {default_port_device(0x0af0, 0x7111)}, {default_port_device(0x0af0, 0x7211)}, {default_port_device(0x0af0, 0x7251)}, {default_port_device(0x0af0, 0x7271)}, {default_port_device(0x0af0, 0x7311)}, {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */ {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7381)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7401)}, /* 
GI 0401 */ {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7701)}, {USB_DEVICE(0x0af0, 0x7706)}, {USB_DEVICE(0x0af0, 0x7801)}, {USB_DEVICE(0x0af0, 0x7901)}, {USB_DEVICE(0x0af0, 0x7A01)}, {USB_DEVICE(0x0af0, 0x7A05)}, {USB_DEVICE(0x0af0, 0x8200)}, {USB_DEVICE(0x0af0, 0x8201)}, {USB_DEVICE(0x0af0, 0x8300)}, {USB_DEVICE(0x0af0, 0x8302)}, {USB_DEVICE(0x0af0, 0x8304)}, {USB_DEVICE(0x0af0, 0x8400)}, {USB_DEVICE(0x0af0, 0x8600)}, {USB_DEVICE(0x0af0, 0x8800)}, {USB_DEVICE(0x0af0, 0x8900)}, {USB_DEVICE(0x0af0, 0x9000)}, {USB_DEVICE(0x0af0, 0x9200)}, /* Option GTM671WFS */ {USB_DEVICE(0x0af0, 0xd035)}, {USB_DEVICE(0x0af0, 0xd055)}, {USB_DEVICE(0x0af0, 0xd155)}, {USB_DEVICE(0x0af0, 0xd255)}, {USB_DEVICE(0x0af0, 0xd057)}, {USB_DEVICE(0x0af0, 0xd157)}, {USB_DEVICE(0x0af0, 0xd257)}, {USB_DEVICE(0x0af0, 0xd357)}, {USB_DEVICE(0x0af0, 0xd058)}, {USB_DEVICE(0x0af0, 0xc100)}, {} }; MODULE_DEVICE_TABLE(usb, hso_ids); /* Sysfs attribute */ static ssize_t hsotype_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hso_device *hso_dev = dev_get_drvdata(dev); char *port_name; if (!hso_dev) return 0; switch (hso_dev->port_spec & HSO_PORT_MASK) { case HSO_PORT_CONTROL: port_name = "Control"; break; case HSO_PORT_APP: port_name = "Application"; break; case HSO_PORT_APP2: port_name = "Application2"; break; case HSO_PORT_GPS: port_name = "GPS"; break; case HSO_PORT_GPS_CONTROL: port_name = "GPS Control"; break; case HSO_PORT_PCSC: port_name = "PCSC"; break; case HSO_PORT_DIAG: port_name = "Diagnostic"; break; case HSO_PORT_DIAG2: port_name = "Diagnostic2"; break; case HSO_PORT_MODEM: port_name = "Modem"; break; case HSO_PORT_NETWORK: port_name = "Network"; break; default: port_name = "Unknown"; break; } return sprintf(buf, "%s\n", port_name); } static DEVICE_ATTR_RO(hsotype); static struct attribute *hso_serial_dev_attrs[] = { &dev_attr_hsotype.attr, NULL }; ATTRIBUTE_GROUPS(hso_serial_dev); static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb) { int idx; for (idx = 0; idx < serial->num_rx_urbs; idx++) if (serial->rx_urb[idx] == urb) return idx; dev_err(serial->parent->dev, "hso_urb_to_index failed\n"); return -1; } /* converts mux value to a port spec value */ static u32 hso_mux_to_port(int mux) { u32 result; switch (mux) { case 0x1: result = HSO_PORT_CONTROL; break; case 0x2: result = HSO_PORT_APP; break; case 0x4: result = HSO_PORT_PCSC; break; case 0x8: result = HSO_PORT_GPS; break; case 0x10: result = HSO_PORT_APP2; break; default: result = HSO_PORT_NO_PORT; } return result; } /* converts port spec value to a mux value */ static u32 hso_port_to_mux(int port) { u32 result; switch (port & HSO_PORT_MASK) { case HSO_PORT_CONTROL: result = 0x0; break; case HSO_PORT_APP: result = 0x1; break; case HSO_PORT_PCSC: result = 0x2; break; case HSO_PORT_GPS: result = 0x3; break; case HSO_PORT_APP2: result = 0x4; break; default: result = 0x0; } return result; } static struct hso_serial *get_serial_by_shared_int_and_type( struct hso_shared_int *shared_int, int mux) { int i, port; port = hso_mux_to_port(mux); for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (dev2ser(serial_table[i])->shared_int == shared_int) && ((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) { return dev2ser(serial_table[i]); } } return NULL; } static struct hso_serial *get_serial_by_index(unsigned index) { struct hso_serial *serial = NULL; unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); if (serial_table[index]) 
	serial = dev2ser(serial_table[index]);
	spin_unlock_irqrestore(&serial_table_lock, flags);
	return serial;
}

static int obtain_minor(struct hso_serial *serial)
{
	int index;
	unsigned long flags;

	spin_lock_irqsave(&serial_table_lock, flags);
	for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
		if (serial_table[index] == NULL) {
			serial_table[index] = serial->parent;
			serial->minor = index;
			spin_unlock_irqrestore(&serial_table_lock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&serial_table_lock, flags);
	pr_err("%s: no free serial devices in table\n", __func__);
	return -1;
}

static void release_minor(struct hso_serial *serial)
{
	unsigned long flags;

	spin_lock_irqsave(&serial_table_lock, flags);
	serial_table[serial->minor] = NULL;
	spin_unlock_irqrestore(&serial_table_lock, flags);
}

static void handle_usb_error(int status, const char *function,
			     struct hso_device *hso_dev)
{
	char *explanation;

	switch (status) {
	case -ENODEV:
		explanation = "no device";
		break;
	case -ENOENT:
		explanation = "endpoint not enabled";
		break;
	case -EPIPE:
		explanation = "endpoint stalled";
		break;
	case -ENOSPC:
		explanation = "not enough bandwidth";
		break;
	case -ESHUTDOWN:
		explanation = "device disabled";
		break;
	case -EHOSTUNREACH:
		explanation = "device suspended";
		break;
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		explanation = "internal error";
		break;
	case -EILSEQ:
	case -EPROTO:
	case -ETIME:
	case -ETIMEDOUT:
		explanation = "protocol error";
		if (hso_dev)
			usb_queue_reset_device(hso_dev->interface);
		break;
	default:
		explanation = "unknown status";
		break;
	}

	/* log a meaningful explanation of a USB status */
	hso_dbg(0x1, "%s: received USB status - %s (%d)\n",
		function, explanation, status);
}

/* Network interface functions */

/* called when the net interface is brought up by ifconfig */
static int hso_net_open(struct net_device *net)
{
	struct hso_net *odev = netdev_priv(net);
	unsigned long flags = 0;

	if (!odev) {
		dev_err(&net->dev, "No net device!\n");
		return -ENODEV;
	}

	odev->skb_tx_buf = NULL;

	/* setup environment */
	spin_lock_irqsave(&odev->net_lock, flags);
	odev->rx_parse_state = WAIT_IP;
	odev->rx_buf_size = 0;
	odev->rx_buf_missing = sizeof(struct iphdr);
	spin_unlock_irqrestore(&odev->net_lock, flags);

	/* We are up and running. */
	set_bit(HSO_NET_RUNNING, &odev->flags);
	hso_start_net_device(odev->parent);

	/* Tell the kernel we are ready to start receiving from it */
	netif_start_queue(net);

	return 0;
}

/* called when the interface is brought down by ifconfig */
static int hso_net_close(struct net_device *net)
{
	struct hso_net *odev = netdev_priv(net);

	/* we don't need the queue anymore */
	netif_stop_queue(net);
	/* no longer running */
	clear_bit(HSO_NET_RUNNING, &odev->flags);
	hso_stop_net_device(odev->parent);

	/* done */
	return 0;
}

/* USB tells us xmit is done; we should start the netqueue again */
static void write_bulk_callback(struct urb *urb)
{
	struct hso_net *odev = urb->context;
	int status = urb->status;

	/* Sanity check */
	if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
		dev_err(&urb->dev->dev, "%s: device not running\n", __func__);
		return;
	}

	/* Do we still have a valid kernel network device?
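	 * (It may have been unregistered while this URB was still in flight.)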
	 */
	if (!netif_device_present(odev->net)) {
		dev_err(&urb->dev->dev, "%s: net device not present\n",
			__func__);
		return;
	}

	/* log status, but don't act on it, we don't need to resubmit anything
	 * anyhow */
	if (status)
		handle_usb_error(status, __func__, odev->parent);

	hso_put_activity(odev->parent);

	/* Tell the network interface we are ready for another frame */
	netif_wake_queue(odev->net);
}

/* called by the kernel when we need to transmit a packet */
static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
				      struct net_device *net)
{
	struct hso_net *odev = netdev_priv(net);
	int result;

	/* Tell the kernel, "No more frames 'til we are done with this one." */
	netif_stop_queue(net);

	if (hso_get_activity(odev->parent) == -EAGAIN) {
		odev->skb_tx_buf = skb;
		return NETDEV_TX_OK;
	}

	/* log if asked */
	DUMP1(skb->data, skb->len);

	/* Copy it from kernel memory to OUR memory */
	memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len);

	hso_dbg(0x1, "len: %d/%d\n", skb->len, MUX_BULK_TX_BUF_SIZE);

	/* Fill in the URB for shipping it out. */
	usb_fill_bulk_urb(odev->mux_bulk_tx_urb,
			  odev->parent->usb,
			  usb_sndbulkpipe(odev->parent->usb,
					  odev->out_endp->
					  bEndpointAddress & 0x7F),
			  odev->mux_bulk_tx_buf, skb->len, write_bulk_callback,
			  odev);

	/* Deal with the Zero Length packet problem, I hope */
	odev->mux_bulk_tx_urb->transfer_flags |= URB_ZERO_PACKET;

	/* Send the URB on its merry way. */
	result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
	if (result) {
		dev_warn(&odev->parent->interface->dev,
			 "failed mux_bulk_tx_urb %d\n", result);
		net->stats.tx_errors++;
		netif_start_queue(net);
	} else {
		net->stats.tx_packets++;
		net->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);

	/* we're done */
	return NETDEV_TX_OK;
}

static const struct ethtool_ops ops = {
	.get_link = ethtool_op_get_link
};

/* called when a packet did not ack after the watchdog timeout */
static void hso_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct hso_net *odev = netdev_priv(net);

	if (!odev)
		return;

	/* Tell syslog we are hosed. */
	dev_warn(&net->dev, "Tx timed out.\n");

	/* Tear the waiting frame off the list */
	if (odev->mux_bulk_tx_urb)
		usb_unlink_urb(odev->mux_bulk_tx_urb);

	/* Update statistics */
	net->stats.tx_errors++;
}

/* make a real packet from the received USB buffer */
static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
			unsigned int count, unsigned char is_eop)
{
	unsigned short temp_bytes;
	unsigned short buffer_offset = 0;
	unsigned short frame_len;

	/* log if needed */
	hso_dbg(0x1, "Rx %d bytes\n", count);
	DUMP(ip_pkt, min(128, (int)count));

	while (count) {
		switch (odev->rx_parse_state) {
		case WAIT_IP:
			/* waiting for IP header. */
			/* wanted bytes - size of ip header */
			temp_bytes =
			    (count < odev->rx_buf_missing) ? count : odev->
			    rx_buf_missing;

			memcpy(((unsigned char *)(&odev->rx_ip_hdr)) +
			       odev->rx_buf_size, ip_pkt + buffer_offset,
			       temp_bytes);

			odev->rx_buf_size += temp_bytes;
			buffer_offset += temp_bytes;
			odev->rx_buf_missing -= temp_bytes;
			count -= temp_bytes;

			if (!odev->rx_buf_missing) {
				/* header is complete; allocate an sk_buff and
				 * continue to WAIT_DATA */
				frame_len = ntohs(odev->rx_ip_hdr.tot_len);

				if ((frame_len > DEFAULT_MRU) ||
				    (frame_len < sizeof(struct iphdr))) {
					dev_err(&odev->net->dev,
						"Invalid frame (%d) length\n",
						frame_len);
					odev->rx_parse_state = WAIT_SYNC;
					continue;
				}
				/* Allocate an sk_buff */
				odev->skb_rx_buf = netdev_alloc_skb(odev->net,
								    frame_len);
				if (!odev->skb_rx_buf) {
					/* We got no receive buffer.
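					 * Drop this frame and fall back to
					 * WAIT_SYNC so the parser can
					 * resynchronize at the next
					 * end-of-packet boundary.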
					 */
					hso_dbg(0x1, "could not allocate memory\n");
					odev->rx_parse_state = WAIT_SYNC;
					continue;
				}

				/* Copy what we got so far. Make room for iphdr
				 * after tail. */
				skb_put_data(odev->skb_rx_buf,
					     (char *)&(odev->rx_ip_hdr),
					     sizeof(struct iphdr));

				/* ETH_HLEN */
				odev->rx_buf_size = sizeof(struct iphdr);

				/* Filip actually uses .tot_len */
				odev->rx_buf_missing =
				    frame_len - sizeof(struct iphdr);
				odev->rx_parse_state = WAIT_DATA;
			}
			break;

		case WAIT_DATA:
			temp_bytes = (count < odev->rx_buf_missing)
					? count : odev->rx_buf_missing;

			/* Copy the rest of the bytes that are left in the
			 * buffer into the waiting sk_buf. */
			/* Make room for temp_bytes after tail. */
			skb_put_data(odev->skb_rx_buf,
				     ip_pkt + buffer_offset, temp_bytes);

			odev->rx_buf_missing -= temp_bytes;
			count -= temp_bytes;
			buffer_offset += temp_bytes;
			odev->rx_buf_size += temp_bytes;
			if (!odev->rx_buf_missing) {
				/* Packet is complete. Inject into stack. */
				/* We have IP packet here */
				odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
				skb_reset_mac_header(odev->skb_rx_buf);

				/* Ship it off to the kernel */
				netif_rx(odev->skb_rx_buf);
				/* No longer our buffer. */
				odev->skb_rx_buf = NULL;

				/* update our statistics */
				odev->net->stats.rx_packets++;
				odev->net->stats.rx_bytes += odev->rx_buf_size;

				odev->rx_buf_size = 0;
				odev->rx_buf_missing = sizeof(struct iphdr);
				odev->rx_parse_state = WAIT_IP;
			}
			break;

		case WAIT_SYNC:
			hso_dbg(0x1, " W_S\n");
			count = 0;
			break;
		default:
			hso_dbg(0x1, "\n");
			count--;
			break;
		}
	}

	/* Recovery mechanism for WAIT_SYNC state. */
	if (is_eop) {
		if (odev->rx_parse_state == WAIT_SYNC) {
			odev->rx_parse_state = WAIT_IP;
			odev->rx_buf_size = 0;
			odev->rx_buf_missing = sizeof(struct iphdr);
		}
	}
}

static void fix_crc_bug(struct urb *urb, __le16 max_packet_size)
{
	static const u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	u32 rest = urb->actual_length % le16_to_cpu(max_packet_size);

	if (((rest == 5) || (rest == 6)) &&
	    !memcmp(((u8 *)urb->transfer_buffer) + urb->actual_length - 4,
		    crc_check, 4)) {
		urb->actual_length -= 4;
	}
}

/* Moving data from usb to kernel (in interrupt state) */
static void read_bulk_callback(struct urb *urb)
{
	struct hso_net *odev = urb->context;
	struct net_device *net;
	int result;
	unsigned long flags;
	int status = urb->status;

	/* is all ok? */
	if (status) {
		handle_usb_error(status, __func__, odev->parent);
		return;
	}

	/* Sanity check */
	if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
		hso_dbg(0x1, "BULK IN callback but driver is not active!\n");
		return;
	}
	usb_mark_last_busy(urb->dev);

	net = odev->net;

	if (!netif_device_present(net)) {
		/* Somebody killed our network interface... */
		return;
	}

	if (odev->parent->port_spec & HSO_INFO_CRC_BUG)
		fix_crc_bug(urb, odev->in_endp->wMaxPacketSize);

	/* do we even have a packet? */
	if (urb->actual_length) {
		/* Handle the IP stream, add header and push it onto network
		 * stack if the packet is complete. */
		spin_lock_irqsave(&odev->net_lock, flags);
		packetizeRx(odev, urb->transfer_buffer, urb->actual_length,
			    (urb->transfer_buffer_length >
			     urb->actual_length) ? 1 : 0);
		spin_unlock_irqrestore(&odev->net_lock, flags);
	}

	/* We are done with this URB, resubmit it. Prep the USB to wait for
	 * another frame. Reuse same as received.
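	 * (The resubmit below uses GFP_ATOMIC: URB completion handlers run
	 * in atomic context, where sleeping allocations are not allowed.)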
	 */
	usb_fill_bulk_urb(urb, odev->parent->usb,
			  usb_rcvbulkpipe(odev->parent->usb,
					  odev->in_endp->
					  bEndpointAddress & 0x7F),
			  urb->transfer_buffer, MUX_BULK_RX_BUF_SIZE,
			  read_bulk_callback, odev);

	/* Give this to the USB subsystem so it can tell us when more data
	 * arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result)
		dev_warn(&odev->parent->interface->dev,
			 "%s failed submit mux_bulk_rx_urb %d\n", __func__,
			 result);
}

/* Serial driver functions */

static void hso_init_termios(struct ktermios *termios)
{
	/*
	 * The default requirements for this device are:
	 */
	termios->c_iflag &=
		~(IGNBRK	/* disable ignore break */
		| BRKINT	/* disable break causes interrupt */
		| PARMRK	/* disable mark parity errors */
		| ISTRIP	/* disable clear high bit of input characters */
		| INLCR		/* disable translate NL to CR */
		| IGNCR		/* disable ignore CR */
		| ICRNL		/* disable translate CR to NL */
		| IXON);	/* disable XON/XOFF flow control */

	/* disable postprocess output characters */
	termios->c_oflag &= ~OPOST;

	termios->c_lflag &=
		~(ECHO		/* disable echo input characters */
		| ECHONL	/* disable echo new line */
		| ICANON	/* disable erase, kill, werase, and rprnt special characters */
		| ISIG		/* disable interrupt, quit, and suspend special characters */
		| IEXTEN);	/* disable non-POSIX special characters */

	termios->c_cflag &=
		~(CSIZE		/* no size */
		| PARENB	/* disable parity bit */
		| CBAUD		/* clear current baud rate */
		| CBAUDEX);	/* clear current baud rate */

	termios->c_cflag |= CS8;	/* character size 8 bits */

	/* baud rate 115200 */
	tty_termios_encode_baud_rate(termios, 115200, 115200);
}

static void _hso_serial_set_termios(struct tty_struct *tty)
{
	struct hso_serial *serial = tty->driver_data;

	if (!serial) {
		pr_err("%s: no tty structures\n", __func__);
		return;
	}

	hso_dbg(0x8, "port %d\n", serial->minor);

	/*
	 * Fix up unsupported bits
	 */
	tty->termios.c_iflag &= ~IXON;	/* disable XON/XOFF flow control */

	tty->termios.c_cflag &=
		~(CSIZE		/* no size */
		| PARENB	/* disable parity bit */
		| CBAUD		/* clear current baud rate */
		| CBAUDEX);	/* clear current baud rate */
	tty->termios.c_cflag |= CS8;	/* character size 8 bits */

	/* baud rate 115200 */
	tty_encode_baud_rate(tty, 115200, 115200);
}

static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial,
				     struct urb *urb)
{
	int result;

	/* We are done with this URB, resubmit it. Prep the USB to wait for
	 * another frame */
	usb_fill_bulk_urb(urb, serial->parent->usb,
			  usb_rcvbulkpipe(serial->parent->usb,
					  serial->in_endp->
					  bEndpointAddress & 0x7F),
			  urb->transfer_buffer, serial->rx_data_length,
			  hso_std_serial_read_bulk_callback, serial);
	/* Give this to the USB subsystem so it can tell us when more data
	 * arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
			__func__, result);
	}
}

static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
{
	int count;
	struct urb *curr_urb;

	while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
		curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
		count = put_rxbuf_data(curr_urb, serial);
		if (count == -1)
			return;
		if (count == 0) {
			serial->curr_rx_urb_idx++;
			if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
				serial->curr_rx_urb_idx = 0;
			hso_resubmit_rx_bulk_urb(serial, curr_urb);
		}
	}
}

static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
{
	int count = 0;
	struct urb *urb;

	urb = serial->rx_urb[0];
	if (serial->port.count > 0) {
		count = put_rxbuf_data(urb, serial);
		if (count == -1)
			return;
	}
	/* Reissue a read as long as we receive data.
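	 * (RX_PENDING records that the shared interrupt signalled new data
	 * while a read was still outstanding, so another control read is
	 * issued right away; otherwise the port falls back to RX_IDLE.)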
*/ if (count == 0 && ((urb->actual_length != 0) || (serial->rx_state == RX_PENDING))) { serial->rx_state = RX_SENT; hso_mux_serial_read(serial); } else serial->rx_state = RX_IDLE; } /* read callback for Diag and CS port */ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; unsigned long flags; hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status); /* sanity check */ if (!serial) { hso_dbg(0x1, "serial == NULL\n"); return; } if (status) { handle_usb_error(status, __func__, serial->parent); return; } hso_dbg(0x1, "Actual length = %d\n", urb->actual_length); DUMP1(urb->transfer_buffer, urb->actual_length); /* Anyone listening? */ if (serial->port.count == 0) return; if (serial->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); /* Valid data, handle RX data */ spin_lock_irqsave(&serial->serial_lock, flags); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } /* * This needs to be a tasklet otherwise we will * end up recursively calling this function. */ static void hso_unthrottle_tasklet(struct tasklet_struct *t) { struct hso_serial *serial = from_tasklet(serial, t, unthrottle_tasklet); unsigned long flags; spin_lock_irqsave(&serial->serial_lock, flags); if ((serial->parent->port_spec & HSO_INTF_MUX)) put_rxbuf_data_and_resubmit_ctrl_urb(serial); else put_rxbuf_data_and_resubmit_bulk_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } static void hso_unthrottle(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; tasklet_hi_schedule(&serial->unthrottle_tasklet); } /* open the requested serial port */ static int hso_serial_open(struct tty_struct *tty, struct file *filp) { struct hso_serial *serial = get_serial_by_index(tty->index); int result; /* sanity check */ if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) { WARN_ON(1); tty->driver_data = NULL; hso_dbg(0x1, "Failed to open port\n"); return -ENODEV; } mutex_lock(&serial->parent->mutex); result = usb_autopm_get_interface(serial->parent->interface); if (result < 0) goto err_out; hso_dbg(0x1, "Opening %d\n", serial->minor); /* setup */ tty->driver_data = serial; tty_port_tty_set(&serial->port, tty); /* check for port already opened, if not set the termios */ serial->port.count++; if (serial->port.count == 1) { serial->rx_state = RX_IDLE; /* Force default termio settings */ _hso_serial_set_termios(tty); tasklet_setup(&serial->unthrottle_tasklet, hso_unthrottle_tasklet); result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); serial->port.count--; } else { kref_get(&serial->parent->ref); } } else { hso_dbg(0x1, "Port was already open\n"); } usb_autopm_put_interface(serial->parent->interface); /* done */ if (result) hso_serial_tiocmset(tty, TIOCM_RTS | TIOCM_DTR, 0); err_out: mutex_unlock(&serial->parent->mutex); return result; } /* close the requested serial port */ static void hso_serial_close(struct tty_struct *tty, struct file *filp) { struct hso_serial *serial = tty->driver_data; u8 usb_gone; hso_dbg(0x1, "Closing serial port\n"); /* Open failed, no close cleanup required */ if (serial == NULL) return; mutex_lock(&serial->parent->mutex); usb_gone = serial->parent->usb_gone; if (!usb_gone) usb_autopm_get_interface(serial->parent->interface); /* reset the rts and dtr */ /* do the actual close */ 
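	/* The port is reference counted: only the last close (count reaching
	 * zero) detaches the tty, stops the serial device and kills the
	 * unthrottle tasklet. */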
	serial->port.count--;
	if (serial->port.count <= 0) {
		serial->port.count = 0;
		tty_port_tty_set(&serial->port, NULL);
		if (!usb_gone)
			hso_stop_serial_device(serial->parent);
		tasklet_kill(&serial->unthrottle_tasklet);
	}

	if (!usb_gone)
		usb_autopm_put_interface(serial->parent->interface);

	mutex_unlock(&serial->parent->mutex);
}

/* write data to the requested serial port */
static ssize_t hso_serial_write(struct tty_struct *tty, const u8 *buf,
				size_t count)
{
	struct hso_serial *serial = tty->driver_data;
	unsigned long flags;

	/* sanity check */
	if (serial == NULL) {
		pr_err("%s: serial is NULL\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&serial->serial_lock, flags);
	count = min_t(size_t, serial->tx_data_length - serial->tx_buffer_count,
		      count);
	memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, count);
	serial->tx_buffer_count += count;
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	hso_kick_transmit(serial);
	/* done */
	return count;
}

/* how much room is there for writing */
static unsigned int hso_serial_write_room(struct tty_struct *tty)
{
	struct hso_serial *serial = tty->driver_data;
	unsigned int room;
	unsigned long flags;

	spin_lock_irqsave(&serial->serial_lock, flags);
	room = serial->tx_data_length - serial->tx_buffer_count;
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	/* return free room */
	return room;
}

static void hso_serial_cleanup(struct tty_struct *tty)
{
	struct hso_serial *serial = tty->driver_data;

	if (!serial)
		return;

	kref_put(&serial->parent->ref, hso_serial_ref_free);
}

/* set up the termios */
static void hso_serial_set_termios(struct tty_struct *tty,
				   const struct ktermios *old)
{
	struct hso_serial *serial = tty->driver_data;
	unsigned long flags;

	if (old)
		hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
			(unsigned int)tty->termios.c_cflag,
			(unsigned int)old->c_cflag);

	/* the actual setup */
	spin_lock_irqsave(&serial->serial_lock, flags);
	if (serial->port.count)
		_hso_serial_set_termios(tty);
	else
		tty->termios = *old;
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	/* done */
}

/* how many characters in the buffer */
static unsigned int hso_serial_chars_in_buffer(struct tty_struct *tty)
{
	struct hso_serial *serial = tty->driver_data;
	unsigned long flags;
	unsigned int chars;

	/* sanity check */
	if (serial == NULL)
		return 0;

	spin_lock_irqsave(&serial->serial_lock, flags);
	chars = serial->tx_buffer_count;
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return chars;
}

static int tiocmget_submit_urb(struct hso_serial *serial,
			       struct hso_tiocmget *tiocmget,
			       struct usb_device *usb)
{
	int result;

	if (serial->parent->usb_gone)
		return -ENODEV;
	usb_fill_int_urb(tiocmget->urb, usb,
			 usb_rcvintpipe(usb,
					tiocmget->endp->
					bEndpointAddress & 0x7F),
			 tiocmget->serial_state_notification,
			 sizeof(struct hso_serial_state_notification),
			 tiocmget_intr_callback, serial,
			 tiocmget->endp->bInterval);
	result = usb_submit_urb(tiocmget->urb, GFP_ATOMIC);
	if (result) {
		dev_warn(&usb->dev, "%s usb_submit_urb failed %d\n", __func__,
			 result);
	}
	return result;
}

static void tiocmget_intr_callback(struct urb *urb)
{
	struct hso_serial *serial = urb->context;
	struct hso_tiocmget *tiocmget;
	int status = urb->status;
	u16 UART_state_bitmap, prev_UART_state_bitmap;
	struct uart_icount *icount;
	struct hso_serial_state_notification *serial_state_notification;
	struct usb_device *usb;
	struct usb_interface *interface;
	int if_num;

	/* Sanity checks */
	if (!serial)
		return;
	if (status) {
		handle_usb_error(status, __func__, serial->parent);
		return;
	}

	/* tiocmget is only supported on HSO_PORT_MODEM
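	 * (the BUG_ON below enforces this); the other port types carry no
	 * modem status lines to report.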
*/ tiocmget = serial->tiocmget; if (!tiocmget) return; BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); usb = serial->parent->usb; interface = serial->parent->interface; if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* wIndex should be the USB interface number of the port to which the * notification applies, which should always be the Modem port. */ serial_state_notification = tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || serial_state_notification->bNotification != B_NOTIFICATION || le16_to_cpu(serial_state_notification->wValue) != W_VALUE || le16_to_cpu(serial_state_notification->wIndex) != if_num || le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { dev_warn(&usb->dev, "hso received invalid serial state notification\n"); DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { unsigned long flags; UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; spin_lock_irqsave(&serial->serial_lock, flags); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; if ((UART_state_bitmap & B_PARITY) != (prev_UART_state_bitmap & B_PARITY)) icount->parity++; if ((UART_state_bitmap & B_FRAMING) != (prev_UART_state_bitmap & B_FRAMING)) icount->frame++; if ((UART_state_bitmap & B_RING_SIGNAL) && !(prev_UART_state_bitmap & B_RING_SIGNAL)) icount->rng++; if ((UART_state_bitmap & B_BREAK) != (prev_UART_state_bitmap & B_BREAK)) icount->brk++; if ((UART_state_bitmap & B_TX_CARRIER) != (prev_UART_state_bitmap & B_TX_CARRIER)) icount->dsr++; if ((UART_state_bitmap & B_RX_CARRIER) != (prev_UART_state_bitmap & B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; spin_unlock_irqrestore(&serial->serial_lock, flags); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } memset(serial_state_notification, 0, sizeof(struct hso_serial_state_notification)); tiocmget_submit_urb(serial, tiocmget, serial->parent->usb); } /* * next few functions largely stolen from drivers/serial/serial_core.c */ /* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was */ static int hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) { DECLARE_WAITQUEUE(wait, current); struct uart_icount cprev, cnow; struct hso_tiocmget *tiocmget; int ret; tiocmget = serial->tiocmget; if (!tiocmget) return -ENOENT; /* * note the counters on entry */ spin_lock_irq(&serial->serial_lock); memcpy(&cprev, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); add_wait_queue(&tiocmget->waitq, &wait); for (;;) { spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); set_current_state(TASK_INTERRUPTIBLE); if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd))) { ret = 0; break; } schedule(); /* see if a signal did it */ if (signal_pending(current)) { ret = -ERESTARTSYS; break; } cprev = cnow; } __set_current_state(TASK_RUNNING); remove_wait_queue(&tiocmget->waitq, &wait); return ret; } /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) * Return: 
write counters to the user passed counter struct * NB: both 1->0 and 0->1 transitions are counted except for * RI where only 0->1 is counted. */ static int hso_get_count(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct uart_icount cnow; struct hso_serial *serial = tty->driver_data; struct hso_tiocmget *tiocmget = serial->tiocmget; memset(icount, 0, sizeof(struct serial_icounter_struct)); if (!tiocmget) return -ENOENT; spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } static int hso_serial_tiocmget(struct tty_struct *tty) { int retval; struct hso_serial *serial = tty->driver_data; struct hso_tiocmget *tiocmget; u16 UART_state_bitmap; /* sanity check */ if (!serial) { hso_dbg(0x1, "no tty structures\n"); return -EINVAL; } spin_lock_irq(&serial->serial_lock); retval = ((serial->rts_state) ? TIOCM_RTS : 0) | ((serial->dtr_state) ? TIOCM_DTR : 0); tiocmget = serial->tiocmget; if (tiocmget) { UART_state_bitmap = le16_to_cpu( tiocmget->prev_UART_state_bitmap); if (UART_state_bitmap & B_RING_SIGNAL) retval |= TIOCM_RNG; if (UART_state_bitmap & B_RX_CARRIER) retval |= TIOCM_CD; if (UART_state_bitmap & B_TX_CARRIER) retval |= TIOCM_DSR; } spin_unlock_irq(&serial->serial_lock); return retval; } static int hso_serial_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { int val = 0; unsigned long flags; int if_num; struct hso_serial *serial = tty->driver_data; struct usb_interface *interface; /* sanity check */ if (!serial) { hso_dbg(0x1, "no tty structures\n"); return -EINVAL; } if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) return -EINVAL; interface = serial->parent->interface; if_num = interface->cur_altsetting->desc.bInterfaceNumber; spin_lock_irqsave(&serial->serial_lock, flags); if (set & TIOCM_RTS) serial->rts_state = 1; if (set & TIOCM_DTR) serial->dtr_state = 1; if (clear & TIOCM_RTS) serial->rts_state = 0; if (clear & TIOCM_DTR) serial->dtr_state = 0; if (serial->dtr_state) val |= 0x01; if (serial->rts_state) val |= 0x02; spin_unlock_irqrestore(&serial->serial_lock, flags); return usb_control_msg(serial->parent->usb, usb_sndctrlpipe(serial->parent->usb, 0), 0x22, 0x21, val, if_num, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int hso_serial_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct hso_serial *serial = tty->driver_data; int ret = 0; hso_dbg(0x8, "IOCTL cmd: %d, arg: %ld\n", cmd, arg); if (!serial) return -ENODEV; switch (cmd) { case TIOCMIWAIT: ret = hso_wait_modem_status(serial, arg); break; default: ret = -ENOIOCTLCMD; break; } return ret; } /* starts a transmit */ static void hso_kick_transmit(struct hso_serial *serial) { unsigned long flags; int res; spin_lock_irqsave(&serial->serial_lock, flags); if (!serial->tx_buffer_count) goto out; if (serial->tx_urb_used) goto out; /* Wakeup USB interface if necessary */ if (hso_get_activity(serial->parent) == -EAGAIN) goto out; /* Switch pointers around to avoid memcpy */ swap(serial->tx_buffer, serial->tx_data); serial->tx_data_count = serial->tx_buffer_count; serial->tx_buffer_count = 0; /* If serial->tx_data is set, it means we switched buffers */ if (serial->tx_data 
&& serial->write_data) { res = serial->write_data(serial); if (res >= 0) serial->tx_urb_used = 1; } out: spin_unlock_irqrestore(&serial->serial_lock, flags); } /* make a request (for reading and writing data to muxed serial port) */ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port, struct urb *ctrl_urb, struct usb_ctrlrequest *ctrl_req, u8 *ctrl_urb_data, u32 size) { int result; int pipe; /* Sanity check */ if (!serial || !ctrl_urb || !ctrl_req) { pr_err("%s: Wrong arguments\n", __func__); return -EINVAL; } /* initialize */ ctrl_req->wValue = 0; ctrl_req->wIndex = cpu_to_le16(hso_port_to_mux(port)); ctrl_req->wLength = cpu_to_le16(size); if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) { /* Reading command */ ctrl_req->bRequestType = USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE; ctrl_req->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; pipe = usb_rcvctrlpipe(serial->parent->usb, 0); } else { /* Writing command */ ctrl_req->bRequestType = USB_DIR_OUT | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE; ctrl_req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; pipe = usb_sndctrlpipe(serial->parent->usb, 0); } /* syslog */ hso_dbg(0x2, "%s command (%02x) len: %d, port: %d\n", type == USB_CDC_GET_ENCAPSULATED_RESPONSE ? "Read" : "Write", ctrl_req->bRequestType, ctrl_req->wLength, port); /* Load ctrl urb */ ctrl_urb->transfer_flags = 0; usb_fill_control_urb(ctrl_urb, serial->parent->usb, pipe, (u8 *) ctrl_req, ctrl_urb_data, size, ctrl_callback, serial); /* Send it on merry way */ result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); if (result) { dev_err(&ctrl_urb->dev->dev, "%s failed submit ctrl_urb %d type %d\n", __func__, result, type); return result; } /* done */ return size; } /* called by intr_callback when read occurs */ static int hso_mux_serial_read(struct hso_serial *serial) { if (!serial) return -EINVAL; /* clean data */ memset(serial->rx_data[0], 0, CTRL_URB_RX_SIZE); /* make the request */ if (serial->num_rx_urbs != 1) { dev_err(&serial->parent->interface->dev, "ERROR: mux'd reads with multiple buffers " "not possible\n"); return 0; } return mux_device_request(serial, USB_CDC_GET_ENCAPSULATED_RESPONSE, serial->parent->port_spec & HSO_PORT_MASK, serial->rx_urb[0], &serial->ctrl_req_rx, serial->rx_data[0], serial->rx_data_length); } /* used for muxed serial port callback (muxed serial read) */ static void intr_callback(struct urb *urb) { struct hso_shared_int *shared_int = urb->context; struct hso_serial *serial; unsigned char *port_req; int status = urb->status; unsigned long flags; int i; usb_mark_last_busy(urb->dev); /* sanity check */ if (!shared_int) return; /* status check */ if (status) { handle_usb_error(status, __func__, NULL); return; } hso_dbg(0x8, "--- Got intr callback 0x%02X ---\n", status); /* what request? 
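* (The interrupt transfer buffer holds a bitmap with one bit per muxed
* channel: a set bit i means muxed port i needs servicing, as decoded in
* the loop below.)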
*/ port_req = urb->transfer_buffer; hso_dbg(0x8, "port_req = 0x%.2X\n", *port_req); /* loop over all muxed ports to find the one sending this */ for (i = 0; i < 8; i++) { /* max 8 channels on MUX */ if (*port_req & (1 << i)) { serial = get_serial_by_shared_int_and_type(shared_int, (1 << i)); if (serial != NULL) { hso_dbg(0x1, "Pending read interrupt on port %d\n", i); spin_lock_irqsave(&serial->serial_lock, flags); if (serial->rx_state == RX_IDLE && serial->port.count > 0) { /* Setup and send a ctrl req read on * port i */ if (!serial->rx_urb_filled[0]) { serial->rx_state = RX_SENT; hso_mux_serial_read(serial); } else serial->rx_state = RX_PENDING; } else { hso_dbg(0x1, "Already a read pending on port %d or port not open\n", i); } spin_unlock_irqrestore(&serial->serial_lock, flags); } } } /* Resubmit interrupt urb */ hso_mux_submit_intr_urb(shared_int, urb->dev, GFP_ATOMIC); } /* called for writing to muxed serial port */ static int hso_mux_serial_write_data(struct hso_serial *serial) { if (NULL == serial) return -EINVAL; return mux_device_request(serial, USB_CDC_SEND_ENCAPSULATED_COMMAND, serial->parent->port_spec & HSO_PORT_MASK, serial->tx_urb, &serial->ctrl_req_tx, serial->tx_data, serial->tx_data_count); } /* write callback for Diag and CS port */ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; unsigned long flags; /* sanity check */ if (!serial) { hso_dbg(0x1, "serial == NULL\n"); return; } spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; } hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); hso_kick_transmit(serial); hso_dbg(0x1, "\n"); } /* called for writing diag or CS serial port */ static int hso_std_serial_write_data(struct hso_serial *serial) { int count = serial->tx_data_count; int result; usb_fill_bulk_urb(serial->tx_urb, serial->parent->usb, usb_sndbulkpipe(serial->parent->usb, serial->out_endp-> bEndpointAddress & 0x7F), serial->tx_data, serial->tx_data_count, hso_std_serial_write_bulk_callback, serial); result = usb_submit_urb(serial->tx_urb, GFP_ATOMIC); if (result) { dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); return result; } return count; } /* callback after read or write on muxed serial port */ static void ctrl_callback(struct urb *urb) { struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; unsigned long flags; /* sanity check */ if (!serial) return; spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; } /* what request? 
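* (Read and write completions share this callback; they are told apart
* below by the direction bit of the setup packet saved in the URB.)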
*/ req = (struct usb_ctrlrequest *)(urb->setup_packet); hso_dbg(0x8, "--- Got muxed ctrl callback 0x%02X ---\n", status); hso_dbg(0x8, "Actual length of urb = %d\n", urb->actual_length); DUMP1(urb->transfer_buffer, urb->actual_length); if (req->bRequestType == (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; spin_lock_irqsave(&serial->serial_lock, flags); put_rxbuf_data_and_resubmit_ctrl_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } else { hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); /* response to a write command */ hso_kick_transmit(serial); } } /* handle RX data for serial port */ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) { struct tty_struct *tty; int count; /* Sanity check */ if (urb == NULL || serial == NULL) { hso_dbg(0x1, "serial = NULL\n"); return -2; } tty = tty_port_tty_get(&serial->port); if (tty && tty_throttled(tty)) { tty_kref_put(tty); return -1; } /* Push data to tty */ hso_dbg(0x1, "data to push to tty\n"); count = tty_buffer_request_room(&serial->port, urb->actual_length); if (count >= urb->actual_length) { tty_insert_flip_string(&serial->port, urb->transfer_buffer, urb->actual_length); tty_flip_buffer_push(&serial->port); } else { dev_warn(&serial->parent->usb->dev, "dropping data, %d bytes lost\n", urb->actual_length); } tty_kref_put(tty); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; return 0; } /* Base driver functions */ static void hso_log_port(struct hso_device *hso_dev) { char *port_type; char port_dev[20]; switch (hso_dev->port_spec & HSO_PORT_MASK) { case HSO_PORT_CONTROL: port_type = "Control"; break; case HSO_PORT_APP: port_type = "Application"; break; case HSO_PORT_GPS: port_type = "GPS"; break; case HSO_PORT_GPS_CONTROL: port_type = "GPS control"; break; case HSO_PORT_APP2: port_type = "Application2"; break; case HSO_PORT_PCSC: port_type = "PCSC"; break; case HSO_PORT_DIAG: port_type = "Diagnostic"; break; case HSO_PORT_DIAG2: port_type = "Diagnostic2"; break; case HSO_PORT_MODEM: port_type = "Modem"; break; case HSO_PORT_NETWORK: port_type = "Network"; break; default: port_type = "Unknown"; break; } if ((hso_dev->port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { sprintf(port_dev, "%s", dev2net(hso_dev)->net->name); } else sprintf(port_dev, "/dev/%s%d", tty_filename, dev2ser(hso_dev)->minor); dev_dbg(&hso_dev->interface->dev, "HSO: Found %s port %s\n", port_type, port_dev); } static int hso_start_net_device(struct hso_device *hso_dev) { int i, result = 0; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return -ENODEV; /* send URBs for all read buffers */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { /* Prep a receive URB */ usb_fill_bulk_urb(hso_net->mux_bulk_rx_urb_pool[i], hso_dev->usb, usb_rcvbulkpipe(hso_dev->usb, hso_net->in_endp-> bEndpointAddress & 0x7F), hso_net->mux_bulk_rx_buf_pool[i], MUX_BULK_RX_BUF_SIZE, read_bulk_callback, hso_net); /* Put it out there so the device can send us stuff */ result = usb_submit_urb(hso_net->mux_bulk_rx_urb_pool[i], GFP_NOIO); if (result) dev_warn(&hso_dev->usb->dev, "%s failed mux_bulk_rx_urb[%d] %d\n", __func__, i, result); } return result; } static int hso_stop_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return -ENODEV; for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { if (hso_net->mux_bulk_rx_urb_pool[i]) usb_kill_urb(hso_net->mux_bulk_rx_urb_pool[i]); } if (hso_net->mux_bulk_tx_urb) 
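/* kill (but do not free) the TX urb here; the urb and its buffer are
 * released later in hso_free_net_device() */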
usb_kill_urb(hso_net->mux_bulk_tx_urb); return 0; } static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags) { int i, result = 0; struct hso_serial *serial = dev2ser(hso_dev); if (!serial) return -ENODEV; /* If it is not the MUX port fill in and submit a bulk urb (already * allocated in hso_serial_start) */ if (!(serial->parent->port_spec & HSO_INTF_MUX)) { for (i = 0; i < serial->num_rx_urbs; i++) { usb_fill_bulk_urb(serial->rx_urb[i], serial->parent->usb, usb_rcvbulkpipe(serial->parent->usb, serial->in_endp-> bEndpointAddress & 0x7F), serial->rx_data[i], serial->rx_data_length, hso_std_serial_read_bulk_callback, serial); result = usb_submit_urb(serial->rx_urb[i], flags); if (result) { dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); break; } } } else { mutex_lock(&serial->shared_int->shared_int_lock); if (!serial->shared_int->use_count) { result = hso_mux_submit_intr_urb(serial->shared_int, hso_dev->usb, flags); } serial->shared_int->use_count++; mutex_unlock(&serial->shared_int->shared_int_lock); } if (serial->tiocmget) tiocmget_submit_urb(serial, serial->tiocmget, serial->parent->usb); return result; } static int hso_stop_serial_device(struct hso_device *hso_dev) { int i; struct hso_serial *serial = dev2ser(hso_dev); struct hso_tiocmget *tiocmget; if (!serial) return -ENODEV; for (i = 0; i < serial->num_rx_urbs; i++) { if (serial->rx_urb[i]) { usb_kill_urb(serial->rx_urb[i]); serial->rx_urb_filled[i] = 0; } } serial->curr_rx_urb_idx = 0; if (serial->tx_urb) usb_kill_urb(serial->tx_urb); if (serial->shared_int) { mutex_lock(&serial->shared_int->shared_int_lock); if (serial->shared_int->use_count && (--serial->shared_int->use_count == 0)) { struct urb *urb; urb = serial->shared_int->shared_intr_urb; if (urb) usb_kill_urb(urb); } mutex_unlock(&serial->shared_int->shared_int_lock); } tiocmget = serial->tiocmget; if (tiocmget) { wake_up_interruptible(&tiocmget->waitq); usb_kill_urb(tiocmget->urb); } return 0; } static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); release_minor(serial); } static void hso_serial_common_free(struct hso_serial *serial) { int i; for (i = 0; i < serial->num_rx_urbs; i++) { /* unlink and free RX URB */ usb_free_urb(serial->rx_urb[i]); /* free the RX buffer */ kfree(serial->rx_data[i]); } /* unlink and free TX URB */ usb_free_urb(serial->tx_urb); kfree(serial->tx_buffer); kfree(serial->tx_data); tty_port_destroy(&serial->port); } static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { int i; tty_port_init(&serial->port); if (obtain_minor(serial)) goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); if (IS_ERR(serial->parent->dev)) { release_minor(serial); goto exit2; } serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; /* RX, allocate urb and initialize */ /* prepare our RX buffer */ serial->rx_data_length = rx_size; for (i = 0; i < serial->num_rx_urbs; i++) { serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); if (!serial->rx_urb[i]) goto exit; serial->rx_urb[i]->transfer_buffer = NULL; serial->rx_urb[i]->transfer_buffer_length = 0; serial->rx_data[i] = kzalloc(serial->rx_data_length, GFP_KERNEL); if (!serial->rx_data[i]) goto exit; } /* TX, allocate urb and initialize */ serial->tx_urb = usb_alloc_urb(0, 
GFP_KERNEL); if (!serial->tx_urb) goto exit; serial->tx_urb->transfer_buffer = NULL; serial->tx_urb->transfer_buffer_length = 0; /* prepare our TX buffer */ serial->tx_data_count = 0; serial->tx_buffer_count = 0; serial->tx_data_length = tx_size; serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); if (!serial->tx_data) goto exit; serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); if (!serial->tx_buffer) goto exit; return 0; exit: hso_serial_tty_unregister(serial); exit2: hso_serial_common_free(serial); return -1; } /* Creates a general hso device */ static struct hso_device *hso_create_device(struct usb_interface *intf, int port_spec) { struct hso_device *hso_dev; hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL); if (!hso_dev) return NULL; hso_dev->port_spec = port_spec; hso_dev->usb = interface_to_usbdev(intf); hso_dev->interface = intf; kref_init(&hso_dev->ref); mutex_init(&hso_dev->mutex); INIT_WORK(&hso_dev->async_get_intf, async_get_intf); INIT_WORK(&hso_dev->async_put_intf, async_put_intf); return hso_dev; } /* Removes a network device in the network device table */ static int remove_net_device(struct hso_device *hso_dev) { int i; for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] == hso_dev) { network_table[i] = NULL; break; } } if (i == HSO_MAX_NET_DEVICES) return -1; return 0; } /* Frees our network device */ static void hso_free_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return; remove_net_device(hso_net->parent); if (hso_net->net) unregister_netdev(hso_net->net); /* start freeing */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); kfree(hso_net->mux_bulk_rx_buf_pool[i]); hso_net->mux_bulk_rx_buf_pool[i] = NULL; } usb_free_urb(hso_net->mux_bulk_tx_urb); kfree(hso_net->mux_bulk_tx_buf); hso_net->mux_bulk_tx_buf = NULL; if (hso_net->net) free_netdev(hso_net->net); kfree(hso_dev); } static const struct net_device_ops hso_netdev_ops = { .ndo_open = hso_net_open, .ndo_stop = hso_net_close, .ndo_start_xmit = hso_net_start_xmit, .ndo_tx_timeout = hso_net_tx_timeout, }; /* initialize the network interface */ static void hso_net_init(struct net_device *net) { struct hso_net *hso_net = netdev_priv(net); hso_dbg(0x1, "sizeof hso_net is %zu\n", sizeof(*hso_net)); /* fill in the other fields */ net->netdev_ops = &hso_netdev_ops; net->watchdog_timeo = HSO_NET_TX_TIMEOUT; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; net->type = ARPHRD_NONE; net->mtu = DEFAULT_MTU - 14; net->tx_queue_len = 10; net->ethtool_ops = &ops; /* and initialize the semaphore */ spin_lock_init(&hso_net->net_lock); } /* Adds a network device in the network device table */ static int add_net_device(struct hso_device *hso_dev) { int i; for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] == NULL) { network_table[i] = hso_dev; break; } } if (i == HSO_MAX_NET_DEVICES) return -1; return 0; } static int hso_rfkill_set_block(void *data, bool blocked) { struct hso_device *hso_dev = data; int enabled = !blocked; int rv; mutex_lock(&hso_dev->mutex); if (hso_dev->usb_gone) rv = 0; else rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0), enabled ? 
0x82 : 0x81, 0x40, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); mutex_unlock(&hso_dev->mutex); return rv; } static const struct rfkill_ops hso_rfkill_ops = { .set_block = hso_rfkill_set_block, }; /* Creates and sets up everything for rfkill */ static void hso_create_rfkill(struct hso_device *hso_dev, struct usb_interface *interface) { struct hso_net *hso_net = dev2net(hso_dev); struct device *dev = &hso_net->net->dev; static u32 rfkill_counter; snprintf(hso_net->name, sizeof(hso_net->name), "hso-%d", rfkill_counter++); hso_net->rfkill = rfkill_alloc(hso_net->name, &interface_to_usbdev(interface)->dev, RFKILL_TYPE_WWAN, &hso_rfkill_ops, hso_dev); if (!hso_net->rfkill) return; if (rfkill_register(hso_net->rfkill) < 0) { rfkill_destroy(hso_net->rfkill); hso_net->rfkill = NULL; dev_err(dev, "%s - Failed to register rfkill\n", __func__); return; } } static const struct device_type hso_type = { .name = "wwan", }; /* Creates our network device */ static struct hso_device *hso_create_net_device(struct usb_interface *interface, int port_spec) { int result, i; struct net_device *net; struct hso_net *hso_net; struct hso_device *hso_dev; hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; /* allocate our network device, then we can put in our private data */ /* call hso_net_init to do the basic initialization */ net = alloc_netdev(sizeof(struct hso_net), "hso%d", NET_NAME_UNKNOWN, hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); goto err_hso_dev; } hso_net = netdev_priv(net); hso_dev->port_data.dev_net = hso_net; hso_net->net = net; hso_net->parent = hso_dev; hso_net->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_IN); if (!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); goto err_net; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); goto err_net; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); /* start allocating */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) goto err_mux_bulk_rx; hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) goto err_mux_bulk_rx; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) goto err_mux_bulk_rx; hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) goto err_free_tx_urb; result = add_net_device(hso_dev); if (result) { dev_err(&interface->dev, "Failed to add net device\n"); goto err_free_tx_buf; } /* registering our net device */ result = register_netdev(net); if (result) { dev_err(&interface->dev, "Failed to register device\n"); goto err_rmv_ndev; } hso_log_port(hso_dev); hso_create_rfkill(hso_dev, interface); return hso_dev; err_rmv_ndev: remove_net_device(hso_dev); err_free_tx_buf: kfree(hso_net->mux_bulk_tx_buf); err_free_tx_urb: usb_free_urb(hso_net->mux_bulk_tx_urb); err_mux_bulk_rx: for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); kfree(hso_net->mux_bulk_rx_buf_pool[i]); } err_net: free_netdev(net); err_hso_dev: kfree(hso_dev); return NULL; } static void hso_free_tiomget(struct hso_serial *serial) { struct hso_tiocmget *tiocmget; if (!serial) return; tiocmget = serial->tiocmget; if (tiocmget) { 
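/* drop the interrupt urb and the cached serial-state notification
 * before freeing the tiocmget structure itself */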
usb_free_urb(tiocmget->urb); tiocmget->urb = NULL; serial->tiocmget = NULL; kfree(tiocmget->serial_state_notification); tiocmget->serial_state_notification = NULL; kfree(tiocmget); } } /* Frees an AT channel ( goes for both mux and non-mux ) */ static void hso_free_serial_device(struct hso_device *hso_dev) { struct hso_serial *serial = dev2ser(hso_dev); if (!serial) return; hso_serial_common_free(serial); if (serial->shared_int) { mutex_lock(&serial->shared_int->shared_int_lock); if (--serial->shared_int->ref_count == 0) hso_free_shared_int(serial->shared_int); else mutex_unlock(&serial->shared_int->shared_int_lock); } hso_free_tiomget(serial); kfree(serial); kfree(hso_dev); } /* Creates a bulk AT channel */ static struct hso_device *hso_create_bulk_serial_device( struct usb_interface *interface, int port) { struct hso_device *hso_dev; struct hso_serial *serial; int num_urbs; struct hso_tiocmget *tiocmget; hso_dev = hso_create_device(interface, port); if (!hso_dev) return NULL; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) goto exit; serial->parent = hso_dev; hso_dev->port_data.dev_serial = serial; if ((port & HSO_PORT_MASK) == HSO_PORT_MODEM) { num_urbs = 2; serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget), GFP_KERNEL); if (!serial->tiocmget) goto exit; serial->tiocmget->serial_state_notification = kzalloc(sizeof(struct hso_serial_state_notification), GFP_KERNEL); if (!serial->tiocmget->serial_state_notification) goto exit; tiocmget = serial->tiocmget; tiocmget->endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT, USB_DIR_IN); if (!tiocmget->endp) { dev_err(&interface->dev, "Failed to find INT IN ep\n"); goto exit; } tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); if (!tiocmget->urb) goto exit; mutex_init(&tiocmget->mutex); init_waitqueue_head(&tiocmget->waitq); } else { num_urbs = 1; } if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE, BULK_URB_TX_SIZE)) goto exit; serial->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_IN); if (!serial->in_endp) { dev_err(&interface->dev, "Failed to find BULK IN ep\n"); goto exit2; } if (! 
(serial->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { dev_err(&interface->dev, "Failed to find BULK OUT ep\n"); goto exit2; } serial->write_data = hso_std_serial_write_data; /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; exit2: hso_serial_tty_unregister(serial); hso_serial_common_free(serial); exit: hso_free_tiomget(serial); kfree(serial); kfree(hso_dev); return NULL; } /* Creates a multiplexed AT channel */ static struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, int port, struct hso_shared_int *mux) { struct hso_device *hso_dev; struct hso_serial *serial; int port_spec; port_spec = HSO_INTF_MUX; port_spec &= ~HSO_PORT_MASK; port_spec |= hso_mux_to_port(port); if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NO_PORT) return NULL; hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) goto err_free_dev; hso_dev->port_data.dev_serial = serial; serial->parent = hso_dev; if (hso_serial_common_create (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE)) goto err_free_serial; serial->tx_data_length--; serial->write_data = hso_mux_serial_write_data; serial->shared_int = mux; mutex_lock(&serial->shared_int->shared_int_lock); serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; err_free_serial: kfree(serial); err_free_dev: kfree(hso_dev); return NULL; } static void hso_free_shared_int(struct hso_shared_int *mux) { usb_free_urb(mux->shared_intr_urb); kfree(mux->shared_intr_buf); mutex_unlock(&mux->shared_int_lock); kfree(mux); } static struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface) { struct hso_shared_int *mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) return NULL; mux->intr_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT, USB_DIR_IN); if (!mux->intr_endp) { dev_err(&interface->dev, "Can't find INT IN endpoint\n"); goto exit; } mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!mux->shared_intr_urb) goto exit; mux->shared_intr_buf = kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), GFP_KERNEL); if (!mux->shared_intr_buf) goto exit; mutex_init(&mux->shared_int_lock); return mux; exit: kfree(mux->shared_intr_buf); usb_free_urb(mux->shared_intr_urb); kfree(mux); return NULL; } /* Gets the port spec for a certain interface */ static int hso_get_config_data(struct usb_interface *interface) { struct usb_device *usbdev = interface_to_usbdev(interface); u8 *config_data = kmalloc(17, GFP_KERNEL); u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber; s32 result; if (!config_data) return -ENOMEM; if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x86, 0xC0, 0, 0, config_data, 17, USB_CTRL_SET_TIMEOUT) != 0x11) { kfree(config_data); return -EIO; } /* check if we have a valid interface */ if (if_num > 16) { kfree(config_data); return -EINVAL; } switch (config_data[if_num]) { case 0x0: result = 0; break; case 0x1: result = HSO_PORT_DIAG; break; case 0x2: result = HSO_PORT_GPS; break; case 0x3: result = HSO_PORT_GPS_CONTROL; break; case 0x4: result = HSO_PORT_APP; break; case 0x5: result = HSO_PORT_APP2; break; case 0x6: result = HSO_PORT_CONTROL; break; case 0x7: result = HSO_PORT_NETWORK; break; case 0x8: result = HSO_PORT_MODEM; break; case 0x9: result = HSO_PORT_MSD; break; case 0xa: result = HSO_PORT_PCSC; break; case 
0xb: result = HSO_PORT_VOICE; break; default: result = 0; } if (result) result |= HSO_INTF_BULK; if (config_data[16] & 0x1) result |= HSO_INFO_CRC_BUG; kfree(config_data); return result; } /* called once for each interface upon device insertion */ static int hso_probe(struct usb_interface *interface, const struct usb_device_id *id) { int mux, i, if_num, port_spec; unsigned char port_mask; struct hso_device *hso_dev = NULL; struct hso_shared_int *shared_int; struct hso_device *tmp_dev = NULL; if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { dev_err(&interface->dev, "Not our interface\n"); return -ENODEV; } if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* Get the interface/port specification from either driver_info or from * the device itself */ if (id->driver_info) { /* if_num is controlled by the device, driver_info is a 0 terminated * array. Make sure, the access is in bounds! */ for (i = 0; i <= if_num; ++i) if (((u32 *)(id->driver_info))[i] == 0) goto exit; port_spec = ((u32 *)(id->driver_info))[if_num]; } else { port_spec = hso_get_config_data(interface); if (port_spec < 0) goto exit; } /* Check if we need to switch to alt interfaces prior to port * configuration */ if (interface->num_altsetting > 1) usb_set_interface(interface_to_usbdev(interface), if_num, 1); interface->needs_remote_wakeup = 1; /* Allocate new hso device(s) */ switch (port_spec & HSO_INTF_MASK) { case HSO_INTF_MUX: if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { /* Create the network device */ if (!disable_net) { hso_dev = hso_create_net_device(interface, port_spec); if (!hso_dev) goto exit; tmp_dev = hso_dev; } } if (hso_get_mux_ports(interface, &port_mask)) /* TODO: de-allocate everything */ goto exit; shared_int = hso_create_shared_int(interface); if (!shared_int) goto exit; for (i = 1, mux = 0; i < 0x100; i = i << 1, mux++) { if (port_mask & i) { hso_dev = hso_create_mux_serial_device( interface, i, shared_int); if (!hso_dev) goto exit; } } if (tmp_dev) hso_dev = tmp_dev; break; case HSO_INTF_BULK: /* It's a regular bulk interface */ if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { if (!disable_net) hso_dev = hso_create_net_device(interface, port_spec); } else { hso_dev = hso_create_bulk_serial_device(interface, port_spec); } if (!hso_dev) goto exit; break; default: goto exit; } /* save our data pointer in this device */ usb_set_intfdata(interface, hso_dev); /* done */ return 0; exit: hso_free_interface(interface); return -ENODEV; } /* device removed, cleaning up */ static void hso_disconnect(struct usb_interface *interface) { hso_free_interface(interface); /* remove reference of our private data */ usb_set_intfdata(interface, NULL); } static void async_get_intf(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, async_get_intf); usb_autopm_get_interface(hso_dev->interface); } static void async_put_intf(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, async_put_intf); usb_autopm_put_interface(hso_dev->interface); } static int hso_get_activity(struct hso_device *hso_dev) { if (hso_dev->usb->state == USB_STATE_SUSPENDED) { if (!hso_dev->is_active) { hso_dev->is_active = 1; schedule_work(&hso_dev->async_get_intf); } } if (hso_dev->usb->state != USB_STATE_CONFIGURED) return -EAGAIN; usb_mark_last_busy(hso_dev->usb); return 0; } static int hso_put_activity(struct hso_device *hso_dev) { if (hso_dev->usb->state != USB_STATE_SUSPENDED) { if (hso_dev->is_active) { hso_dev->is_active = 0; 
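/* usb_autopm_put_interface() is deferred to a work item because this
 * path can be reached from URB completion (atomic) context */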
schedule_work(&hso_dev->async_put_intf); return -EAGAIN; } } hso_dev->is_active = 0; return 0; } /* called by kernel when we need to suspend device */ static int hso_suspend(struct usb_interface *iface, pm_message_t message) { int i, result; /* Stop all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { result = hso_stop_serial_device(serial_table[i]); if (result) goto out; } } /* Stop all network ports */ for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == iface)) { result = hso_stop_net_device(network_table[i]); if (result) goto out; } } out: return 0; } /* called by kernel when we need to resume device */ static int hso_resume(struct usb_interface *iface) { int i, result = 0; struct hso_net *hso_net; /* Start all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { if (dev2ser(serial_table[i])->port.count) { result = hso_start_serial_device(serial_table[i], GFP_NOIO); hso_kick_transmit(dev2ser(serial_table[i])); if (result) goto out; } } } /* Start all network ports */ for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == iface)) { hso_net = dev2net(network_table[i]); if (hso_net->flags & IFF_UP) { /* First transmit any lingering data, then restart the device. */ if (hso_net->skb_tx_buf) { dev_dbg(&iface->dev, "Transmitting" " lingering data\n"); hso_net_start_xmit(hso_net->skb_tx_buf, hso_net->net); hso_net->skb_tx_buf = NULL; } result = hso_start_net_device(network_table[i]); if (result) goto out; } } } out: return result; } static void hso_serial_ref_free(struct kref *ref) { struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); hso_free_serial_device(hso_dev); } static void hso_free_interface(struct usb_interface *interface) { struct hso_serial *serial; int i; for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == interface)) { serial = dev2ser(serial_table[i]); tty_port_tty_hangup(&serial->port, false); mutex_lock(&serial->parent->mutex); serial->parent->usb_gone = 1; mutex_unlock(&serial->parent->mutex); cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); kref_put(&serial->parent->ref, hso_serial_ref_free); } } for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == interface)) { struct rfkill *rfk = dev2net(network_table[i])->rfkill; /* hso_stop_net_device doesn't stop the net queue since * traffic needs to start it again when suspended */ netif_stop_queue(dev2net(network_table[i])->net); hso_stop_net_device(network_table[i]); cancel_work_sync(&network_table[i]->async_put_intf); cancel_work_sync(&network_table[i]->async_get_intf); if (rfk) { rfkill_unregister(rfk); rfkill_destroy(rfk); } hso_free_net_device(network_table[i]); } } } /* Helper functions */ /* Get the endpoint ! 
*/ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, int type, int dir) { int i; struct usb_host_interface *iface = intf->cur_altsetting; struct usb_endpoint_descriptor *endp; for (i = 0; i < iface->desc.bNumEndpoints; i++) { endp = &iface->endpoint[i].desc; if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) && (usb_endpoint_type(endp) == type)) return endp; } return NULL; } /* Get the byte that describes which ports are enabled */ static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports) { int i; struct usb_host_interface *iface = intf->cur_altsetting; if (iface->extralen == 3) { *ports = iface->extra[2]; return 0; } for (i = 0; i < iface->desc.bNumEndpoints; i++) { if (iface->endpoint[i].extralen == 3) { *ports = iface->endpoint[i].extra[2]; return 0; } } return -1; } /* interrupt urb needs to be submitted, used for serial read of muxed port */ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int, struct usb_device *usb, gfp_t gfp) { int result; usb_fill_int_urb(shared_int->shared_intr_urb, usb, usb_rcvintpipe(usb, shared_int->intr_endp->bEndpointAddress & 0x7F), shared_int->shared_intr_buf, 1, intr_callback, shared_int, shared_int->intr_endp->bInterval); result = usb_submit_urb(shared_int->shared_intr_urb, gfp); if (result) dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__, result); return result; } /* operations setup of the serial interface */ static const struct tty_operations hso_serial_ops = { .open = hso_serial_open, .close = hso_serial_close, .write = hso_serial_write, .write_room = hso_serial_write_room, .cleanup = hso_serial_cleanup, .ioctl = hso_serial_ioctl, .set_termios = hso_serial_set_termios, .chars_in_buffer = hso_serial_chars_in_buffer, .tiocmget = hso_serial_tiocmget, .tiocmset = hso_serial_tiocmset, .get_icount = hso_get_count, .unthrottle = hso_unthrottle }; static struct usb_driver hso_driver = { .name = driver_name, .probe = hso_probe, .disconnect = hso_disconnect, .id_table = hso_ids, .suspend = hso_suspend, .resume = hso_resume, .reset_resume = hso_resume, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; static int __init hso_init(void) { int result; /* allocate our driver using the proper amount of supported minors */ tty_drv = tty_alloc_driver(HSO_SERIAL_TTY_MINORS, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(tty_drv)) return PTR_ERR(tty_drv); /* fill in all needed values */ tty_drv->driver_name = driver_name; tty_drv->name = tty_filename; /* if major number is provided as parameter, use that one */ if (tty_major) tty_drv->major = tty_major; tty_drv->minor_start = 0; tty_drv->type = TTY_DRIVER_TYPE_SERIAL; tty_drv->subtype = SERIAL_TYPE_NORMAL; tty_drv->init_termios = tty_std_termios; hso_init_termios(&tty_drv->init_termios); tty_set_operations(tty_drv, &hso_serial_ops); /* register the tty driver */ result = tty_register_driver(tty_drv); if (result) { pr_err("%s - tty_register_driver failed(%d)\n", __func__, result); goto err_free_tty; } /* register this module as an usb driver */ result = usb_register(&hso_driver); if (result) { pr_err("Could not register hso driver - error: %d\n", result); goto err_unreg_tty; } /* done */ return 0; err_unreg_tty: tty_unregister_driver(tty_drv); err_free_tty: tty_driver_kref_put(tty_drv); return result; } static void __exit hso_exit(void) { tty_unregister_driver(tty_drv); /* deregister the usb driver */ usb_deregister(&hso_driver); tty_driver_kref_put(tty_drv); } /* Module definitions */ module_init(hso_init); 
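/* note: hso_exit() unregisters the tty driver before deregistering the
 * USB driver, so no new serial nodes can appear during USB teardown */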
module_exit(hso_exit); MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESCRIPTION); MODULE_LICENSE("GPL"); /* change the debug level (eg: insmod hso.ko debug=0x04) */ MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]"); module_param(debug, int, 0644); /* set the major tty number (eg: insmod hso.ko tty_major=245) */ MODULE_PARM_DESC(tty_major, "Set the major tty number"); module_param(tty_major, int, 0644); /* disable network interface (eg: insmod hso.ko disable_net=1) */ MODULE_PARM_DESC(disable_net, "Disable the network interface"); module_param(disable_net, int, 0644);
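As a quick illustration of the muxed-interrupt protocol above: the single byte delivered to intr_callback() is a bitmask in which each of the low eight bits flags one channel with pending data. A minimal standalone sketch, with decode_port_req() as a hypothetical stand-in for the driver's loop:

#include <stdio.h>

static void decode_port_req(unsigned char port_req)
{
	int i;

	for (i = 0; i < 8; i++)		/* max 8 channels on MUX */
		if (port_req & (1 << i))
			printf("channel %d has pending data\n", i);
}

int main(void)
{
	decode_port_req(0x05);		/* bits 0 and 2: channels 0 and 2 */
	return 0;
}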
// SPDX-License-Identifier: GPL-2.0
/*
  File: fs/ext4/acl.h

  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/

#include <linux/posix_acl_xattr.h>

#define EXT4_ACL_VERSION	0x0001

typedef struct {
	__le16		e_tag;
	__le16		e_perm;
	__le32		e_id;
} ext4_acl_entry;

typedef struct {
	__le16		e_tag;
	__le16		e_perm;
} ext4_acl_entry_short;

typedef struct {
	__le32		a_version;
} ext4_acl_header;

static inline size_t ext4_acl_size(int count)
{
	if (count <= 4) {
		return sizeof(ext4_acl_header) +
		       count * sizeof(ext4_acl_entry_short);
	} else {
		return sizeof(ext4_acl_header) +
		       4 * sizeof(ext4_acl_entry_short) +
		       (count - 4) * sizeof(ext4_acl_entry);
	}
}

static inline int ext4_acl_count(size_t size)
{
	ssize_t s;

	size -= sizeof(ext4_acl_header);
	s = size - 4 * sizeof(ext4_acl_entry_short);
	if (s < 0) {
		if (size % sizeof(ext4_acl_entry_short))
			return -1;
		return size / sizeof(ext4_acl_entry_short);
	} else {
		if (s % sizeof(ext4_acl_entry))
			return -1;
		return s / sizeof(ext4_acl_entry) + 4;
	}
}

#ifdef CONFIG_EXT4_FS_POSIX_ACL

/* acl.c */
struct posix_acl *ext4_get_acl(struct inode *inode, int type, bool rcu);
int ext4_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct posix_acl *acl, int type);
extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);

#else  /* CONFIG_EXT4_FS_POSIX_ACL */
#include <linux/sched.h>
#define ext4_get_acl NULL
#define ext4_set_acl NULL

static inline int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
	return 0;
}
#endif  /* CONFIG_EXT4_FS_POSIX_ACL */
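The two inline helpers above are inverses: ext4_acl_size() maps an entry count to the on-disk xattr size, and ext4_acl_count() maps a size back to a count (or -1 if the size is inconsistent). A minimal standalone sketch of the same arithmetic, assuming the natural sizes of the three on-disk structures (4-byte header, 4-byte short entry, 8-byte full entry); acl_size() and acl_count() are hypothetical stand-ins:

#include <assert.h>
#include <stddef.h>

#define HDR_SZ   4	/* sizeof(ext4_acl_header): one __le32 */
#define SHORT_SZ 4	/* sizeof(ext4_acl_entry_short): two __le16 */
#define FULL_SZ  8	/* sizeof(ext4_acl_entry): two __le16 + __le32 */

/* the first four entries (owner/group/mask/other) use the short form;
 * any further entries carry an explicit e_id and use the full form */
static size_t acl_size(int count)
{
	if (count <= 4)
		return HDR_SZ + count * SHORT_SZ;
	return HDR_SZ + 4 * SHORT_SZ + (count - 4) * FULL_SZ;
}

static long acl_count(size_t size)
{
	long s;

	size -= HDR_SZ;
	s = (long)size - 4 * SHORT_SZ;
	if (s < 0)
		return size % SHORT_SZ ? -1 : (long)(size / SHORT_SZ);
	return s % FULL_SZ ? -1 : s / FULL_SZ + 4;
}

int main(void)
{
	assert(acl_size(3) == 16);	/* 4 + 3*4 */
	assert(acl_size(6) == 36);	/* 4 + 4*4 + 2*8 */
	assert(acl_count(16) == 3);	/* round-trips back to the count */
	assert(acl_count(36) == 6);
	return 0;
}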
26 26 26 26 26 26 26 26 26 26 26 26 26 26 25 26 26 26 26 26 26 26 26 26 26 26 26 25 26 26 26 26 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 
892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 
1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 
2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 
3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 
3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 
4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/mem.h"
#include "hist.h"                 /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
#include "zstd_fast.h"
#include "zstd_double_fast.h"
#include "zstd_lazy.h"
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"

/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * COMPRESS_HEAPMODE :
 * Select how the default compression function ZSTD_compress() allocates its context,
 * on stack (0, default), or into heap (1).
 * Note that functions with an explicit context such as ZSTD_compressCCtx() are unaffected.
 */

/*!
 * ZSTD_HASHLOG3_MAX :
 * Maximum size of the hash table dedicated to finding 3-byte matches,
 * in log format, aka 17 => 1 << 17 == 128Ki positions.
 * This structure is only used in zstd_opt.
 * Since allocation is centralized for all strategies, it has to be known here.
 * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
 * so that zstd_opt.c doesn't need to know about this constant.
 */
#ifndef ZSTD_HASHLOG3_MAX
#  define ZSTD_HASHLOG3_MAX 17
#endif

/*-*************************************
*  Helper functions
***************************************/
/* ZSTD_compressBound()
 * Note that the result from this function is only compatible with the "normal"
 * full-block strategy.
 * When there are a lot of small blocks due to frequent flushes in streaming mode,
 * the overhead of block headers can make the compressed data larger than the
 * return value of ZSTD_compressBound().
 */
size_t ZSTD_compressBound(size_t srcSize) {
    return ZSTD_COMPRESSBOUND(srcSize);
}


/*-*************************************
*  Context memory management
***************************************/
struct ZSTD_CDict_s {
    const void* dictContent;
    size_t dictContentSize;
    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
    ZSTD_cwksp workspace;
    ZSTD_matchState_t matchState;
    ZSTD_compressedBlockState_t cBlockState;
    ZSTD_customMem customMem;
    U32 dictID;
    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
    ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
                                           * row-based matchfinder. Unless the cdict is reloaded, we will use
                                           * the same greedy/lazy matchfinder at compression time. */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */

ZSTD_CCtx* ZSTD_createCCtx(void)
{
    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}

static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
    assert(cctx != NULL);
    ZSTD_memset(cctx, 0, sizeof(*cctx));
    cctx->customMem = memManager;
    cctx->bmi2 = ZSTD_cpuSupportsBmi2();
    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}

ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
    ZSTD_STATIC_ASSERT(zcss_init==0);
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
        if (!cctx) return NULL;
        ZSTD_initCCtx(cctx, customMem);
        return cctx;
    }
}

ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
    ZSTD_cwksp ws;
    ZSTD_CCtx* cctx;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);

    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
    if (cctx == NULL) return NULL;

    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
    ZSTD_cwksp_move(&cctx->workspace, &ws);
    cctx->staticSize = workspaceSize;

    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}

/*
 * Clears and frees all of the dictionaries in the CCtx.
 */
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
    ZSTD_freeCDict(cctx->localDict.cdict);
    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
    cctx->cdict = NULL;
}
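/* Usage sketch (illustrative, not part of the original source): sizing a
 * destination buffer with ZSTD_compressBound() for a one-shot compression.
 * As noted above, the bound only holds for the normal full-block strategy;
 * frequent streaming flushes can exceed it. The helper name is hypothetical
 * and error handling is minimal. */
static size_t example_compressOneShot(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    if (cctx == NULL) return ERROR(memory_allocation);
    /* A dstCapacity of at least ZSTD_compressBound(srcSize) guarantees
     * success even on incompressible input. */
    assert(dstCapacity >= ZSTD_compressBound(srcSize));
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;   /* compressed size, or an error code (check with ZSTD_isError()) */
}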
static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
{
    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
    return bufferSize + cdictSize;
}

static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);
    ZSTD_clearAllDicts(cctx);
    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}

size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support free on NULL */
    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                    "not compatible with static CCtx");
    {   int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
        ZSTD_freeCCtxContent(cctx);
        if (!cctxInWorkspace) {
            ZSTD_customFree(cctx, cctx->customMem);
        }
    }
    return 0;
}

static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
    (void)cctx;
    return 0;
}

size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support sizeof on NULL */
    /* cctx may be in the workspace */
    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
           + ZSTD_cwksp_sizeof(&cctx->workspace)
           + ZSTD_sizeof_localDict(cctx->localDict)
           + ZSTD_sizeof_mtctx(cctx);
}

size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
    return ZSTD_sizeof_CCtx(zcs);  /* same object */
}

/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }

/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
    return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
}

/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
 * for this compression.
 */
static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
    assert(mode != ZSTD_ps_auto);
    return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}

/* Returns row matchfinder usage given an initial mode and cParams */
static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
                                                         const ZSTD_compressionParameters* const cParams) {
#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
    int const kHasSIMD128 = 1;
#else
    int const kHasSIMD128 = 0;
#endif
    if (mode != ZSTD_ps_auto) return mode; /* if enable was explicitly requested, we still use the row matchfinder even without SIMD */
    mode = ZSTD_ps_disable;
    if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
    if (kHasSIMD128) {
        if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
    } else {
        if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
    }
    return mode;
}

/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
                                                        const ZSTD_compressionParameters* const cParams) {
    if (mode != ZSTD_ps_auto) return mode;
    return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
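/* Sketch of how the ZSTD_ps_auto policy above resolves in practice (the helper
 * name and the chosen level are illustrative; the exact strategy selected per
 * level is version dependent). On a SIMD-capable target, a lazy-class strategy
 * with a large window selects the row matchfinder, while ZSTD_fast never does. */
static ZSTD_paramSwitch_e example_resolveRowMode(void)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(9, 0, 0); /* typically a lazy-class level */
    return ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
    /* ZSTD_ps_enable iff strategy is in [greedy, lazy2] and windowLog is large enough */
}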
/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
                                   const ZSTD_paramSwitch_e useRowMatchFinder,
                                   const U32 forDDSDict) {
    assert(useRowMatchFinder != ZSTD_ps_auto);
    /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
     * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
     */
    return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}

/* Returns ZSTD_ps_enable if compression parameters are such that we should
 * enable long distance matching (wlog >= 27, strategy >= btopt),
 * ZSTD_ps_disable otherwise (unless explicitly overridden by `mode`).
 */
static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
                                                const ZSTD_compressionParameters* const cParams) {
    if (mode != ZSTD_ps_auto) return mode;
    return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}

static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
        ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params cctxParams;
    /* should not matter, as all cParams are presumed properly defined */
    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
    cctxParams.cParams = cParams;

    /* Adjust advanced params according to cParams */
    cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
    if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
        assert(cctxParams.ldmParams.hashRateLog < 32);
    }
    cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
    cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
    assert(!ZSTD_checkCParams(cParams));
    return cctxParams;
}

static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params* params;
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
            sizeof(ZSTD_CCtx_params), customMem);
    if (!params) { return NULL; }
    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
    params->customMem = customMem;
    return params;
}

ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}

size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
    if (params == NULL) { return 0; }
    ZSTD_customFree(params, params->customMem);
    return 0;
}

size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}

size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->fParams.contentSizeFlag = 1;
    return 0;
}
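/* Sketch of a typical CCtxParams lifecycle using the functions above:
 * create, initialize to a level, (optionally set parameters), free.
 * The helper name is hypothetical; checks are minimal. */
static size_t example_cctxParamsLifecycle(void)
{
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    if (params == NULL) return ERROR(memory_allocation);
    {   size_t const err = ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
        if (ZSTD_isError(err)) { ZSTD_freeCCtxParams(params); return err; }
    }
    /* ... ZSTD_CCtxParams_setParameter() calls would go here ... */
    return ZSTD_freeCCtxParams(params);   /* always returns 0 */
}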
#define ZSTD_NO_CLEVEL 0

/*
 * Initializes the cctxParams from params and compressionLevel.
 * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
 */
static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
                                          ZSTD_parameters const* params,
                                          int compressionLevel)
{
    assert(!ZSTD_checkCParams(params->cParams));
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
    cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
    cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
    DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
             cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter,
             cctxParams->ldmParams.enableLdm);
}

size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
    return 0;
}

/*
 * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
 * @param params Validated zstd parameters.
 */
static void ZSTD_CCtxParams_setZstdParams(
        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
{
    assert(!ZSTD_checkCParams(params->cParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
}
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
    ZSTD_bounds bounds = { 0, 0, 0 };

    switch(param)
    {
    case ZSTD_c_compressionLevel:
        bounds.lowerBound = ZSTD_minCLevel();
        bounds.upperBound = ZSTD_maxCLevel();
        return bounds;

    case ZSTD_c_windowLog:
        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
        return bounds;

    case ZSTD_c_hashLog:
        bounds.lowerBound = ZSTD_HASHLOG_MIN;
        bounds.upperBound = ZSTD_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_chainLog:
        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
        bounds.upperBound = ZSTD_CHAINLOG_MAX;
        return bounds;

    case ZSTD_c_searchLog:
        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
        return bounds;

    case ZSTD_c_minMatch:
        bounds.lowerBound = ZSTD_MINMATCH_MIN;
        bounds.upperBound = ZSTD_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_targetLength:
        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
        return bounds;

    case ZSTD_c_strategy:
        bounds.lowerBound = ZSTD_STRATEGY_MIN;
        bounds.upperBound = ZSTD_STRATEGY_MAX;
        return bounds;

    case ZSTD_c_contentSizeFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_checksumFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_dictIDFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_nbWorkers:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;

    case ZSTD_c_jobSize:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;

    case ZSTD_c_overlapLog:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;

    case ZSTD_c_enableDedicatedDictSearch:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_enableLongDistanceMatching:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_ldmHashLog:
        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_ldmMinMatch:
        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_ldmBucketSizeLog:
        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
        return bounds;

    case ZSTD_c_ldmHashRateLog:
        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
        return bounds;

    /* experimental parameters */
    case ZSTD_c_rsyncable:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_forceMaxWindow :
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_format:
        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
        bounds.lowerBound = ZSTD_f_zstd1;
        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_forceAttachDict:
        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
        bounds.lowerBound = ZSTD_dictDefaultAttach;
        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_literalCompressionMode:
        ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_targetCBlockSize:
        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
        return bounds;

    case ZSTD_c_srcSizeHint:
        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
        return bounds;

    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
        bounds.lowerBound = (int)ZSTD_bm_buffered;
        bounds.upperBound = (int)ZSTD_bm_stable;
        return bounds;

    case ZSTD_c_blockDelimiters:
        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
        return bounds;

    case ZSTD_c_validateSequences:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_useBlockSplitter:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_useRowMatchFinder:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_deterministicRefPrefix:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    default:
        bounds.error = ERROR(parameter_unsupported);
        return bounds;
    }
}
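/* Sketch: querying a parameter's valid range before saturating a
 * caller-supplied value, mirroring what ZSTD_cParam_clampBounds() below does
 * internally. The helper name is hypothetical. */
static int example_clampLevel(int requestedLevel)
{
    ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
    if (ZSTD_isError(b.error)) return requestedLevel;  /* not expected for this parameter */
    if (requestedLevel < b.lowerBound) return b.lowerBound;
    if (requestedLevel > b.upperBound) return b.upperBound;
    return requestedLevel;
}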
/* ZSTD_cParam_clampBounds:
 * Clamps the value into the bounded range.
 */
static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
    if (ZSTD_isError(bounds.error)) return bounds.error;
    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
    if (*value > bounds.upperBound) *value = bounds.upperBound;
    return 0;
}

#define BOUNDCHECK(cParam, val) {                             \
    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val),    \
                    parameter_outOfBound, "Param out of bounds"); \
}

static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
    switch(param)
    {
    case ZSTD_c_compressionLevel:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
        return 1;

    case ZSTD_c_format:
    case ZSTD_c_windowLog:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow :
    case ZSTD_c_nbWorkers:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    case ZSTD_c_useBlockSplitter:
    case ZSTD_c_useRowMatchFinder:
    case ZSTD_c_deterministicRefPrefix:
    default:
        return 0;
    }
}

size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
    if (cctx->streamStage != zcss_init) {
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;
        } else {
            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
        }
    }

    switch(param)
    {
    case ZSTD_c_nbWorkers:
        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
                        "MT not compatible with static alloc");
        break;

    case ZSTD_c_compressionLevel:
    case ZSTD_c_windowLog:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_format:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    case ZSTD_c_useBlockSplitter:
    case ZSTD_c_useRowMatchFinder:
    case ZSTD_c_deterministicRefPrefix:
        break;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
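/* Usage sketch for the advanced parameter API above: parameters must be set
 * while the context is in the init stage, and each setter returns either the
 * (size_t-encoded) new value or an error code. Values here are arbitrary;
 * the helper name is hypothetical. */
static size_t example_configureCCtx(ZSTD_CCtx* cctx)
{
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 24), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1), "");
    return 0;
}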
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
                                    ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
    switch(param)
    {
    case ZSTD_c_format :
        BOUNDCHECK(ZSTD_c_format, value);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_c_compressionLevel : {
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
        if (value == 0)
            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
        else
            CCtxParams->compressionLevel = value;
        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
        return 0;  /* return type (size_t) cannot represent negative values */
    }

    case ZSTD_c_windowLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_windowLog, value);
        CCtxParams->cParams.windowLog = (U32)value;
        return CCtxParams->cParams.windowLog;

    case ZSTD_c_hashLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_hashLog, value);
        CCtxParams->cParams.hashLog = (U32)value;
        return CCtxParams->cParams.hashLog;

    case ZSTD_c_chainLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_chainLog, value);
        CCtxParams->cParams.chainLog = (U32)value;
        return CCtxParams->cParams.chainLog;

    case ZSTD_c_searchLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_searchLog, value);
        CCtxParams->cParams.searchLog = (U32)value;
        return (size_t)value;

    case ZSTD_c_minMatch :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_minMatch, value);
        CCtxParams->cParams.minMatch = value;
        return CCtxParams->cParams.minMatch;

    case ZSTD_c_targetLength :
        BOUNDCHECK(ZSTD_c_targetLength, value);
        CCtxParams->cParams.targetLength = value;
        return CCtxParams->cParams.targetLength;

    case ZSTD_c_strategy :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_strategy, value);
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_c_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value!=0));
        CCtxParams->fParams.contentSizeFlag = value != 0;
        return CCtxParams->fParams.contentSizeFlag;

    case ZSTD_c_checksumFlag :
        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
        CCtxParams->fParams.checksumFlag = value != 0;
        return CCtxParams->fParams.checksumFlag;

    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
        CCtxParams->fParams.noDictIDFlag = !value;
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_c_forceMaxWindow :
        CCtxParams->forceWindow = (value != 0);
        return CCtxParams->forceWindow;

    case ZSTD_c_forceAttachDict : {
        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
        CCtxParams->attachDictPref = pref;
        return CCtxParams->attachDictPref;
    }

    case ZSTD_c_literalCompressionMode : {
        const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
        CCtxParams->literalCompressionMode = lcm;
        return CCtxParams->literalCompressionMode;
    }

    case ZSTD_c_nbWorkers :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_jobSize :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_overlapLog :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_rsyncable :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_enableDedicatedDictSearch :
        CCtxParams->enableDedicatedDictSearch = (value!=0);
        return CCtxParams->enableDedicatedDictSearch;

    case ZSTD_c_enableLongDistanceMatching :
        CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
        return CCtxParams->ldmParams.enableLdm;

    case ZSTD_c_ldmHashLog :
        if (value!=0)   /* 0 ==> auto */
            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
        CCtxParams->ldmParams.hashLog = value;
        return CCtxParams->ldmParams.hashLog;
    case ZSTD_c_ldmMinMatch :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
        CCtxParams->ldmParams.minMatchLength = value;
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_c_ldmBucketSizeLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
        CCtxParams->ldmParams.bucketSizeLog = value;
        return CCtxParams->ldmParams.bucketSizeLog;

    case ZSTD_c_ldmHashRateLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
        CCtxParams->ldmParams.hashRateLog = value;
        return CCtxParams->ldmParams.hashRateLog;

    case ZSTD_c_targetCBlockSize :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
        CCtxParams->targetCBlockSize = value;
        return CCtxParams->targetCBlockSize;

    case ZSTD_c_srcSizeHint :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
        CCtxParams->srcSizeHint = value;
        return CCtxParams->srcSizeHint;

    case ZSTD_c_stableInBuffer:
        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->inBufferMode;

    case ZSTD_c_stableOutBuffer:
        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->outBufferMode;

    case ZSTD_c_blockDelimiters:
        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
        return CCtxParams->blockDelimiters;

    case ZSTD_c_validateSequences:
        BOUNDCHECK(ZSTD_c_validateSequences, value);
        CCtxParams->validateSequences = value;
        return CCtxParams->validateSequences;

    case ZSTD_c_useBlockSplitter:
        BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
        CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
        return CCtxParams->useBlockSplitter;

    case ZSTD_c_useRowMatchFinder:
        BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
        CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
        return CCtxParams->useRowMatchFinder;

    case ZSTD_c_deterministicRefPrefix:
        BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
        CCtxParams->deterministicRefPrefix = !!value;
        return CCtxParams->deterministicRefPrefix;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
}

size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
{
    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}
size_t ZSTD_CCtxParams_getParameter(
        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch(param)
    {
    case ZSTD_c_format : *value = CCtxParams->format; break;
    case ZSTD_c_compressionLevel : *value = CCtxParams->compressionLevel; break;
    case ZSTD_c_windowLog : *value = (int)CCtxParams->cParams.windowLog; break;
    case ZSTD_c_hashLog : *value = (int)CCtxParams->cParams.hashLog; break;
    case ZSTD_c_chainLog : *value = (int)CCtxParams->cParams.chainLog; break;
    case ZSTD_c_searchLog : *value = CCtxParams->cParams.searchLog; break;
    case ZSTD_c_minMatch : *value = CCtxParams->cParams.minMatch; break;
    case ZSTD_c_targetLength : *value = CCtxParams->cParams.targetLength; break;
    case ZSTD_c_strategy : *value = (unsigned)CCtxParams->cParams.strategy; break;
    case ZSTD_c_contentSizeFlag : *value = CCtxParams->fParams.contentSizeFlag; break;
    case ZSTD_c_checksumFlag : *value = CCtxParams->fParams.checksumFlag; break;
    case ZSTD_c_dictIDFlag : *value = !CCtxParams->fParams.noDictIDFlag; break;
    case ZSTD_c_forceMaxWindow : *value = CCtxParams->forceWindow; break;
    case ZSTD_c_forceAttachDict : *value = CCtxParams->attachDictPref; break;
    case ZSTD_c_literalCompressionMode : *value = CCtxParams->literalCompressionMode; break;
    case ZSTD_c_nbWorkers :
        assert(CCtxParams->nbWorkers == 0);
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_c_jobSize :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_overlapLog :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_rsyncable :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_enableDedicatedDictSearch :
        *value = CCtxParams->enableDedicatedDictSearch; break;
    case ZSTD_c_enableLongDistanceMatching : *value = CCtxParams->ldmParams.enableLdm; break;
    case ZSTD_c_ldmHashLog : *value = CCtxParams->ldmParams.hashLog; break;
    case ZSTD_c_ldmMinMatch : *value = CCtxParams->ldmParams.minMatchLength; break;
    case ZSTD_c_ldmBucketSizeLog : *value = CCtxParams->ldmParams.bucketSizeLog; break;
    case ZSTD_c_ldmHashRateLog : *value = CCtxParams->ldmParams.hashRateLog; break;
    case ZSTD_c_targetCBlockSize : *value = (int)CCtxParams->targetCBlockSize; break;
    case ZSTD_c_srcSizeHint : *value = (int)CCtxParams->srcSizeHint; break;
    case ZSTD_c_stableInBuffer : *value = (int)CCtxParams->inBufferMode; break;
    case ZSTD_c_stableOutBuffer : *value = (int)CCtxParams->outBufferMode; break;
    case ZSTD_c_blockDelimiters : *value = (int)CCtxParams->blockDelimiters; break;
    case ZSTD_c_validateSequences : *value = (int)CCtxParams->validateSequences; break;
    case ZSTD_c_useBlockSplitter : *value = (int)CCtxParams->useBlockSplitter; break;
    case ZSTD_c_useRowMatchFinder : *value = (int)CCtxParams->useRowMatchFinder; break;
    case ZSTD_c_deterministicRefPrefix: *value = (int)CCtxParams->deterministicRefPrefix; break;
    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return 0;
}

/* ZSTD_CCtx_setParametersUsingCCtxParams() :
 * just applies `params` into `cctx`;
 * no action is performed, parameters are merely stored.
 * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
 * This is possible even if a compression is ongoing,
 * in which case the new parameters will be applied on the fly, starting with the next compression job.
 */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "The context is in the wrong stage!");
    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
                    "Can't override parameters with cdict attached (some must "
                    "be inherited from the cdict).");

    cctx->requestedParams = *params;
    return 0;
}

size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't set pledgedSrcSize when not in init stage.");
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}

static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
        int const compressionLevel,
        size_t const dictSize);
static int ZSTD_dedicatedDictSearch_isSupported(
        const ZSTD_compressionParameters* cParams);
static void ZSTD_dedicatedDictSearch_revertCParams(
        ZSTD_compressionParameters* cParams);
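/* Sketch: pledging the exact input size up front (see
 * ZSTD_CCtx_setPledgedSrcSize() above) so the frame header can record the
 * content size. The pledge must be made in the init stage; ending the frame
 * after compressing a different total size is an error. Hypothetical helper. */
static size_t example_pledgeSrcSize(ZSTD_CCtx* cctx, unsigned long long totalSrcSize)
{
    FORWARD_IF_ERROR(ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only), "");
    return ZSTD_CCtx_setPledgedSrcSize(cctx, totalSrcSize);
}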
/*
 * Initializes the local dict using the requested parameters.
 * NOTE: This does not use the pledged src size, because it may be used for more
 * than one compression.
 */
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
    ZSTD_localDict* const dl = &cctx->localDict;
    if (dl->dict == NULL) {
        /* No local dictionary. */
        assert(dl->dictBuffer == NULL);
        assert(dl->cdict == NULL);
        assert(dl->dictSize == 0);
        return 0;
    }
    if (dl->cdict != NULL) {
        assert(cctx->cdict == dl->cdict);
        /* Local dictionary already initialized. */
        return 0;
    }
    assert(dl->dictSize > 0);
    assert(cctx->cdict == NULL);
    assert(cctx->prefixDict.dict == NULL);

    dl->cdict = ZSTD_createCDict_advanced2(
            dl->dict,
            dl->dictSize,
            ZSTD_dlm_byRef,
            dl->dictContentType,
            &cctx->requestedParams,
            cctx->customMem);
    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
    cctx->cdict = dl->cdict;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_advanced(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't load a dictionary when ctx is not in init stage.");
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
        return 0;
    if (dictLoadMethod == ZSTD_dlm_byRef) {
        cctx->localDict.dict = dict;
    } else {
        void* dictBuffer;
        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                        "no malloc for static CCtx");
        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
        ZSTD_memcpy(dictBuffer, dict, dictSize);
        cctx->localDict.dictBuffer = dictBuffer;
        cctx->localDict.dict = dictBuffer;
    }
    cctx->localDict.dictSize = dictSize;
    cctx->localDict.dictContentType = dictContentType;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_byReference(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}

size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}

size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a dict when ctx not in init stage.");
    /* Free the existing local cdict (if any) to save memory. */
    ZSTD_clearAllDicts(cctx);
    cctx->cdict = cdict;
    return 0;
}

size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a pool when ctx not in init stage.");
    cctx->pool = pool;
    return 0;
}

size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}

size_t ZSTD_CCtx_refPrefix_advanced(
        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a prefix when ctx not in init stage.");
    ZSTD_clearAllDicts(cctx);
    if (prefix != NULL && prefixSize > 0) {
        cctx->prefixDict.dict = prefix;
        cctx->prefixDict.dictSize = prefixSize;
        cctx->prefixDict.dictContentType = dictContentType;
    }
    return 0;
}
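/* Sketch contrasting the two dictionary-loading flavors above:
 * ZSTD_dlm_byCopy duplicates the dictionary into the context (safe if the
 * caller's buffer goes away), while ZSTD_dlm_byRef only stores a pointer, so
 * the buffer must outlive all compressions using it. Hypothetical helper. */
static size_t example_loadDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int byRef)
{
    return byRef
        ? ZSTD_CCtx_loadDictionary_byReference(cctx, dict, dictSize)
        : ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
}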
/*! ZSTD_CCtx_reset() :
 *  Also dumps dictionary */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
    if ( (reset == ZSTD_reset_session_only)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        cctx->streamStage = zcss_init;
        cctx->pledgedSrcSizePlusOne = 0;
    }
    if ( (reset == ZSTD_reset_parameters)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                        "Can't reset parameters only when not in init stage.");
        ZSTD_clearAllDicts(cctx);
        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
    }
    return 0;
}


/*! ZSTD_checkCParams() :
 *  control that CParam values remain within the authorized range.
 *  @return : 0, or an error code if one value is beyond the authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
    return 0;
}

/*! ZSTD_clampCParams() :
 *  clamp CParam values into the valid range.
 *  @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
#   define CLAMP_TYPE(cParam, val, type) {                                \
        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
    }
#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
    return cParams;
}
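/* Illustrative sketch (not part of the original source) contrasting the two
 * sanitizers above: ZSTD_checkCParams() rejects out-of-range values with an
 * error code, while ZSTD_clampCParams() silently saturates them into range.
 * The helper name and the `strict` flag are hypothetical. */
static ZSTD_compressionParameters
example_sanitizeCParams(ZSTD_compressionParameters cParams, int strict, size_t* errp)
{
    if (strict) {
        *errp = ZSTD_checkCParams(cParams);  /* 0, or an error if any field is out of bounds */
        return cParams;                      /* returned unmodified */
    }
    *errp = 0;
    return ZSTD_clampCParams(cParams);       /* always valid, possibly adjusted */
}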
/* ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
    return hashLog - btScale;
}

/* ZSTD_dictAndWindowLog() :
 * Returns an adjusted window log that is large enough to fit the source and the dictionary.
 * The zstd format says that the entire dictionary is valid if one byte of the dictionary
 * is within the window. So the hashLog and chainLog should be large enough to reference both
 * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
 * the hashLog and windowLog.
 * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
 */
static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
{
    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
    /* No dictionary ==> No change */
    if (dictSize == 0) {
        return windowLog;
    }
    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN);  /* Handled in ZSTD_adjustCParams_internal() */
    {   U64 const windowSize = 1ULL << windowLog;
        U64 const dictAndWindowSize = dictSize + windowSize;
        /* If the window size is already large enough to fit both the source and the dictionary
         * then just use the window size. Otherwise adjust so that it fits the dictionary and
         * the window.
         */
        if (windowSize >= dictSize + srcSize) {
            return windowLog; /* Window size large enough already */
        } else if (dictAndWindowSize >= maxWindowSize) {
            return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
        } else {
            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
        }
    }
}
/* ZSTD_adjustCParams_internal() :
 * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
 * mostly downsize to reduce memory consumption and initialization latency.
 * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
 * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
 * note : `srcSize==0` means 0!
 * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize,
                            ZSTD_cParamMode_e mode)
{
    const U64 minSrcSize = 513; /* (1<<9) + 1 */
    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    switch (mode) {
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_noAttachDict:
        /* If we don't know the source size, don't make any
         * assumptions about it. We will already have selected
         * smaller parameters if a dictionary is in use.
         */
        break;
    case ZSTD_cpm_createCDict:
        /* Assume a small source size when creating a dictionary
         * with an unknown source size.
         */
        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            srcSize = minSrcSize;
        break;
    case ZSTD_cpm_attachDict:
        /* Dictionary has its own dedicated parameters which have
         * already been selected. We are selecting parameters
         * for only the source.
         */
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) ) {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
        if (cycleLog > dictAndWindowLog)
            cPar.chainLog -= (cycleLog - dictAndWindowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */

    return cPar;
}

ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
                   unsigned long long srcSize,
                   size_t dictSize)
{
    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
}

static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);

static void ZSTD_overrideCParams(
              ZSTD_compressionParameters* cParams,
        const ZSTD_compressionParameters* overrides)
{
    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
}

ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    ZSTD_compressionParameters cParams;
    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
        srcSizeHint = CCtxParams->srcSizeHint;
    }
    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
    if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
    assert(!ZSTD_checkCParams(cParams));
    /* srcSizeHint == 0 means 0 */
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
}
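/* Usage sketch for ZSTD_adjustCParams() above: a parameter set sized for large
 * inputs is downsized when the source is known to be small, saving memory
 * without hurting ratio. Level and size are arbitrary; hypothetical helper. */
static ZSTD_compressionParameters example_adjustForSmallInput(void)
{
    ZSTD_compressionParameters const base = ZSTD_getCParams(19, 0, 0);  /* srcSize unknown */
    return ZSTD_adjustCParams(base, 4 KB, 0);  /* now known: a 4 KB source, no dictionary */
}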
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const ZSTD_paramSwitch_e useRowMatchFinder,
                       const U32 enableDedicatedDictSearch,
                       const U32 forCCtx)
{
    /* chain table size should be 0 for fast or row-hash strategies */
    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
                                 ? ((size_t)1 << cParams->chainLog)
                                 : 0;
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
     * surrounded by redzones in ASAN. */
    size_t const tableSpace = chainSize * sizeof(U32)
                            + hSize * sizeof(U32)
                            + h3Size * sizeof(U32);
    size_t const optPotentialSpace =
        ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
      + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
    size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
                                            ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
                                            : 0;
    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                ? optPotentialSpace
                                : 0;
    size_t const slackSpace = ZSTD_cwksp_slack_space_required();

    /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
    ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
    assert(useRowMatchFinder != ZSTD_ps_auto);

    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}

static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        const ZSTD_compressionParameters* cParams,
        const ldmParams_t* ldmParams,
        const int isStatic,
        const ZSTD_paramSwitch_e useRowMatchFinder,
        const size_t buffInSize,
        const size_t buffOutSize,
        const U64 pledgedSrcSize)
{
    size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
    size_t const maxNbSeq = blockSize / divider;
    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
                            + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);

    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
    size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
        ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;

    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
                             + ZSTD_cwksp_alloc_size(buffOutSize);

    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;

    size_t const neededSpace =
        cctxSpace +
        entropySpace +
        blockStateSpace +
        ldmSpace +
        ldmSeqSpace +
        matchStateSize +
        tokenSpace +
        bufferSpace;

    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
    return neededSpace;
}
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
    ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
                                                                               &cParams);

    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    /* estimateCCtxSize is for one-shot compression. So no buffers should
     * be needed. However, we still allocate two 0-sized buffers, which can
     * take space under ASAN. */
    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}

size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
        size_t noRowCCtxSize;
        size_t rowCCtxSize;
        initialParams.useRowMatchFinder = ZSTD_ps_disable;
        noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
        initialParams.useRowMatchFinder = ZSTD_ps_enable;
        rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
        return MAX(noRowCCtxSize, rowCCtxSize);
    } else {
        return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
    }
}

static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
    int tier = 0;
    size_t largestSize = 0;
    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
    for (; tier < 4; ++tier) {
        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
    }
    return largestSize;
}

size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
    int level;
    size_t memBudget = 0;
    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
        /* Ensure monotonically increasing memory usage as compression level increases */
        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
        if (newMB > memBudget) memBudget = newMB;
    }
    return memBudget;
}
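/* Sketch: pairing the estimator above with ZSTD_initStaticCCtx(). A workspace
 * of at least ZSTD_estimateCCtxSize(level) bytes is sufficient for one-shot
 * compression at that level; the caller-provided buffer must be 8-byte
 * aligned. Hypothetical helper, minimal checks. */
static ZSTD_CCtx* example_staticCCtx(void* workspace, size_t workspaceSize, int compressionLevel)
{
    if (workspaceSize < ZSTD_estimateCCtxSize(compressionLevel)) return NULL;
    return ZSTD_initStaticCCtx(workspace, workspaceSize);  /* also NULL on misalignment */
}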
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
                ? ((size_t)1 << cParams.windowLog) + blockSize
                : 0;
        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
                ? ZSTD_compressBound(blockSize) + 1
                : 0;
        ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);

        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
            &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
            ZSTD_CONTENTSIZE_UNKNOWN);
    }
}

size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
        size_t noRowCCtxSize;
        size_t rowCCtxSize;
        initialParams.useRowMatchFinder = ZSTD_ps_disable;
        noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
        initialParams.useRowMatchFinder = ZSTD_ps_enable;
        rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
        return MAX(noRowCCtxSize, rowCCtxSize);
    } else {
        return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
    }
}

static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
    return ZSTD_estimateCStreamSize_usingCParams(cParams);
}

size_t ZSTD_estimateCStreamSize(int compressionLevel)
{
    int level;
    size_t memBudget = 0;
    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
        if (newMB > memBudget) memBudget = newMB;
    }
    return memBudget;
}

/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * Able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
    {   ZSTD_frameProgression fp;
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        fp.currentJobID = 0;
        fp.nbActiveWorkers = 0;
        return fp;
    }
}
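/* Sketch: polling progression during a long compression with
 * ZSTD_getFrameProgression() above. In this single-threaded build,
 * flushed == produced and nbActiveWorkers == 0. Hypothetical helper. */
static unsigned long long example_bytesPending(const ZSTD_CCtx* cctx)
{
    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    return fp.ingested - fp.consumed;   /* bytes buffered but not yet compressed */
}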
/*! ZSTD_toFlushNow()
 *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
 */
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
    (void)cctx;
    return 0;   /* over-simplification; could also check if context is currently running in streaming mode,
                 * and in which case, report how many bytes are left to be flushed within output buffer */
}

static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                    ZSTD_compressionParameters cParams2)
{
    (void)cParams1;
    (void)cParams2;
    assert(cParams1.windowLog    == cParams2.windowLog);
    assert(cParams1.chainLog     == cParams2.chainLog);
    assert(cParams1.hashLog      == cParams2.hashLog);
    assert(cParams1.searchLog    == cParams2.searchLog);
    assert(cParams1.minMatch     == cParams2.minMatch);
    assert(cParams1.targetLength == cParams2.targetLength);
    assert(cParams1.strategy     == cParams2.strategy);
}

void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
    int i;
    for (i = 0; i < ZSTD_REP_NUM; ++i)
        bs->rep[i] = repStartValue[i];
    bs->entropy.huf.repeatMode = HUF_repeat_none;
    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}

/*! ZSTD_invalidateMatchState()
 *  Invalidate all the matches in the match finder tables.
 *  Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    ms->nextToUpdate = ms->window.dictLimit;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    ms->dictMatchState = NULL;
}

/*
 * Controls, for this matchState reset, whether the tables need to be cleared /
 * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
 * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
 * subsequent operation will overwrite the table space anyways (e.g., copying
 * the matchState contents in from a CDict).
 */
typedef enum {
    ZSTDcrp_makeClean,
    ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;

/*
 * Controls, for this matchState reset, whether indexing can continue where it
 * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
 * (ZSTDirp_reset).
 */
typedef enum {
    ZSTDirp_continue,
    ZSTDirp_reset
} ZSTD_indexResetPolicy_e;

typedef enum {
    ZSTD_resetTarget_CDict,
    ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;

static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      ZSTD_cwksp* ws,
                const ZSTD_compressionParameters* cParams,
                const ZSTD_paramSwitch_e useRowMatchFinder,
                const ZSTD_compResetPolicy_e crp,
                const ZSTD_indexResetPolicy_e forceResetIndex,
                const ZSTD_resetTarget_e forWho)
{
    /* disable chain table allocation for fast or row-based strategies */
    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
                                                     ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
                                ? ((size_t)1 << cParams->chainLog)
                                : 0;
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
    assert(useRowMatchFinder != ZSTD_ps_auto);
    if (forceResetIndex == ZSTDirp_reset) {
        ZSTD_window_init(&ms->window);
        ZSTD_cwksp_mark_tables_dirty(ws);
    }

    ms->hashLog3 = hashLog3;

    ZSTD_invalidateMatchState(ms);

    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */

    ZSTD_cwksp_clear_tables(ws);

    DEBUGLOG(5, "reserving table space");
    /* table Space */
    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");

    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
    if (crp!=ZSTDcrp_leaveDirty) {
        /* reset tables only */
        ZSTD_cwksp_clean_tables(ws);
    }

    /* opt parser space */
    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
        DEBUGLOG(4, "reserving optimal parser space");
        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
    }

    if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
        {   /* Row match finder needs an additional table of hashes ("tags") */
            size_t const tagTableSize = hSize*sizeof(U16);
            ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
            if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
        }
        {   /* Switch to 32-entry rows if searchLog is 5 (or more) */
            U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
            assert(cParams->hashLog >= rowLog);
            ms->rowHashLog = cParams->hashLog - rowLog;
        }
    }

    ms->cParams = *cParams;

    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");
    return 0;
}

/* ZSTD_indexTooCloseToMax() :
 * minor optimization : prefer memset() rather than reduceIndex()
 * which is measurably slow in some circumstances (reported for Visual Studio).
 * Works when re-using a context for a lot of smallish inputs :
 * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
 * memset() will be triggered before reduceIndex().
 */
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}

/* ZSTD_dictTooBig():
 * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
 * one go generically. So we ensure that in that case we reset the tables to zero,
 * so that we can load as much of the dictionary as possible.
 */
static int ZSTD_dictTooBig(size_t const loadedDictSize)
{
    return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
}
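/*
 * Illustrative sketch (not part of this file): how the two checks above feed
 * the index reset decision in ZSTD_resetCCtx_internal() below. A fresh index
 * (ZSTDirp_reset) is chosen whenever the window index nears overflow, the
 * dictionary is too big to load in one go, or the context was never used.
 */
#if 0
static ZSTD_indexResetPolicy_e
example_chooseIndexResetPolicy(const ZSTD_CCtx* zc, size_t loadedDictSize)
{
    int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
    int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
    return (indexTooClose || dictTooBig || !zc->initialized)
            ? ZSTDirp_reset : ZSTDirp_continue;
}
#endif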
/*! ZSTD_resetCCtx_internal() :
 * @param loadedDictSize The size of the dictionary to be loaded
 * into the context, if any. If no dictionary is used, or the
 * dictionary is being attached / copied, then pass 0.
 * note : `params` are assumed fully validated at this stage.
 */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params const* params,
                                      U64 const pledgedSrcSize,
                                      size_t const loadedDictSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    ZSTD_cwksp* const ws = &zc->workspace;
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
                (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));

    zc->isFirstBlock = 1;

    /* Set applied params early so we can modify them for LDM,
     * and point params at the applied params.
     */
    zc->appliedParams = *params;
    params = &zc->appliedParams;

    assert(params->useRowMatchFinder != ZSTD_ps_auto);
    assert(params->useBlockSplitter != ZSTD_ps_auto);
    assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
        assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
        assert(params->ldmParams.hashRateLog < 32);
    }

    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
        U32    const divider = (params->cParams.minMatch==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
                ? ZSTD_compressBound(blockSize) + 1
                : 0;
        size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
                ? windowSize + blockSize
                : 0;
        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);

        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
        int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
        ZSTD_indexResetPolicy_e needsIndexReset =
                (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;

        size_t const neededSpace =
            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
                &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
                buffInSize, buffOutSize, pledgedSrcSize);
        int resizeWorkspace;

        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");

        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);

        {   /* Check if workspace is large enough, alloc a new one if needed */
            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
            resizeWorkspace = workspaceTooSmall || workspaceWasteful;
            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

            if (resizeWorkspace) {
                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
                            ZSTD_cwksp_sizeof(ws) >> 10,
                            neededSpace >> 10);

                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");

                needsIndexReset = ZSTDirp_reset;

                ZSTD_cwksp_free(ws, zc->customMem);
                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");

                DEBUGLOG(5, "reserving object space");
                /* Statically sized space.
                 * entropyWorkspace never moves,
                 * though prev/next block swap places */
                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
                RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
        }   }

        ZSTD_cwksp_clear(ws);

        /* init params */
        zc->blockState.matchState.cParams = params->cParams;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
        zc->consumedSrcSize = 0;
        zc->producedCSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSize = blockSize;

        xxh64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;
        zc->dictContentSize = 0;

        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);

        /* ZSTD_wildcopy() is used to copy into the literals buffer,
         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
         */
        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
        zc->seqStore.maxNbLit = blockSize;

        /* buffers */
        zc->bufferedPolicy = zbuff;
        zc->inBuffSize = buffInSize;
        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
        zc->outBuffSize = buffOutSize;
        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);

        /* ldm bucketOffsets table */
        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
            /* TODO: avoid memset? */
            size_t const numBuckets =
                  ((size_t)1) << (params->ldmParams.hashLog -
                                  params->ldmParams.bucketSizeLog);
            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
            ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
        }

        /* sequences storage */
        ZSTD_referenceExternalSequences(zc, NULL, 0);
        zc->seqStore.maxNbSeq = maxNbSeq;
        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));

        FORWARD_IF_ERROR(ZSTD_reset_matchState(
            &zc->blockState.matchState,
            ws,
            &params->cParams,
            params->useRowMatchFinder,
            crp,
            needsIndexReset,
            ZSTD_resetTarget_CCtx), "");
        /* ldm hash table */
        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
            /* TODO: avoid memset? */
            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
            zc->maxNbLdmSequences = maxNbLdmSeq;

            ZSTD_window_init(&zc->ldmState.window);
            zc->ldmState.loadedDictEnd = 0;
        }

        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));

        zc->initialized = 1;

        return 0;
    }
}

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx)
{
    int i;
    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}

/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place. */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};

static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
                                 const ZSTD_CCtx_params* params,
                                 U64 pledgedSrcSize)
{
    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
    return dedicatedDictSearch
        || ( ( pledgedSrcSize <= cutoff
            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
            || params->attachDictPref == ZSTD_dictForceAttach )
          && params->attachDictPref != ZSTD_dictForceCopy
          && !params->forceWindow ); /* dictMatchState isn't correctly
                                      * handled in _enforceMaxDist */
}
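/*
 * Illustrative sketch (not part of this file): how the cutoff table drives
 * the attach-vs-copy decision, assuming a lazy-strategy CDict (32 KB cutoff),
 * no dedicated dict search, default attach preference, and forceWindow off.
 */
#if 0
static void example_attachOrCopy(const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params)
{
    int const attachSmall = ZSTD_shouldAttachDict(cdict, params, 4 KB);  /* 1 : reference in-place */
    int const attachLarge = ZSTD_shouldAttachDict(cdict, params, 1 MB);  /* 0 : copy the tables   */
    (void)attachSmall; (void)attachLarge;
}
#endif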
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
                        const ZSTD_CDict* cdict,
                        ZSTD_CCtx_params params,
                        U64 pledgedSrcSize,
                        ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
                (unsigned long long)pledgedSrcSize);
    {
        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        /* pledgedSrcSize == 0 means 0! */

        if (cdict->matchState.dedicatedDictSearch) {
            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
        }

        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
                                                     cdict->dictContentSize, ZSTD_cpm_attachDict);
        params.cParams.windowLog = windowLog;
        params.useRowMatchFinder = cdict->useRowMatchFinder;    /* cdict overrides */
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
                                                 /* loadedDictSize */ 0,
                                                 ZSTDcrp_makeClean, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
    }

    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            /* loadedDictEnd is expressed within the referential of the active context */
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
        }
    }

    cctx->dictID = cdict->dictID;
    cctx->dictContentSize = cdict->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}

static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    assert(!cdict->matchState.dedicatedDictSearch);
    DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
                (unsigned long long)pledgedSrcSize);

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;
        params.useRowMatchFinder = cdict->useRowMatchFinder;
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
                                                 /* loadedDictSize */ 0,
                                                 ZSTDcrp_leaveDirty, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
    assert(params.useRowMatchFinder != ZSTD_ps_auto);

    /* copy tables */
    {   size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
                                    ? ((size_t)1 << cdict_cParams->chainLog)
                                    : 0;
        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;

        ZSTD_memcpy(cctx->blockState.matchState.hashTable,
                    cdict->matchState.hashTable,
                    hSize * sizeof(U32));
        /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
        if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
            ZSTD_memcpy(cctx->blockState.matchState.chainTable,
                        cdict->matchState.chainTable,
                        chainSize * sizeof(U32));
        }
        /* copy tag table */
        if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
            size_t const tagTableSize = hSize*sizeof(U16);
            ZSTD_memcpy(cctx->blockState.matchState.tagTable,
                        cdict->matchState.tagTable,
                        tagTableSize);
        }
    }

    /* Zero the hashTable3, since the cdict never fills it */
    {   int const h3log = cctx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
        assert(cdict->matchState.hashLog3 == 0);
        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);

    /* copy dictionary offsets */
    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;
    cctx->dictContentSize = cdict->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}

/* We have a choice between copying the dictionary context into the working
 * context, or referencing the dictionary context from the working context
 * in-place. We decide here which strategy to use. */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            const ZSTD_CCtx_params* params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
                (unsigned)pledgedSrcSize);

    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
        return ZSTD_resetCCtx_byAttachingCDict(
            cctx, cdict, *params, pledgedSrcSize, zbuff);
    } else {
        return ZSTD_resetCCtx_byCopyingCDict(
            cctx, cdict, *params, pledgedSrcSize, zbuff);
    }
}

/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 *  `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
                    "Can't copy a ctx that's not in init stage.");
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));

    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
        assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
        assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
        params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
        params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
        params.ldmParams = srcCCtx->appliedParams.ldmParams;
        params.fParams = fParams;
        ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
                                /* loadedDictSize */ 0,
                                ZSTDcrp_leaveDirty, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);

    /* copy tables */
    {   size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
                                                         srcCCtx->appliedParams.useRowMatchFinder,
                                                         0 /* forDDSDict */)
                                    ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
                                    : 0;
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        int const h3log = srcCCtx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;

        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
                    srcCCtx->blockState.matchState.hashTable,
                    hSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
                    srcCCtx->blockState.matchState.chainTable,
                    chainSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
                    srcCCtx->blockState.matchState.hashTable3,
                    h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);

    /* copy dictionary offsets */
    {   const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;
    dstCCtx->dictContentSize = srcCCtx->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}

/*! ZSTD_copyCCtx() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  pledgedSrcSize==0 means "unknown".
 *  @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);

    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
                                  fParams, pledgedSrcSize,
                                  zbuff);
}
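/*
 * Illustrative sketch (not part of this file): cloning a context right after
 * dictionary loading so the table-building work can be reused. Assumes the
 * ZSTD_compressBegin_usingDict() entry point from zstd's advanced API, which
 * leaves the context in the required ZSTDcs_init stage; error handling elided.
 */
#if 0
static void example_cloneCtx(ZSTD_CCtx* srcCCtx, ZSTD_CCtx* dstCCtx,
                             const void* dict, size_t dictSize)
{
    ZSTD_compressBegin_usingDict(srcCCtx, dict, dictSize, 3 /* level */);
    /* dstCCtx now holds copies of srcCCtx's tables and entropy state;
     * 0 means the source size is unknown. */
    ZSTD_copyCCtx(dstCCtx, srcCCtx, 0);
}
#endif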
#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
 *  It must be set to a clear 0/1 value, to remove branch during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
    U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));                 /* can be casted to int */

    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            U32 newVal;
            if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
                /* This write is pointless, but is required(?) for the compiler
                 * to auto-vectorize the loop. */
                newVal = ZSTD_DUBT_UNSORTED_MARK;
            } else if (table[cellNb] < reducerThreshold) {
                newVal = 0;
            } else {
                newVal = table[cellNb] - reducerValue;
            }
            table[cellNb] = newVal;
            cellNb++;
    }   }
}

static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}

/*! ZSTD_reduceIndex() :
 *  rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }

    if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
        U32 const chainSize = (U32)1 << params->cParams.chainLog;
        if (params->cParams.strategy == ZSTD_btlazy2)
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }

    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}


/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    assert(nbSeq <= seqStorePtr->maxNbSeq);
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const mlv = sequences[u].mlBase;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
    }
    if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
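/*
 * Illustrative sketch (not part of this file): the offset code emitted above
 * is just the position of the highest set bit of offBase; the lower bits are
 * then sent as extra bits by the sequence encoder.
 */
#if 0
static void example_offsetCode(void)
{
    U32 const offBase = 1024;                           /* 2^10 */
    BYTE const ofCode = (BYTE)ZSTD_highbit32(offBase);  /* == 10 */
    (void)ofCode;
}
#endif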
/* ZSTD_useTargetCBlockSize():
 * Returns if target compressed block size param is being used.
 * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
    return (cctxParams->targetCBlockSize != 0);
}

/* ZSTD_blockSplitterEnabled():
 * Returns if block splitting param is being used
 * If used, compression will do best effort to split a block in order to improve compression ratio.
 * At the time this function is called, the parameter must be finalized.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
    assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
    return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
}

/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
 * and size of the sequences statistics */
typedef struct {
    U32 LLtype;
    U32 Offtype;
    U32 MLtype;
    size_t size;
    size_t lastCountSize;    /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
} ZSTD_symbolEncodingTypeStats_t;

/* ZSTD_buildSequencesStatistics():
 * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
 * Modifies `nextEntropy` to have the appropriate values as a side effect.
 * nbSeq must be greater than 0.
 *
 * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
 */
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
                        const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
                              BYTE* dst, const BYTE* const dstEnd,
                              ZSTD_strategy strategy, unsigned* countWorkspace,
                              void* entropyWorkspace, size_t entropyWkspSize)
{
    BYTE* const ostart = dst;
    const BYTE* const oend = dstEnd;
    BYTE* op = ostart;
    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    ZSTD_symbolEncodingTypeStats_t stats;

    stats.lastCountSize = 0;
    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    assert(op <= oend);
    assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
        stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
                countWorkspace, max, llCodeTable, nbSeq,
                LL_defaultNorm, LL_defaultNormLog, MaxLL,
                prevEntropy->litlengthCTable,
                sizeof(prevEntropy->litlengthCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.LLtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(
            countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
        stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
                countWorkspace, max, ofCodeTable, nbSeq,
                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                prevEntropy->offcodeCTable,
                sizeof(prevEntropy->offcodeCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.Offtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(
            countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
        stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
                countWorkspace, max, mlCodeTable, nbSeq,
                ML_defaultNorm, ML_defaultNormLog, MaxML,
                prevEntropy->matchlengthCTable,
                sizeof(prevEntropy->matchlengthCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.MLtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    stats.size = (size_t)(op-ostart);
    return stats;
}
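/*
 * Illustrative sketch (not part of this file): the three encoding types chosen
 * above are later packed into the single `seqHead` byte written by the caller
 * as (LLtype<<6) + (Offtype<<4) + (MLtype<<2).
 */
#if 0
static BYTE example_packSeqHead(void)
{
    /* LLtype = set_compressed (2), Offtype = set_basic (0), MLtype = set_rle (1) */
    return (BYTE)((2<<6) + (0<<4) + (1<<2));  /* == 0x84 */
}
#endif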
/* ZSTD_entropyCompressSeqStore_internal():
 * compresses both literals and sequences
 * Returns compressed size of block, or a zstd error.
 */
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* entropyWorkspace, size_t entropyWkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned* count = (unsigned*)entropyWorkspace;
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t lastCountSize;

    entropyWorkspace = count + (MaxSeq + 1);
    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);

    DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
        size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
        /* Base suspicion of uncompressibility on ratio of literals to sequences */
        unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy,
                                    ZSTD_literalsCompressionIsDisabled(cctxParams),
                                    op, dstCapacity,
                                    literals, litSize,
                                    entropyWorkspace, entropyWkspSize,
                                    bmi2, suspectUncompressible);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
    if (nbSeq < 128) {
        *op++ = (BYTE)nbSeq;
    } else if (nbSeq < LONGNBSEQ) {
        op[0] = (BYTE)((nbSeq>>8) + 0x80);
        op[1] = (BYTE)nbSeq;
        op+=2;
    } else {
        op[0]=0xFF;
        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
        op+=3;
    }
    assert(op <= oend);
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return (size_t)(op - ostart);
    }
    {   ZSTD_symbolEncodingTypeStats_t stats;
        BYTE* seqHead = op++;
        /* build stats for sequences */
        stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
                                             &prevEntropy->fse, &nextEntropy->fse,
                                              op, oend,
                                              strategy, count,
                                              entropyWorkspace, entropyWkspSize);
        FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
        *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
        lastCountSize = stats.lastCountSize;
        op += stats.size;
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, (size_t)(oend - op),
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
"ZSTD_encodeSequences failed"); op += bitstreamSize; assert(op <= oend); /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ if (lastCountSize && (lastCountSize + bitstreamSize) < 4) { /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(lastCountSize + bitstreamSize == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } } DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart)); return (size_t)(op - ostart); } MEM_STATIC size_t ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, size_t srcSize, void* entropyWorkspace, size_t entropyWkspSize, int bmi2) { size_t const cSize = ZSTD_entropyCompressSeqStore_internal( seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity, entropyWorkspace, entropyWkspSize, bmi2); if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. */ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) return 0; /* block not compressed */ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); /* Check compressibility */ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); return cSize; } /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) { static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra, ZSTD_compressBlock_btultra2 }, { ZSTD_compressBlock_fast_extDict /* default for 0 */, ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict, ZSTD_compressBlock_btultra_extDict }, { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, ZSTD_compressBlock_fast_dictMatchState, ZSTD_compressBlock_doubleFast_dictMatchState, ZSTD_compressBlock_greedy_dictMatchState, ZSTD_compressBlock_lazy_dictMatchState, ZSTD_compressBlock_lazy2_dictMatchState, ZSTD_compressBlock_btlazy2_dictMatchState, ZSTD_compressBlock_btopt_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState }, { NULL /* default for 0 */, NULL, NULL, ZSTD_compressBlock_greedy_dedicatedDictSearch, 
/* ZSTD_selectBlockCompressor() :
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra,
          ZSTD_compressBlock_btultra2 },
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict,
          ZSTD_compressBlock_btultra_extDict },
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState },
        { NULL  /* default for 0 */,
          NULL,
          NULL,
          ZSTD_compressBlock_greedy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
          NULL,
          NULL,
          NULL,
          NULL }
    };
    ZSTD_blockCompressor selectedCompressor;
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
    if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
        static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
            { ZSTD_compressBlock_greedy_row,
              ZSTD_compressBlock_lazy_row,
              ZSTD_compressBlock_lazy2_row },
            { ZSTD_compressBlock_greedy_extDict_row,
              ZSTD_compressBlock_lazy_extDict_row,
              ZSTD_compressBlock_lazy2_extDict_row },
            { ZSTD_compressBlock_greedy_dictMatchState_row,
              ZSTD_compressBlock_lazy_dictMatchState_row,
              ZSTD_compressBlock_lazy2_dictMatchState_row },
            { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
              ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
              ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
        };
        DEBUGLOG(4, "Selecting a row-based matchfinder");
        assert(useRowMatchFinder != ZSTD_ps_auto);
        selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
    } else {
        selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
    }
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}

static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
                                   const BYTE* anchor, size_t lastLLSize)
{
    ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
    seqStorePtr->lit += lastLLSize;
}

void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
    ssPtr->lit = ssPtr->litStart;
    ssPtr->sequences = ssPtr->sequencesStart;
    ssPtr->longLengthType = ZSTD_llt_none;
}
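/*
 * Illustrative sketch (not part of this file): resolving a block compressor.
 * With the row-based matchfinder enabled, a greedy/noDict setup resolves to
 * ZSTD_compressBlock_greedy_row from the row-based table above.
 */
#if 0
static ZSTD_blockCompressor example_pickGreedyRow(void)
{
    return ZSTD_selectBlockCompressor(ZSTD_greedy, ZSTD_ps_enable, ZSTD_noDict);
}
#endif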
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;

static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
        } else {
            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        }
        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    /* required for optimal parser to read stats from dictionary */
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
    /* tell the optimal parser how we expect to compress literals */
    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 curr = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        if (curr > ms->nextToUpdate + 384)
            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       zc->appliedParams.useRowMatchFinder,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize), "");
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       zc->appliedParams.useRowMatchFinder,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
                                                                                    zc->appliedParams.useRowMatchFinder,
                                                                                    dictMode);
            ms->ldmSeqStore = NULL;
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }
    return ZSTDbss_compress;
}

static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
    size_t literalsRead = 0;
    size_t lastLLSize;

    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
    size_t i;
    repcodes_t updatedRepcodes;

    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
    /* Ensure we have enough space for last literals "sequence" */
    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    for (i = 0; i < seqStoreSeqSize; ++i) {
        U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
        outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
        outSeqs[i].rep = 0;

        if (i == seqStore->longLengthPos) {
            if (seqStore->longLengthType == ZSTD_llt_literalLength) {
                outSeqs[i].litLength += 0x10000;
            } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
                outSeqs[i].matchLength += 0x10000;
            }
        }

        if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
            /* Derive the correct offset corresponding to a repcode */
            outSeqs[i].rep = seqStoreSeqs[i].offBase;
            if (outSeqs[i].litLength != 0) {
                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
            } else {
                if (outSeqs[i].rep == 3) {
                    rawOffset = updatedRepcodes.rep[0] - 1;
                } else {
                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
                }
            }
        }
        outSeqs[i].offset = rawOffset;
        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
           so we provide seqStoreSeqs[i].offset - 1 */
        ZSTD_updateRep(updatedRepcodes.rep,
                       seqStoreSeqs[i].offBase - 1,
                       seqStoreSeqs[i].litLength == 0);
        literalsRead += outSeqs[i].litLength;
    }
    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
     * for the block boundary, according to the API.
     */
    assert(seqStoreLiteralsSize >= literalsRead);
    lastLLSize = seqStoreLiteralsSize - literalsRead;
    outSeqs[i].litLength = (U32)lastLLSize;
    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
    seqStoreSeqSize++;
    zc->seqCollector.seqIndex += seqStoreSeqSize;
}

size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
                              size_t outSeqsSize, const void* src, size_t srcSize)
{
    const size_t dstCapacity = ZSTD_compressBound(srcSize);
    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
    SeqCollector seqCollector;

    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");

    seqCollector.collectSequences = 1;
    seqCollector.seqStart = outSeqs;
    seqCollector.seqIndex = 0;
    seqCollector.maxSequences = outSeqsSize;
    zc->seqCollector = seqCollector;

    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
    ZSTD_customFree(dst, ZSTD_defaultCMem);
    return zc->seqCollector.seqIndex;
}

size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize)
{
    size_t in = 0;
    size_t out = 0;
    for (; in < seqsSize; ++in) {
        if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
            if (in != seqsSize - 1) {
                sequences[in+1].litLength += sequences[in].litLength;
            }
        } else {
            sequences[out] = sequences[in];
            ++out;
        }
    }
    return out;
}
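/*
 * Illustrative sketch (not part of this file): extracting the sequences for a
 * buffer and stripping the (of: 0, ml: 0) block delimiters emitted above.
 * The caller-provided capacity is a hypothetical choice for this sketch;
 * error handling elided.
 */
#if 0
static size_t example_collectSequences(ZSTD_CCtx* zc,
                                       const void* src, size_t srcSize,
                                       ZSTD_Sequence* seqBuf, size_t seqBufCapacity)
{
    size_t const nbSeqsWithDelims = ZSTD_generateSequences(zc, seqBuf, seqBufCapacity, src, srcSize);
    /* fold each per-block "last literals" entry into the following sequence */
    return ZSTD_mergeBlockDelimiters(seqBuf, nbSeqsWithDelims);
}
#endif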
/* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */
static int ZSTD_isRLE(const BYTE* src, size_t length) {
    const BYTE* ip = src;
    const BYTE value = ip[0];
    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
    const size_t unrollSize = sizeof(size_t) * 4;
    const size_t unrollMask = unrollSize - 1;
    const size_t prefixLength = length & unrollMask;
    size_t i;
    size_t u;
    if (length == 1) return 1;
    /* Check if prefix is RLE first before using unrolled loop */
    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
        return 0;
    }
    for (i = prefixLength; i != length; i += unrollSize) {
        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
            if (MEM_readST(ip + i + u) != valueST) {
                return 0;
    }   }   }
    return 1;
}

/* Returns true if the given block may be RLE.
 * This is just a heuristic based on the compressibility.
 * It may return both false positives and false negatives.
 */
static int ZSTD_maybeRLE(seqStore_t const* seqStore)
{
    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);

    return nbSeqs < 4 && nbLits < 10;
}

static void
ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
{
    ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
    bs->prevCBlock = bs->nextCBlock;
    bs->nextCBlock = tmp;
}

/* Writes the block header */
static void
writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
{
    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
    MEM_writeLE24(op, cBlockHeader);
    DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
}
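/*
 * Illustrative sketch (not part of this file): the 3-byte block header packs
 * lastBlock (bit 0), the block type (bits 1-2) and the size (bits 3-23).
 */
#if 0
static U32 example_blockHeader(void)
{
    /* non-last compressed block with cSize == 100 :
     * 0 + (bt_compressed << 1) + (100 << 3) = 4 + 800 = 804, written LE24 */
    return 0 + (((U32)bt_compressed)<<1) + (U32)(100 << 3);
}
#endif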
/* ZSTD_buildBlockEntropyStats_literals() :
 *  Builds entropy for the literals.
 *  Stores literals block type (raw, rle, compressed, repeat) and
 *  huffman description table to hufMetadata.
 *  Requires ENTROPY_WORKSPACE_SIZE workspace
 *  @return : size of huffman description table or error code */
static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
                                            const ZSTD_hufCTables_t* prevHuf,
                                                  ZSTD_hufCTables_t* nextHuf,
                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                  const int literalsCompressionIsDisabled,
                                                  void* workspace, size_t wkspSize)
{
    BYTE* const wkspStart = (BYTE*)workspace;
    BYTE* const wkspEnd = wkspStart + wkspSize;
    BYTE* const countWkspStart = wkspStart;
    unsigned* const countWksp = (unsigned*)workspace;
    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
    BYTE* const nodeWksp = countWkspStart + countWkspSize;
    const size_t nodeWkspSize = wkspEnd-nodeWksp;
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned huffLog = HUF_TABLELOG_DEFAULT;
    HUF_repeat repeat = prevHuf->repeatMode;
    DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (literalsCompressionIsDisabled) {
        DEBUGLOG(5, "set_basic - disabled");
        hufMetadata->hType = set_basic;
        return 0;
    }

    /* small ? don't even attempt compression (speed opt) */
#ifndef COMPRESS_LITERALS_SIZE_MIN
#define COMPRESS_LITERALS_SIZE_MIN 63
#endif
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
        if (srcSize <= minLitSize) {
            DEBUGLOG(5, "set_basic - too small");
            hufMetadata->hType = set_basic;
            return 0;
        }
    }

    /* Scan input and build symbol stats */
    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
        if (largest == srcSize) {
            DEBUGLOG(5, "set_rle");
            hufMetadata->hType = set_rle;
            return 0;
        }
        if (largest <= (srcSize >> 7)+4) {
            DEBUGLOG(5, "set_basic - no gain");
            hufMetadata->hType = set_basic;
            return 0;
        }
    }

    /* Validate the previous Huffman table */
    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
        repeat = HUF_repeat_none;
    }

    /* Build Huffman Tree */
    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
                                                    maxSymbolValue, huffLog,
                                                    nodeWksp, nodeWkspSize);
        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
        huffLog = (U32)maxBits;
        {   /* Build and write the CTable */
            size_t const newCSize = HUF_estimateCompressedSize(
                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
            size_t const hSize = HUF_writeCTable_wksp(
                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
                    nodeWksp, nodeWkspSize);
            /* Check against repeating the previous CTable */
            if (repeat != HUF_repeat_none) {
                size_t const oldCSize = HUF_estimateCompressedSize(
                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
                    DEBUGLOG(5, "set_repeat - smaller");
                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
                    hufMetadata->hType = set_repeat;
                    return 0;
                }
            }
            if (newCSize + hSize >= srcSize) {
                DEBUGLOG(5, "set_basic - no gains");
                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
                hufMetadata->hType = set_basic;
                return 0;
            }
            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
            hufMetadata->hType = set_compressed;
            nextHuf->repeatMode = HUF_repeat_check;
            return hSize;
    }   }
}
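/*
 * Illustrative note (not part of this file): the repeat-vs-rebuild decision
 * above compares estimated costs. With, say, oldCSize = 1000, newCSize = 950
 * and hSize = 60, repeating the previous table wins (1000 <= 950 + 60),
 * because a fresh table would also have to ship its 60-byte description.
 */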
/* ZSTD_buildDummySequencesStatistics():
 * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
 * and updates nextEntropy to the appropriate repeatMode.
 */
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy)
{
    ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
    nextEntropy->litlength_repeatMode = FSE_repeat_none;
    nextEntropy->offcode_repeatMode = FSE_repeat_none;
    nextEntropy->matchlength_repeatMode = FSE_repeat_none;
    return stats;
}

/* ZSTD_buildBlockEntropyStats_sequences() :
 *  Builds entropy for the sequences.
 *  Stores symbol compression modes and fse table to fseMetadata.
 *  Requires ENTROPY_WORKSPACE_SIZE wksp.
 *  @return : size of fse tables or error code */
static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
                                              const ZSTD_fseCTables_t* prevEntropy,
                                                    ZSTD_fseCTables_t* nextEntropy,
                                              const ZSTD_CCtx_params* cctxParams,
                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                    void* workspace, size_t wkspSize)
{
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* const ostart = fseMetadata->fseTablesBuffer;
    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
    BYTE* op = ostart;
    unsigned* countWorkspace = (unsigned*)workspace;
    unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
    size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
    ZSTD_symbolEncodingTypeStats_t stats;

    DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
    stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
                                          prevEntropy, nextEntropy, op, oend,
                                          strategy, countWorkspace,
                                          entropyWorkspace, entropyWorkspaceSize)
                       : ZSTD_buildDummySequencesStatistics(nextEntropy);
    FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
    fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
    fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
    fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
    fseMetadata->lastCountSize = stats.lastCountSize;
    return stats.size;
}
 *  Requires workspace size ENTROPY_WORKSPACE_SIZE
 *  @return : 0 on success or error code */
size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
                             const ZSTD_entropyCTables_t* prevEntropy,
                                   ZSTD_entropyCTables_t* nextEntropy,
                             const ZSTD_CCtx_params* cctxParams,
                                   ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                   void* workspace, size_t wkspSize)
{
    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
    entropyMetadata->hufMetadata.hufDesSize =
        ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
                                            &prevEntropy->huf, &nextEntropy->huf,
                                            &entropyMetadata->hufMetadata,
                                            ZSTD_literalsCompressionIsDisabled(cctxParams),
                                            workspace, wkspSize);
    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
    entropyMetadata->fseMetadata.fseTablesSize =
        ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
                                              &prevEntropy->fse, &nextEntropy->fse,
                                              cctxParams,
                                              &entropyMetadata->fseMetadata,
                                              workspace, wkspSize);
    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
    return 0;
}

/* Returns the size estimate for the literals section (header + content) of a block */
static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
                                             const ZSTD_hufCTables_t* huf,
                                             const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                             void* workspace, size_t wkspSize,
                                             int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
    U32 singleStream = litSize < 256;

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp(countWksp, &maxSymbolValue,
                                               (const BYTE*)literals, litSize,
                                               workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
            return cLitSizeEstimate + literalSectionHeaderSize;
        }
    }
    assert(0); /* impossible */
    return 0;
}

/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
                        const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
                        const FSE_CTable* fseCTable,
                        const U8* additionalBits,
                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                        void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid.
         */
        assert(max <= defaultMax);
        (void)defaultMax;
        cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
        return nbSeq * 10;
    }
    while (ctp < ctEnd) {
        if (additionalBits)
            cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else
            cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits >> 3;
}

/* Returns the size estimate for the sequences section (header + content) of a block */
static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
                                               const BYTE* llCodeTable,
                                               const BYTE* mlCodeTable,
                                               size_t nbSeq,
                                               const ZSTD_fseCTables_t* fseTables,
                                               const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                               void* workspace, size_t wkspSize,
                                               int writeEntropy)
{
    size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
    size_t cSeqSizeEstimate = 0;
    cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
                                         fseTables->offcodeCTable, NULL,
                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
                                         fseTables->litlengthCTable, LL_bits,
                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
                                         fseTables->matchlengthCTable, ML_bits,
                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
                                         workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}

/* Returns the size estimate for a given stream of literals, of, ll, ml */
static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
                                     const BYTE* ofCodeTable,
                                     const BYTE* llCodeTable,
                                     const BYTE* mlCodeTable,
                                     size_t nbSeq,
                                     const ZSTD_entropyCTables_t* entropy,
                                     const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                     void* workspace, size_t wkspSize,
                                     int writeLitEntropy, int writeSeqEntropy)
{
    size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
                                                         &entropy->huf, &entropyMetadata->hufMetadata,
                                                         workspace, wkspSize, writeLitEntropy);
    size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                         workspace, wkspSize, writeSeqEntropy);
    return seqSize + literalsSize + ZSTD_blockHeaderSize;
}
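
/*
 * Editor's sketch (illustrative only, kept out of the build with #if 0):
 * the header arithmetic used by ZSTD_estimateBlockSize_sequences() above,
 * worked through on its own. The sequences section spends one byte on
 * seqHead plus one to three bytes encoding the sequence count.
 */
#if 0
static size_t example_seq_section_header_size(size_t nbSeq)
{
    /* 1 byte seqHead + 1 byte minimum for the count,
     * +1 once nbSeq needs a second byte, +1 again past LONGNBSEQ */
    return 1 + 1 + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
}
/* example_seq_section_header_size(100) == 2 : seqHead + 1-byte count
 * example_seq_section_header_size(200) == 3 : seqHead + 2-byte count */
#endif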
/* Builds entropy statistics and uses them for blocksize estimation.
 *
 * Returns the estimated compressed size of the seqStore, or a zstd error.
 */
static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc)
{
    ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
    DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
                    &zc->blockState.prevCBlock->entropy,
                    &zc->blockState.nextCBlock->entropy,
                    &zc->appliedParams,
                    entropyMetadata,
                    zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
    return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
                    seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
                    (size_t)(seqStore->sequences - seqStore->sequencesStart),
                    &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
                    (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
}

/* Returns literals bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore)
{
    size_t literalsBytes = 0;
    size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
    size_t i;
    for (i = 0; i < nbSeqs; ++i) {
        seqDef seq = seqStore->sequencesStart[i];
        literalsBytes += seq.litLength;
        if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
            literalsBytes += 0x10000;
        }
    }
    return literalsBytes;
}

/* Returns match bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore)
{
    size_t matchBytes = 0;
    size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
    size_t i;
    for (i = 0; i < nbSeqs; ++i) {
        seqDef seq = seqStore->sequencesStart[i];
        matchBytes += seq.mlBase + MINMATCH;
        if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
            matchBytes += 0x10000;
        }
    }
    return matchBytes;
}

/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
 * Stores the result in resultSeqStore.
 */
static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
                               const seqStore_t* originalSeqStore,
                                     size_t startIdx, size_t endIdx)
{
    BYTE* const litEnd = originalSeqStore->lit;
    size_t literalsBytes;
    size_t literalsBytesPreceding = 0;

    *resultSeqStore = *originalSeqStore;
    if (startIdx > 0) {
        resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
        literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
    }

    /* Move longLengthPos into the correct position if necessary */
    if (originalSeqStore->longLengthType != ZSTD_llt_none) {
        if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
            resultSeqStore->longLengthType = ZSTD_llt_none;
        } else {
            resultSeqStore->longLengthPos -= (U32)startIdx;
        }
    }
    resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
    resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
    literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
    resultSeqStore->litStart += literalsBytesPreceding;
    if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
        /* This accounts for possible last literals if the derived chunk reaches the end of the block */
        resultSeqStore->lit = litEnd;
    } else {
        resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
    }
    resultSeqStore->llCode += startIdx;
    resultSeqStore->mlCode += startIdx;
    resultSeqStore->ofCode += startIdx;
}

/*
 * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
 * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
 */
static U32
ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
{
    U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0;  /* [ 0 - 3 ] */
    assert(STORED_IS_REPCODE(offCode));
    if (adjustedOffCode == ZSTD_REP_NUM) {
        /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
        assert(rep[0] > 0);
        return rep[0] - 1;
    }
    return rep[adjustedOffCode];
}

/*
 * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
 * due to emission of RLE/raw blocks that disturb the offset history,
 * and replaces any repcodes within the seqStore that may be invalid.
 *
 * dRepcodes are updated as would be on the decompression side.
 * cRepcodes are updated exactly in accordance with the seqStore.
 *
 * Note : this function assumes seq->offBase respects the following numbering scheme :
 *        0 : invalid
 *        1-3 : repcode 1-3
 *        4+ : real_offset+3
 */
static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
                                          seqStore_t* const seqStore, U32 const nbSeq)
{
    U32 idx = 0;
    for (; idx < nbSeq; ++idx) {
        seqDef* const seq = seqStore->sequencesStart + idx;
        U32 const ll0 = (seq->litLength == 0);
        U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
        assert(seq->offBase > 0);
        if (STORED_IS_REPCODE(offCode)) {
            U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
            U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
            /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
             * the repcode with the offset it actually references, determined by the compression
             * repcode history.
             */
            if (dRawOffset != cRawOffset) {
                seq->offBase = cRawOffset + ZSTD_REP_NUM;
            }
        }
        /* Compression repcode history is always updated with values directly from the unmodified seqStore.
         * Decompression repcode history may use modified seq->offset value taken from compression repcode history.
         */
        ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
        ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
    }
}
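
/*
 * Editor's sketch (illustrative only, excluded from the build): how
 * ZSTD_resolveRepcodeToRawOffset() maps stored repcodes onto a hypothetical
 * history rep = {8, 4, 2}. Stored values 0..2 denote repcodes 1..3; a zero
 * literal length (ll0 == 1) shifts the selection by one, and the
 * shifted-past-the-end case resolves to rep[0] - 1.
 */
#if 0
static void example_repcode_resolution(void)
{
    U32 const rep[ZSTD_REP_NUM] = { 8, 4, 2 };
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 0, 0) == 8);  /* repcode 1 */
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 1, 0) == 4);  /* repcode 2 */
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 0, 1) == 4);  /* ll0 shifts to repcode 2 */
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 2, 1) == 7);  /* wraps to rep[0] - 1 */
}
#endif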
/* ZSTD_compressSeqStore_singleBlock():
 * Compresses a seqStore into a block with a block header, into the buffer dst.
 *
 * Returns the total size of that block (including header) or a ZSTD error code.
 */
static size_t
ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
                                  repcodes_t* const dRep, repcodes_t* const cRep,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                                  U32 lastBlock, U32 isPartition)
{
    const U32 rleMaxLength = 25;
    BYTE* op = (BYTE*)dst;
    const BYTE* ip = (const BYTE*)src;
    size_t cSize;
    size_t cSeqsSize;

    /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
    repcodes_t const dRepOriginal = *dRep;
    DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
    if (isPartition)
        ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));

    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
    cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
                &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
                &zc->appliedParams,
                op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
                srcSize,
                zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
                zc->bmi2);
    FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");

    if (!zc->isFirstBlock &&
        cSeqsSize < rleMaxLength &&
        ZSTD_isRLE((BYTE const*)src, srcSize)) {
        /* We don't want to emit our first block as a RLE even if it qualifies because
         * doing so will cause the decoder (cli only) to throw a "should consume all input error."
         * This is only an issue for zstd <= v1.4.3
         */
        cSeqsSize = 1;
    }

    if (zc->seqCollector.collectSequences) {
        ZSTD_copyBlockSequences(zc);
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        return 0;
    }

    if (cSeqsSize == 0) {
        cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
        FORWARD_IF_ERROR(cSize, "Nocompress block failed");
        DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
        *dRep = dRepOriginal; /* reset simulated decompression repcode history */
    } else if (cSeqsSize == 1) {
        cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
        FORWARD_IF_ERROR(cSize, "RLE compress block failed");
        DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
        *dRep = dRepOriginal; /* reset simulated decompression repcode history */
    } else {
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
        cSize = ZSTD_blockHeaderSize + cSeqsSize;
        DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
    }

    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

/* Struct to keep track of where we are in our recursive calls. */
typedef struct {
    U32* splitLocations;    /* Array of split indices */
    size_t idx;             /* The current index within splitLocations being worked on */
} seqStoreSplits;

#define MIN_SEQUENCES_BLOCK_SPLITTING 300

/* Helper function to perform the recursive search for block splits.
 * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
 * If advantageous to split, then we recurse down the two sub-blocks.
 * If not, or if an error occurred in estimation, then we do not recurse.
 *
 * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
 * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
 * In practice, recursion depth usually doesn't go beyond 4.
 *
 * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196
 * with the current blockSize maximum of 128 KB, this value is actually impossible to reach.
 */
static void
ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
                             ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
{
    seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
    seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
    seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
    size_t estimatedOriginalSize;
    size_t estimatedFirstHalfSize;
    size_t estimatedSecondHalfSize;
    size_t midIdx = (startIdx + endIdx)/2;

    if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
        DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences");
        return;
    }
    DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
    ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
    ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
    ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
    estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
    estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
    estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
    DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
             estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
    if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
        return;
    }
    if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
        ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
        splits->splitLocations[splits->idx] = (U32)midIdx;
        splits->idx++;
        ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
    }
}

/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio.
 *
 * Returns the number of splits made (which equals the size of the partition table - 1).
 */
static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
{
    seqStoreSplits splits = { partitions, 0 };
    if (nbSeq <= 4) {
        DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
        /* Refuse to try and split anything with less than 4 sequences */
        return 0;
    }
    ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
    splits.splitLocations[splits.idx] = nbSeq;
    DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx + 1);
    return splits.idx;
}
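
/*
 * Editor's sketch (illustrative only, excluded from the build): the split
 * search above, reduced to its core. Given any cost oracle over a range of
 * sequences, split at the midpoint whenever the two halves are estimated to
 * be cheaper than the whole, and recurse into each half. The hypothetical
 * cost() callback stands in for
 * ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize().
 */
#if 0
static void example_derive_splits(size_t (*cost)(size_t startIdx, size_t endIdx),
                                  size_t startIdx, size_t endIdx,
                                  U32* splits, size_t* idx)
{
    size_t const midIdx = (startIdx + endIdx) / 2;
    if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING)
        return;
    if (cost(startIdx, midIdx) + cost(midIdx, endIdx) < cost(startIdx, endIdx)) {
        example_derive_splits(cost, startIdx, midIdx, splits, idx);
        splits[(*idx)++] = (U32)midIdx;
        example_derive_splits(cost, midIdx, endIdx, splits, idx);
    }
}
#endif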
/* ZSTD_compressBlock_splitBlock():
 * Attempts to split a given block into multiple blocks to improve compression ratio.
 *
 * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
 */
static size_t
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
                                       const void* src, size_t blockSize,
                                       U32 lastBlock, U32 nbSeq)
{
    size_t cSize = 0;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    size_t i = 0;
    size_t srcBytesTotal = 0;
    U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
    seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
    seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
    size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);

    /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
     * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
     * separate repcode histories that simulate repcode history on compression and decompression side,
     * and use the histories to determine whether we must replace a particular repcode with its raw offset.
     *
     * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
     *    or RLE. This allows us to retrieve the offset value that an invalid repcode references within
     *    a nocompress/RLE block.
     * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
     *    the replacement offset value rather than the original repcode to update the repcode history.
     *    dRep also will be the final repcode history sent to the next block.
     *
     * See ZSTD_seqStore_resolveOffCodes() for more details.
     */
    repcodes_t dRep;
    repcodes_t cRep;
    ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));

    DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                (unsigned)zc->blockState.matchState.nextToUpdate);

    if (numSplits == 0) {
        size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
                                                                   &dRep, &cRep,
                                                                    op, dstCapacity,
                                                                    ip, blockSize,
                                                                    lastBlock, 0 /* isPartition */);
        FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
        DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
        assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
        return cSizeSingleBlock;
    }

    ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
    for (i = 0; i <= numSplits; ++i) {
        size_t srcBytes;
        size_t cSizeChunk;
        U32 const lastPartition = (i == numSplits);
        U32 lastBlockEntireSrc = 0;

        srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
        srcBytesTotal += srcBytes;
        if (lastPartition) {
            /* This is the final partition, need to account for possible last literals */
            srcBytes += blockSize - srcBytesTotal;
            lastBlockEntireSrc = lastBlock;
        } else {
            ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
        }

        cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
                                                      &dRep, &cRep,
                                                       op, dstCapacity,
                                                       ip, srcBytes,
                                                       lastBlockEntireSrc, 1 /* isPartition */);
        DEBUGLOG(5, "Estimated size: %zu actual size: %zu",
                 ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
        FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");

        ip += srcBytes;
        op += cSizeChunk;
        dstCapacity -= cSizeChunk;
        cSize += cSizeChunk;
        *currSeqStore = *nextSeqStore;
        assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
    }
    /* cRep and dRep may have diverged during the compression.
     * If so, we use the dRep repcodes for the next block.
     */
    ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
    return cSize;
}

static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
                                            void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize, U32 lastBlock)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    U32 nbSeq;
    size_t cSize;
    DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
    assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) {
            if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
            cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
            FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
            DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
            return cSize;
        }
        nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
    }

    cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
    FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
    return cSize;
}

static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize, U32 frame)
{
    /* This is the upper bound for the length of an rle block.
     * This isn't the actual upper bound.
     * Finding the real threshold needs further investigation.
     */
    const U32 rleMaxLength = 25;
    size_t cSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                (unsigned)zc->blockState.matchState.nextToUpdate);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
    }

    if (zc->seqCollector.collectSequences) {
        ZSTD_copyBlockSequences(zc);
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        return 0;
    }

    /* encode sequences and literals */
    cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
            zc->bmi2);

    if (frame &&
        /* We don't want to emit our first block as a RLE even if it qualifies because
         * doing so will cause the decoder (cli only) to throw a "should consume all input error."
         * This is only an issue for zstd <= v1.4.3
         */
        !zc->isFirstBlock &&
        cSize < rleMaxLength &&
        ZSTD_isRLE(ip, srcSize))
    {
        cSize = 1;
        op[0] = ip[0];
    }

out:
    if (!ZSTD_isError(cSize) && cSize > 1) {
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
     */
    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const size_t bss, U32 lastBlock)
{
    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
    if (bss == ZSTDbss_compress) {
        if (/* We don't want to emit our first block as a RLE even if it qualifies because
             * doing so will cause the decoder (cli only) to throw a "should consume all input error."
             * This is only an issue for zstd <= v1.4.3
             */
            !zc->isFirstBlock &&
            ZSTD_maybeRLE(&zc->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize))
        {
            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
        }
        /* Attempt superblock compression.
         *
         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
         * standard ZSTD_compressBound(). This is a problem, because even if we have
         * space now, taking an extra byte now could cause us to run out of space later
         * and violate ZSTD_compressBound().
         *
         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
         *
         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
         * uncompressed block in these cases:
         *   * cSize == 0: Return code for an uncompressed block.
         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
         *     output space.
         *   * cSize >= blockBound(srcSize): We have expanded the block too much so
         *     emit an uncompressed block.
         */
        {   size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
            if (cSize != ERROR(dstSize_tooSmall)) {
                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
                    ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
                    return cSize;
                }
            }
        }
    }

    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
    /* Superblock compression failed, attempt to emit a single no compress block.
     * The decoder will be able to stream this block since it is uncompressed.
     */
    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}

static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               U32 lastBlock)
{
    size_t cSize = 0;
    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");

    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");

    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
                                         ZSTD_cwksp* ws,
                                         ZSTD_CCtx_params const* params,
                                         void const* ip,
                                         void const* iend)
{
    U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
    U32 const maxDist = (U32)1 << params->cParams.windowLog;
    if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
        ZSTD_cwksp_mark_tables_dirty(ws);
        ZSTD_reduceIndex(ms, params, correction);
        ZSTD_cwksp_mark_tables_clean(ws);
        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
        else ms->nextToUpdate -= correction;
        /* invalidate dictionaries on overflow correction */
        ms->loadedDictEnd = 0;
        ms->dictMatchState = NULL;
    }
}

/*! ZSTD_compress_frameChunk() :
 *  Compress a chunk of data into one or multiple blocks.
 *  All blocks will be terminated, all input will be consumed.
 *  Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *  Frame is supposed already started (header already produced)
 * @return : compressed size, or an error code
 */
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;

    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);

    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        xxh64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
                        dstSize_tooSmall,
                        "not enough space to store compressed block");
        if (remaining < blockSize) blockSize = remaining;

        ZSTD_overflowCorrectIfNeeded(ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
        ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);

        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize;
            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
                assert(cSize > 0);
                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
            } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
                cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
                assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
            } else {
                cSize = ZSTD_compressBlock_internal(cctx,
                                        op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
                                        ip, blockSize, 1 /* frame */);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");

                if (cSize == 0) {  /* block is not compressible */
                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
                } else {
                    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                    MEM_writeLE24(op, cBlockHeader);
                    cSize += ZSTD_blockHeaderSize;
                }
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            cctx->isFirstBlock = 0;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (unsigned)cSize);
        }
    }

    if (lastFrameChunk && (op > ostart)) cctx->stage = ZSTDcs_ending;
    return (size_t)(op - ostart);
}
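
/*
 * Editor's sketch (illustrative only, excluded from the build): the 24-bit
 * block header composed inside ZSTD_compress_frameChunk() above. Bit 0 is
 * the last-block flag, bits 1-2 the block type, and bits 3-23 the size
 * field (content size for RLE blocks, compressed size otherwise).
 */
#if 0
static U32 example_block_header(U32 lastBlock, U32 blockType, U32 size)
{
    return lastBlock + (blockType << 1) + (size << 3);  /* written with MEM_writeLE24() */
}
/* e.g. a non-last compressed block of 1000 bytes:
 * example_block_header(0, bt_compressed, 1000) */
#endif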
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    const ZSTD_CCtx_params* params,
                                    U64 pledgedSrcSize, U32 dictID)
{
    BYTE* const op = (BYTE*)dst;
    U32 const dictIDSizeCodeLength = (dictID > 0) + (dictID >= 256) + (dictID >= 65536);   /* 0-3 */
    U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;    /* 0-3 */
    U32 const checksumFlag = params->fParams.checksumFlag > 0;
    U32 const windowSize = (U32)1 << params->cParams.windowLog;
    U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32 const fcsCode = params->fParams.contentSizeFlag ?
                        (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536+256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0;  /* 0-3 */
    BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6));
    size_t pos = 0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch (dictIDSizeCode) {
    default:
        assert(0); /* impossible */
        ZSTD_FALLTHROUGH;
    case 0: break;
    case 1: op[pos] = (BYTE)(dictID); pos++; break;
    case 2: MEM_writeLE16(op+pos, (U16)dictID); pos += 2; break;
    case 3: MEM_writeLE32(op+pos, dictID); pos += 4; break;
    }
    switch (fcsCode) {
    default:
        assert(0); /* impossible */
        ZSTD_FALLTHROUGH;
    case 0: if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
    case 1: MEM_writeLE16(op+pos, (U16)(pledgedSrcSize - 256)); pos += 2; break;
    case 2: MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos += 4; break;
    case 3: MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos += 8; break;
    }
    return pos;
}
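
/*
 * Editor's sketch (illustrative only, excluded from the build): decoding the
 * frame header descriptor byte assembled in ZSTD_writeFrameHeader() above.
 * The field packing mirrors the write side: dictID size code in bits 0-1,
 * checksum flag in bit 2, single-segment flag in bit 5, FCS code in bits 6-7.
 */
#if 0
static void example_decode_fhd(BYTE fhd,
                               U32* dictIDSizeCode, U32* checksumFlag,
                               U32* singleSegment, U32* fcsCode)
{
    *dictIDSizeCode = fhd & 3;
    *checksumFlag   = (fhd >> 2) & 1;
    *singleSegment  = (fhd >> 5) & 1;
    *fcsCode        = (fhd >> 6) & 3;
}
#endif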
/* ZSTD_writeSkippableFrame() :
 * Writes out a skippable frame with the specified magic number variant (16 are supported),
 * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
 *
 * Returns the total number of bytes written, or a ZSTD error code.
 */
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize, unsigned magicVariant)
{
    BYTE* op = (BYTE*)dst;
    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
                    dstSize_tooSmall, "Not enough room for skippable frame");
    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");

    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
    MEM_writeLE32(op+4, (U32)srcSize);
    ZSTD_memcpy(op+8, src, srcSize);
    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}

/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
                    "dst buf is too small to write frame trailer empty block.");
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
        MEM_writeLE24(dst, cBlockHeader24);
        return ZSTD_blockHeaderSize;
    }
}

size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong, "wrong cctx stage");
    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
                    parameter_unsupported, "incompatible with ldm");
    cctx->externSeqStore.seq = seq;
    cctx->externSeqStore.size = nbSeq;
    cctx->externSeqStore.capacity = nbSeq;
    cctx->externSeqStore.pos = 0;
    cctx->externSeqStore.posInSequence = 0;
    return 0;
}

static size_t ZSTD_compressContinue_internal(ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage == ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
        ms->forceNonContiguous = 0;
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        ZSTD_overflowCorrectIfNeeded(ms, &cctx->workspace, &cctx->appliedParams,
                                     src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
        FORWARD_IF_ERROR(cSize, "%s", frame ?
"ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed"); cctx->consumedSrcSize += srcSize; cctx->producedCSize += (cSize + fhSize); assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); RETURN_ERROR_IF( cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, srcSize_wrong, "error : pledgedSrcSize = %u, while realSrcSize >= %u", (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize); } return cSize + fhSize; } } size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); } size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* src, size_t srcSize, ZSTD_dictTableLoadMethod_e dtlm) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; /* Assert that we the ms params match the params we're being given */ ZSTD_assertEqualCParams(params->cParams, ms->cParams); if (srcSize > ZSTD_CHUNKSIZE_MAX) { /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. * Dictionaries right at the edge will immediately trigger overflow * correction, but I don't want to insert extra constraints here. */ U32 const maxDictSize = ZSTD_CURRENT_MAX - 1; /* We must have cleared our windows when our source is this large. */ assert(ZSTD_window_isEmpty(ms->window)); if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window)); /* If the dictionary is too large, only load the suffix of the dictionary. */ if (srcSize > maxDictSize) { ip = iend - maxDictSize; src = ip; srcSize = maxDictSize; } } DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0); ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); ms->forceNonContiguous = params->deterministicRefPrefix; if (loadLdmDict) { ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0); ls->loadedDictEnd = params->forceWindow ? 
                            0 : (U32)(iend - ls->window.base);
    }

    if (srcSize <= HASH_READ_SIZE) return 0;

    ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);

    if (loadLdmDict)
        ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);

    switch (params->cParams.strategy) {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, dtlm);
        break;
    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
        assert(srcSize >= HASH_READ_SIZE);
        if (ms->dedicatedDictSearch) {
            assert(ms->chainTable != NULL);
            ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
        } else {
            assert(params->useRowMatchFinder != ZSTD_ps_auto);
            if (params->useRowMatchFinder == ZSTD_ps_enable) {
                size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
                ZSTD_memset(ms->tagTable, 0, tagTableSize);
                ZSTD_row_update(ms, iend-HASH_READ_SIZE);
                DEBUGLOG(4, "Using row-based hash table for lazy dict");
            } else {
                ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
                DEBUGLOG(4, "Using chain-based hash table for lazy dict");
            }
        }
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        assert(srcSize >= HASH_READ_SIZE);
        ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
        break;

    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}

/* Dictionaries that assign zero probability to symbols that show up cause problems
 * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check
 * and only dictionaries with 100% valid symbols can be assumed valid.
 */
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
    U32 s;
    if (dictMaxSymbolValue < maxSymbolValue) {
        return FSE_repeat_check;
    }
    for (s = 0; s <= maxSymbolValue; ++s) {
        if (normalizedCounter[s] == 0) {
            return FSE_repeat_check;
        }
    }
    return FSE_repeat_valid;
}
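
/*
 * Editor's sketch (illustrative only, excluded from the build): what
 * ZSTD_dictNCountRepeat() above guards against. A table that assigns zero
 * probability to a symbol which later occurs cannot encode it, so such a
 * dictionary table may only ever be used in FSE_repeat_check mode.
 */
#if 0
static void example_dict_ncount_repeat(void)
{
    short full[4]  = { 1, 1, 1, 1 };   /* every symbol representable */
    short holey[4] = { 1, 0, 1, 1 };   /* symbol 1 has zero probability */
    assert(ZSTD_dictNCountRepeat(full, 3, 3) == FSE_repeat_valid);
    assert(ZSTD_dictNCountRepeat(holey, 3, 3) == FSE_repeat_check);
    assert(ZSTD_dictNCountRepeat(full, 2, 3) == FSE_repeat_check);  /* table too small */
}
#endif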
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize)
{
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
    const BYTE* const dictEnd = dictPtr + dictSize;
    dictPtr += 8;
    bs->entropy.huf.repeatMode = HUF_repeat_check;

    {   unsigned maxSymbolValue = 255;
        unsigned hasZeroWeights = 1;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
                                                    dictEnd-dictPtr, &hasZeroWeights);

        /* We only set the loaded table as valid if it contains all non-zero
         * weights. Otherwise, we set it to check. */
        if (!hasZeroWeights)
            bs->entropy.huf.repeatMode = HUF_repeat_valid;

        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
        dictPtr += hufHeaderSize;
    }

    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
        /* fill all offset symbols to avoid garbage at end of table */
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.offcodeCTable,
                offcodeNCount, MaxOff, offcodeLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        dictPtr += offcodeHeaderSize;
    }

    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.matchlengthCTable,
                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
        dictPtr += matchlengthHeaderSize;
    }

    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.litlengthCTable,
                litlengthNCount, litlengthMaxValue, litlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
        dictPtr += litlengthHeaderSize;
    }

    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));

        /* All repCodes must be <= dictContentSize and != 0 */
        {   U32 u;
            for (u = 0; u < 3; u++) {
                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
            }
        }
    }

    return dictPtr - (const BYTE*)dict;
}
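
/*
 * Editor's sketch (illustrative only, excluded from the build): the on-disk
 * layout consumed by ZSTD_loadCEntropy() above, per the dictionary-format
 * document linked below. Only the fixed-position fields are shown; the
 * entropy-section offsets vary with the table encodings.
 */
#if 0
static void example_dict_header(const BYTE* dict)
{
    U32 const magic  = MEM_readLE32(dict);       /* ZSTD_MAGIC_DICTIONARY */
    U32 const dictID = MEM_readLE32(dict + 4);
    /* entropy tables follow at offset 8, in order: Huffman literals CTable,
     * then FSE offcode, matchlength, litlength tables; then 3 x U32 repcodes
     * (each != 0 and <= content size); then the raw dictionary content. */
    (void)magic; (void)dictID;
}
#endif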
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_cwksp* ws,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    size_t dictID;
    size_t eSize;
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize >= 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */);
    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
    dictPtr += eSize;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
    }
    return dictID;
}

/* ZSTD_compress_insertDictionary() :
 * @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                               ZSTD_matchState_t* ms,
                               ldmState_t* ls,
                               ZSTD_cwksp* ws,
                         const ZSTD_CCtx_params* params,
                         const void* dict, size_t dictSize,
                               ZSTD_dictContentType_e dictContentType,
                               ZSTD_dictTableLoadMethod_e dtlm,
                               void* workspace)
{
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
    if ((dict == NULL) || (dictSize < 8)) {
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        return 0;
    }

    ZSTD_reset_compressedBlockState(bs);

    /* dict restricted modes */
    if (dictContentType == ZSTD_dct_rawContent)
        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);

    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
        if (dictContentType == ZSTD_dct_auto) {
            DEBUGLOG(4, "raw content dictionary detected");
            return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
        }
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        assert(0);   /* impossible */
    }

    /* dict as full zstd dictionary */
    return ZSTD_loadZstdDictionary(bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}

#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)

/*! ZSTD_compressBegin_internal() :
 * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                    ZSTD_buffered_policy_e zbuff)
{
    size_t const dictContentSize = cdict ?
                                   cdict->dictContentSize : dictSize;
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if ((cdict)
      && (cdict->dictContentSize > 0)
      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
        || cdict->compressionLevel == 0)
      && (params->attachDictPref != ZSTD_dictForceLoad)) {
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                             dictContentSize,
                                             ZSTDcrp_makeClean, zbuff), "");
    {   size_t const dictID = cdict ?
                ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                        cdict->dictContentSize, cdict->dictContentType, dtlm,
                        cctx->entropyWorkspace)
              : ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
                        dictContentType, dtlm, cctx->entropyWorkspace);
        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
        assert(dictID <= UINT_MAX);
        cctx->dictID = (U32)dictID;
        cctx->dictContentSize = dictContentSize;
    }
    return 0;
}

size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params,
                                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
    /* compression parameters verification and optimization */
    FORWARD_IF_ERROR(ZSTD_checkCParams(params->cParams), "");
    return ZSTD_compressBegin_internal(cctx,
                                       dict, dictSize, dictContentType, dtlm,
                                       cdict,
                                       params, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/*! ZSTD_compressBegin_advanced() :
 * @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
                                   const void* dict, size_t dictSize,
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compressBegin_advanced_internal(cctx,
                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                            NULL /*cdict*/,
                                            &cctxParams, pledgedSrcSize);
}

size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_CCtx_params cctxParams;
    {   ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}

size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
}
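
/*
 * Editor's sketch (illustrative only, excluded from the build): the intended
 * calling sequence for the begin/continue/end entry points defined in this
 * file, compressing one buffer in two chunks. Error handling is elided and
 * the wrapper name is hypothetical.
 */
#if 0
static size_t example_two_chunk_compress(ZSTD_CCtx* cctx,
                                         void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize)
{
    size_t const half = srcSize / 2;
    size_t pos = 0;
    ZSTD_compressBegin(cctx, 3 /* compression level */);
    pos += ZSTD_compressContinue(cctx, (char*)dst + pos, dstCapacity - pos,
                                 src, half);
    /* ZSTD_compressEnd() consumes the last chunk and writes the epilogue */
    pos += ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos,
                            (const char*)src + half, srcSize - half);
    return pos;
}
#endif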
/*! ZSTD_writeEpilogue() :
 *  Ends a frame.
 * @return : nb of bytes written into dst (or an error code)
 */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "no room for epilogue");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op - ostart;
}

void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
    (void)cctx;
    (void)extraCSize;
}

size_t ZSTD_compressEnd(ZSTD_CCtx* cctx,
                        void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity - cSize);
    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        RETURN_ERROR_IF(
            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
            srcSize_wrong,
            "error : pledgedSrcSize = %u, while realSrcSize = %u",
            (unsigned)cctx->pledgedSrcSizePlusOne-1,
            (unsigned)cctx->consumedSrcSize);
    }
    ZSTD_CCtx_trace(cctx, endResult);
    return cSize + endResult;
}

size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize,
                              ZSTD_parameters params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced");
    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
    ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           &cctx->simpleApiParams);
}

/* Internal */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize,
                                       const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
    FORWARD_IF_ERROR(ZSTD_compressBegin_internal(cctx,
                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                         params, srcSize, ZSTDb_not_buffered), "");
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t
dictSize, int compressionLevel) { { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict); assert(params.fParams.contentSizeFlag == 1); ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel); } DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize); return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); } size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize); assert(cctx != NULL); return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); } size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { size_t result; ZSTD_CCtx* cctx = ZSTD_createCCtx(); RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed"); result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel); ZSTD_freeCCtx(cctx); return result; } /* ===== Dictionary API ===== */ /*! ZSTD_estimateCDictSize_advanced() : * Estimate amount of memory that will be needed to create a dictionary with following arguments */ size_t ZSTD_estimateCDictSize_advanced( size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod) { DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict)); return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small * in case we are using DDS with row-hash. */ + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams), /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); } size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); } size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support sizeof on NULL */ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict)); /* cdict may be in the workspace */ return (cdict->workspace.workspace == cdict ? 
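/*
 * Usage sketch for the one-shot API above; a minimal userspace-style caller,
 * with hypothetical `src`/`srcSize` buffers that are not part of this file:
 *
 *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *     void* const dst = malloc(dstCapacity);
 *     size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
 *     if (ZSTD_isError(cSize)) return -1;
 *
 * Sizing dst with ZSTD_compressBound() guarantees the one-shot call cannot
 * fail for lack of output space.
 */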
/* =====  Dictionary API  ===== */

/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
size_t ZSTD_estimateCDictSize_advanced(
        size_t dictSize, ZSTD_compressionParameters cParams,
        ZSTD_dictLoadMethod_e dictLoadMethod)
{
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
         /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
          * in case we are using DDS with row-hash. */
         + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
                                  /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}

size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}

size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support sizeof on NULL */
    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
    /* cdict may be in the workspace */
    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
        + ZSTD_cwksp_sizeof(&cdict->workspace);
}

static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_CCtx_params params)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(params.cParams));
    cdict->matchState.cParams = params.cParams;
    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictContent = dictBuffer;
    } else {
        void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
        cdict->dictContent = internalBuffer;
        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;
    cdict->dictContentType = dictContentType;

    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    FORWARD_IF_ERROR(ZSTD_reset_matchState(
        &cdict->matchState,
        &cdict->workspace,
        &params.cParams,
        params.useRowMatchFinder,
        ZSTDcrp_makeClean,
        ZSTDirp_reset,
        ZSTD_resetTarget_CDict), "");
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is < 8 bytes.
     */
    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
                    &params, cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}

static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_paramSwitch_e useRowMatchFinder,
                                      U32 enableDedicatedDictSearch,
                                      ZSTD_customMem customMem)
{
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;

    {   size_t const workspaceSize =
            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
            ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
            (dictLoadMethod == ZSTD_dlm_byRef ? 0
             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
        ZSTD_cwksp ws;
        ZSTD_CDict* cdict;

        if (!workspace) {
            ZSTD_customFree(workspace, customMem);
            return NULL;
        }

        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);

        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        assert(cdict != NULL);
        ZSTD_cwksp_move(&cdict->workspace, &ws);
        cdict->customMem = customMem;
        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
        cdict->useRowMatchFinder = useRowMatchFinder;
        return cdict;
    }
}

ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
    ZSTD_CCtxParams_init(&cctxParams, 0);
    cctxParams.cParams = cParams;
    cctxParams.customMem = customMem;
    return ZSTD_createCDict_advanced2(
        dictBuffer, dictSize,
        dictLoadMethod, dictContentType,
        &cctxParams, customMem);
}

ZSTD_CDict* ZSTD_createCDict_advanced2(
        const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType,
        const ZSTD_CCtx_params* originalCctxParams,
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams = *originalCctxParams;
    ZSTD_compressionParameters cParams;
    ZSTD_CDict* cdict;

    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    if (cctxParams.enableDedicatedDictSearch) {
        cParams = ZSTD_dedicatedDictSearch_getCParams(
            cctxParams.compressionLevel, dictSize);
        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
    } else {
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
        /* Fall back to non-DDSS params */
        cctxParams.enableDedicatedDictSearch = 0;
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
    cctxParams.cParams = cParams;
    cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);

    cdict = ZSTD_createCDict_advanced_internal(dictSize,
                        dictLoadMethod, cctxParams.cParams,
                        cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
                        customMem);

    if (!cdict)
        return NULL;

    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                    dict, dictSize,
                                    dictLoadMethod, dictContentType,
                                    cctxParams) )) {
        ZSTD_freeCDict(cdict);
        return NULL;
    }

    return cdict;
}

ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                                  cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = cdict->customMem;
        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
        ZSTD_cwksp_free(&cdict->workspace, cMem);
        if (!cdictInWorkspace) {
            ZSTD_customFree(cdict, cMem);
        }
        return 0;
    }
}

/*! ZSTD_initStaticCDict_advanced() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must be 8-byte aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevant cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
    /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
                            + matchStateSize;
    ZSTD_CDict* cdict;
    ZSTD_CCtx_params params;

    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */

    {   ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        if (cdict == NULL) return NULL;
        ZSTD_cwksp_move(&cdict->workspace, &ws);
    }

    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    ZSTD_CCtxParams_init(&params, 0);
    params.cParams = cParams;
    params.useRowMatchFinder = useRowMatchFinder;
    cdict->useRowMatchFinder = useRowMatchFinder;

    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              dictLoadMethod, dictContentType,
                                              params) ))
        return NULL;

    return cdict;
}
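/*
 * Usage sketch for ZSTD_initStaticCDict() above, with a hypothetical
 * `dictBuf`/`dictLen` dictionary at level 3 (alignment and error handling
 * are the caller's responsibility, as the doc comment notes):
 *
 *     size_t const wkspSize = ZSTD_estimateCDictSize(dictLen, 3);
 *     void* const wksp = malloc(wkspSize);   // must be 8-byte aligned
 *     const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, wkspSize,
 *             dictBuf, dictLen, ZSTD_dlm_byCopy, ZSTD_dct_auto,
 *             ZSTD_getCParams(3, ZSTD_CONTENTSIZE_UNKNOWN, dictLen));
 *
 * There is no matching free function: the caller releases `wksp` itself
 * once the CDict is no longer in use.
 */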
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
    assert(cdict != NULL);
    return cdict->matchState.cParams;
}

/*! ZSTD_getDictID_fromCDict() :
 *  Provides the dictID of the dictionary loaded into `cdict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;
    return cdict->dictID;
}

/* ZSTD_compressBegin_usingCDict_internal() :
 * Implementation of various ZSTD_compressBegin_usingCDict* functions.
 */
static size_t ZSTD_compressBegin_usingCDict_internal(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
    /* Initialize the cctxParams from the cdict */
    {   ZSTD_parameters params;
        params.fParams = fParams;
        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
                        || cdict->compressionLevel == 0 ) ?
                ZSTD_getCParamsFromCDict(cdict)
              : ZSTD_getCParams(cdict->compressionLevel,
                                pledgedSrcSize,
                                cdict->dictContentSize);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
    }
    /* Increase window log to fit the entire dictionary and source if the
     * source size is known. Limit the increase to 19, which is the
     * window log for compression level 1 with the largest source size.
     */
    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
    }
    return ZSTD_compressBegin_internal(cctx,
                                       NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       cdict,
                                       &cctxParams, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/* ZSTD_compressBegin_usingCDict_advanced() :
 * This function is DEPRECATED.
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
}

/* ZSTD_compressBegin_usingCDict() :
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}

/*! ZSTD_compress_usingCDict_internal():
 * Implementation of various ZSTD_compress_usingCDict* functions.
 */
static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

/*! ZSTD_compress_usingCDict_advanced():
 * This function is DEPRECATED.
 */
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
    return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}

/*! ZSTD_compress_usingCDict() :
 *  Compression using a digested Dictionary.
 *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
 *  Note that compression parameters are decided at CDict creation time
 *  while frame parameters are hardcoded */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
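/*
 * Sketch of the digested-dictionary path defined above (hypothetical
 * buffers; the point is that the CDict is built once, then reused across
 * many frames without re-digesting the dictionary):
 *
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictLen, 3);
 *     ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
 *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
 *                                                   src, srcSize, cdict);
 *     ZSTD_freeCCtx(cctx);
 *     ZSTD_freeCDict(cdict);
 */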
/* ******************************************************************
*  Streaming
********************************************************************/

ZSTD_CStream* ZSTD_createCStream(void)
{
    DEBUGLOG(3, "ZSTD_createCStream");
    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
}

ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize)
{
    return ZSTD_initStaticCCtx(workspace, workspaceSize);
}

ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{   /* CStream and CCtx are now same object */
    return ZSTD_createCCtx_advanced(customMem);
}

size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
    return ZSTD_freeCCtx(zcs);   /* same object */
}

/*======   Initialization   ======*/

size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }

size_t ZSTD_CStreamOutSize(void)
{
    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}

static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
        return ZSTD_cpm_attachDict;
    else
        return ZSTD_cpm_noAttachDict;
}

/* ZSTD_resetCStream():
 * pledgedSrcSize == 0 means "unknown" */
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    return 0;
}

/*! ZSTD_initCStream_internal() :
 *  Note : for lib/compress only. Used by zstdmt_compress.c.
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    const ZSTD_CCtx_params* params,
                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    zcs->requestedParams = *params;
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if (dict) {
        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    } else {
        /* Dictionary is cleared if !cdict */
        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    }
    return 0;
}

/* ZSTD_initCStream_usingCDict_advanced() :
 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                                            const ZSTD_CDict* cdict,
                                            ZSTD_frameParameters fParams,
                                            unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    zcs->requestedParams.fParams = fParams;
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    return 0;
}

/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    return 0;
}

/* ZSTD_initCStream_advanced() :
 * pledgedSrcSize must be exact.
 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                 const void* dict, size_t dictSize,
                                 ZSTD_parameters params, unsigned long long pss)
{
    /* for compatibility with older programs relying on this behavior.
     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
     * This line will be removed in the future.
     */
    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_initCStream_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}

size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}

size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    return 0;
}

size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    return 0;
}

/*======   Compression   ======*/

static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
    if (hintInSize==0) hintInSize = cctx->blockSize;
    return hintInSize;
}

/* ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants
 *  non-static, because can be called from zstdmt_compress.c
 * @return : hint size for next input */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                          ZSTD_outBuffer* output,
                                          ZSTD_inBuffer* input,
                                          ZSTD_EndDirective const flushMode)
{
    const char* const istart = (const char*)input->src;
    const char* const iend = input->size != 0 ? istart + input->size : istart;
    const char* ip = input->pos != 0 ? istart + input->pos : istart;
    char* const ostart = (char*)output->dst;
    char* const oend = output->size != 0 ? ostart + output->size : ostart;
    char* op = output->pos != 0 ? ostart + output->pos : ostart;
    U32 someMoreWork = 1;

    /* check expectations */
    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
        assert(zcs->inBuff != NULL);
        assert(zcs->inBuffSize > 0);
    }
    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
        assert(zcs->outBuff != NULL);
        assert(zcs->outBuffSize > 0);
    }
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);
    assert((U32)flushMode <= (U32)ZSTD_e_end);

    while (someMoreWork) {
        switch(zcs->streamStage)
        {
        case zcss_init:
            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");

        case zcss_load:
            if ( (flushMode == ZSTD_e_end)
              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
              && (zcs->inBuffPos == 0) ) {
                /* shortcut to compression pass directly into output buffer */
                size_t const cSize = ZSTD_compressEnd(zcs,
                                                op, oend-op, ip, iend-ip);
                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
                ip = iend;
                op += cSize;
                zcs->frameEnded = 1;
                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                someMoreWork = 0; break;
            }
            /* complete loading into inBuffer in buffered mode */
            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                size_t const loaded = ZSTD_limitCopy(
                                        zcs->inBuff + zcs->inBuffPos, toLoad,
                                        ip, iend-ip);
                zcs->inBuffPos += loaded;
                if (loaded != 0)
                    ip += loaded;
                if ( (flushMode == ZSTD_e_continue)
                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
                    /* not enough input to fill full block : stop here */
                    someMoreWork = 0; break;
                }
                if ( (flushMode == ZSTD_e_flush)
                  && (zcs->inBuffPos == zcs->inToCompress) ) {
                    /* empty */
                    someMoreWork = 0; break;
                }
            }
            /* compress current block (note : this stage cannot be stopped in the middle) */
            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
                void* cDst;
                size_t cSize;
                size_t oSize = oend-op;
                size_t const iSize = inputBuffered
                    ? zcs->inBuffPos - zcs->inToCompress
                    : MIN((size_t)(iend - ip), zcs->blockSize);
                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
                    cDst = op;   /* compress into output buffer, to skip flush stage */
                else
                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                if (inputBuffered) {
                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
                    cSize = lastBlock ?
                            ZSTD_compressEnd(zcs, cDst, oSize,
                                        zcs->inBuff + zcs->inToCompress, iSize) :
                            ZSTD_compressContinue(zcs, cDst, oSize,
                                        zcs->inBuff + zcs->inToCompress, iSize);
                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
                    zcs->frameEnded = lastBlock;
                    /* prepare next block */
                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                    if (zcs->inBuffTarget > zcs->inBuffSize)
                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
                             (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
                    if (!lastBlock)
                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
                    zcs->inToCompress = zcs->inBuffPos;
                } else {
                    unsigned const lastBlock = (ip + iSize == iend);
                    assert(flushMode == ZSTD_e_end /* Already validated */);
                    cSize = lastBlock ?
                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
                    /* Consume the input prior to error checking to mirror buffered mode. */
                    if (iSize > 0)
                        ip += iSize;
                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
                    zcs->frameEnded = lastBlock;
                    if (lastBlock)
                        assert(ip == iend);
                }
                if (cDst == op) {  /* no need to flush */
                    op += cSize;
                    if (zcs->frameEnded) {
                        DEBUGLOG(5, "Frame completed directly in outBuffer");
                        someMoreWork = 0;
                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                    }
                    break;
                }
                zcs->outBuffContentSize = cSize;
                zcs->outBuffFlushedSize = 0;
                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
            }
            ZSTD_FALLTHROUGH;
        case zcss_flush:
            DEBUGLOG(5, "flush stage");
            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
                if (flushed)
                    op += flushed;
                zcs->outBuffFlushedSize += flushed;
                if (toFlush!=flushed) {
                    /* flush not fully completed, presumably because dst is too small */
                    assert(op==oend);
                    someMoreWork = 0;
                    break;
                }
                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
                if (zcs->frameEnded) {
                    DEBUGLOG(5, "Frame completed on flush");
                    someMoreWork = 0;
                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                    break;
                }
                zcs->streamStage = zcss_load;
                break;
            }

        default: /* impossible */
            assert(0);
        }
    }

    input->pos = ip - istart;
    output->pos = op - ostart;
    if (zcs->frameEnded) return 0;
    return ZSTD_nextInputSizeHint(zcs);
}

static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
    return ZSTD_nextInputSizeHint(cctx);
}

size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
    return ZSTD_nextInputSizeHint_MTorST(zcs);
}

/* After a compression call set the expected input/output buffer.
 * This is validated at the start of the next compression call. */
static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
{
    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
        cctx->expectedInBuffer = *input;
    }
    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
        cctx->expectedOutBufferSize = output->size - output->pos;
    }
}

/* Validate that the input/output buffers match the expectations set by
 * ZSTD_setBufferExpectations. */
static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
                                        ZSTD_outBuffer const* output,
                                        ZSTD_inBuffer const* input,
                                        ZSTD_EndDirective endOp)
{
    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
        ZSTD_inBuffer const expect = cctx->expectedInBuffer;
        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
        if (endOp != ZSTD_e_end)
            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
    }
    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
        size_t const outBufferSize = output->size - output->pos;
        if (cctx->expectedOutBufferSize != outBufferSize)
            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
    }
    return 0;
}

static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
                                             ZSTD_EndDirective endOp,
                                             size_t inSize)
{
    ZSTD_CCtx_params params = cctx->requestedParams;
    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
    if (cctx->cdict && !cctx->localDict.cdict) {
        /* Let the cdict's compression level take priority over the requested params.
         * But do not take the cdict's compression level if the "cdict" is actually a localDict
         * generated from ZSTD_initLocalDict().
         */
        params.compressionLevel = cctx->cdict->compressionLevel;
    }
    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
    {   size_t const dictSize = prefixDict.dict
                ? prefixDict.dictSize
                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &params, cctx->pledgedSrcSizePlusOne-1,
                dictSize, mode);
    }

    params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
    params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
    params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);

    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
                cctx->cdict,
                &params, pledgedSrcSize,
                ZSTDb_buffered) , "");
        assert(cctx->appliedParams.nbWorkers == 0);
        cctx->inToCompress = 0;
        cctx->inBuffPos = 0;
        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
            /* for small input: avoid automatic flush on reaching end of block, since
             * it would require to add a 3-bytes null block to end frame
             */
            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
        } else {
            cctx->inBuffTarget = 0;
        }
        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
        cctx->streamStage = zcss_load;
        cctx->frameEnded = 0;
    }
    return 0;
}

size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                             ZSTD_outBuffer* output,
                             ZSTD_inBuffer* input,
                             ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
    /* check conditions */
    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
    assert(cctx != NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
    }
    /* end of transparent initialization stage */

    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
    /* compression stage */
    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
    DEBUGLOG(5, "completed ZSTD_compressStream2");
    ZSTD_setBufferExpectations(cctx, output, input);
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}

size_t ZSTD_compressStream2_simpleArgs (
                            ZSTD_CCtx* cctx,
                            void* dst, size_t dstCapacity, size_t* dstPos,
                      const void* src, size_t srcSize, size_t* srcPos,
                            ZSTD_EndDirective endOp)
{
    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
    *dstPos = output.pos;
    *srcPos = input.pos;
    return cErr;
}

size_t ZSTD_compress2(ZSTD_CCtx* cctx,
                      void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize)
{
    ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
    ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
    DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    /* Enable stable input/output buffers. */
    cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
    cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
    {   size_t oPos = 0;
        size_t iPos = 0;
        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
                                        dst, dstCapacity, &oPos,
                                        src, srcSize, &iPos,
                                        ZSTD_e_end);
        /* Reset to the original values. */
        cctx->requestedParams.inBufferMode = originalInBufferMode;
        cctx->requestedParams.outBufferMode = originalOutBufferMode;
        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
        if (result != 0) {  /* compression not completed, due to lack of output space */
            assert(oPos == dstCapacity);
            RETURN_ERROR(dstSize_tooSmall, "");
        }
        assert(iPos == srcSize);   /* all input is expected consumed */
        return oPos;
    }
}
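/*
 * Streaming usage sketch for the state machine above (hedged: refilling
 * `in` and draining `out` between iterations is left to the caller; `cctx`,
 * `src`, and `dst` are hypothetical):
 *
 *     ZSTD_outBuffer out = { dst, dstCapacity, 0 };
 *     ZSTD_inBuffer  in  = { src, srcSize, 0 };
 *     size_t remaining;
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
 *     do {
 *         remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
 *         if (ZSTD_isError(remaining)) return remaining;
 *         // drain out.dst[0..out.pos) here if the output buffer is full
 *     } while (remaining != 0);
 *
 * With ZSTD_e_end, a return value of 0 means the frame is complete; any
 * other success value is the number of bytes still queued to flush.
 */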
typedef struct {
    U32 idx;             /* Index in array of ZSTD_Sequence */
    U32 posInSequence;   /* Position within sequence at idx */
    size_t posInSrc;     /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;

/* ZSTD_validateSequence() :
 * @offCode : is presumed to follow format required by ZSTD_storeSeq()
 * @returns a ZSTD error code if sequence is not valid */
static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
                                    size_t posInSrc, U32 windowLog, size_t dictSize)
{
    U32 const windowSize = 1 << windowLog;
    /* posInSrc represents the amount of data the decoder would decode up to this point.
     * As long as the amount of data decoded is less than or equal to window size, offsets may be
     * larger than the total length of output decoded in order to reference the dict, even larger than
     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
     */
    size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
    RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
    RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
    return 0;
}

/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
{
    U32 offCode = STORE_OFFSET(rawOffset);

    if (!ll0 && rawOffset == rep[0]) {
        offCode = STORE_REPCODE_1;
    } else if (rawOffset == rep[1]) {
        offCode = STORE_REPCODE(2 - ll0);
    } else if (rawOffset == rep[2]) {
        offCode = STORE_REPCODE(3 - ll0);
    } else if (ll0 && rawOffset == rep[0] - 1) {
        offCode = STORE_REPCODE_3;
    }
    return offCode;
}

/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
 * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. */
static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
                                                             ZSTD_sequencePosition* seqPos,
                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                                       const void* src, size_t blockSize)
{
    U32 idx = seqPos->idx;
    BYTE const* ip = (BYTE const*)(src);
    const BYTE* const iend = ip + blockSize;
    repcodes_t updatedRepcodes;
    U32 dictSize;

    if (cctx->cdict) {
        dictSize = (U32)cctx->cdict->dictContentSize;
    } else if (cctx->prefixDict.dict) {
        dictSize = (U32)cctx->prefixDict.dictSize;
    } else {
        dictSize = 0;
    }
    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
    for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
        U32 const litLength = inSeqs[idx].litLength;
        U32 const ll0 = (litLength == 0);
        U32 const matchLength = inSeqs[idx].matchLength;
        U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
        ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);

        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                cctx->appliedParams.cParams.windowLog, dictSize),
                                                "Sequence validation failed");
        }
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
        ip += matchLength + litLength;
    }
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    if (inSeqs[idx].litLength) {
        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
        ip += inSeqs[idx].litLength;
        seqPos->posInSrc += inSeqs[idx].litLength;
    }
    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
    seqPos->idx = idx+1;
    return 0;
}

/* Returns the number of bytes to move the current read position back by. Only non-zero
 * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
 * went wrong.
 *
 * This function will attempt to scan through blockSize bytes represented by the sequences
 * in inSeqs, storing any (partial) sequences.
 *
 * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
 * avoid splitting a match, or to avoid splitting a match such that it would produce a match
 * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
 */
static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                                       const void* src, size_t blockSize)
{
    U32 idx = seqPos->idx;
    U32 startPosInSequence = seqPos->posInSequence;
    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
    size_t dictSize;
    BYTE const* ip = (BYTE const*)(src);
    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
    repcodes_t updatedRepcodes;
    U32 bytesAdjustment = 0;
    U32 finalMatchSplit = 0;

    if (cctx->cdict) {
        dictSize = cctx->cdict->dictContentSize;
    } else if (cctx->prefixDict.dict) {
        dictSize = cctx->prefixDict.dictSize;
    } else {
        dictSize = 0;
    }
    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
        const ZSTD_Sequence currSeq = inSeqs[idx];
        U32 litLength = currSeq.litLength;
        U32 matchLength = currSeq.matchLength;
        U32 const rawOffset = currSeq.offset;
        U32 offCode;

        /* Modify the sequence depending on where endPosInSequence lies */
        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
            if (startPosInSequence >= litLength) {
                startPosInSequence -= litLength;
                litLength = 0;
                matchLength -= startPosInSequence;
            } else {
                litLength -= startPosInSequence;
            }
            /* Move to the next sequence */
            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
            startPosInSequence = 0;
            idx++;
        } else {
            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
               does not reach the end of the match. So, we have to split the sequence */
            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
            if (endPosInSequence > litLength) {
                U32 firstHalfMatchLength;
                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
                    /* Only ever split the match if it is larger than the block size */
                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
                        /* Move the endPosInSequence backward so that it creates match of minMatch length */
                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        firstHalfMatchLength -= bytesAdjustment;
                    }
                    matchLength = firstHalfMatchLength;
                    /* Flag that we split the last match - after storing the sequence, exit the loop,
                       but keep the value of endPosInSequence */
                    finalMatchSplit = 1;
                } else {
                    /* Move the position in sequence backwards so that we don't split match, and break to store
                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
                     * would cause the first half of the match to be too small
                     */
                    bytesAdjustment = endPosInSequence - currSeq.litLength;
                    endPosInSequence = currSeq.litLength;
                    break;
                }
            } else {
                /* This sequence ends inside the literals, break to store the last literals */
                break;
            }
        }
        /* Check if this offset can be represented with a repcode */
        {   U32 const ll0 = (litLength == 0);
            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
            ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
        }

        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                   cctx->appliedParams.cParams.windowLog, dictSize),
                                                   "Sequence validation failed");
        }
        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
        ip += matchLength + litLength;
    }
    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
    seqPos->idx = idx;
    seqPos->posInSequence = endPosInSequence;
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    iend -= bytesAdjustment;
    if (ip != iend) {
        /* Store any last literals */
        U32 lastLLSize = (U32)(iend - ip);
        assert(ip <= iend);
        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
        seqPos->posInSrc += lastLLSize;
    }

    return bytesAdjustment;
}

typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                       const void* src, size_t blockSize);
static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
{
    ZSTD_sequenceCopier sequenceCopier = NULL;
    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
    if (mode == ZSTD_sf_explicitBlockDelimiters) {
        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
    } else if (mode == ZSTD_sf_noBlockDelimiters) {
        return ZSTD_copySequencesToSeqStoreNoBlockDelim;
    }
    assert(sequenceCopier != NULL);
    return sequenceCopier;
}

/* Compress, block-by-block, all of the sequences given.
 *
 * Returns the cumulative size of all compressed blocks (including their headers),
 * otherwise a ZSTD error.
 */
static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                        const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                                        const void* src, size_t srcSize)
{
    size_t cSize = 0;
    U32 lastBlock;
    size_t blockSize;
    size_t compressedSeqsSize;
    size_t remaining = srcSize;
    ZSTD_sequencePosition seqPos = {0, 0, 0};

    BYTE const* ip = (BYTE const*)src;
    BYTE* op = (BYTE*)dst;
    ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);

    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
    /* Special case: empty frame */
    if (remaining == 0) {
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
        cSize += ZSTD_blockHeaderSize;
    }

    while (remaining) {
        size_t cBlockSize;
        size_t additionalByteAdjustment;
        lastBlock = remaining <= cctx->blockSize;
        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
        ZSTD_resetSeqStore(&cctx->seqStore);
        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);

        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
        blockSize -= additionalByteAdjustment;

        /* If blocks are too small, emit as a nocompress block */
        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
            cSize += cBlockSize;
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            continue;
        }

        compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
                                &cctx->appliedParams,
                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
                                blockSize,
                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
                                cctx->bmi2);
        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);

        if (!cctx->isFirstBlock &&
            ZSTD_maybeRLE(&cctx->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize)) {
            /* We don't want to emit our first block as a RLE even if it qualifies because
             * doing so will cause the decoder (cli only) to throw a "should consume all input error."
             * This is only an issue for zstd <= v1.4.3
             */
            compressedSeqsSize = 1;
        }

        if (compressedSeqsSize == 0) {
            /* ZSTD_noCompressBlock writes the block header as well */
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
        } else if (compressedSeqsSize == 1) {
            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
        } else {
            U32 cBlockHeader;
            /* Error checking and repcodes update */
            ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

            /* Write block header into beginning of block */
            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
            MEM_writeLE24(op, cBlockHeader);
            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
        }

        cSize += cBlockSize;
        DEBUGLOG(4, "cSize running total: %zu", cSize);

        if (lastBlock) {
            break;
        } else {
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            cctx->isFirstBlock = 0;
        }
    }

    return cSize;
}

size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                              const void* src, size_t srcSize)
{
    BYTE* op = (BYTE*)dst;
    size_t cSize = 0;
    size_t compressedBlocksSize = 0;
    size_t frameHeaderSize = 0;

    /* Transparent initialization stage, same as compressStream2() */
    DEBUGLOG(3, "ZSTD_compressSequences()");
    assert(cctx != NULL);
    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
    /* Begin writing output, starting with frame header */
    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
    op += frameHeaderSize;
    dstCapacity -= frameHeaderSize;
    cSize += frameHeaderSize;
    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
        xxh64_update(&cctx->xxhState, src, srcSize);
    }
    /* cSize includes block header size and compressed sequences size */
    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
                                                           op, dstCapacity,
                                                           inSeqs, inSeqsSize,
                                                           src, srcSize);
    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
    cSize += compressedBlocksSize;
    dstCapacity -= compressedBlocksSize;

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32((char*)dst + cSize, checksum);
        cSize += 4;
    }

    DEBUGLOG(3, "Final compressed size: %zu", cSize);
    return cSize;
}
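/*
 * Sketch of driving ZSTD_compressSequences() with explicit block delimiters
 * (hedged, illustrative only): a single all-literals sequence, assuming
 * srcSize fits within one block. In this mode, offset == 0 together with
 * matchLength == 0 marks the block delimiter, and its litLength emits the
 * block's trailing literals.
 *
 *     ZSTD_Sequence const seqs[1] = { { 0, (unsigned)srcSize, 0, 0 } };
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters,
 *                            ZSTD_sf_explicitBlockDelimiters);
 *     size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
 *                                                 seqs, 1, src, srcSize);
 */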
/*======   Finalize   ======*/

/*! ZSTD_flushStream() :
 * @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
    ZSTD_inBuffer input = { NULL, 0, 0 };
    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}

size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
    ZSTD_inBuffer input = { NULL, 0, 0 };
    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
    FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
    /* single thread mode : attempt to calculate remaining to flush more precisely */
    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
        return toFlush;
    }
}

/*-=====  Pre-defined compression levels  =====-*/
#include "clevels.h"

int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }

static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
    switch (cParams.strategy) {
        case ZSTD_fast:
        case ZSTD_dfast:
            break;
        case ZSTD_greedy:
        case ZSTD_lazy:
        case ZSTD_lazy2:
            cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
            break;
        case ZSTD_btlazy2:
        case ZSTD_btopt:
        case ZSTD_btultra:
        case ZSTD_btultra2:
            break;
    }
    return cParams;
}

static int ZSTD_dedicatedDictSearch_isSupported(
        ZSTD_compressionParameters const* cParams)
{
    return (cParams->strategy >= ZSTD_greedy)
        && (cParams->strategy <= ZSTD_lazy2)
        && (cParams->hashLog > cParams->chainLog)
        && (cParams->chainLog <= 24);
}

/*
 * Reverses the adjustment applied to cparams when enabling dedicated dict
 * search. This is used to recover the params set to be used in the working
 * context. (Otherwise, those tables would also grow.)
 */
static void ZSTD_dedicatedDictSearch_revertCParams(
        ZSTD_compressionParameters* cParams)
{
    switch (cParams->strategy) {
        case ZSTD_fast:
        case ZSTD_dfast:
            break;
        case ZSTD_greedy:
        case ZSTD_lazy:
        case ZSTD_lazy2:
            cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
            if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
                cParams->hashLog = ZSTD_HASHLOG_MIN;
            }
            break;
        case ZSTD_btlazy2:
        case ZSTD_btopt:
        case ZSTD_btultra:
        case ZSTD_btultra2:
            break;
    }
}

static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    switch (mode) {
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_noAttachDict:
    case ZSTD_cpm_createCDict:
        break;
    case ZSTD_cpm_attachDict:
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }
    {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
        size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
        return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
    }
}

/*! ZSTD_getCParams_internal() :
 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
 *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
 *        Use dictSize == 0 for unknown or unused.
 *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
    int row;
    DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);

    /* row */
    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
    else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
    else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
    else row = compressionLevel;

    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
        DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
        /* acceleration factor */
        if (compressionLevel < 0) {
            int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
            cp.targetLength = (unsigned)(-clampedCompressionLevel);
        }
        /* refine parameters based on srcSize & dictSize */
        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
    }
}

/*! ZSTD_getCParams() :
 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
 *  Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}

/*! ZSTD_getParams() :
 *  same idea as ZSTD_getCParams()
 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
 *  Fields of `ZSTD_frameParameters` are set to default values */
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    ZSTD_parameters params;
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
    ZSTD_memset(&params, 0, sizeof(params));
    params.cParams = cParams;
    params.fParams.contentSizeFlag = 1;
    return params;
}

/*! ZSTD_getParams() :
 *  same idea as ZSTD_getCParams()
 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
 *  Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
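/*
 * Worked example of the table selection above, following directly from the
 * code (assuming srcSizeHint = 100 KB, no dictionary, level 3):
 *
 *     ZSTD_compressionParameters const cp = ZSTD_getCParams(3, 100 KB, 0);
 *     // rSize = 100 KB => tableID = (rSize<=256 KB) + (rSize<=128 KB)
 *     //                           + (rSize<=16 KB) = 1 + 1 + 0 = 2
 *     // row = 3, so cp starts from ZSTD_defaultCParameters[2][3] and is
 *     // then refined by ZSTD_adjustCParams_internal() for the small input.
 */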
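/*
 * Second editor's sketch: ZSTD_getCParams() is the public wrapper over the
 * level tables indexed by tableID above. It lives in the experimental API
 * surface, so userspace callers must define ZSTD_STATIC_LINKING_ONLY before
 * including zstd.h; the program below is illustrative only.
 */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    /* An unknown source size selects the most conservative (largest) size
     * bucket; a small known srcSize selects a cheaper row, per the
     * (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB) computation. */
    ZSTD_compressionParameters big   = ZSTD_getCParams(3, ZSTD_CONTENTSIZE_UNKNOWN, 0);
    ZSTD_compressionParameters small = ZSTD_getCParams(3, 10 * 1024, 0);

    printf("windowLog: unknown-size=%u, 10KB=%u\n", big.windowLog, small.windowLog);
    return 0;
}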
// SPDX-License-Identifier: GPL-2.0+
/*
 * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo,
 * Quadro, High Flow Next, Aquaero, Aquastream Ultimate, Leakshield,
 * High Flow USB/MPS Flow family)
 *
 * Aquacomputer devices send HID reports (with ID 0x01) every second to report
 * sensor values, except for devices that communicate through the
 * legacy way (currently, Poweradjust 3 and High Flow USB/MPS Flow family).
* * Copyright 2021 Aleksa Savic <savicaleksa83@gmail.com> * Copyright 2022 Jack Doan <me@jackdoan.com> */ #include <linux/crc16.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hid.h> #include <linux/hwmon.h> #include <linux/jiffies.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/unaligned.h> #define USB_VENDOR_ID_AQUACOMPUTER 0x0c70 #define USB_PRODUCT_ID_AQUAERO 0xf001 #define USB_PRODUCT_ID_FARBWERK 0xf00a #define USB_PRODUCT_ID_QUADRO 0xf00d #define USB_PRODUCT_ID_D5NEXT 0xf00e #define USB_PRODUCT_ID_FARBWERK360 0xf010 #define USB_PRODUCT_ID_OCTO 0xf011 #define USB_PRODUCT_ID_HIGHFLOWNEXT 0xf012 #define USB_PRODUCT_ID_LEAKSHIELD 0xf014 #define USB_PRODUCT_ID_AQUASTREAMXT 0xf0b6 #define USB_PRODUCT_ID_AQUASTREAMULT 0xf00b #define USB_PRODUCT_ID_POWERADJUST3 0xf0bd #define USB_PRODUCT_ID_HIGHFLOW 0xf003 enum kinds { d5next, farbwerk, farbwerk360, octo, quadro, highflownext, aquaero, poweradjust3, aquastreamult, aquastreamxt, leakshield, highflow }; static const char *const aqc_device_names[] = { [d5next] = "d5next", [farbwerk] = "farbwerk", [farbwerk360] = "farbwerk360", [octo] = "octo", [quadro] = "quadro", [highflownext] = "highflownext", [leakshield] = "leakshield", [aquastreamxt] = "aquastreamxt", [aquaero] = "aquaero", [aquastreamult] = "aquastreamultimate", [poweradjust3] = "poweradjust3", [highflow] = "highflow" /* Covers MPS Flow devices */ }; #define DRIVER_NAME "aquacomputer_d5next" #define STATUS_REPORT_ID 0x01 #define STATUS_UPDATE_INTERVAL (2 * HZ) /* In seconds */ #define SERIAL_PART_OFFSET 2 #define CTRL_REPORT_ID 0x03 #define AQUAERO_CTRL_REPORT_ID 0x0b #define CTRL_REPORT_DELAY 200 /* ms */ /* The HID report that the official software always sends * after writing values, currently same for all devices */ #define SECONDARY_CTRL_REPORT_ID 0x02 #define SECONDARY_CTRL_REPORT_SIZE 0x0B static u8 secondary_ctrl_report[] = { 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x34, 0xC6 }; /* Secondary HID report values for Aquaero */ #define AQUAERO_SECONDARY_CTRL_REPORT_ID 0x06 #define AQUAERO_SECONDARY_CTRL_REPORT_SIZE 0x07 static u8 aquaero_secondary_ctrl_report[] = { 0x06, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 }; /* Report IDs for legacy devices */ #define AQUASTREAMXT_STATUS_REPORT_ID 0x04 #define POWERADJUST3_STATUS_REPORT_ID 0x03 #define HIGHFLOW_STATUS_REPORT_ID 0x02 /* Data types for reading and writing control reports */ #define AQC_8 0 #define AQC_BE16 1 /* Info, sensor sizes and offsets for most Aquacomputer devices */ #define AQC_SERIAL_START 0x3 #define AQC_FIRMWARE_VERSION 0xD #define AQC_SENSOR_SIZE 0x02 #define AQC_SENSOR_NA 0x7FFF #define AQC_FAN_PERCENT_OFFSET 0x00 #define AQC_FAN_VOLTAGE_OFFSET 0x02 #define AQC_FAN_CURRENT_OFFSET 0x04 #define AQC_FAN_POWER_OFFSET 0x06 #define AQC_FAN_SPEED_OFFSET 0x08 /* Specs of the Aquaero fan controllers */ #define AQUAERO_SERIAL_START 0x07 #define AQUAERO_FIRMWARE_VERSION 0x0B #define AQUAERO_NUM_FANS 4 #define AQUAERO_NUM_SENSORS 8 #define AQUAERO_NUM_VIRTUAL_SENSORS 8 #define AQUAERO_NUM_CALC_VIRTUAL_SENSORS 4 #define AQUAERO_NUM_FLOW_SENSORS 2 #define AQUAERO_CTRL_REPORT_SIZE 0xa93 #define AQUAERO_CTRL_PRESET_ID 0x5c #define AQUAERO_CTRL_PRESET_SIZE 0x02 #define AQUAERO_CTRL_PRESET_START 0x55c /* Sensor report offsets for Aquaero fan controllers */ #define AQUAERO_SENSOR_START 0x65 #define AQUAERO_VIRTUAL_SENSOR_START 0x85 #define AQUAERO_CALC_VIRTUAL_SENSOR_START 0x95 #define AQUAERO_FLOW_SENSORS_START 0xF9 #define 
AQUAERO_FAN_VOLTAGE_OFFSET 0x04 #define AQUAERO_FAN_CURRENT_OFFSET 0x06 #define AQUAERO_FAN_POWER_OFFSET 0x08 #define AQUAERO_FAN_SPEED_OFFSET 0x00 static u16 aquaero_sensor_fan_offsets[] = { 0x167, 0x173, 0x17f, 0x18B }; /* Control report offsets for the Aquaero fan controllers */ #define AQUAERO_TEMP_CTRL_OFFSET 0xdb #define AQUAERO_FAN_CTRL_MIN_PWR_OFFSET 0x04 #define AQUAERO_FAN_CTRL_MAX_PWR_OFFSET 0x06 #define AQUAERO_FAN_CTRL_SRC_OFFSET 0x10 static u16 aquaero_ctrl_fan_offsets[] = { 0x20c, 0x220, 0x234, 0x248 }; /* Specs of the D5 Next pump */ #define D5NEXT_NUM_FANS 2 #define D5NEXT_NUM_SENSORS 1 #define D5NEXT_NUM_VIRTUAL_SENSORS 8 #define D5NEXT_CTRL_REPORT_SIZE 0x329 /* Sensor report offsets for the D5 Next pump */ #define D5NEXT_POWER_CYCLES 0x18 #define D5NEXT_COOLANT_TEMP 0x57 #define D5NEXT_PUMP_OFFSET 0x6c #define D5NEXT_FAN_OFFSET 0x5f #define D5NEXT_5V_VOLTAGE 0x39 #define D5NEXT_12V_VOLTAGE 0x37 #define D5NEXT_VIRTUAL_SENSORS_START 0x3f static u16 d5next_sensor_fan_offsets[] = { D5NEXT_PUMP_OFFSET, D5NEXT_FAN_OFFSET }; /* Control report offsets for the D5 Next pump */ #define D5NEXT_TEMP_CTRL_OFFSET 0x2D /* Temperature sensor offsets location */ static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 }; /* Pump and fan speed (from 0-100%) */ /* Specs of the Aquastream Ultimate pump */ /* Pump does not follow the standard structure, so only consider the fan */ #define AQUASTREAMULT_NUM_FANS 1 #define AQUASTREAMULT_NUM_SENSORS 2 /* Sensor report offsets for the Aquastream Ultimate pump */ #define AQUASTREAMULT_SENSOR_START 0x2D #define AQUASTREAMULT_PUMP_OFFSET 0x51 #define AQUASTREAMULT_PUMP_VOLTAGE 0x3D #define AQUASTREAMULT_PUMP_CURRENT 0x53 #define AQUASTREAMULT_PUMP_POWER 0x55 #define AQUASTREAMULT_FAN_OFFSET 0x41 #define AQUASTREAMULT_PRESSURE_OFFSET 0x57 #define AQUASTREAMULT_FLOW_SENSOR_OFFSET 0x37 #define AQUASTREAMULT_FAN_VOLTAGE_OFFSET 0x02 #define AQUASTREAMULT_FAN_CURRENT_OFFSET 0x00 #define AQUASTREAMULT_FAN_POWER_OFFSET 0x04 #define AQUASTREAMULT_FAN_SPEED_OFFSET 0x06 static u16 aquastreamult_sensor_fan_offsets[] = { AQUASTREAMULT_FAN_OFFSET }; /* Spec and sensor report offset for the Farbwerk RGB controller */ #define FARBWERK_NUM_SENSORS 4 #define FARBWERK_SENSOR_START 0x2f /* Specs of the Farbwerk 360 RGB controller */ #define FARBWERK360_NUM_SENSORS 4 #define FARBWERK360_NUM_VIRTUAL_SENSORS 16 #define FARBWERK360_CTRL_REPORT_SIZE 0x682 /* Sensor report offsets for the Farbwerk 360 */ #define FARBWERK360_SENSOR_START 0x32 #define FARBWERK360_VIRTUAL_SENSORS_START 0x3a /* Control report offsets for the Farbwerk 360 */ #define FARBWERK360_TEMP_CTRL_OFFSET 0x8 /* Specs of the Octo fan controller */ #define OCTO_NUM_FANS 8 #define OCTO_NUM_SENSORS 4 #define OCTO_NUM_VIRTUAL_SENSORS 16 #define OCTO_NUM_FLOW_SENSORS 1 #define OCTO_CTRL_REPORT_SIZE 0x65F /* Sensor report offsets for the Octo */ #define OCTO_POWER_CYCLES 0x18 #define OCTO_SENSOR_START 0x3D #define OCTO_VIRTUAL_SENSORS_START 0x45 #define OCTO_FLOW_SENSOR_OFFSET 0x7B static u16 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 }; /* Control report offsets for the Octo */ #define OCTO_TEMP_CTRL_OFFSET 0xA #define OCTO_FLOW_PULSES_CTRL_OFFSET 0x6 /* Fan speed offsets (0-100%) */ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0x259, 0x2AE }; /* Specs of Quadro fan controller */ #define QUADRO_NUM_FANS 4 #define QUADRO_NUM_SENSORS 4 #define QUADRO_NUM_VIRTUAL_SENSORS 16 #define QUADRO_NUM_FLOW_SENSORS 1 #define QUADRO_CTRL_REPORT_SIZE 0x3c1 /* 
Sensor report offsets for the Quadro */ #define QUADRO_POWER_CYCLES 0x18 #define QUADRO_SENSOR_START 0x34 #define QUADRO_VIRTUAL_SENSORS_START 0x3c #define QUADRO_FLOW_SENSOR_OFFSET 0x6e static u16 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 }; /* Control report offsets for the Quadro */ #define QUADRO_TEMP_CTRL_OFFSET 0xA #define QUADRO_FLOW_PULSES_CTRL_OFFSET 0x6 static u16 quadro_ctrl_fan_offsets[] = { 0x37, 0x8c, 0xe1, 0x136 }; /* Fan speed offsets (0-100%) */ /* Specs of High Flow Next flow sensor */ #define HIGHFLOWNEXT_NUM_SENSORS 2 #define HIGHFLOWNEXT_NUM_FLOW_SENSORS 1 /* Sensor report offsets for the High Flow Next */ #define HIGHFLOWNEXT_SENSOR_START 85 #define HIGHFLOWNEXT_FLOW 81 #define HIGHFLOWNEXT_WATER_QUALITY 89 #define HIGHFLOWNEXT_POWER 91 #define HIGHFLOWNEXT_CONDUCTIVITY 95 #define HIGHFLOWNEXT_5V_VOLTAGE 97 #define HIGHFLOWNEXT_5V_VOLTAGE_USB 99 /* Specs of the Leakshield */ #define LEAKSHIELD_NUM_SENSORS 2 /* Sensor report offsets for Leakshield */ #define LEAKSHIELD_PRESSURE_ADJUSTED 285 #define LEAKSHIELD_TEMPERATURE_1 265 #define LEAKSHIELD_TEMPERATURE_2 287 #define LEAKSHIELD_PRESSURE_MIN 291 #define LEAKSHIELD_PRESSURE_TARGET 293 #define LEAKSHIELD_PRESSURE_MAX 295 #define LEAKSHIELD_PUMP_RPM_IN 101 #define LEAKSHIELD_FLOW_IN 111 #define LEAKSHIELD_RESERVOIR_VOLUME 313 #define LEAKSHIELD_RESERVOIR_FILLED 311 /* Specs of the Aquastream XT pump */ #define AQUASTREAMXT_SERIAL_START 0x3a #define AQUASTREAMXT_FIRMWARE_VERSION 0x32 #define AQUASTREAMXT_NUM_FANS 2 #define AQUASTREAMXT_NUM_SENSORS 3 #define AQUASTREAMXT_FAN_STOPPED 0x4 #define AQUASTREAMXT_PUMP_CONVERSION_CONST 45000000 #define AQUASTREAMXT_FAN_CONVERSION_CONST 5646000 #define AQUASTREAMXT_SENSOR_REPORT_SIZE 0x42 /* Sensor report offsets and info for Aquastream XT */ #define AQUASTREAMXT_SENSOR_START 0xd #define AQUASTREAMXT_FAN_VOLTAGE_OFFSET 0x7 #define AQUASTREAMXT_FAN_STATUS_OFFSET 0x1d #define AQUASTREAMXT_PUMP_VOLTAGE_OFFSET 0x9 #define AQUASTREAMXT_PUMP_CURR_OFFSET 0xb static u16 aquastreamxt_sensor_fan_offsets[] = { 0x13, 0x1b }; /* Specs of the Poweradjust 3 */ #define POWERADJUST3_NUM_SENSORS 1 #define POWERADJUST3_SENSOR_REPORT_SIZE 0x32 /* Sensor report offsets for the Poweradjust 3 */ #define POWERADJUST3_SENSOR_START 0x03 /* Specs of the High Flow USB */ #define HIGHFLOW_NUM_SENSORS 2 #define HIGHFLOW_NUM_FLOW_SENSORS 1 #define HIGHFLOW_SENSOR_REPORT_SIZE 0x76 /* Sensor report offsets for the High Flow USB */ #define HIGHFLOW_FIRMWARE_VERSION 0x3 #define HIGHFLOW_SERIAL_START 0x9 #define HIGHFLOW_FLOW_SENSOR_OFFSET 0x23 #define HIGHFLOW_SENSOR_START 0x2b /* Labels for D5 Next */ static const char *const label_d5next_temp[] = { "Coolant temp" }; static const char *const label_d5next_speeds[] = { "Pump speed", "Fan speed" }; static const char *const label_d5next_power[] = { "Pump power", "Fan power" }; static const char *const label_d5next_voltages[] = { "Pump voltage", "Fan voltage", "+5V voltage", "+12V voltage" }; static const char *const label_d5next_current[] = { "Pump current", "Fan current" }; /* Labels for Aquaero, Farbwerk, Farbwerk 360 and Octo and Quadro temperature sensors */ static const char *const label_temp_sensors[] = { "Sensor 1", "Sensor 2", "Sensor 3", "Sensor 4", "Sensor 5", "Sensor 6", "Sensor 7", "Sensor 8" }; static const char *const label_virtual_temp_sensors[] = { "Virtual sensor 1", "Virtual sensor 2", "Virtual sensor 3", "Virtual sensor 4", "Virtual sensor 5", "Virtual sensor 6", "Virtual sensor 7", "Virtual sensor 8", "Virtual sensor 9", "Virtual 
sensor 10", "Virtual sensor 11", "Virtual sensor 12", "Virtual sensor 13", "Virtual sensor 14", "Virtual sensor 15", "Virtual sensor 16", }; static const char *const label_aquaero_calc_temp_sensors[] = { "Calc. virtual sensor 1", "Calc. virtual sensor 2", "Calc. virtual sensor 3", "Calc. virtual sensor 4" }; static const char *const label_fan_power[] = { "Fan 1 power", "Fan 2 power", "Fan 3 power", "Fan 4 power", "Fan 5 power", "Fan 6 power", "Fan 7 power", "Fan 8 power" }; static const char *const label_fan_voltage[] = { "Fan 1 voltage", "Fan 2 voltage", "Fan 3 voltage", "Fan 4 voltage", "Fan 5 voltage", "Fan 6 voltage", "Fan 7 voltage", "Fan 8 voltage" }; static const char *const label_fan_current[] = { "Fan 1 current", "Fan 2 current", "Fan 3 current", "Fan 4 current", "Fan 5 current", "Fan 6 current", "Fan 7 current", "Fan 8 current" }; /* Labels for Octo fan speeds */ static const char *const label_octo_speeds[] = { "Fan 1 speed", "Fan 2 speed", "Fan 3 speed", "Fan 4 speed", "Fan 5 speed", "Fan 6 speed", "Fan 7 speed", "Fan 8 speed", "Flow speed [dL/h]", }; /* Labels for Quadro fan speeds */ static const char *const label_quadro_speeds[] = { "Fan 1 speed", "Fan 2 speed", "Fan 3 speed", "Fan 4 speed", "Flow speed [dL/h]" }; /* Labels for Aquaero fan speeds */ static const char *const label_aquaero_speeds[] = { "Fan 1 speed", "Fan 2 speed", "Fan 3 speed", "Fan 4 speed", "Flow sensor 1 [dL/h]", "Flow sensor 2 [dL/h]" }; /* Labels for High Flow Next */ static const char *const label_highflownext_temp_sensors[] = { "Coolant temp", "External sensor" }; static const char *const label_highflownext_fan_speed[] = { "Flow [dL/h]", "Water quality [%]", "Conductivity [nS/cm]", }; static const char *const label_highflownext_power[] = { "Dissipated power", }; static const char *const label_highflownext_voltage[] = { "+5V voltage", "+5V USB voltage" }; /* Labels for Leakshield */ static const char *const label_leakshield_temp_sensors[] = { "Temperature 1", "Temperature 2" }; static const char *const label_leakshield_fan_speed[] = { "Pressure [ubar]", "User-Provided Pump Speed", "User-Provided Flow [dL/h]", "Reservoir Volume [ml]", "Reservoir Filled [ml]", }; /* Labels for Aquastream XT */ static const char *const label_aquastreamxt_temp_sensors[] = { "Fan IC temp", "External sensor", "Coolant temp" }; /* Labels for Aquastream Ultimate */ static const char *const label_aquastreamult_temp[] = { "Coolant temp", "External temp" }; static const char *const label_aquastreamult_speeds[] = { "Fan speed", "Pump speed", "Pressure [mbar]", "Flow speed [dL/h]" }; static const char *const label_aquastreamult_power[] = { "Fan power", "Pump power" }; static const char *const label_aquastreamult_voltages[] = { "Fan voltage", "Pump voltage" }; static const char *const label_aquastreamult_current[] = { "Fan current", "Pump current" }; /* Labels for Poweradjust 3 */ static const char *const label_poweradjust3_temp_sensors[] = { "External sensor" }; /* Labels for Highflow */ static const char *const label_highflow_temp[] = { "External temp", "Internal temp" }; static const char *const label_highflow_speeds[] = { "Flow speed [dL/h]" }; struct aqc_fan_structure_offsets { u8 voltage; u8 curr; u8 power; u8 speed; }; /* Fan structure offsets for Aquaero */ static struct aqc_fan_structure_offsets aqc_aquaero_fan_structure = { .voltage = AQUAERO_FAN_VOLTAGE_OFFSET, .curr = AQUAERO_FAN_CURRENT_OFFSET, .power = AQUAERO_FAN_POWER_OFFSET, .speed = AQUAERO_FAN_SPEED_OFFSET }; /* Fan structure offsets for Aquastream Ultimate */ 
static struct aqc_fan_structure_offsets aqc_aquastreamult_fan_structure = { .voltage = AQUASTREAMULT_FAN_VOLTAGE_OFFSET, .curr = AQUASTREAMULT_FAN_CURRENT_OFFSET, .power = AQUASTREAMULT_FAN_POWER_OFFSET, .speed = AQUASTREAMULT_FAN_SPEED_OFFSET }; /* Fan structure offsets for all devices except those above */ static struct aqc_fan_structure_offsets aqc_general_fan_structure = { .voltage = AQC_FAN_VOLTAGE_OFFSET, .curr = AQC_FAN_CURRENT_OFFSET, .power = AQC_FAN_POWER_OFFSET, .speed = AQC_FAN_SPEED_OFFSET }; struct aqc_data { struct hid_device *hdev; struct device *hwmon_dev; struct dentry *debugfs; struct mutex mutex; /* Used for locking access when reading and writing PWM values */ enum kinds kind; const char *name; int status_report_id; /* Used for legacy devices, report is stored in buffer */ int ctrl_report_id; int secondary_ctrl_report_id; int secondary_ctrl_report_size; u8 *secondary_ctrl_report; ktime_t last_ctrl_report_op; int ctrl_report_delay; /* Delay between two ctrl report operations, in ms */ int buffer_size; u8 *buffer; int checksum_start; int checksum_length; int checksum_offset; int num_fans; u16 *fan_sensor_offsets; u16 *fan_ctrl_offsets; int num_temp_sensors; int temp_sensor_start_offset; int num_virtual_temp_sensors; int virtual_temp_sensor_start_offset; int num_calc_virt_temp_sensors; int calc_virt_temp_sensor_start_offset; u16 temp_ctrl_offset; u16 power_cycle_count_offset; int num_flow_sensors; u8 flow_sensors_start_offset; u8 flow_pulses_ctrl_offset; struct aqc_fan_structure_offsets *fan_structure; /* General info, same across all devices */ u8 serial_number_start_offset; u32 serial_number[2]; u8 firmware_version_offset; u16 firmware_version; /* How many times the device was powered on, if available */ u32 power_cycles; /* Sensor values */ s32 temp_input[20]; /* Max 4 physical and 16 virtual or 8 physical and 12 virtual */ s32 speed_input[9]; u32 speed_input_min[1]; u32 speed_input_target[1]; u32 speed_input_max[1]; u32 power_input[8]; u16 voltage_input[8]; u16 current_input[8]; /* Label values */ const char *const *temp_label; const char *const *virtual_temp_label; const char *const *calc_virt_temp_label; /* For Aquaero */ const char *const *speed_label; const char *const *power_label; const char *const *voltage_label; const char *const *current_label; unsigned long updated; }; /* Converts from centi-percent */ static int aqc_percent_to_pwm(u16 val) { return DIV_ROUND_CLOSEST(val * 255, 100 * 100); } /* Converts to centi-percent */ static int aqc_pwm_to_percent(long val) { if (val < 0 || val > 255) return -EINVAL; return DIV_ROUND_CLOSEST(val * 100 * 100, 255); } /* Converts raw value for Aquastream XT pump speed to RPM */ static int aqc_aquastreamxt_convert_pump_rpm(u16 val) { if (val > 0) return DIV_ROUND_CLOSEST(AQUASTREAMXT_PUMP_CONVERSION_CONST, val); return 0; } /* Converts raw value for Aquastream XT fan speed to RPM */ static int aqc_aquastreamxt_convert_fan_rpm(u16 val) { if (val > 0) return DIV_ROUND_CLOSEST(AQUASTREAMXT_FAN_CONVERSION_CONST, val); return 0; } static void aqc_delay_ctrl_report(struct aqc_data *priv) { /* * If previous read or write is too close to this one, delay the current operation * to give the device enough time to process the previous one. 
*/ if (priv->ctrl_report_delay) { s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op); if (delta < priv->ctrl_report_delay) msleep(priv->ctrl_report_delay - delta); } } /* Expects the mutex to be locked */ static int aqc_get_ctrl_data(struct aqc_data *priv) { int ret; aqc_delay_ctrl_report(priv); memset(priv->buffer, 0x00, priv->buffer_size); ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) ret = -ENODATA; priv->last_ctrl_report_op = ktime_get(); return ret; } /* Expects the mutex to be locked */ static int aqc_send_ctrl_data(struct aqc_data *priv) { int ret; u16 checksum; aqc_delay_ctrl_report(priv); /* Checksum is not needed for Aquaero */ if (priv->kind != aquaero) { /* Init and xorout value for CRC-16/USB is 0xffff */ checksum = crc16(0xffff, priv->buffer + priv->checksum_start, priv->checksum_length); checksum ^= 0xffff; /* Place the new checksum at the end of the report */ put_unaligned_be16(checksum, priv->buffer + priv->checksum_offset); } /* Send the patched up report back to the device */ ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) goto record_access_and_ret; /* The official software sends this report after every change, so do it here as well */ ret = hid_hw_raw_request(priv->hdev, priv->secondary_ctrl_report_id, priv->secondary_ctrl_report, priv->secondary_ctrl_report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); record_access_and_ret: priv->last_ctrl_report_op = ktime_get(); return ret; } /* Refreshes the control buffer and stores value at offset in val */ static int aqc_get_ctrl_val(struct aqc_data *priv, int offset, long *val, int type) { int ret; mutex_lock(&priv->mutex); ret = aqc_get_ctrl_data(priv); if (ret < 0) goto unlock_and_return; switch (type) { case AQC_BE16: *val = (s16)get_unaligned_be16(priv->buffer + offset); break; case AQC_8: *val = priv->buffer[offset]; break; default: ret = -EINVAL; } unlock_and_return: mutex_unlock(&priv->mutex); return ret; } static int aqc_set_ctrl_vals(struct aqc_data *priv, int *offsets, long *vals, int *types, int len) { int ret, i; mutex_lock(&priv->mutex); ret = aqc_get_ctrl_data(priv); if (ret < 0) goto unlock_and_return; for (i = 0; i < len; i++) { switch (types[i]) { case AQC_BE16: put_unaligned_be16((s16)vals[i], priv->buffer + offsets[i]); break; case AQC_8: priv->buffer[offsets[i]] = (u8)vals[i]; break; default: ret = -EINVAL; } } if (ret < 0) goto unlock_and_return; ret = aqc_send_ctrl_data(priv); unlock_and_return: mutex_unlock(&priv->mutex); return ret; } static int aqc_set_ctrl_val(struct aqc_data *priv, int offset, long val, int type) { return aqc_set_ctrl_vals(priv, &offset, &val, &type, 1); } static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct aqc_data *priv = data; switch (type) { case hwmon_temp: if (channel < priv->num_temp_sensors) { switch (attr) { case hwmon_temp_label: case hwmon_temp_input: return 0444; case hwmon_temp_offset: if (priv->temp_ctrl_offset != 0) return 0644; break; default: break; } } if (channel < priv->num_temp_sensors + priv->num_virtual_temp_sensors + priv->num_calc_virt_temp_sensors) switch (attr) { case hwmon_temp_label: case hwmon_temp_input: return 0444; default: break; } break; case hwmon_pwm: if (priv->fan_ctrl_offsets && channel < priv->num_fans) { switch (attr) { case hwmon_pwm_input: return 0644; default: break; 
} } break; case hwmon_fan: switch (attr) { case hwmon_fan_input: case hwmon_fan_label: switch (priv->kind) { case aquastreamult: /* * Special case to support pump RPM, fan RPM, * pressure and flow sensor */ if (channel < 4) return 0444; break; case highflownext: /* Special case to support flow sensor, water quality * and conductivity */ if (channel < 3) return 0444; break; case leakshield: /* Special case for Leakshield sensors */ if (channel < 5) return 0444; break; case aquaero: case octo: case quadro: case highflow: /* Special case to support flow sensors */ if (channel < priv->num_fans + priv->num_flow_sensors) return 0444; break; default: if (channel < priv->num_fans) return 0444; break; } break; case hwmon_fan_pulses: /* Special case for Quadro/Octo flow sensor */ if (channel == priv->num_fans) { switch (priv->kind) { case quadro: case octo: return 0644; default: break; } } break; case hwmon_fan_min: case hwmon_fan_max: case hwmon_fan_target: /* Special case for Leakshield pressure sensor */ if (priv->kind == leakshield && channel == 0) return 0444; break; default: break; } break; case hwmon_power: switch (priv->kind) { case aquastreamult: /* Special case to support pump and fan power */ if (channel < 2) return 0444; break; case highflownext: /* Special case to support one power sensor */ if (channel == 0) return 0444; break; case aquastreamxt: break; default: if (channel < priv->num_fans) return 0444; break; } break; case hwmon_curr: switch (priv->kind) { case aquastreamult: /* Special case to support pump and fan current */ if (channel < 2) return 0444; break; case aquastreamxt: /* Special case to support pump current */ if (channel == 0) return 0444; break; default: if (channel < priv->num_fans) return 0444; break; } break; case hwmon_in: switch (priv->kind) { case d5next: /* Special case to support +5V and +12V voltage sensors */ if (channel < priv->num_fans + 2) return 0444; break; case aquastreamult: case highflownext: /* Special case to support two voltage sensors */ if (channel < 2) return 0444; break; default: if (channel < priv->num_fans) return 0444; break; } break; default: break; } return 0; } /* Read device sensors by manually requesting the sensor report (legacy way) */ static int aqc_legacy_read(struct aqc_data *priv) { int ret, i, sensor_value; mutex_lock(&priv->mutex); memset(priv->buffer, 0x00, priv->buffer_size); ret = hid_hw_raw_request(priv->hdev, priv->status_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) goto unlock_and_return; /* Temperature sensor readings */ for (i = 0; i < priv->num_temp_sensors; i++) { sensor_value = get_unaligned_le16(priv->buffer + priv->temp_sensor_start_offset + i * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; } /* Special-case sensor readings */ switch (priv->kind) { case aquastreamxt: /* Info provided with every report */ priv->serial_number[0] = get_unaligned_le16(priv->buffer + priv->serial_number_start_offset); priv->firmware_version = get_unaligned_le16(priv->buffer + priv->firmware_version_offset); /* Read pump speed in RPM */ sensor_value = get_unaligned_le16(priv->buffer + priv->fan_sensor_offsets[0]); priv->speed_input[0] = aqc_aquastreamxt_convert_pump_rpm(sensor_value); /* Read fan speed in RPM, if available */ sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_FAN_STATUS_OFFSET); if (sensor_value == AQUASTREAMXT_FAN_STOPPED) { priv->speed_input[1] = 0; } else { sensor_value = 
get_unaligned_le16(priv->buffer + priv->fan_sensor_offsets[1]); priv->speed_input[1] = aqc_aquastreamxt_convert_fan_rpm(sensor_value); } /* Calculation derived from linear regression */ sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_PUMP_CURR_OFFSET); priv->current_input[0] = DIV_ROUND_CLOSEST(sensor_value * 176, 100) - 52; sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_PUMP_VOLTAGE_OFFSET); priv->voltage_input[0] = DIV_ROUND_CLOSEST(sensor_value * 1000, 61); sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_FAN_VOLTAGE_OFFSET); priv->voltage_input[1] = DIV_ROUND_CLOSEST(sensor_value * 1000, 63); break; case highflow: /* Info provided with every report */ priv->serial_number[0] = get_unaligned_le16(priv->buffer + priv->serial_number_start_offset); priv->firmware_version = get_unaligned_le16(priv->buffer + priv->firmware_version_offset); /* Read flow speed */ priv->speed_input[0] = get_unaligned_le16(priv->buffer + priv->flow_sensors_start_offset); break; default: break; } priv->updated = jiffies; unlock_and_return: mutex_unlock(&priv->mutex); return ret; } static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { int ret; struct aqc_data *priv = dev_get_drvdata(dev); if (time_after(jiffies, priv->updated + STATUS_UPDATE_INTERVAL)) { if (priv->status_report_id != 0) { /* Legacy devices require manual reads */ ret = aqc_legacy_read(priv); if (ret < 0) return -ENODATA; } else { return -ENODATA; } } switch (type) { case hwmon_temp: switch (attr) { case hwmon_temp_input: if (priv->temp_input[channel] == -ENODATA) return -ENODATA; *val = priv->temp_input[channel]; break; case hwmon_temp_offset: ret = aqc_get_ctrl_val(priv, priv->temp_ctrl_offset + channel * AQC_SENSOR_SIZE, val, AQC_BE16); if (ret < 0) return ret; *val *= 10; break; default: break; } break; case hwmon_fan: switch (attr) { case hwmon_fan_input: if (priv->speed_input[channel] == -ENODATA) return -ENODATA; *val = priv->speed_input[channel]; break; case hwmon_fan_min: *val = priv->speed_input_min[channel]; break; case hwmon_fan_max: *val = priv->speed_input_max[channel]; break; case hwmon_fan_target: *val = priv->speed_input_target[channel]; break; case hwmon_fan_pulses: ret = aqc_get_ctrl_val(priv, priv->flow_pulses_ctrl_offset, val, AQC_BE16); if (ret < 0) return ret; break; default: break; } break; case hwmon_power: *val = priv->power_input[channel]; break; case hwmon_pwm: switch (priv->kind) { case aquaero: ret = aqc_get_ctrl_val(priv, AQUAERO_CTRL_PRESET_START + channel * AQUAERO_CTRL_PRESET_SIZE, val, AQC_BE16); if (ret < 0) return ret; *val = aqc_percent_to_pwm(*val); break; default: ret = aqc_get_ctrl_val(priv, priv->fan_ctrl_offsets[channel], val, AQC_BE16); if (ret < 0) return ret; *val = aqc_percent_to_pwm(*val); break; } break; case hwmon_in: *val = priv->voltage_input[channel]; break; case hwmon_curr: *val = priv->current_input[channel]; break; default: return -EOPNOTSUPP; } return 0; } static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { struct aqc_data *priv = dev_get_drvdata(dev); /* Number of sensors that are not calculated */ int num_non_calc_sensors = priv->num_temp_sensors + priv->num_virtual_temp_sensors; switch (type) { case hwmon_temp: if (channel < priv->num_temp_sensors) { *str = priv->temp_label[channel]; } else { if (priv->kind == aquaero && channel >= num_non_calc_sensors) *str = priv->calc_virt_temp_label[channel - num_non_calc_sensors]; else *str = 
priv->virtual_temp_label[channel - priv->num_temp_sensors]; } break; case hwmon_fan: *str = priv->speed_label[channel]; break; case hwmon_power: *str = priv->power_label[channel]; break; case hwmon_in: *str = priv->voltage_label[channel]; break; case hwmon_curr: *str = priv->current_label[channel]; break; default: return -EOPNOTSUPP; } return 0; } static int aqc_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { int ret, pwm_value; /* Arrays for setting multiple values at once in the control report */ int ctrl_values_offsets[4]; long ctrl_values[4]; int ctrl_values_types[4]; struct aqc_data *priv = dev_get_drvdata(dev); switch (type) { case hwmon_temp: switch (attr) { case hwmon_temp_offset: /* Limit temp offset to +/- 15K as in the official software */ val = clamp_val(val, -15000, 15000) / 10; ret = aqc_set_ctrl_val(priv, priv->temp_ctrl_offset + channel * AQC_SENSOR_SIZE, val, AQC_BE16); if (ret < 0) return ret; break; default: return -EOPNOTSUPP; } break; case hwmon_fan: switch (attr) { case hwmon_fan_pulses: val = clamp_val(val, 10, 1000); ret = aqc_set_ctrl_val(priv, priv->flow_pulses_ctrl_offset, val, AQC_BE16); if (ret < 0) return ret; break; default: break; } break; case hwmon_pwm: switch (attr) { case hwmon_pwm_input: pwm_value = aqc_pwm_to_percent(val); if (pwm_value < 0) return pwm_value; switch (priv->kind) { case aquaero: /* Write pwm value to preset corresponding to the channel */ ctrl_values_offsets[0] = AQUAERO_CTRL_PRESET_START + channel * AQUAERO_CTRL_PRESET_SIZE; ctrl_values[0] = pwm_value; ctrl_values_types[0] = AQC_BE16; /* Write preset number in fan control source */ ctrl_values_offsets[1] = priv->fan_ctrl_offsets[channel] + AQUAERO_FAN_CTRL_SRC_OFFSET; ctrl_values[1] = AQUAERO_CTRL_PRESET_ID + channel; ctrl_values_types[1] = AQC_BE16; /* Set minimum power to 0 to allow the fan to turn off */ ctrl_values_offsets[2] = priv->fan_ctrl_offsets[channel] + AQUAERO_FAN_CTRL_MIN_PWR_OFFSET; ctrl_values[2] = 0; ctrl_values_types[2] = AQC_BE16; /* Set maximum power to 255 to allow the fan to reach max speed */ ctrl_values_offsets[3] = priv->fan_ctrl_offsets[channel] + AQUAERO_FAN_CTRL_MAX_PWR_OFFSET; ctrl_values[3] = aqc_pwm_to_percent(255); ctrl_values_types[3] = AQC_BE16; ret = aqc_set_ctrl_vals(priv, ctrl_values_offsets, ctrl_values, ctrl_values_types, 4); if (ret < 0) return ret; break; default: ret = aqc_set_ctrl_val(priv, priv->fan_ctrl_offsets[channel], pwm_value, AQC_BE16); if (ret < 0) return ret; break; } break; default: break; } break; default: return -EOPNOTSUPP; } return 0; } static const struct hwmon_ops aqc_hwmon_ops = { .is_visible = aqc_is_visible, .read = aqc_read, .read_string = aqc_read_string, .write = aqc_write }; static const struct hwmon_channel_info * const aqc_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, 
HWMON_T_INPUT | HWMON_T_LABEL), HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | HWMON_F_TARGET, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES), HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL), HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL), NULL }; static const struct hwmon_chip_info aqc_chip_info = { .ops = &aqc_hwmon_ops, .info = aqc_info, }; static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { int i, j, sensor_value; struct aqc_data *priv; if (report->id != STATUS_REPORT_ID) return 0; priv = hid_get_drvdata(hdev); /* Info provided with every report */ priv->serial_number[0] = get_unaligned_be16(data + priv->serial_number_start_offset); priv->serial_number[1] = get_unaligned_be16(data + priv->serial_number_start_offset + SERIAL_PART_OFFSET); priv->firmware_version = get_unaligned_be16(data + priv->firmware_version_offset); /* Physical temperature sensor readings */ for (i = 0; i < priv->num_temp_sensors; i++) { sensor_value = get_unaligned_be16(data + priv->temp_sensor_start_offset + i * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; } /* Virtual temperature sensor readings */ for (j = 0; j < priv->num_virtual_temp_sensors; j++) { sensor_value = get_unaligned_be16(data + priv->virtual_temp_sensor_start_offset + j * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; i++; } /* Fan speed and related readings */ for (i = 0; i < priv->num_fans; i++) { priv->speed_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->speed); priv->power_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->power) * 10000; priv->voltage_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->voltage) * 10; priv->current_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->curr); } /* Flow sensor readings */ for (j = 0; j < priv->num_flow_sensors; j++) { priv->speed_input[i] = get_unaligned_be16(data + priv->flow_sensors_start_offset + j * AQC_SENSOR_SIZE); i++; } if (priv->power_cycle_count_offset != 0) priv->power_cycles = get_unaligned_be32(data + priv->power_cycle_count_offset); /* Special-case sensor readings */ switch (priv->kind) { 
case aquaero: /* Read calculated virtual temp sensors */ i = priv->num_temp_sensors + priv->num_virtual_temp_sensors; for (j = 0; j < priv->num_calc_virt_temp_sensors; j++) { sensor_value = get_unaligned_be16(data + priv->calc_virt_temp_sensor_start_offset + j * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; i++; } break; case aquastreamult: priv->speed_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_OFFSET); priv->speed_input[2] = get_unaligned_be16(data + AQUASTREAMULT_PRESSURE_OFFSET); priv->speed_input[3] = get_unaligned_be16(data + AQUASTREAMULT_FLOW_SENSOR_OFFSET); priv->power_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_POWER) * 10000; priv->voltage_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_VOLTAGE) * 10; priv->current_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_CURRENT); break; case d5next: priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10; priv->voltage_input[3] = get_unaligned_be16(data + D5NEXT_12V_VOLTAGE) * 10; break; case highflownext: /* If external temp sensor is not connected, its power reading is also N/A */ if (priv->temp_input[1] == -ENODATA) priv->power_input[0] = -ENODATA; else priv->power_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_POWER) * 1000000; priv->voltage_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE) * 10; priv->voltage_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE_USB) * 10; priv->speed_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_WATER_QUALITY); priv->speed_input[2] = get_unaligned_be16(data + HIGHFLOWNEXT_CONDUCTIVITY); break; case leakshield: priv->speed_input[0] = ((s16)get_unaligned_be16(data + LEAKSHIELD_PRESSURE_ADJUSTED)) * 100; priv->speed_input_min[0] = get_unaligned_be16(data + LEAKSHIELD_PRESSURE_MIN) * 100; priv->speed_input_target[0] = get_unaligned_be16(data + LEAKSHIELD_PRESSURE_TARGET) * 100; priv->speed_input_max[0] = get_unaligned_be16(data + LEAKSHIELD_PRESSURE_MAX) * 100; priv->speed_input[1] = get_unaligned_be16(data + LEAKSHIELD_PUMP_RPM_IN); if (priv->speed_input[1] == AQC_SENSOR_NA) priv->speed_input[1] = -ENODATA; priv->speed_input[2] = get_unaligned_be16(data + LEAKSHIELD_FLOW_IN); if (priv->speed_input[2] == AQC_SENSOR_NA) priv->speed_input[2] = -ENODATA; priv->speed_input[3] = get_unaligned_be16(data + LEAKSHIELD_RESERVOIR_VOLUME); priv->speed_input[4] = get_unaligned_be16(data + LEAKSHIELD_RESERVOIR_FILLED); /* Second temp sensor is not positioned after the first one, read it here */ priv->temp_input[1] = get_unaligned_be16(data + LEAKSHIELD_TEMPERATURE_2) * 10; break; default: break; } priv->updated = jiffies; return 0; } static int serial_number_show(struct seq_file *seqf, void *unused) { struct aqc_data *priv = seqf->private; seq_printf(seqf, "%05u-%05u\n", priv->serial_number[0], priv->serial_number[1]); return 0; } DEFINE_SHOW_ATTRIBUTE(serial_number); static int firmware_version_show(struct seq_file *seqf, void *unused) { struct aqc_data *priv = seqf->private; seq_printf(seqf, "%u\n", priv->firmware_version); return 0; } DEFINE_SHOW_ATTRIBUTE(firmware_version); static int power_cycles_show(struct seq_file *seqf, void *unused) { struct aqc_data *priv = seqf->private; seq_printf(seqf, "%u\n", priv->power_cycles); return 0; } DEFINE_SHOW_ATTRIBUTE(power_cycles); static void aqc_debugfs_init(struct aqc_data *priv) { char name[64]; scnprintf(name, sizeof(name), "%s_%s-%s", "aquacomputer", priv->name, dev_name(&priv->hdev->dev)); 
priv->debugfs = debugfs_create_dir(name, NULL); if (priv->serial_number_start_offset != 0) debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops); if (priv->firmware_version_offset != 0) debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops); if (priv->power_cycle_count_offset != 0) debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops); } static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct aqc_data *priv; int ret; priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->hdev = hdev; hid_set_drvdata(hdev, priv); priv->updated = jiffies - STATUS_UPDATE_INTERVAL; ret = hid_parse(hdev); if (ret) return ret; ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) return ret; ret = hid_hw_open(hdev); if (ret) goto fail_and_stop; switch (hdev->product) { case USB_PRODUCT_ID_AQUAERO: /* * Aquaero presents itself as three HID devices under the same product ID: * "aquaero keyboard/mouse", "aquaero System Control" and "aquaero Device", * which is the one we want to communicate with. Unlike most other Aquacomputer * devices, Aquaero does not return meaningful data when explicitly requested * using GET_FEATURE_REPORT. * * The difference between "aquaero Device" and the other two is in the collections * they present. The two other devices have the type of the second element in * their respective collections set to 1, while the real device has it set to 0. */ if (hdev->collection[1].type != 0) { ret = -ENODEV; goto fail_and_close; } priv->kind = aquaero; priv->num_fans = AQUAERO_NUM_FANS; priv->fan_sensor_offsets = aquaero_sensor_fan_offsets; priv->fan_ctrl_offsets = aquaero_ctrl_fan_offsets; priv->num_temp_sensors = AQUAERO_NUM_SENSORS; priv->temp_sensor_start_offset = AQUAERO_SENSOR_START; priv->num_virtual_temp_sensors = AQUAERO_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = AQUAERO_VIRTUAL_SENSOR_START; priv->num_calc_virt_temp_sensors = AQUAERO_NUM_CALC_VIRTUAL_SENSORS; priv->calc_virt_temp_sensor_start_offset = AQUAERO_CALC_VIRTUAL_SENSOR_START; priv->num_flow_sensors = AQUAERO_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = AQUAERO_FLOW_SENSORS_START; priv->buffer_size = AQUAERO_CTRL_REPORT_SIZE; priv->temp_ctrl_offset = AQUAERO_TEMP_CTRL_OFFSET; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; priv->calc_virt_temp_label = label_aquaero_calc_temp_sensors; priv->speed_label = label_aquaero_speeds; priv->power_label = label_fan_power; priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; case USB_PRODUCT_ID_D5NEXT: priv->kind = d5next; priv->num_fans = D5NEXT_NUM_FANS; priv->fan_sensor_offsets = d5next_sensor_fan_offsets; priv->fan_ctrl_offsets = d5next_ctrl_fan_offsets; priv->num_temp_sensors = D5NEXT_NUM_SENSORS; priv->temp_sensor_start_offset = D5NEXT_COOLANT_TEMP; priv->num_virtual_temp_sensors = D5NEXT_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START; priv->temp_ctrl_offset = D5NEXT_TEMP_CTRL_OFFSET; priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES; priv->temp_label = label_d5next_temp; priv->virtual_temp_label = label_virtual_temp_sensors; priv->speed_label = label_d5next_speeds; priv->power_label = label_d5next_power; priv->voltage_label = 
label_d5next_voltages; priv->current_label = label_d5next_current; break; case USB_PRODUCT_ID_FARBWERK: priv->kind = farbwerk; priv->num_fans = 0; priv->num_temp_sensors = FARBWERK_NUM_SENSORS; priv->temp_sensor_start_offset = FARBWERK_SENSOR_START; priv->temp_label = label_temp_sensors; break; case USB_PRODUCT_ID_FARBWERK360: priv->kind = farbwerk360; priv->num_fans = 0; priv->num_temp_sensors = FARBWERK360_NUM_SENSORS; priv->temp_sensor_start_offset = FARBWERK360_SENSOR_START; priv->num_virtual_temp_sensors = FARBWERK360_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = FARBWERK360_VIRTUAL_SENSORS_START; priv->temp_ctrl_offset = FARBWERK360_TEMP_CTRL_OFFSET; priv->buffer_size = FARBWERK360_CTRL_REPORT_SIZE; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; break; case USB_PRODUCT_ID_OCTO: priv->kind = octo; priv->num_fans = OCTO_NUM_FANS; priv->fan_sensor_offsets = octo_sensor_fan_offsets; priv->fan_ctrl_offsets = octo_ctrl_fan_offsets; priv->num_temp_sensors = OCTO_NUM_SENSORS; priv->temp_sensor_start_offset = OCTO_SENSOR_START; priv->num_virtual_temp_sensors = OCTO_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START; priv->num_flow_sensors = OCTO_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = OCTO_FLOW_SENSOR_OFFSET; priv->temp_ctrl_offset = OCTO_TEMP_CTRL_OFFSET; priv->buffer_size = OCTO_CTRL_REPORT_SIZE; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->flow_pulses_ctrl_offset = OCTO_FLOW_PULSES_CTRL_OFFSET; priv->power_cycle_count_offset = OCTO_POWER_CYCLES; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; priv->speed_label = label_octo_speeds; priv->power_label = label_fan_power; priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; case USB_PRODUCT_ID_QUADRO: priv->kind = quadro; priv->num_fans = QUADRO_NUM_FANS; priv->fan_sensor_offsets = quadro_sensor_fan_offsets; priv->fan_ctrl_offsets = quadro_ctrl_fan_offsets; priv->num_temp_sensors = QUADRO_NUM_SENSORS; priv->temp_sensor_start_offset = QUADRO_SENSOR_START; priv->num_virtual_temp_sensors = QUADRO_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START; priv->num_flow_sensors = QUADRO_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = QUADRO_FLOW_SENSOR_OFFSET; priv->temp_ctrl_offset = QUADRO_TEMP_CTRL_OFFSET; priv->buffer_size = QUADRO_CTRL_REPORT_SIZE; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->flow_pulses_ctrl_offset = QUADRO_FLOW_PULSES_CTRL_OFFSET; priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; priv->speed_label = label_quadro_speeds; priv->power_label = label_fan_power; priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; case USB_PRODUCT_ID_HIGHFLOWNEXT: priv->kind = highflownext; priv->num_fans = 0; priv->num_temp_sensors = HIGHFLOWNEXT_NUM_SENSORS; priv->temp_sensor_start_offset = HIGHFLOWNEXT_SENSOR_START; priv->num_flow_sensors = HIGHFLOWNEXT_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = HIGHFLOWNEXT_FLOW; priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; priv->temp_label = label_highflownext_temp_sensors; priv->speed_label = label_highflownext_fan_speed; priv->power_label = label_highflownext_power; priv->voltage_label = label_highflownext_voltage; break; case USB_PRODUCT_ID_LEAKSHIELD: /* * Choose the right Leakshield device, because * the other one 
acts as a keyboard */ if (hdev->type != 2) { ret = -ENODEV; goto fail_and_close; } priv->kind = leakshield; priv->num_fans = 0; priv->num_temp_sensors = LEAKSHIELD_NUM_SENSORS; priv->temp_sensor_start_offset = LEAKSHIELD_TEMPERATURE_1; priv->temp_label = label_leakshield_temp_sensors; priv->speed_label = label_leakshield_fan_speed; break; case USB_PRODUCT_ID_AQUASTREAMXT: priv->kind = aquastreamxt; priv->num_fans = AQUASTREAMXT_NUM_FANS; priv->fan_sensor_offsets = aquastreamxt_sensor_fan_offsets; priv->num_temp_sensors = AQUASTREAMXT_NUM_SENSORS; priv->temp_sensor_start_offset = AQUASTREAMXT_SENSOR_START; priv->buffer_size = AQUASTREAMXT_SENSOR_REPORT_SIZE; priv->temp_label = label_aquastreamxt_temp_sensors; priv->speed_label = label_d5next_speeds; priv->voltage_label = label_d5next_voltages; priv->current_label = label_d5next_current; break; case USB_PRODUCT_ID_AQUASTREAMULT: priv->kind = aquastreamult; priv->num_fans = AQUASTREAMULT_NUM_FANS; priv->fan_sensor_offsets = aquastreamult_sensor_fan_offsets; priv->num_temp_sensors = AQUASTREAMULT_NUM_SENSORS; priv->temp_sensor_start_offset = AQUASTREAMULT_SENSOR_START; priv->temp_label = label_aquastreamult_temp; priv->speed_label = label_aquastreamult_speeds; priv->power_label = label_aquastreamult_power; priv->voltage_label = label_aquastreamult_voltages; priv->current_label = label_aquastreamult_current; break; case USB_PRODUCT_ID_POWERADJUST3: priv->kind = poweradjust3; priv->num_fans = 0; priv->num_temp_sensors = POWERADJUST3_NUM_SENSORS; priv->temp_sensor_start_offset = POWERADJUST3_SENSOR_START; priv->buffer_size = POWERADJUST3_SENSOR_REPORT_SIZE; priv->temp_label = label_poweradjust3_temp_sensors; break; case USB_PRODUCT_ID_HIGHFLOW: priv->kind = highflow; priv->num_fans = 0; priv->num_temp_sensors = HIGHFLOW_NUM_SENSORS; priv->temp_sensor_start_offset = HIGHFLOW_SENSOR_START; priv->num_flow_sensors = HIGHFLOW_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = HIGHFLOW_FLOW_SENSOR_OFFSET; priv->buffer_size = HIGHFLOW_SENSOR_REPORT_SIZE; priv->temp_label = label_highflow_temp; priv->speed_label = label_highflow_speeds; break; default: break; } switch (priv->kind) { case aquaero: priv->serial_number_start_offset = AQUAERO_SERIAL_START; priv->firmware_version_offset = AQUAERO_FIRMWARE_VERSION; priv->fan_structure = &aqc_aquaero_fan_structure; priv->ctrl_report_id = AQUAERO_CTRL_REPORT_ID; priv->secondary_ctrl_report_id = AQUAERO_SECONDARY_CTRL_REPORT_ID; priv->secondary_ctrl_report_size = AQUAERO_SECONDARY_CTRL_REPORT_SIZE; priv->secondary_ctrl_report = aquaero_secondary_ctrl_report; break; case poweradjust3: priv->status_report_id = POWERADJUST3_STATUS_REPORT_ID; break; case aquastreamxt: priv->serial_number_start_offset = AQUASTREAMXT_SERIAL_START; priv->firmware_version_offset = AQUASTREAMXT_FIRMWARE_VERSION; priv->status_report_id = AQUASTREAMXT_STATUS_REPORT_ID; break; case highflow: priv->serial_number_start_offset = HIGHFLOW_SERIAL_START; priv->firmware_version_offset = HIGHFLOW_FIRMWARE_VERSION; priv->status_report_id = HIGHFLOW_STATUS_REPORT_ID; break; default: priv->serial_number_start_offset = AQC_SERIAL_START; priv->firmware_version_offset = AQC_FIRMWARE_VERSION; priv->ctrl_report_id = CTRL_REPORT_ID; priv->secondary_ctrl_report_id = SECONDARY_CTRL_REPORT_ID; priv->secondary_ctrl_report_size = SECONDARY_CTRL_REPORT_SIZE; priv->secondary_ctrl_report = secondary_ctrl_report; if (priv->kind == aquastreamult) priv->fan_structure = &aqc_aquastreamult_fan_structure; else priv->fan_structure = &aqc_general_fan_structure; break; } if 
(priv->buffer_size != 0) { priv->checksum_start = 0x01; priv->checksum_length = priv->buffer_size - 3; priv->checksum_offset = priv->buffer_size - 2; } priv->name = aqc_device_names[priv->kind]; priv->buffer = devm_kzalloc(&hdev->dev, priv->buffer_size, GFP_KERNEL); if (!priv->buffer) { ret = -ENOMEM; goto fail_and_close; } mutex_init(&priv->mutex); priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, priv->name, priv, &aqc_chip_info, NULL); if (IS_ERR(priv->hwmon_dev)) { ret = PTR_ERR(priv->hwmon_dev); goto fail_and_close; } aqc_debugfs_init(priv); return 0; fail_and_close: hid_hw_close(hdev); fail_and_stop: hid_hw_stop(hdev); return ret; } static void aqc_remove(struct hid_device *hdev) { struct aqc_data *priv = hid_get_drvdata(hdev); debugfs_remove_recursive(priv->debugfs); hwmon_device_unregister(priv->hwmon_dev); hid_hw_close(hdev); hid_hw_stop(hdev); } static const struct hid_device_id aqc_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUAERO) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_D5NEXT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOWNEXT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_LEAKSHIELD) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUASTREAMXT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUASTREAMULT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_POWERADJUST3) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOW) }, { } }; MODULE_DEVICE_TABLE(hid, aqc_table); static struct hid_driver aqc_driver = { .name = DRIVER_NAME, .id_table = aqc_table, .probe = aqc_probe, .remove = aqc_remove, .raw_event = aqc_raw_event, }; static int __init aqc_init(void) { return hid_register_driver(&aqc_driver); } static void __exit aqc_exit(void) { hid_unregister_driver(&aqc_driver); } /* Initialize after the HID bus so this driver is not loaded before it */ late_initcall(aqc_init); module_exit(aqc_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>"); MODULE_AUTHOR("Jack Doan <me@jackdoan.com>"); MODULE_DESCRIPTION("Hwmon driver for Aquacomputer devices");
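The probe path above fixes the control-buffer checksum layout for devices that use one: bytes from offset 0x01 up to buffer_size - 3 are covered, and a 16-bit checksum lands at offset buffer_size - 2. A minimal userspace sketch of that layout arithmetic; the CRC-16 variant (reflected poly 0x8005, seed 0xffff) is an assumption for illustration, not a claim about the driver's exact algorithm:

#include <stdint.h>
#include <stdio.h>

/* Illustrative reflected CRC-16 (poly 0x8005, seed 0xffff); the real driver
 * may use a different variant -- this only demonstrates the buffer layout. */
static uint16_t crc16_update(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xa001 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t buf[64] = { 0 };	/* stand-in for priv->buffer */
	size_t buffer_size = sizeof(buf);
	size_t checksum_start = 0x01;
	size_t checksum_length = buffer_size - 3;
	size_t checksum_offset = buffer_size - 2;

	uint16_t crc = crc16_update(0xffff, buf + checksum_start, checksum_length);

	/* Store the result in the last two bytes of the report, big-endian */
	buf[checksum_offset] = crc >> 8;
	buf[checksum_offset + 1] = crc & 0xff;
	printf("checksum %#06x over bytes [%zu, %zu)\n",
	       crc, checksum_start, checksum_start + checksum_length);
	return 0;
}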
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_STR_HASH_H #define _BCACHEFS_STR_HASH_H #include "btree_iter.h" #include "btree_update.h" #include "checksum.h" #include "error.h" #include "inode.h" #include "siphash.h" #include "subvolume.h" #include "super.h" #include <linux/crc32c.h> #include <crypto/hash.h> #include <crypto/sha2.h> static inline enum bch_str_hash_type bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt) { switch (opt) { case BCH_STR_HASH_OPT_crc32c: return BCH_STR_HASH_crc32c; case BCH_STR_HASH_OPT_crc64: return BCH_STR_HASH_crc64; case BCH_STR_HASH_OPT_siphash: return c->sb.features & (1ULL << BCH_FEATURE_new_siphash) ? BCH_STR_HASH_siphash : BCH_STR_HASH_siphash_old; default: BUG(); } } struct bch_hash_info { u8 type; /* * For crc32 or crc64 string hashes the first key value of * the siphash_key (k0) is used as the key.
*/ SIPHASH_KEY siphash_key; }; static inline struct bch_hash_info bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi) { /* XXX ick */ struct bch_hash_info info = { .type = INODE_STR_HASH(bi), .siphash_key = { .k0 = bi->bi_hash_seed } }; if (unlikely(info.type == BCH_STR_HASH_siphash_old)) { SHASH_DESC_ON_STACK(desc, c->sha256); u8 digest[SHA256_DIGEST_SIZE]; desc->tfm = c->sha256; crypto_shash_digest(desc, (void *) &bi->bi_hash_seed, sizeof(bi->bi_hash_seed), digest); memcpy(&info.siphash_key, digest, sizeof(info.siphash_key)); } return info; } struct bch_str_hash_ctx { union { u32 crc32c; u64 crc64; SIPHASH_CTX siphash; }; }; static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info) { switch (info->type) { case BCH_STR_HASH_crc32c: ctx->crc32c = crc32c(~0, &info->siphash_key.k0, sizeof(info->siphash_key.k0)); break; case BCH_STR_HASH_crc64: ctx->crc64 = crc64_be(~0, &info->siphash_key.k0, sizeof(info->siphash_key.k0)); break; case BCH_STR_HASH_siphash_old: case BCH_STR_HASH_siphash: SipHash24_Init(&ctx->siphash, &info->siphash_key); break; default: BUG(); } } static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info, const void *data, size_t len) { switch (info->type) { case BCH_STR_HASH_crc32c: ctx->crc32c = crc32c(ctx->crc32c, data, len); break; case BCH_STR_HASH_crc64: ctx->crc64 = crc64_be(ctx->crc64, data, len); break; case BCH_STR_HASH_siphash_old: case BCH_STR_HASH_siphash: SipHash24_Update(&ctx->siphash, data, len); break; default: BUG(); } } static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info) { switch (info->type) { case BCH_STR_HASH_crc32c: return ctx->crc32c; case BCH_STR_HASH_crc64: return ctx->crc64 >> 1; case BCH_STR_HASH_siphash_old: case BCH_STR_HASH_siphash: return SipHash24_End(&ctx->siphash) >> 1; default: BUG(); } } struct bch_hash_desc { enum btree_id btree_id; u8 key_type; u64 (*hash_key)(const struct bch_hash_info *, const void *); u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c); bool (*cmp_key)(struct bkey_s_c, const void *); bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c); bool (*is_visible)(subvol_inum inum, struct bkey_s_c); }; static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, struct bkey_s_c k) { return k.k->type == desc.key_type && (!desc.is_visible || !inum.inum || desc.is_visible(inum, k)); } static __always_inline struct bkey_s_c bch2_hash_lookup_in_snapshot(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key, enum btree_iter_update_trigger_flags flags, u32 snapshot) { struct bkey_s_c k; int ret; for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), POS(inum.inum, U64_MAX), BTREE_ITER_slots|flags, k, ret) { if (is_visible_key(desc, inum, k)) { if (!desc.cmp_key(k, key)) return k; } else if (k.k->type == KEY_TYPE_hash_whiteout) { ; } else { /* hole, not found */ break; } } bch2_trans_iter_exit(trans, iter); return bkey_s_c_err(ret ?: -BCH_ERR_ENOENT_str_hash_lookup); } static __always_inline struct bkey_s_c bch2_hash_lookup(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key, enum btree_iter_update_trigger_flags flags) { u32 snapshot; int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, 
&snapshot); if (ret) return bkey_s_c_err(ret); return bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot); } static __always_inline int bch2_hash_hole(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key) { struct bkey_s_c k; u32 snapshot; int ret; ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); if (ret) return ret; for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), POS(inum.inum, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent, k, ret) if (!is_visible_key(desc, inum, k)) return 0; bch2_trans_iter_exit(trans, iter); return ret ?: -BCH_ERR_ENOSPC_str_hash_create; } static __always_inline int bch2_hash_needs_whiteout(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *start) { struct btree_iter iter; struct bkey_s_c k; int ret; bch2_trans_copy_iter(&iter, start); bch2_btree_iter_advance(&iter); for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) { if (k.k->type != desc.key_type && k.k->type != KEY_TYPE_hash_whiteout) break; if (k.k->type == desc.key_type && desc.hash_bkey(info, k) <= start->pos.offset) { ret = 1; break; } } bch2_trans_iter_exit(trans, &iter); return ret; } static __always_inline struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, u32 snapshot, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { struct btree_iter slot = {}; struct bkey_s_c k; bool found = false; int ret; for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(insert->k.p.inode, desc.hash_bkey(info, bkey_i_to_s_c(insert)), snapshot), POS(insert->k.p.inode, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent|flags, k, ret) { if (is_visible_key(desc, inum, k)) { if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert))) goto found; /* hash collision: */ continue; } if (!slot.path && !(flags & STR_HASH_must_replace)) bch2_trans_copy_iter(&slot, iter); if (k.k->type != KEY_TYPE_hash_whiteout) goto not_found; } if (!ret) ret = -BCH_ERR_ENOSPC_str_hash_create; out: bch2_trans_iter_exit(trans, &slot); bch2_trans_iter_exit(trans, iter); return ret ? 
bkey_s_c_err(ret) : bkey_s_c_null; found: found = true; not_found: if (found && (flags & STR_HASH_must_create)) { bch2_trans_iter_exit(trans, &slot); return k; } else if (!found && (flags & STR_HASH_must_replace)) { ret = -BCH_ERR_ENOENT_str_hash_set_must_replace; } else { if (!found && slot.path) swap(*iter, slot); insert->k.p = iter->pos; ret = bch2_trans_update(trans, iter, insert, flags); } goto out; } static __always_inline int bch2_hash_set_in_snapshot(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, u32 snapshot, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { struct btree_iter iter; struct bkey_s_c k = bch2_hash_set_or_get_in_snapshot(trans, &iter, desc, info, inum, snapshot, insert, flags); int ret = bkey_err(k); if (ret) return ret; if (k.k) { bch2_trans_iter_exit(trans, &iter); return -BCH_ERR_EEXIST_str_hash_set; } return 0; } static __always_inline int bch2_hash_set(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { insert->k.p.inode = inum.inum; u32 snapshot; return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?: bch2_hash_set_in_snapshot(trans, desc, info, inum, snapshot, insert, flags); } static __always_inline int bch2_hash_delete_at(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *iter, enum btree_iter_update_trigger_flags flags) { struct bkey_i *delete; int ret; delete = bch2_trans_kmalloc(trans, sizeof(*delete)); ret = PTR_ERR_OR_ZERO(delete); if (ret) return ret; ret = bch2_hash_needs_whiteout(trans, desc, info, iter); if (ret < 0) return ret; bkey_init(&delete->k); delete->k.p = iter->pos; delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted; return bch2_trans_update(trans, iter, delete, flags); } static __always_inline int bch2_hash_delete(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key) { struct btree_iter iter; struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key, BTREE_ITER_intent); int ret = bkey_err(k); if (ret) return ret; ret = bch2_hash_delete_at(trans, desc, info, &iter, 0); bch2_trans_iter_exit(trans, &iter); return ret; } struct snapshots_seen; int __bch2_str_hash_check_key(struct btree_trans *, struct snapshots_seen *, const struct bch_hash_desc *, struct bch_hash_info *, struct btree_iter *, struct bkey_s_c); static inline int bch2_str_hash_check_key(struct btree_trans *trans, struct snapshots_seen *s, const struct bch_hash_desc *desc, struct bch_hash_info *hash_info, struct btree_iter *k_iter, struct bkey_s_c hash_k) { if (hash_k.k->type != desc->key_type) return 0; if (likely(desc->hash_bkey(hash_info, hash_k) == hash_k.k->p.offset)) return 0; return __bch2_str_hash_check_key(trans, s, desc, hash_info, k_iter, hash_k); } #endif /* _BCACHEFS_STR_HASH_H */
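To make the crc32c branch of the hash context above concrete: bch2_str_hash_init() seeds the CRC over the first siphash key word (k0), bch2_str_hash_update() folds in the name bytes, and bch2_str_hash_end() yields the value used as the key's offset. A self-contained userspace sketch of that flow, using a plain bitwise crc32c as a stand-in for the kernel's crc32c() and ignoring the crc64/siphash branches:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise crc32c (Castagnoli, reflected poly 0x82f63b78) -- a software
 * stand-in for the kernel's crc32c(); enough to show the seeding scheme. */
static uint32_t crc32c_update(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return crc;
}

/* Mirrors the init/update/end sequence for the crc32c case: seed with ~0
 * over the per-inode key word, then fold in the dirent/xattr name. */
static uint64_t str_hash_crc32c(uint64_t k0, const char *name)
{
	uint32_t crc = crc32c_update(~0u, &k0, sizeof(k0));
	crc = crc32c_update(crc, name, strlen(name));
	return crc;
}

int main(void)
{
	uint64_t k0 = 0x123456789abcdef0ull;	/* bi_hash_seed stand-in */
	printf("hash(\"hello\") = %#llx\n",
	       (unsigned long long)str_hash_crc32c(k0, "hello"));
	return 0;
}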
// SPDX-License-Identifier: GPL-2.0 /* * Functions related to generic helper functions */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include "blk.h" static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector) { unsigned int discard_granularity = bdev_discard_granularity(bdev); sector_t granularity_aligned_sector; if (bdev_is_partition(bdev)) sector += bdev->bd_start_sect; granularity_aligned_sector = round_up(sector, discard_granularity >> SECTOR_SHIFT); /* * Make sure subsequent bios start aligned to the discard granularity if * it needs to be split. */ if (granularity_aligned_sector != sector) return granularity_aligned_sector - sector; /* * Align the bio size to the discard granularity to make splitting the bio * at discard granularity boundaries easier in the driver if needed. */ return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT; } struct bio *blk_alloc_discard_bio(struct block_device *bdev, sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) { sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector)); struct bio *bio; if (!bio_sects) return NULL; bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask); if (!bio) return NULL; bio->bi_iter.bi_sector = *sector; bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT; *sector += bio_sects; *nr_sects -= bio_sects; /* * We can loop for a long time in here if someone does full device * discards (like mkfs). Be nice and allow us to schedule out to avoid * softlocking if preempt is disabled.
*/ cond_resched(); return bio; } int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) { struct bio *bio; while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp_mask))) *biop = bio_chain_and_submit(*biop, bio); return 0; } EXPORT_SYMBOL(__blkdev_issue_discard); /** * blkdev_issue_discard - queue a discard * @bdev: blockdev to issue discard for * @sector: start sector * @nr_sects: number of sectors to discard * @gfp_mask: memory allocation flags (for bio_alloc) * * Description: * Issue a discard request for the sectors in question. */ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask) { struct bio *bio = NULL; struct blk_plug plug; int ret; blk_start_plug(&plug); ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); if (!ret && bio) { ret = submit_bio_wait(bio); if (ret == -EOPNOTSUPP) ret = 0; bio_put(bio); } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(blkdev_issue_discard); static sector_t bio_write_zeroes_limit(struct block_device *bdev) { sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; return min(bdev_write_zeroes_sectors(bdev), (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask); } /* * There is no reliable way for the SCSI subsystem to determine whether a * device supports a WRITE SAME operation without actually performing a write * to media. As a result, write_zeroes is enabled by default and will be * disabled if a zeroing operation subsequently fails. This means that this * queue limit is likely to change at runtime. */ static void __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags, sector_t limit) { while (nr_sects) { unsigned int len = min(nr_sects, limit); struct bio *bio; if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) break; bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); bio->bi_iter.bi_sector = sector; if (flags & BLKDEV_ZERO_NOUNMAP) bio->bi_opf |= REQ_NOUNMAP; bio->bi_iter.bi_size = len << SECTOR_SHIFT; *biop = bio_chain_and_submit(*biop, bio); nr_sects -= len; sector += len; cond_resched(); } } static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp, unsigned flags) { sector_t limit = bio_write_zeroes_limit(bdev); struct bio *bio = NULL; struct blk_plug plug; int ret = 0; blk_start_plug(&plug); __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags, limit); if (bio) { if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) { bio_await_chain(bio); blk_finish_plug(&plug); return -EINTR; } ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); /* * For some devices there is no non-destructive way to verify whether * WRITE ZEROES is actually supported. These will clear the capability * on an I/O error, in which case we'll turn any error into * "not supported" here. */ if (ret && !bdev_write_zeroes_sectors(bdev)) return -EOPNOTSUPP; return ret; } /* * Convert a number of 512B sectors to a number of pages. * The result is limited to a number of pages that can fit into a BIO. * Also make sure that the result is always at least 1 (page) for the cases * where nr_sects is lower than the number of sectors in a page. 
*/ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) { sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512); return min(pages, (sector_t)BIO_MAX_VECS); } static void __blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned int flags) { while (nr_sects) { unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects); struct bio *bio; bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); bio->bi_iter.bi_sector = sector; if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) break; do { unsigned int len, added; len = min_t(sector_t, PAGE_SIZE, nr_sects << SECTOR_SHIFT); added = bio_add_page(bio, ZERO_PAGE(0), len, 0); if (added < len) break; nr_sects -= added >> SECTOR_SHIFT; sector += added >> SECTOR_SHIFT; } while (nr_sects); *biop = bio_chain_and_submit(*biop, bio); cond_resched(); } } static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp, unsigned flags) { struct bio *bio = NULL; struct blk_plug plug; int ret = 0; if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; blk_start_plug(&plug); __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags); if (bio) { if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) { bio_await_chain(bio); blk_finish_plug(&plug); return -EINTR; } ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); return ret; } /** * __blkdev_issue_zeroout - generate a number of zero-filled write bios * @bdev: blockdev to issue * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @biop: pointer to anchor bio * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. * * If a device is using logical block provisioning, the underlying space will * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. * * If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return * -EOPNOTSUPP if no explicit hardware offload for zeroing is provided. */ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) { sector_t limit = bio_write_zeroes_limit(bdev); if (bdev_read_only(bdev)) return -EPERM; if (limit) { __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, biop, flags, limit); } else { if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, biop, flags); } return 0; } EXPORT_SYMBOL(__blkdev_issue_zeroout); /** * blkdev_issue_zeroout - zero-fill a block range * @bdev: blockdev to write * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. See __blkdev_issue_zeroout() for the * valid values for %flags.
*/ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags) { int ret; if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1)) return -EINVAL; if (bdev_read_only(bdev)) return -EPERM; if (bdev_write_zeroes_sectors(bdev)) { ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, flags); if (ret != -EOPNOTSUPP) return ret; } return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags); } EXPORT_SYMBOL(blkdev_issue_zeroout); int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp) { sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev); struct bio *bio = NULL; struct blk_plug plug; int ret = 0; /* make sure that "len << SECTOR_SHIFT" doesn't overflow */ if (max_sectors > UINT_MAX >> SECTOR_SHIFT) max_sectors = UINT_MAX >> SECTOR_SHIFT; max_sectors &= ~bs_mask; if (max_sectors == 0) return -EOPNOTSUPP; if ((sector | nr_sects) & bs_mask) return -EINVAL; if (bdev_read_only(bdev)) return -EPERM; blk_start_plug(&plug); while (nr_sects) { unsigned int len = min_t(sector_t, nr_sects, max_sectors); bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp); bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_size = len << SECTOR_SHIFT; sector += len; nr_sects -= len; cond_resched(); } if (bio) { ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(blkdev_issue_secure_erase);
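bio_discard_limit()'s alignment rule above is easy to check by hand: the first discard bio is shortened so the next one starts on a granularity boundary, and aligned bios are capped at UINT_MAX rounded down to the granularity. A small userspace sketch of the same arithmetic, with a hypothetical 1 MiB discard granularity:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9

static uint64_t round_up_u64(uint64_t x, uint64_t a)   { return (x + a - 1) / a * a; }
static uint64_t round_down_u64(uint64_t x, uint64_t a) { return x / a * a; }

/* Same rule as bio_discard_limit(): shorten the first bio so the next one
 * starts granularity-aligned, otherwise allow the largest aligned size. */
static uint64_t discard_limit(uint64_t sector, unsigned int granularity_bytes)
{
	uint64_t gran_sectors = granularity_bytes >> SECTOR_SHIFT;
	uint64_t aligned = round_up_u64(sector, gran_sectors);

	if (aligned != sector)
		return aligned - sector;
	return round_down_u64(UINT32_MAX, granularity_bytes) >> SECTOR_SHIFT;
}

int main(void)
{
	/* 1 MiB granularity = 2048 sectors; sector 3000 is unaligned,
	 * so the first bio carries 4096 - 3000 = 1096 sectors. */
	printf("first bio: %llu sectors\n",
	       (unsigned long long)discard_limit(3000, 1 << 20));
	/* An aligned start gets the full capped size. */
	printf("aligned bio: %llu sectors\n",
	       (unsigned long long)discard_limit(4096, 1 << 20));
	return 0;
}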
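Similarly, the sectors-to-pages helper above rounds nr_sects up to whole pages and clamps the result to what one bio can carry, never returning 0 for a short tail. A quick check of the edge cases, assuming 4 KiB pages and a BIO_MAX_VECS of 256 (both assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u	/* assumed */
#define BIO_MAX_VECS	256u	/* assumed */

/* Mirrors __blkdev_sectors_to_bio_pages(): round up to whole pages, clamp
 * to one bio's capacity. The round-up keeps the result >= 1 for nr_sects > 0. */
static uint64_t sectors_to_bio_pages(uint64_t nr_sects)
{
	uint64_t per_page = PAGE_SIZE / 512;
	uint64_t pages = (nr_sects + per_page - 1) / per_page;
	return pages < BIO_MAX_VECS ? pages : BIO_MAX_VECS;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)sectors_to_bio_pages(1));       /* 1 */
	printf("%llu\n", (unsigned long long)sectors_to_bio_pages(9));       /* 2 */
	printf("%llu\n", (unsigned long long)sectors_to_bio_pages(1 << 20)); /* 256, clamped */
	return 0;
}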
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module for IP set management */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/rculist.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/ipset/ip_set.h> static LIST_HEAD(ip_set_type_list); /* all registered set types */ static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */ struct ip_set_net { struct ip_set * __rcu *ip_set_list; /* all individual sets */ ip_set_id_t ip_set_max; /* max number of sets */ bool is_deleted; /* deleted by ip_set_net_exit */ bool is_destroyed; /* all sets are destroyed */ }; static unsigned int ip_set_net_id __read_mostly; static struct ip_set_net *ip_set_pernet(struct net *net) { return net_generic(net, ip_set_net_id); } #define IP_SET_INC 64 #define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0) static unsigned int max_sets; module_param(max_sets, int, 0600); MODULE_PARM_DESC(max_sets, "maximal number of sets"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); MODULE_DESCRIPTION("core IP set support"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET); /* When the nfnl mutex or ip_set_ref_lock is held: */ #define ip_set_dereference(inst) \ rcu_dereference_protected((inst)->ip_set_list, \ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \ lockdep_is_held(&ip_set_ref_lock) || \ (inst)->is_deleted) #define ip_set(inst, id) \ ip_set_dereference(inst)[id] #define ip_set_ref_netlink(inst,id) \ rcu_dereference_raw((inst)->ip_set_list)[id] #define ip_set_dereference_nfnl(p) \ rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) /* The set types are implemented in modules and registered set types * can be found in ip_set_type_list. Adding/deleting types is * serialized by ip_set_type_mutex.
*/ static void ip_set_type_lock(void) { mutex_lock(&ip_set_type_mutex); } static void ip_set_type_unlock(void) { mutex_unlock(&ip_set_type_mutex); } /* Register and deregister settype */ static struct ip_set_type * find_set_type(const char *name, u8 family, u8 revision) { struct ip_set_type *type; list_for_each_entry_rcu(type, &ip_set_type_list, list, lockdep_is_held(&ip_set_type_mutex)) if (STRNCMP(type->name, name) && (type->family == family || type->family == NFPROTO_UNSPEC) && revision >= type->revision_min && revision <= type->revision_max) return type; return NULL; } /* Unlock, try to load a set type module and lock again */ static bool load_settype(const char *name) { if (!try_module_get(THIS_MODULE)) return false; nfnl_unlock(NFNL_SUBSYS_IPSET); pr_debug("try to load ip_set_%s\n", name); if (request_module("ip_set_%s", name) < 0) { pr_warn("Can't find ip_set type %s\n", name); nfnl_lock(NFNL_SUBSYS_IPSET); module_put(THIS_MODULE); return false; } nfnl_lock(NFNL_SUBSYS_IPSET); module_put(THIS_MODULE); return true; } /* Find a set type and reference it */ #define find_set_type_get(name, family, revision, found) \ __find_set_type_get(name, family, revision, found, false) static int __find_set_type_get(const char *name, u8 family, u8 revision, struct ip_set_type **found, bool retry) { struct ip_set_type *type; int err; if (retry && !load_settype(name)) return -IPSET_ERR_FIND_TYPE; rcu_read_lock(); *found = find_set_type(name, family, revision); if (*found) { err = !try_module_get((*found)->me) ? -EFAULT : 0; goto unlock; } /* Make sure the type is already loaded * but we don't support the revision */ list_for_each_entry_rcu(type, &ip_set_type_list, list) if (STRNCMP(type->name, name)) { err = -IPSET_ERR_FIND_TYPE; goto unlock; } rcu_read_unlock(); return retry ? -IPSET_ERR_FIND_TYPE : __find_set_type_get(name, family, revision, found, true); unlock: rcu_read_unlock(); return err; } /* Find a given set type by name and family. * If we succeeded, the supported minimal and maximum revisions are * filled out. */ #define find_set_type_minmax(name, family, min, max) \ __find_set_type_minmax(name, family, min, max, false) static int __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max, bool retry) { struct ip_set_type *type; bool found = false; if (retry && !load_settype(name)) return -IPSET_ERR_FIND_TYPE; *min = 255; *max = 0; rcu_read_lock(); list_for_each_entry_rcu(type, &ip_set_type_list, list) if (STRNCMP(type->name, name) && (type->family == family || type->family == NFPROTO_UNSPEC)) { found = true; if (type->revision_min < *min) *min = type->revision_min; if (type->revision_max > *max) *max = type->revision_max; } rcu_read_unlock(); if (found) return 0; return retry ? -IPSET_ERR_FIND_TYPE : __find_set_type_minmax(name, family, min, max, true); } #define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \ (f) == NFPROTO_IPV6 ? "inet6" : "any") /* Register a set type structure. The type is identified by * the unique triple of name, family and revision. */ int ip_set_type_register(struct ip_set_type *type) { int ret = 0; if (type->protocol != IPSET_PROTOCOL) { pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n", type->name, family_name(type->family), type->revision_min, type->revision_max, type->protocol, IPSET_PROTOCOL); return -EINVAL; } ip_set_type_lock(); if (find_set_type(type->name, type->family, type->revision_min)) { /* Duplicate! 
*/ pr_warn("ip_set type %s, family %s with revision min %u already registered!\n", type->name, family_name(type->family), type->revision_min); ip_set_type_unlock(); return -EINVAL; } list_add_rcu(&type->list, &ip_set_type_list); pr_debug("type %s, family %s, revision %u:%u registered.\n", type->name, family_name(type->family), type->revision_min, type->revision_max); ip_set_type_unlock(); return ret; } EXPORT_SYMBOL_GPL(ip_set_type_register); /* Unregister a set type. There's a small race with ip_set_create */ void ip_set_type_unregister(struct ip_set_type *type) { ip_set_type_lock(); if (!find_set_type(type->name, type->family, type->revision_min)) { pr_warn("ip_set type %s, family %s with revision min %u not registered\n", type->name, family_name(type->family), type->revision_min); ip_set_type_unlock(); return; } list_del_rcu(&type->list); pr_debug("type %s, family %s with revision min %u unregistered.\n", type->name, family_name(type->family), type->revision_min); ip_set_type_unlock(); synchronize_rcu(); } EXPORT_SYMBOL_GPL(ip_set_type_unregister); /* Utility functions */ void * ip_set_alloc(size_t size) { return kvzalloc(size, GFP_KERNEL_ACCOUNT); } EXPORT_SYMBOL_GPL(ip_set_alloc); void ip_set_free(void *members) { pr_debug("%p: free with %s\n", members, is_vmalloc_addr(members) ? "vfree" : "kfree"); kvfree(members); } EXPORT_SYMBOL_GPL(ip_set_free); static bool flag_nested(const struct nlattr *nla) { return nla->nla_type & NLA_F_NESTED; } static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = { [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 }, [IPSET_ATTR_IPADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), }; int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr) { struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1]; if (unlikely(!flag_nested(nla))) return -IPSET_ERR_PROTOCOL; if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) return -IPSET_ERR_PROTOCOL; if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4))) return -IPSET_ERR_PROTOCOL; *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]); return 0; } EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4); int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr) { struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1]; if (unlikely(!flag_nested(nla))) return -IPSET_ERR_PROTOCOL; if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) return -IPSET_ERR_PROTOCOL; if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6))) return -IPSET_ERR_PROTOCOL; memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]), sizeof(struct in6_addr)); return 0; } EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); static u32 ip_set_timeout_get(const unsigned long *timeout) { u32 t; if (*timeout == IPSET_ELEM_PERMANENT) return 0; t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC; /* Zero value in userspace means no timeout */ return t == 0 ? 1 : t; } static char * ip_set_comment_uget(struct nlattr *tb) { return nla_data(tb); } /* Called from uadd only, protected by the set spinlock. * The kadt functions don't use the comment extensions in any way. */ void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, const struct ip_set_ext *ext) { struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); size_t len = ext->comment ? 
strlen(ext->comment) : 0; if (unlikely(c)) { set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } if (!len) return; if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) len = IPSET_MAX_COMMENT_SIZE; c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); if (unlikely(!c)) return; strscpy(c->str, ext->comment, len + 1); set->ext_size += sizeof(*c) + strlen(c->str) + 1; rcu_assign_pointer(comment->c, c); } EXPORT_SYMBOL_GPL(ip_set_init_comment); /* Used only when dumping a set, protected by rcu_read_lock() */ static int ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) { struct ip_set_comment_rcu *c = rcu_dereference(comment->c); if (!c) return 0; return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); } /* Called from uadd/udel, flush or the garbage collectors protected * by the set spinlock. * Called when the set is destroyed and when there can't be any user * of the set data anymore. */ static void ip_set_comment_free(struct ip_set *set, void *ptr) { struct ip_set_comment *comment = ptr; struct ip_set_comment_rcu *c; c = rcu_dereference_protected(comment->c, 1); if (unlikely(!c)) return; set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } typedef void (*destroyer)(struct ip_set *, void *); /* ipset data extension types, in size order */ const struct ip_set_ext_type ip_set_extensions[] = { [IPSET_EXT_ID_COUNTER] = { .type = IPSET_EXT_COUNTER, .flag = IPSET_FLAG_WITH_COUNTERS, .len = sizeof(struct ip_set_counter), .align = __alignof__(struct ip_set_counter), }, [IPSET_EXT_ID_TIMEOUT] = { .type = IPSET_EXT_TIMEOUT, .len = sizeof(unsigned long), .align = __alignof__(unsigned long), }, [IPSET_EXT_ID_SKBINFO] = { .type = IPSET_EXT_SKBINFO, .flag = IPSET_FLAG_WITH_SKBINFO, .len = sizeof(struct ip_set_skbinfo), .align = __alignof__(struct ip_set_skbinfo), }, [IPSET_EXT_ID_COMMENT] = { .type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY, .flag = IPSET_FLAG_WITH_COMMENT, .len = sizeof(struct ip_set_comment), .align = __alignof__(struct ip_set_comment), .destroy = ip_set_comment_free, }, }; EXPORT_SYMBOL_GPL(ip_set_extensions); static bool add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[]) { return ip_set_extensions[id].flag ? 
(flags & ip_set_extensions[id].flag) : !!tb[IPSET_ATTR_TIMEOUT]; } size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, size_t align) { enum ip_set_ext_id id; u32 cadt_flags = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_WITH_FORCEADD) set->flags |= IPSET_CREATE_FLAG_FORCEADD; if (!align) align = 1; for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; if (align < ip_set_extensions[id].align) align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; len += ip_set_extensions[id].len; } return ALIGN(len, align); } EXPORT_SYMBOL_GPL(ip_set_elem_len); int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext) { u64 fullmark; if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_TIMEOUT]) { if (!SET_WITH_TIMEOUT(set)) return -IPSET_ERR_TIMEOUT; ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) { if (!SET_WITH_COUNTER(set)) return -IPSET_ERR_COUNTER; if (tb[IPSET_ATTR_BYTES]) ext->bytes = be64_to_cpu(nla_get_be64( tb[IPSET_ATTR_BYTES])); if (tb[IPSET_ATTR_PACKETS]) ext->packets = be64_to_cpu(nla_get_be64( tb[IPSET_ATTR_PACKETS])); } if (tb[IPSET_ATTR_COMMENT]) { if (!SET_WITH_COMMENT(set)) return -IPSET_ERR_COMMENT; ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]); } if (tb[IPSET_ATTR_SKBMARK]) { if (!SET_WITH_SKBINFO(set)) return -IPSET_ERR_SKBINFO; fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK])); ext->skbinfo.skbmark = fullmark >> 32; ext->skbinfo.skbmarkmask = fullmark & 0xffffffff; } if (tb[IPSET_ATTR_SKBPRIO]) { if (!SET_WITH_SKBINFO(set)) return -IPSET_ERR_SKBINFO; ext->skbinfo.skbprio = be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO])); } if (tb[IPSET_ATTR_SKBQUEUE]) { if (!SET_WITH_SKBINFO(set)) return -IPSET_ERR_SKBINFO; ext->skbinfo.skbqueue = be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE])); } return 0; } EXPORT_SYMBOL_GPL(ip_set_get_extensions); static u64 ip_set_get_bytes(const struct ip_set_counter *counter) { return (u64)atomic64_read(&(counter)->bytes); } static u64 ip_set_get_packets(const struct ip_set_counter *counter) { return (u64)atomic64_read(&(counter)->packets); } static bool ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) { return nla_put_net64(skb, IPSET_ATTR_BYTES, cpu_to_be64(ip_set_get_bytes(counter)), IPSET_ATTR_PAD) || nla_put_net64(skb, IPSET_ATTR_PACKETS, cpu_to_be64(ip_set_get_packets(counter)), IPSET_ATTR_PAD); } static bool ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) { /* Send nonzero parameters only */ return ((skbinfo->skbmark || skbinfo->skbmarkmask) && nla_put_net64(skb, IPSET_ATTR_SKBMARK, cpu_to_be64((u64)skbinfo->skbmark << 32 | skbinfo->skbmarkmask), IPSET_ATTR_PAD)) || (skbinfo->skbprio && nla_put_net32(skb, IPSET_ATTR_SKBPRIO, cpu_to_be32(skbinfo->skbprio))) || (skbinfo->skbqueue && nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, cpu_to_be16(skbinfo->skbqueue))); } int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, 
const void *e, bool active) { if (SET_WITH_TIMEOUT(set)) { unsigned long *timeout = ext_timeout(e, set); if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(active ? ip_set_timeout_get(timeout) : *timeout))) return -EMSGSIZE; } if (SET_WITH_COUNTER(set) && ip_set_put_counter(skb, ext_counter(e, set))) return -EMSGSIZE; if (SET_WITH_COMMENT(set) && ip_set_put_comment(skb, ext_comment(e, set))) return -EMSGSIZE; if (SET_WITH_SKBINFO(set) && ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) return -EMSGSIZE; return 0; } EXPORT_SYMBOL_GPL(ip_set_put_extensions); static bool ip_set_match_counter(u64 counter, u64 match, u8 op) { switch (op) { case IPSET_COUNTER_NONE: return true; case IPSET_COUNTER_EQ: return counter == match; case IPSET_COUNTER_NE: return counter != match; case IPSET_COUNTER_LT: return counter < match; case IPSET_COUNTER_GT: return counter > match; } return false; } static void ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) { atomic64_add((long long)bytes, &(counter)->bytes); } static void ip_set_add_packets(u64 packets, struct ip_set_counter *counter) { atomic64_add((long long)packets, &(counter)->packets); } static void ip_set_update_counter(struct ip_set_counter *counter, const struct ip_set_ext *ext, u32 flags) { if (ext->packets != ULLONG_MAX && !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, counter); ip_set_add_packets(ext->packets, counter); } } static void ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags) { mext->skbinfo = *skbinfo; } bool ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags, void *data) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(data, set))) return false; if (SET_WITH_COUNTER(set)) { struct ip_set_counter *counter = ext_counter(data, set); ip_set_update_counter(counter, ext, flags); if (flags & IPSET_FLAG_MATCH_COUNTERS && !(ip_set_match_counter(ip_set_get_packets(counter), mext->packets, mext->packets_op) && ip_set_match_counter(ip_set_get_bytes(counter), mext->bytes, mext->bytes_op))) return false; } if (SET_WITH_SKBINFO(set)) ip_set_get_skbinfo(ext_skbinfo(data, set), ext, mext, flags); return true; } EXPORT_SYMBOL_GPL(ip_set_match_extensions); /* Creating/destroying/renaming/swapping affect the existence and * the properties of a set. All of these can be executed from userspace * only and serialized by the nfnl mutex indirectly from nfnetlink. * * Sets are identified by their index in ip_set_list and the index * is used by the external references (set/SET netfilter modules). * * The set behind an index may change by swapping only, from userspace. */ static void __ip_set_get(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); set->ref++; write_unlock_bh(&ip_set_ref_lock); } static void __ip_set_put(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); BUG_ON(set->ref == 0); set->ref--; write_unlock_bh(&ip_set_ref_lock); } /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need * a separate reference counter */ static void __ip_set_get_netlink(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); set->ref_netlink++; write_unlock_bh(&ip_set_ref_lock); } static void __ip_set_put_netlink(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); BUG_ON(set->ref_netlink == 0); set->ref_netlink--; write_unlock_bh(&ip_set_ref_lock); } /* Add, del and test set entries from kernel. 
* * The set behind the index must exist and must be referenced * so it can't be destroyed (or changed) under our feet. */
static struct ip_set * ip_set_rcu_get(struct net *net, ip_set_id_t index) { struct ip_set_net *inst = ip_set_pernet(net); /* ip_set_list and the set pointer need to be protected */ return ip_set_dereference_nfnl(inst->ip_set_list)[index]; }
static inline void ip_set_lock(struct ip_set *set) { if (!set->variant->region_lock) spin_lock_bh(&set->lock); }
static inline void ip_set_unlock(struct ip_set *set) { if (!set->variant->region_lock) spin_unlock_bh(&set->lock); }
int ip_set_test(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(xt_net(par), index); int ret = 0; BUG_ON(!set); pr_debug("set %s, index %u\n", set->name, index); if (opt->dim < set->type->dimension || !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return 0; ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt); if (ret == -EAGAIN) { /* Type requests element to be completed */ pr_debug("element must be completed, ADD is triggered\n"); ip_set_lock(set); set->variant->kadt(set, skb, par, IPSET_ADD, opt); ip_set_unlock(set); ret = 1; } else { /* --return-nomatch: invert matched element */ if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) && (set->type->features & IPSET_TYPE_NOMATCH) && (ret > 0 || ret == -ENOTEMPTY)) ret = -ret; } /* Convert error codes to nomatch */ return (ret < 0 ? 0 : ret); } EXPORT_SYMBOL_GPL(ip_set_test);
int ip_set_add(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(xt_net(par), index); int ret; BUG_ON(!set); pr_debug("set %s, index %u\n", set->name, index); if (opt->dim < set->type->dimension || !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return -IPSET_ERR_TYPE_MISMATCH; ip_set_lock(set); ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt); ip_set_unlock(set); return ret; } EXPORT_SYMBOL_GPL(ip_set_add);
int ip_set_del(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(xt_net(par), index); int ret = 0; BUG_ON(!set); pr_debug("set %s, index %u\n", set->name, index); if (opt->dim < set->type->dimension || !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return -IPSET_ERR_TYPE_MISMATCH; ip_set_lock(set); ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt); ip_set_unlock(set); return ret; } EXPORT_SYMBOL_GPL(ip_set_del);
/* Find set by name, reference it once. The reference makes sure the * thing pointed to does not go away under our feet. * */
ip_set_id_t ip_set_get_byname(struct net *net, const char *name, struct ip_set **set) { ip_set_id_t i, index = IPSET_INVALID_ID; struct ip_set *s; struct ip_set_net *inst = ip_set_pernet(net); rcu_read_lock(); for (i = 0; i < inst->ip_set_max; i++) { s = rcu_dereference(inst->ip_set_list)[i]; if (s && STRNCMP(s->name, name)) { __ip_set_get(s); index = i; *set = s; break; } } rcu_read_unlock(); return index; } EXPORT_SYMBOL_GPL(ip_set_get_byname);
/* If the given set pointer points to a valid set, decrement * reference count by 1. The caller shall not assume the index * to be valid after calling this function. * */
static void __ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index) { struct ip_set *set; rcu_read_lock(); set = rcu_dereference(inst->ip_set_list)[index]; if (set) __ip_set_put(set); rcu_read_unlock(); }
void ip_set_put_byindex(struct net *net, ip_set_id_t index) { struct ip_set_net *inst = ip_set_pernet(net); __ip_set_put_byindex(inst, index); } EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/* Get the name of a set behind a set index. * Set itself is protected by RCU, but its name isn't: to protect against * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the * name. */
void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name) { struct ip_set *set = ip_set_rcu_get(net, index); BUG_ON(!set); read_lock_bh(&ip_set_ref_lock); strscpy_pad(name, set->name, IPSET_MAXNAMELEN); read_unlock_bh(&ip_set_ref_lock); } EXPORT_SYMBOL_GPL(ip_set_name_byindex);
/* Routines to be called by external subsystems, which do not * call nfnl_lock for us. */
/* Find set by index, reference it once. The reference makes sure the * thing pointed to does not go away under our feet. * * The nfnl mutex is used in the function. */
ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) { struct ip_set *set; struct ip_set_net *inst = ip_set_pernet(net); if (index >= inst->ip_set_max) return IPSET_INVALID_ID; nfnl_lock(NFNL_SUBSYS_IPSET); set = ip_set(inst, index); if (set) __ip_set_get(set); else index = IPSET_INVALID_ID; nfnl_unlock(NFNL_SUBSYS_IPSET); return index; } EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
/* If the given set pointer points to a valid set, decrement * reference count by 1. The caller shall not assume the index * to be valid after calling this function. * * The nfnl mutex is used in the function. */
void ip_set_nfnl_put(struct net *net, ip_set_id_t index) { struct ip_set *set; struct ip_set_net *inst = ip_set_pernet(net); nfnl_lock(NFNL_SUBSYS_IPSET); if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */ set = ip_set(inst, index); if (set) __ip_set_put(set); } nfnl_unlock(NFNL_SUBSYS_IPSET); } EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
/* Communication protocol with userspace over netlink. * * The commands are serialized by the nfnl mutex. */
static inline u8 protocol(const struct nlattr * const tb[]) { return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]); }
static inline bool protocol_failed(const struct nlattr * const tb[]) { return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL; }
static inline bool protocol_min_failed(const struct nlattr * const tb[]) { return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN; }
static inline u32 flag_exist(const struct nlmsghdr *nlh) { return nlh->nlmsg_flags & NLM_F_EXCL ?
0 : IPSET_FLAG_EXIST; } static struct nlmsghdr * start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags, enum ipset_cmd cmd) { return nfnl_msg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), flags, NFPROTO_IPV4, NFNETLINK_V0, 0); } /* Create a set */ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1}, [IPSET_ATTR_REVISION] = { .type = NLA_U8 }, [IPSET_ATTR_FAMILY] = { .type = NLA_U8 }, [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, }; static struct ip_set * find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id) { struct ip_set *set = NULL; ip_set_id_t i; *id = IPSET_INVALID_ID; for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set && STRNCMP(set->name, name)) { *id = i; break; } } return (*id == IPSET_INVALID_ID ? NULL : set); } static inline struct ip_set * find_set(struct ip_set_net *inst, const char *name) { ip_set_id_t id; return find_set_and_id(inst, name, &id); } static int find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index, struct ip_set **set) { struct ip_set *s; ip_set_id_t i; *index = IPSET_INVALID_ID; for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (!s) { if (*index == IPSET_INVALID_ID) *index = i; } else if (STRNCMP(name, s->name)) { /* Name clash */ *set = s; return -EEXIST; } } if (*index == IPSET_INVALID_ID) /* No free slot remained */ return -IPSET_ERR_MAX_SETS; return 0; } static int ip_set_none(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return -EOPNOTSUPP; } static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set, *clash = NULL; ip_set_id_t index = IPSET_INVALID_ID; struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {}; const char *name, *typename; u8 family, revision; u32 flags = flag_exist(info->nlh); int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_TYPENAME] || !attr[IPSET_ATTR_REVISION] || !attr[IPSET_ATTR_FAMILY] || (attr[IPSET_ATTR_DATA] && !flag_nested(attr[IPSET_ATTR_DATA])))) return -IPSET_ERR_PROTOCOL; name = nla_data(attr[IPSET_ATTR_SETNAME]); typename = nla_data(attr[IPSET_ATTR_TYPENAME]); family = nla_get_u8(attr[IPSET_ATTR_FAMILY]); revision = nla_get_u8(attr[IPSET_ATTR_REVISION]); pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n", name, typename, family_name(family), revision); /* First, and without any locks, allocate and initialize * a normal base set structure. */ set = kzalloc(sizeof(*set), GFP_KERNEL); if (!set) return -ENOMEM; spin_lock_init(&set->lock); strscpy(set->name, name, IPSET_MAXNAMELEN); set->family = family; set->revision = revision; /* Next, check that we know the type, and take * a reference on the type, to make sure it stays available * while constructing our new set. * * After referencing the type, we try to create the type * specific part of the set without holding any locks. */ ret = find_set_type_get(typename, family, revision, &set->type); if (ret) goto out; /* Without holding any locks, create private part. 
*/ if (attr[IPSET_ATTR_DATA] && nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) { ret = -IPSET_ERR_PROTOCOL; goto put_out; } /* Set create flags depending on the type revision */ set->flags |= set->type->create_flags[revision]; ret = set->type->create(info->net, set, tb, flags); if (ret != 0) goto put_out; /* BTW, ret==0 here. */ /* Here, we have a valid, constructed set and we are protected * by the nfnl mutex. Find the first free index in ip_set_list * and check clashing. */ ret = find_free_id(inst, set->name, &index, &clash); if (ret == -EEXIST) { /* If this is the same set and requested, ignore error */ if ((flags & IPSET_FLAG_EXIST) && STRNCMP(set->type->name, clash->type->name) && set->type->family == clash->type->family && set->type->revision_min == clash->type->revision_min && set->type->revision_max == clash->type->revision_max && set->variant->same_set(set, clash)) ret = 0; goto cleanup; } else if (ret == -IPSET_ERR_MAX_SETS) { struct ip_set **list, **tmp; ip_set_id_t i = inst->ip_set_max + IP_SET_INC; if (i < inst->ip_set_max || i == IPSET_INVALID_ID) /* Wraparound */ goto cleanup; list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL); if (!list) goto cleanup; /* nfnl mutex is held, both lists are valid */ tmp = ip_set_dereference(inst); memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max); rcu_assign_pointer(inst->ip_set_list, list); /* Make sure all current packets have passed through */ synchronize_net(); /* Use new list */ index = inst->ip_set_max; inst->ip_set_max = i; kvfree(tmp); ret = 0; } else if (ret) { goto cleanup; } /* Finally! Add our shiny new set to the list, and be done. */ pr_debug("create: '%s' created with index %u!\n", set->name, index); ip_set(inst, index) = set; return ret; cleanup: set->variant->cancel_gc(set); set->variant->destroy(set); put_out: module_put(set->type->me); out: kfree(set); return ret; } /* Destroy sets */ static const struct nla_policy ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, }; /* In order to return quickly when destroying a single set, it is split * into two stages: * - Cancel garbage collector * - Destroy the set itself via call_rcu() */ static void ip_set_destroy_set_rcu(struct rcu_head *head) { struct ip_set *set = container_of(head, struct ip_set, rcu); set->variant->destroy(set); module_put(set->type->me); kfree(set); } static void _destroy_all_sets(struct ip_set_net *inst) { struct ip_set *set; ip_set_id_t i; bool need_wait = false; /* First cancel gc's: set:list sets are flushed as well */ for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { set->variant->cancel_gc(set); if (set->type->features & IPSET_TYPE_NAME) need_wait = true; } } /* Must wait for flush to be really finished */ if (need_wait) rcu_barrier(); for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { ip_set(inst, i) = NULL; set->variant->destroy(set); module_put(set->type->me); kfree(set); } } } static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *s; ip_set_id_t i; int ret = 0; if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; /* Commands are serialized and references are * protected by the ip_set_ref_lock. * External systems (i.e. 
xt_set) must call * ip_set_nfnl_get_* functions; that way we * can safely check references here. * * list:set timer can only decrement the reference * counter, so if it's already zero, we can proceed * without holding the lock. */
if (!attr[IPSET_ATTR_SETNAME]) { read_lock_bh(&ip_set_ref_lock); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s && (s->ref || s->ref_netlink)) { ret = -IPSET_ERR_BUSY; goto out; } } inst->is_destroyed = true; read_unlock_bh(&ip_set_ref_lock); _destroy_all_sets(inst); /* Modified by ip_set_destroy() only, which is serialized */ inst->is_destroyed = false; } else { u32 flags = flag_exist(info->nlh); u16 features = 0; read_lock_bh(&ip_set_ref_lock); s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &i); if (!s) { if (!(flags & IPSET_FLAG_EXIST)) ret = -ENOENT; goto out; } else if (s->ref || s->ref_netlink) { ret = -IPSET_ERR_BUSY; goto out; } features = s->type->features; ip_set(inst, i) = NULL; read_unlock_bh(&ip_set_ref_lock); /* Must cancel garbage collectors */ s->variant->cancel_gc(s); if (features & IPSET_TYPE_NAME) { /* Must wait for flush to be really finished */ rcu_barrier(); } call_rcu(&s->rcu, ip_set_destroy_set_rcu); } return 0; out: read_unlock_bh(&ip_set_ref_lock); return ret; }
/* Flush sets */
static void ip_set_flush_set(struct ip_set *set) { pr_debug("set: %s\n", set->name); ip_set_lock(set); set->variant->flush(set); ip_set_unlock(set); }
static int ip_set_flush(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *s; ip_set_id_t i; if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; if (!attr[IPSET_ATTR_SETNAME]) { for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s) ip_set_flush_set(s); } } else { s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!s) return -ENOENT; ip_set_flush_set(s); } return 0; }
/* Rename a set */
static const struct nla_policy ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, };
static int ip_set_rename(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set, *s; const char *name2; ip_set_id_t i; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_SETNAME2])) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; write_lock_bh(&ip_set_ref_lock); if (set->ref != 0 || set->ref_netlink != 0) { ret = -IPSET_ERR_REFERENCED; goto out; } name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s && STRNCMP(s->name, name2)) { ret = -IPSET_ERR_EXIST_SETNAME2; goto out; } } strscpy_pad(set->name, name2, IPSET_MAXNAMELEN); out: write_unlock_bh(&ip_set_ref_lock); return ret; }
/* Swap two sets so that name/index points to the other. * References and set names are also swapped. * * The commands are serialized by the nfnl mutex and references are * protected by the ip_set_ref_lock. The kernel interfaces * do not hold the mutex but the pointer settings are atomic * so the ip_set_list always contains valid pointers to the sets.
*/
static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *from, *to; ip_set_id_t from_id, to_id; char from_name[IPSET_MAXNAMELEN]; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_SETNAME2])) return -IPSET_ERR_PROTOCOL; from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &from_id); if (!from) return -ENOENT; to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id); if (!to) return -IPSET_ERR_EXIST_SETNAME2; /* Features must not change. * Not an artificial restriction anymore, as we must prevent * possible loops created by swapping in setlist type of sets. */ if (!(from->type->features == to->type->features && from->family == to->family)) return -IPSET_ERR_TYPE_MISMATCH; write_lock_bh(&ip_set_ref_lock); if (from->ref_netlink || to->ref_netlink) { write_unlock_bh(&ip_set_ref_lock); return -EBUSY; } strscpy_pad(from_name, from->name, IPSET_MAXNAMELEN); strscpy_pad(from->name, to->name, IPSET_MAXNAMELEN); strscpy_pad(to->name, from_name, IPSET_MAXNAMELEN); swap(from->ref, to->ref); ip_set(inst, from_id) = to; ip_set(inst, to_id) = from; write_unlock_bh(&ip_set_ref_lock); return 0; }
/* List/save set data */
#define DUMP_INIT 0
#define DUMP_ALL 1
#define DUMP_ONE 2
#define DUMP_LAST 3
#define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
#define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
int ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) { u32 cadt_flags = 0; if (SET_WITH_TIMEOUT(set)) if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(set->timeout)))) return -EMSGSIZE; if (SET_WITH_COUNTER(set)) cadt_flags |= IPSET_FLAG_WITH_COUNTERS; if (SET_WITH_COMMENT(set)) cadt_flags |= IPSET_FLAG_WITH_COMMENT; if (SET_WITH_SKBINFO(set)) cadt_flags |= IPSET_FLAG_WITH_SKBINFO; if (SET_WITH_FORCEADD(set)) cadt_flags |= IPSET_FLAG_WITH_FORCEADD; if (!cadt_flags) return 0; return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); } EXPORT_SYMBOL_GPL(ip_set_put_flags);
static int ip_set_dump_done(struct netlink_callback *cb) { if (cb->args[IPSET_CB_ARG0]) { struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET]; ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX]; struct ip_set *set = ip_set_ref_netlink(inst, index); if (set->variant->uref) set->variant->uref(set, cb, false); pr_debug("release set %s\n", set->name); __ip_set_put_netlink(set); } return 0; }
static inline void dump_attrs(struct nlmsghdr *nlh) { const struct nlattr *attr; int rem; pr_debug("dump nlmsg\n"); nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) { pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len); } }
static const struct nla_policy ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_FLAGS] = { .type = NLA_U32 }, };
static int ip_set_dump_start(struct netlink_callback *cb) { struct nlmsghdr *nlh = nlmsg_hdr(cb->skb); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1]; struct nlattr *attr = (void *)nlh + min_len; struct sk_buff *skb = cb->skb; struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk)); u32 dump_type; int ret; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr, nlh->nlmsg_len - min_len, ip_set_dump_policy, NULL); if (ret) goto error; cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]); if
(cda[IPSET_ATTR_SETNAME]) { ip_set_id_t index; struct ip_set *set; set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]), &index); if (!set) { ret = -ENOENT; goto error; } dump_type = DUMP_ONE; cb->args[IPSET_CB_INDEX] = index; } else { dump_type = DUMP_ALL; } if (cda[IPSET_ATTR_FLAGS]) { u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]); dump_type |= (f << 16); } cb->args[IPSET_CB_NET] = (unsigned long)inst; cb->args[IPSET_CB_DUMP] = dump_type; return 0; error: /* We have to create and send the error message manually :-( */ if (nlh->nlmsg_flags & NLM_F_ACK) { netlink_ack(cb->skb, nlh, ret, NULL); } return ret; } static int ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb) { ip_set_id_t index = IPSET_INVALID_ID, max; struct ip_set *set = NULL; struct nlmsghdr *nlh = NULL; unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0; struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk)); u32 dump_type, dump_flags; bool is_destroyed; int ret = 0; if (!cb->args[IPSET_CB_DUMP]) return -EINVAL; if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max) goto out; dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]); dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]); max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1 : inst->ip_set_max; dump_last: pr_debug("dump type, flag: %u %u index: %ld\n", dump_type, dump_flags, cb->args[IPSET_CB_INDEX]); for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) { index = (ip_set_id_t)cb->args[IPSET_CB_INDEX]; write_lock_bh(&ip_set_ref_lock); set = ip_set(inst, index); is_destroyed = inst->is_destroyed; if (!set || is_destroyed) { write_unlock_bh(&ip_set_ref_lock); if (dump_type == DUMP_ONE) { ret = -ENOENT; goto out; } if (is_destroyed) { /* All sets are just being destroyed */ ret = 0; goto out; } continue; } /* When dumping all sets, we must dump "sorted" * so that lists (unions of sets) are dumped last. 
*/ if (dump_type != DUMP_ONE && ((dump_type == DUMP_ALL) == !!(set->type->features & IPSET_DUMP_LAST))) { write_unlock_bh(&ip_set_ref_lock); continue; } pr_debug("List set: %s\n", set->name); if (!cb->args[IPSET_CB_ARG0]) { /* Start listing: make sure set won't be destroyed */ pr_debug("reference set\n"); set->ref_netlink++; } write_unlock_bh(&ip_set_ref_lock); nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, IPSET_CMD_LIST); if (!nlh) { ret = -EMSGSIZE; goto release_refcount; } if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, cb->args[IPSET_CB_PROTO]) || nla_put_string(skb, IPSET_ATTR_SETNAME, set->name)) goto nla_put_failure; if (dump_flags & IPSET_FLAG_LIST_SETNAME) goto next_set; switch (cb->args[IPSET_CB_ARG0]) { case 0: /* Core header data */ if (nla_put_string(skb, IPSET_ATTR_TYPENAME, set->type->name) || nla_put_u8(skb, IPSET_ATTR_FAMILY, set->family) || nla_put_u8(skb, IPSET_ATTR_REVISION, set->revision)) goto nla_put_failure; if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN && nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index))) goto nla_put_failure; ret = set->variant->head(set, skb); if (ret < 0) goto release_refcount; if (dump_flags & IPSET_FLAG_LIST_HEADER) goto next_set; if (set->variant->uref) set->variant->uref(set, cb, true); fallthrough; default: ret = set->variant->list(set, skb, cb); if (!cb->args[IPSET_CB_ARG0]) /* Set is done, proceed with next one */ goto next_set; goto release_refcount; } } /* If we dump all sets, continue with dumping last ones */ if (dump_type == DUMP_ALL) { dump_type = DUMP_LAST; cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16); cb->args[IPSET_CB_INDEX] = 0; if (set && set->variant->uref) set->variant->uref(set, cb, false); goto dump_last; } goto out; nla_put_failure: ret = -EFAULT; next_set: if (dump_type == DUMP_ONE) cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID; else cb->args[IPSET_CB_INDEX]++; release_refcount: /* If there was an error or set is done, release set */ if (ret || !cb->args[IPSET_CB_ARG0]) { set = ip_set_ref_netlink(inst, index); if (set->variant->uref) set->variant->uref(set, cb, false); pr_debug("release set %s\n", set->name); __ip_set_put_netlink(set); cb->args[IPSET_CB_ARG0] = 0; } out: if (nlh) { nlmsg_end(skb, nlh); pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len); dump_attrs(nlh); } return ret < 0 ? 
ret : skb->len; } static int ip_set_dump(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; { struct netlink_dump_control c = { .start = ip_set_dump_start, .dump = ip_set_dump_do, .done = ip_set_dump_done, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } } /* Add, del and test */ static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, [IPSET_ATTR_ADT] = { .type = NLA_NESTED }, }; static int call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 flags, bool use_lineno) { int ret; u32 lineno = 0; bool eexist = flags & IPSET_FLAG_EXIST, retried = false; do { if (retried) { __ip_set_get_netlink(set); nfnl_unlock(NFNL_SUBSYS_IPSET); cond_resched(); nfnl_lock(NFNL_SUBSYS_IPSET); __ip_set_put_netlink(set); } ip_set_lock(set); ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); ip_set_unlock(set); retried = true; } while (ret == -ERANGE || (ret == -EAGAIN && set->variant->resize && (ret = set->variant->resize(set, retried)) == 0)); if (!ret || (ret == -IPSET_ERR_EXIST && eexist)) return 0; if (lineno && use_lineno) { /* Error in restore/batch mode: send back lineno */ struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb); struct sk_buff *skb2; struct nlmsgerr *errmsg; size_t payload = min(SIZE_MAX, sizeof(*errmsg) + nlmsg_len(nlh)); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1]; struct nlattr *cmdattr; u32 *errline; skb2 = nlmsg_new(payload, GFP_KERNEL); if (!skb2) return -ENOMEM; rep = nlmsg_put(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); errmsg = nlmsg_data(rep); errmsg->error = ret; unsafe_memcpy(&errmsg->msg, nlh, nlh->nlmsg_len, /* Bounds checked by the skb layer. */); cmdattr = (void *)&errmsg->msg + min_len; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr, nlh->nlmsg_len - min_len, ip_set_adt_policy, NULL); if (ret) { nlmsg_free(skb2); return ret; } errline = nla_data(cda[IPSET_ATTR_LINENO]); *errline = lineno; nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); /* Signal netlink not to send its ACK/errmsg. 
*/ return -EINTR; } return ret; } static int ip_set_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, enum ipset_adt adt, const struct nlmsghdr *nlh, const struct nlattr * const attr[], struct netlink_ext_ack *extack) { struct ip_set_net *inst = ip_set_pernet(net); struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; const struct nlattr *nla; u32 flags = flag_exist(nlh); bool use_lineno; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !((attr[IPSET_ATTR_DATA] != NULL) ^ (attr[IPSET_ATTR_ADT] != NULL)) || (attr[IPSET_ATTR_DATA] && !flag_nested(attr[IPSET_ATTR_DATA])) || (attr[IPSET_ATTR_ADT] && (!flag_nested(attr[IPSET_ATTR_ADT]) || !attr[IPSET_ATTR_LINENO])))) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; use_lineno = !!attr[IPSET_ATTR_LINENO]; if (attr[IPSET_ATTR_DATA]) { if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(net, ctnl, skb, set, tb, adt, flags, use_lineno); } else { int nla_rem; nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) { if (nla_type(nla) != IPSET_ATTR_DATA || !flag_nested(nla) || nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(net, ctnl, skb, set, tb, adt, flags, use_lineno); if (ret < 0) return ret; } } return ret; } static int ip_set_uadd(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return ip_set_ad(info->net, info->sk, skb, IPSET_ADD, info->nlh, attr, info->extack); } static int ip_set_udel(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return ip_set_ad(info->net, info->sk, skb, IPSET_DEL, info->nlh, attr, info->extack); } static int ip_set_utest(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; int ret = 0; u32 lineno; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_DATA] || !flag_nested(attr[IPSET_ATTR_DATA]))) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; rcu_read_lock_bh(); ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); rcu_read_unlock_bh(); /* Userspace can't trigger element to be re-added */ if (ret == -EAGAIN) ret = 1; return ret > 0 ? 
0 : -IPSET_ERR_EXIST; } /* Get headed data of a set */ static int ip_set_header(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); const struct ip_set *set; struct sk_buff *skb2; struct nlmsghdr *nlh2; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME])) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_HEADER); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) || nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get type data */ static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_FAMILY] = { .type = NLA_U8 }, }; static int ip_set_type(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct sk_buff *skb2; struct nlmsghdr *nlh2; u8 family, min, max; const char *typename; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_TYPENAME] || !attr[IPSET_ATTR_FAMILY])) return -IPSET_ERR_PROTOCOL; family = nla_get_u8(attr[IPSET_ATTR_FAMILY]); typename = nla_data(attr[IPSET_ATTR_TYPENAME]); ret = find_set_type_minmax(typename, family, &min, &max); if (ret) return ret; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_TYPE); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) || nla_put_u8(skb2, IPSET_ATTR_REVISION, max) || nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min)) goto nla_put_failure; nlmsg_end(skb2, nlh2); pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get protocol version */ static const struct nla_policy ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, }; static int ip_set_protocol(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct sk_buff *skb2; struct nlmsghdr *nlh2; if (unlikely(!attr[IPSET_ATTR_PROTOCOL])) return -IPSET_ERR_PROTOCOL; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_PROTOCOL); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL)) goto nla_put_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); 
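/* Error unwinding for the reply builders in this file happens in two
 * stages: nla_put_failure trims the partially constructed reply with
 * nlmsg_cancel(), then falls through to nlmsg_failure, which frees the
 * reply skb and reports -EMSGSIZE to the caller. */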
nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get set by name or index, from userspace */ static int ip_set_byname(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct sk_buff *skb2; struct nlmsghdr *nlh2; ip_set_id_t id = IPSET_INVALID_ID; const struct ip_set *set; if (unlikely(protocol_failed(attr) || !attr[IPSET_ATTR_SETNAME])) return -IPSET_ERR_PROTOCOL; set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id); if (id == IPSET_INVALID_ID) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_GET_BYNAME); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id))) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_INDEX] = { .type = NLA_U16 }, }; static int ip_set_byindex(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct sk_buff *skb2; struct nlmsghdr *nlh2; ip_set_id_t id = IPSET_INVALID_ID; const struct ip_set *set; if (unlikely(protocol_failed(attr) || !attr[IPSET_ATTR_INDEX])) return -IPSET_ERR_PROTOCOL; id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]); if (id >= inst->ip_set_max) return -ENOENT; set = ip_set(inst, id); if (set == NULL) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_GET_BYINDEX); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = { [IPSET_CMD_NONE] = { .call = ip_set_none, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, }, [IPSET_CMD_CREATE] = { .call = ip_set_create, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_create_policy, }, [IPSET_CMD_DESTROY] = { .call = ip_set_destroy, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_FLUSH] = { .call = ip_set_flush, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_RENAME] = { .call = ip_set_rename, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname2_policy, }, [IPSET_CMD_SWAP] = { .call = ip_set_swap, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname2_policy, }, [IPSET_CMD_LIST] = { .call = ip_set_dump, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_dump_policy, }, [IPSET_CMD_SAVE] = { .call = ip_set_dump, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_ADD] = { .call = ip_set_uadd, .type = 
NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_DEL] = { .call = ip_set_udel, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_TEST] = { .call = ip_set_utest, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_HEADER] = { .call = ip_set_header, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_TYPE] = { .call = ip_set_type, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_type_policy, }, [IPSET_CMD_PROTOCOL] = { .call = ip_set_protocol, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_protocol_policy, }, [IPSET_CMD_GET_BYNAME] = { .call = ip_set_byname, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_GET_BYINDEX] = { .call = ip_set_byindex, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_index_policy, }, }; static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = { .name = "ip_set", .subsys_id = NFNL_SUBSYS_IPSET, .cb_count = IPSET_MSG_MAX, .cb = ip_set_netlink_subsys_cb, }; /* Interface to iptables/ip6tables */ static int ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) { unsigned int *op; void *data; int copylen = *len, ret = 0; struct net *net = sock_net(sk); struct ip_set_net *inst = ip_set_pernet(net); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (optval != SO_IP_SET) return -EBADF; if (*len < sizeof(unsigned int)) return -EINVAL; data = vmalloc(*len); if (!data) return -ENOMEM; if (copy_from_user(data, user, *len) != 0) { ret = -EFAULT; goto done; } op = data; if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ struct ip_set_req_version *req_version = data; if (*len < sizeof(struct ip_set_req_version)) { ret = -EINVAL; goto done; } if (req_version->version < IPSET_PROTOCOL_MIN) { ret = -EPROTO; goto done; } } switch (*op) { case IP_SET_OP_VERSION: { struct ip_set_req_version *req_version = data; if (*len != sizeof(struct ip_set_req_version)) { ret = -EINVAL; goto done; } req_version->version = IPSET_PROTOCOL; if (copy_to_user(user, req_version, sizeof(struct ip_set_req_version))) ret = -EFAULT; goto done; } case IP_SET_OP_GET_BYNAME: { struct ip_set_req_get_set *req_get = data; ip_set_id_t id; if (*len != sizeof(struct ip_set_req_get_set)) { ret = -EINVAL; goto done; } req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; nfnl_lock(NFNL_SUBSYS_IPSET); find_set_and_id(inst, req_get->set.name, &id); req_get->set.index = id; nfnl_unlock(NFNL_SUBSYS_IPSET); goto copy; } case IP_SET_OP_GET_FNAME: { struct ip_set_req_get_set_family *req_get = data; ip_set_id_t id; if (*len != sizeof(struct ip_set_req_get_set_family)) { ret = -EINVAL; goto done; } req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; nfnl_lock(NFNL_SUBSYS_IPSET); find_set_and_id(inst, req_get->set.name, &id); req_get->set.index = id; if (id != IPSET_INVALID_ID) req_get->family = ip_set(inst, id)->family; nfnl_unlock(NFNL_SUBSYS_IPSET); goto copy; } case IP_SET_OP_GET_BYINDEX: { struct ip_set_req_get_set *req_get = data; struct ip_set *set; if (*len != sizeof(struct ip_set_req_get_set) || req_get->set.index >= inst->ip_set_max) { ret = -EINVAL; goto done; } nfnl_lock(NFNL_SUBSYS_IPSET); set = ip_set(inst, req_get->set.index); ret = strscpy(req_get->set.name, set ? 
set->name : "", IPSET_MAXNAMELEN); nfnl_unlock(NFNL_SUBSYS_IPSET); if (ret < 0) goto done; goto copy; } default: ret = -EBADMSG; goto done; } /* end of switch(op) */ copy: if (copy_to_user(user, data, copylen)) ret = -EFAULT; done: vfree(data); if (ret > 0) ret = 0; return ret; } static struct nf_sockopt_ops so_set __read_mostly = { .pf = PF_INET, .get_optmin = SO_IP_SET, .get_optmax = SO_IP_SET + 1, .get = ip_set_sockfn_get, .owner = THIS_MODULE, }; static int __net_init ip_set_net_init(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); struct ip_set **list; inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX; if (inst->ip_set_max >= IPSET_INVALID_ID) inst->ip_set_max = IPSET_INVALID_ID - 1; list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL); if (!list) return -ENOMEM; inst->is_deleted = false; inst->is_destroyed = false; rcu_assign_pointer(inst->ip_set_list, list); return 0; } static void __net_exit ip_set_net_pre_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); inst->is_deleted = true; /* flag for ip_set_nfnl_put */ } static void __net_exit ip_set_net_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); _destroy_all_sets(inst); kvfree(rcu_dereference_protected(inst->ip_set_list, 1)); } static struct pernet_operations ip_set_net_ops = { .init = ip_set_net_init, .pre_exit = ip_set_net_pre_exit, .exit = ip_set_net_exit, .id = &ip_set_net_id, .size = sizeof(struct ip_set_net), }; static int __init ip_set_init(void) { int ret = register_pernet_subsys(&ip_set_net_ops); if (ret) { pr_err("ip_set: cannot register pernet_subsys.\n"); return ret; } ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); if (ret != 0) { pr_err("ip_set: cannot register with nfnetlink.\n"); unregister_pernet_subsys(&ip_set_net_ops); return ret; } ret = nf_register_sockopt(&so_set); if (ret != 0) { pr_err("SO_SET registry failed: %d\n", ret); nfnetlink_subsys_unregister(&ip_set_netlink_subsys); unregister_pernet_subsys(&ip_set_net_ops); return ret; } return 0; } static void __exit ip_set_fini(void) { nf_unregister_sockopt(&so_set); nfnetlink_subsys_unregister(&ip_set_netlink_subsys); unregister_pernet_subsys(&ip_set_net_ops); /* Wait for call_rcu() in destroy */ rcu_barrier(); pr_debug("these are the famous last words\n"); } module_init(ip_set_init); module_exit(ip_set_fini); MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));
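/*
 * Illustrative sketch (not part of the original file): how a netfilter
 * extension in the style of xt_set would consume the kernel-side API
 * above. All example_* names are hypothetical; only ip_set_get_byname(),
 * ip_set_test() and ip_set_put_byindex() are real entry points from
 * this file.
 */
struct example_set_match {
	char setname[IPSET_MAXNAMELEN];
	ip_set_id_t index;
	struct ip_set_adt_opt opt;
};

/* Resolve the set once at rule-insertion time and keep the long-term
 * reference; the index stays usable (sets may be swapped, but not
 * destroyed) until the reference is released. */
static int example_checkentry(struct net *net, struct example_set_match *info)
{
	struct ip_set *set;

	info->index = ip_set_get_byname(net, info->setname, &set);
	return info->index == IPSET_INVALID_ID ? -ENOENT : 0;
}

/* Per-packet fast path: ip_set_test() already folds errors to 0, so a
 * positive return value is the only "matched" answer. */
static bool example_match(const struct sk_buff *skb,
			  const struct xt_action_param *par,
			  struct example_set_match *info)
{
	return ip_set_test(info->index, skb, par, &info->opt) > 0;
}

/* Drop the reference taken at checkentry time. */
static void example_destroy(struct net *net, struct example_set_match *info)
{
	ip_set_put_byindex(net, info->index);
}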
// SPDX-License-Identifier: GPL-2.0-only
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/kthread.h> #include <linux/crc32.h> #include <linux/gfs2_ondisk.h> #include <linux/delay.h> #include <linux/uaccess.h> #include "gfs2.h" #include "incore.h" #include "glock.h" #include "glops.h" #include "log.h" #include "lops.h" #include "recovery.h" #include "rgrp.h" #include "super.h" #include "util.h" struct kmem_cache *gfs2_glock_cachep __read_mostly; struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly; struct kmem_cache *gfs2_inode_cachep __read_mostly; struct kmem_cache *gfs2_bufdata_cachep __read_mostly; struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; struct kmem_cache *gfs2_quotad_cachep __read_mostly; struct kmem_cache *gfs2_qadata_cachep __read_mostly; struct kmem_cache *gfs2_trans_cachep __read_mostly; mempool_t *gfs2_page_pool __read_mostly; void gfs2_assert_i(struct gfs2_sbd *sdp) { fs_emerg(sdp, "fatal assertion failed\n"); } /** * check_journal_clean - Make sure a journal is clean for a spectator mount * @sdp: The GFS2 superblock * @jd: The journal descriptor * @verbose: Show more prints in the log * * Returns: 0 if the journal is clean or locked, else an error */ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, bool verbose) { int error; struct gfs2_holder j_gh; struct gfs2_log_header_host head; struct gfs2_inode *ip; ip = GFS2_I(jd->jd_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE, &j_gh); if (error) { if (verbose) fs_err(sdp, "Error %d locking journal for spectator " "mount.\n", error); return -EPERM; } error = gfs2_jdesc_check(jd); if (error) { if (verbose) fs_err(sdp, "Error checking journal for spectator " "mount.\n"); goto out_unlock; } error = gfs2_find_jhead(jd, &head, false); if (error) { if (verbose) fs_err(sdp, "Error parsing journal for spectator " "mount.\n"); goto out_unlock; } if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { error = -EPERM; if (verbose) fs_err(sdp, "jid=%u: Journal is dirty, so the first " "mounter must not be a spectator.\n", jd->jd_jid); } out_unlock: gfs2_glock_dq_uninit(&j_gh); return error; } /** * gfs2_freeze_lock_shared - hold the freeze glock * @sdp: the superblock */ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp) { int flags = LM_FLAG_NOEXP | GL_EXACT; int error; error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags, &sdp->sd_freeze_gh); if (error && error != GLR_TRYFAILED) fs_err(sdp, "can't lock the freeze glock: %d\n", error); return error; } void gfs2_freeze_unlock(struct gfs2_sbd *sdp) { if (gfs2_holder_initialized(&sdp->sd_freeze_gh)) gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); } static void signal_our_withdraw(struct gfs2_sbd *sdp) { struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl; struct inode *inode; struct gfs2_inode *ip; struct gfs2_glock *i_gl; u64 no_formal_ino; int ret = 0; int tries; if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc) return; gfs2_ail_drain(sdp); /* frees all transactions */ inode = sdp->sd_jdesc->jd_inode; ip = GFS2_I(inode); i_gl = ip->i_gl; no_formal_ino = ip->i_no_formal_ino; /* Prevent any glock dq until withdraw recovery is complete */ set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); /* * Don't tell dlm we're bailing until we have no more buffers in the * wind. If journal had an IO error, the log code should just purge * the outstanding buffers rather than submitting new IO. Making the * file system read-only will flush the journal, etc. 
* * During a normal unmount, gfs2_make_fs_ro calls gfs2_log_shutdown * which clears SDF_JOURNAL_LIVE. In a withdraw, we must not write * any UNMOUNT log header, so we can't call gfs2_log_shutdown, and * therefore we need to clear SDF_JOURNAL_LIVE manually. */ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); if (!sb_rdonly(sdp->sd_vfs)) { bool locked = mutex_trylock(&sdp->sd_freeze_mutex); wake_up(&sdp->sd_logd_waitq); wake_up(&sdp->sd_quota_wait); wait_event_timeout(sdp->sd_log_waitq, gfs2_log_is_empty(sdp), HZ * 5); sdp->sd_vfs->s_flags |= SB_RDONLY; if (locked) mutex_unlock(&sdp->sd_freeze_mutex); /* * Dequeue any pending non-system glock holders that can no * longer be granted because the file system is withdrawn. */ gfs2_gl_dq_holders(sdp); } if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */ if (!ret) ret = -EIO; clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); goto skip_recovery; } /* * Drop the glock for our journal so another node can recover it. */ if (gfs2_holder_initialized(&sdp->sd_journal_gh)) { gfs2_glock_dq_wait(&sdp->sd_journal_gh); gfs2_holder_uninit(&sdp->sd_journal_gh); } sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE; gfs2_glock_dq(&sdp->sd_jinode_gh); gfs2_thaw_freeze_initiator(sdp->sd_vfs); wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); /* * holder_uninit to force glock_put, to force dlm to let go */ gfs2_holder_uninit(&sdp->sd_jinode_gh); /* * Note: We need to be careful here: * Our iput of jd_inode will evict it. The evict will dequeue its * glock, but the glock dq will wait for the withdraw unless we have * exception code in glock_dq. */ iput(inode); sdp->sd_jdesc->jd_inode = NULL; /* * Wait until the journal inode's glock is freed. This allows try locks * on other nodes to be successful, otherwise we remain the owner of * the glock as far as dlm is concerned. */ if (i_gl->gl_ops->go_unlocked) { set_bit(GLF_UNLOCKED, &i_gl->gl_flags); wait_on_bit(&i_gl->gl_flags, GLF_UNLOCKED, TASK_UNINTERRUPTIBLE); } /* * Dequeue the "live" glock, but keep a reference so it's never freed. */ gfs2_glock_hold(live_gl); gfs2_glock_dq_wait(&sdp->sd_live_gh); /* * We enqueue the "live" glock in EX so that all other nodes * get a demote request and act on it. We don't really want the * lock in EX, so we send a "try" lock with 1CB to produce a callback. */ fs_warn(sdp, "Requesting recovery of jid %d.\n", sdp->sd_lockstruct.ls_jid); gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | GL_NOPID, &sdp->sd_live_gh); msleep(GL_GLOCK_MAX_HOLD); /* * This will likely fail in a cluster, but succeed standalone: */ ret = gfs2_glock_nq(&sdp->sd_live_gh); /* * If we actually got the "live" lock in EX mode, there are no other * nodes available to replay our journal. So we try to replay it * ourselves. We hold the "live" glock to prevent other mounters * during recovery, then just dequeue it and reacquire it in our * normal SH mode. Just in case the problem that caused us to * withdraw prevents us from recovering our journal (e.g. io errors * and such) we still check if the journal is clean before proceeding * but we may wait forever until another mounter does the recovery. */ if (ret == 0) { fs_warn(sdp, "No other mounters found. 
Trying to recover our " "own journal jid %d.\n", sdp->sd_lockstruct.ls_jid); if (gfs2_recover_journal(sdp->sd_jdesc, 1)) fs_warn(sdp, "Unable to recover our journal jid %d.\n", sdp->sd_lockstruct.ls_jid); gfs2_glock_dq_wait(&sdp->sd_live_gh); gfs2_holder_reinit(LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT | GL_NOPID, &sdp->sd_live_gh); gfs2_glock_nq(&sdp->sd_live_gh); } gfs2_glock_put(live_gl); /* drop extra reference we acquired */ clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); /* * At this point our journal is evicted, so we need to get a new inode * for it. Once done, we need to call gfs2_find_jhead which * calls gfs2_map_journal_extents to map it for us again. * * Note that we don't really want it to look up a FREE block. The * GFS2_BLKST_FREE simply overrides a block check in gfs2_inode_lookup * which would otherwise fail because it requires grabbing an rgrp * glock, which would fail with -EIO because we're withdrawing. */ inode = gfs2_inode_lookup(sdp->sd_vfs, DT_UNKNOWN, sdp->sd_jdesc->jd_no_addr, no_formal_ino, GFS2_BLKST_FREE); if (IS_ERR(inode)) { fs_warn(sdp, "Reprocessing of jid %d failed with %ld.\n", sdp->sd_lockstruct.ls_jid, PTR_ERR(inode)); goto skip_recovery; } sdp->sd_jdesc->jd_inode = inode; d_mark_dontcache(inode); /* * Now wait until recovery is complete. */ for (tries = 0; tries < 10; tries++) { ret = check_journal_clean(sdp, sdp->sd_jdesc, false); if (!ret) break; msleep(HZ); fs_warn(sdp, "Waiting for journal recovery jid %d.\n", sdp->sd_lockstruct.ls_jid); } skip_recovery: if (!ret) fs_warn(sdp, "Journal recovery complete for jid %d.\n", sdp->sd_lockstruct.ls_jid); else fs_warn(sdp, "Journal recovery skipped for jid %d until next " "mount.\n", sdp->sd_lockstruct.ls_jid); fs_warn(sdp, "Glock dequeues delayed: %lu\n", sdp->sd_glock_dqs_held); sdp->sd_glock_dqs_held = 0; wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY); } void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...) 
{ struct va_format vaf; va_list args; if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW && test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; fs_err(sdp, "%pV", &vaf); va_end(args); } int gfs2_withdraw(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; const struct lm_lockops *lm = ls->ls_ops; if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) { unsigned long old = READ_ONCE(sdp->sd_flags), new; do { if (old & BIT(SDF_WITHDRAWN)) { wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG, TASK_UNINTERRUPTIBLE); return -1; } new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG); } while (unlikely(!try_cmpxchg(&sdp->sd_flags, &old, new))); fs_err(sdp, "about to withdraw this file system\n"); BUG_ON(sdp->sd_args.ar_debug); signal_our_withdraw(sdp); kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE); if (!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm")) wait_for_completion(&sdp->sd_wdack); if (lm->lm_unmount) { fs_err(sdp, "telling LM to unmount\n"); lm->lm_unmount(sdp); } fs_err(sdp, "File system withdrawn\n"); dump_stack(); clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags); smp_mb__after_atomic(); wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG); } if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC) panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname); return -1; } /* * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false */ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion, const char *function, char *file, unsigned int line, bool delayed) { if (gfs2_withdrawing_or_withdrawn(sdp)) return; fs_err(sdp, "fatal: assertion \"%s\" failed - " "function = %s, file = %s, line = %u\n", assertion, function, file, line); /* * If errors=panic was specified on mount, it won't help to delay the * withdraw. 
*/ if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC) delayed = false; if (delayed) gfs2_withdraw_delayed(sdp); else gfs2_withdraw(sdp); dump_stack(); } /* * gfs2_assert_warn_i - Print a message to the console if @assertion is false */ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion, const char *function, char *file, unsigned int line) { if (time_before(jiffies, sdp->sd_last_warning + gfs2_tune_get(sdp, gt_complain_secs) * HZ)) return; if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) fs_warn(sdp, "warning: assertion \"%s\" failed - " "function = %s, file = %s, line = %u\n", assertion, function, file, line); if (sdp->sd_args.ar_debug) BUG(); else dump_stack(); if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC) panic("GFS2: fsid=%s: warning: assertion \"%s\" failed - " "function = %s, file = %s, line = %u\n", sdp->sd_fsname, assertion, function, file, line); sdp->sd_last_warning = jiffies; } /* * gfs2_consist_i - Flag a filesystem consistency error and withdraw */ void gfs2_consist_i(struct gfs2_sbd *sdp, const char *function, char *file, unsigned int line) { gfs2_lm(sdp, "fatal: filesystem consistency error - " "function = %s, file = %s, line = %u\n", function, file, line); gfs2_withdraw(sdp); } /* * gfs2_consist_inode_i - Flag an inode consistency error and withdraw */ void gfs2_consist_inode_i(struct gfs2_inode *ip, const char *function, char *file, unsigned int line) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_lm(sdp, "fatal: filesystem consistency error - " "inode = %llu %llu, " "function = %s, file = %s, line = %u\n", (unsigned long long)ip->i_no_formal_ino, (unsigned long long)ip->i_no_addr, function, file, line); gfs2_dump_glock(NULL, ip->i_gl, 1); gfs2_withdraw(sdp); } /* * gfs2_consist_rgrpd_i - Flag a RG consistency error and withdraw */ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, const char *function, char *file, unsigned int line) { struct gfs2_sbd *sdp = rgd->rd_sbd; char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); gfs2_rgrp_dump(NULL, rgd, fs_id_buf); gfs2_lm(sdp, "fatal: filesystem consistency error - " "RG = %llu, " "function = %s, file = %s, line = %u\n", (unsigned long long)rgd->rd_addr, function, file, line); gfs2_dump_glock(NULL, rgd->rd_gl, 1); gfs2_withdraw(sdp); } /* * gfs2_meta_check_ii - Flag a magic number consistency error and withdraw * Returns: -1 if this call withdrew the machine, * -2 if it was already withdrawn */ int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, const char *function, char *file, unsigned int line) { int me; gfs2_lm(sdp, "fatal: invalid metadata block - " "bh = %llu (bad magic number), " "function = %s, file = %s, line = %u\n", (unsigned long long)bh->b_blocknr, function, file, line); me = gfs2_withdraw(sdp); return (me) ? -1 : -2; } /* * gfs2_metatype_check_ii - Flag a metadata type consistency error and withdraw * Returns: -1 if this call withdrew the machine, * -2 if it was already withdrawn */ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, u16 type, u16 t, const char *function, char *file, unsigned int line) { int me; gfs2_lm(sdp, "fatal: invalid metadata block - " "bh = %llu (type: exp=%u, found=%u), " "function = %s, file = %s, line = %u\n", (unsigned long long)bh->b_blocknr, type, t, function, file, line); me = gfs2_withdraw(sdp); return (me) ? 
-1 : -2; } /* * gfs2_io_error_i - Flag an I/O error and withdraw * Returns: -1 if this call withdrew the machine, * 0 if it was already withdrawn */ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file, unsigned int line) { gfs2_lm(sdp, "fatal: I/O error - " "function = %s, file = %s, line = %u\n", function, file, line); return gfs2_withdraw(sdp); } /* * gfs2_io_error_bh_i - Flag a buffer I/O error * @withdraw: withdraw the filesystem */ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh, const char *function, char *file, unsigned int line, bool withdraw) { if (gfs2_withdrawing_or_withdrawn(sdp)) return; fs_err(sdp, "fatal: I/O error - " "block = %llu, " "function = %s, file = %s, line = %u\n", (unsigned long long)bh->b_blocknr, function, file, line); if (withdraw) gfs2_withdraw(sdp); }
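/*
 * Illustrative sketch (not part of the original file): the *_i helpers
 * above are reached through wrapper macros that stringify the assertion
 * and capture the call site, in the style of gfs2's util.h. The macro
 * below is an assumed, simplified form of that pattern, not the
 * verbatim header; it evaluates to true when the assertion failed so
 * callers can bail out.
 */
#define example_assert_withdraw(sdp, assertion)				\
	({								\
		bool _failed = !(assertion);				\
		if (unlikely(_failed))					\
			gfs2_assert_withdraw_i((sdp), #assertion,	\
					       __func__, __FILE__,	\
					       __LINE__, false);	\
		_failed;						\
	})
/* Typical use: if (example_assert_withdraw(sdp, blkno != 0)) return -EIO; */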
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "journal_io.h"
#include <linux/sort.h>
/* * For managing keys
we read from the journal: until journal replay works normal * btree lookups need to be able to find and return keys from the journal where * they overwrite what's in the btree, so we have a special iterator and * operations for the regular btree iter code to use: */ static inline size_t pos_to_idx(struct journal_keys *keys, size_t pos) { size_t gap_size = keys->size - keys->nr; BUG_ON(pos >= keys->gap && pos < keys->gap + gap_size); if (pos >= keys->gap) pos -= gap_size; return pos; } static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx) { size_t gap_size = keys->size - keys->nr; if (idx >= keys->gap) idx += gap_size; return idx; } static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx) { return keys->data + idx_to_pos(keys, idx); } static size_t __bch2_journal_key_search(struct journal_keys *keys, enum btree_id id, unsigned level, struct bpos pos) { size_t l = 0, r = keys->nr, m; while (l < r) { m = l + ((r - l) >> 1); if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0) l = m + 1; else r = m; } BUG_ON(l < keys->nr && __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0); BUG_ON(l && __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0); return l; } static size_t bch2_journal_key_search(struct journal_keys *keys, enum btree_id id, unsigned level, struct bpos pos) { return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos)); } /* Returns first non-overwritten key >= search key: */ struct bkey_i *bch2_journal_keys_peek_max(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos, struct bpos end_pos, size_t *idx) { struct journal_keys *keys = &c->journal_keys; unsigned iters = 0; struct journal_key *k; BUG_ON(*idx > keys->nr); search: if (!*idx) *idx = __bch2_journal_key_search(keys, btree_id, level, pos); while (*idx && __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { --(*idx); iters++; if (iters == 10) { *idx = 0; goto search; } } struct bkey_i *ret = NULL; rcu_read_lock(); /* for overwritten_ranges */ while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { if (__journal_key_cmp(btree_id, level, end_pos, k) < 0) break; if (k->overwritten) { if (k->overwritten_range) *idx = rcu_dereference(k->overwritten_range)->end; else *idx += 1; continue; } if (__journal_key_cmp(btree_id, level, pos, k) <= 0) { ret = k->k; break; } (*idx)++; iters++; if (iters == 10) { *idx = 0; rcu_read_unlock(); goto search; } } rcu_read_unlock(); return ret; } struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos, struct bpos end_pos, size_t *idx) { struct journal_keys *keys = &c->journal_keys; unsigned iters = 0; struct journal_key *k; BUG_ON(*idx > keys->nr); search: if (!*idx) *idx = __bch2_journal_key_search(keys, btree_id, level, pos); while (*idx && __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { (*idx)++; iters++; if (iters == 10) { *idx = 0; goto search; } } struct bkey_i *ret = NULL; rcu_read_lock(); /* for overwritten_ranges */ while ((k = *idx < keys->nr ? 
idx_to_key(keys, *idx) : NULL)) { if (__journal_key_cmp(btree_id, level, end_pos, k) > 0) break; if (k->overwritten) { if (k->overwritten_range) *idx = rcu_dereference(k->overwritten_range)->start - 1; else *idx -= 1; continue; } if (__journal_key_cmp(btree_id, level, pos, k) >= 0) { ret = k->k; break; } --(*idx); iters++; if (iters == 10) { *idx = 0; goto search; } } rcu_read_unlock(); return ret; } struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos) { size_t idx = 0; return bch2_journal_keys_peek_max(c, btree_id, level, pos, pos, &idx); } static void journal_iter_verify(struct journal_iter *iter) { #ifdef CONFIG_BCACHEFS_DEBUG struct journal_keys *keys = iter->keys; size_t gap_size = keys->size - keys->nr; BUG_ON(iter->idx >= keys->gap && iter->idx < keys->gap + gap_size); if (iter->idx < keys->size) { struct journal_key *k = keys->data + iter->idx; int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k); BUG_ON(cmp > 0); } #endif } static void journal_iters_fix(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; /* The key we just inserted is immediately before the gap: */ size_t gap_end = keys->gap + (keys->size - keys->nr); struct journal_key *new_key = &keys->data[keys->gap - 1]; struct journal_iter *iter; /* * If an iterator points one after the key we just inserted, decrement * the iterator so it points at the key we just inserted - if the * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will * handle that: */ list_for_each_entry(iter, &c->journal_iters, list) { journal_iter_verify(iter); if (iter->idx == gap_end && new_key->btree_id == iter->btree_id && new_key->level == iter->level) iter->idx = keys->gap - 1; journal_iter_verify(iter); } } static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap) { struct journal_keys *keys = &c->journal_keys; struct journal_iter *iter; size_t gap_size = keys->size - keys->nr; list_for_each_entry(iter, &c->journal_iters, list) { if (iter->idx > old_gap) iter->idx -= gap_size; if (iter->idx >= new_gap) iter->idx += gap_size; } } int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id, unsigned level, struct bkey_i *k) { struct journal_key n = { .btree_id = id, .level = level, .k = k, .allocated = true, /* * Ensure these keys are done last by journal replay, to unblock * journal reclaim: */ .journal_seq = U64_MAX, }; struct journal_keys *keys = &c->journal_keys; size_t idx = bch2_journal_key_search(keys, id, level, k->k.p); BUG_ON(test_bit(BCH_FS_rw, &c->flags)); if (idx < keys->size && journal_key_cmp(&n, &keys->data[idx]) == 0) { if (keys->data[idx].allocated) kfree(keys->data[idx].k); keys->data[idx] = n; return 0; } if (idx > keys->gap) idx -= keys->size - keys->nr; size_t old_gap = keys->gap; if (keys->nr == keys->size) { journal_iters_move_gap(c, old_gap, keys->size); old_gap = keys->size; struct journal_keys new_keys = { .nr = keys->nr, .size = max_t(size_t, keys->size, 8) * 2, }; new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL); if (!new_keys.data) { bch_err(c, "%s: error allocating new key array (size %zu)", __func__, new_keys.size); return -BCH_ERR_ENOMEM_journal_key_insert; } /* Since @keys was full, there was no gap: */ memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr); kvfree(keys->data); keys->data = new_keys.data; keys->nr = new_keys.nr; keys->size = new_keys.size; /* And now the gap is at the end: */ keys->gap = keys->nr; } 
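/* There is now guaranteed to be room: move the gap (updating any live iterators first) to the insert position, then store the new key at the front of the gap. */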
journal_iters_move_gap(c, old_gap, idx); move_gap(keys, idx); keys->nr++; keys->data[keys->gap++] = n; journal_iters_fix(c); return 0; } /* * Can only be used from the recovery thread while we're still RO - can't be * used once we've got RW, as journal_keys is at that point used by multiple * threads: */ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id, unsigned level, struct bkey_i *k) { struct bkey_i *n; int ret; n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL); if (!n) return -BCH_ERR_ENOMEM_journal_key_insert; bkey_copy(n, k); ret = bch2_journal_key_insert_take(c, id, level, n); if (ret) kfree(n); return ret; } int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id, unsigned level, struct bpos pos) { struct bkey_i whiteout; bkey_init(&whiteout.k); whiteout.k.p = pos; return bch2_journal_key_insert(c, id, level, &whiteout); } bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bpos pos) { struct journal_keys *keys = &trans->c->journal_keys; size_t idx = bch2_journal_key_search(keys, btree, level, pos); if (!trans->journal_replay_not_finished) return false; return (idx < keys->size && keys->data[idx].btree_id == btree && keys->data[idx].level == level && bpos_eq(keys->data[idx].k->k.p, pos) && bkey_deleted(&keys->data[idx].k->k)); } static void __bch2_journal_key_overwritten(struct journal_keys *keys, size_t pos) { struct journal_key *k = keys->data + pos; size_t idx = pos_to_idx(keys, pos); k->overwritten = true; struct journal_key *prev = idx > 0 ? keys->data + idx_to_pos(keys, idx - 1) : NULL; struct journal_key *next = idx + 1 < keys->nr ? keys->data + idx_to_pos(keys, idx + 1) : NULL; bool prev_overwritten = prev && prev->overwritten; bool next_overwritten = next && next->overwritten; struct journal_key_range_overwritten *prev_range = prev_overwritten ? prev->overwritten_range : NULL; struct journal_key_range_overwritten *next_range = next_overwritten ? 
next->overwritten_range : NULL; BUG_ON(prev_range && prev_range->end != idx); BUG_ON(next_range && next_range->start != idx + 1); if (prev_range && next_range) { prev_range->end = next_range->end; keys->data[pos].overwritten_range = prev_range; for (size_t i = next_range->start; i < next_range->end; i++) { struct journal_key *ip = keys->data + idx_to_pos(keys, i); BUG_ON(ip->overwritten_range != next_range); ip->overwritten_range = prev_range; } kfree_rcu_mightsleep(next_range); } else if (prev_range) { prev_range->end++; k->overwritten_range = prev_range; if (next_overwritten) { prev_range->end++; next->overwritten_range = prev_range; } } else if (next_range) { next_range->start--; k->overwritten_range = next_range; if (prev_overwritten) { next_range->start--; prev->overwritten_range = next_range; } } else if (prev_overwritten || next_overwritten) { struct journal_key_range_overwritten *r = kmalloc(sizeof(*r), GFP_KERNEL); if (!r) return; r->start = idx - (size_t) prev_overwritten; r->end = idx + 1 + (size_t) next_overwritten; rcu_assign_pointer(k->overwritten_range, r); if (prev_overwritten) prev->overwritten_range = r; if (next_overwritten) next->overwritten_range = r; } } void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, unsigned level, struct bpos pos) { struct journal_keys *keys = &c->journal_keys; size_t idx = bch2_journal_key_search(keys, btree, level, pos); if (idx < keys->size && keys->data[idx].btree_id == btree && keys->data[idx].level == level && bpos_eq(keys->data[idx].k->k.p, pos) && !keys->data[idx].overwritten) { mutex_lock(&keys->overwrite_lock); __bch2_journal_key_overwritten(keys, idx); mutex_unlock(&keys->overwrite_lock); } } static void bch2_journal_iter_advance(struct journal_iter *iter) { if (iter->idx < iter->keys->size) { iter->idx++; if (iter->idx == iter->keys->gap) iter->idx += iter->keys->size - iter->keys->nr; } } static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter) { struct bkey_s_c ret = bkey_s_c_null; journal_iter_verify(iter); rcu_read_lock(); while (iter->idx < iter->keys->size) { struct journal_key *k = iter->keys->data + iter->idx; int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k); if (cmp < 0) break; BUG_ON(cmp); if (!k->overwritten) { ret = bkey_i_to_s_c(k->k); break; } if (k->overwritten_range) iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end); else bch2_journal_iter_advance(iter); } rcu_read_unlock(); return ret; } static void bch2_journal_iter_exit(struct journal_iter *iter) { list_del(&iter->list); } static void bch2_journal_iter_init(struct bch_fs *c, struct journal_iter *iter, enum btree_id id, unsigned level, struct bpos pos) { iter->btree_id = id; iter->level = level; iter->keys = &c->journal_keys; iter->idx = bch2_journal_key_search(&c->journal_keys, id, level, pos); journal_iter_verify(iter); } static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter) { return bch2_btree_node_iter_peek_unpack(&iter->node_iter, iter->b, &iter->unpacked); } static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter) { bch2_btree_node_iter_advance(&iter->node_iter, iter->b); } void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter) { if (bpos_eq(iter->pos, SPOS_MAX)) iter->at_end = true; else iter->pos = bpos_successor(iter->pos); } static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter) { struct btree_and_journal_iter iter = *_iter; struct bch_fs *c = iter.trans->c; unsigned 
level = iter.journal.level; struct bkey_buf tmp; unsigned nr = test_bit(BCH_FS_started, &c->flags) ? (level > 1 ? 0 : 2) : (level > 1 ? 1 : 16); iter.prefetch = false; iter.fail_if_too_many_whiteouts = true; bch2_bkey_buf_init(&tmp); while (nr--) { bch2_btree_and_journal_iter_advance(&iter); struct bkey_s_c k = bch2_btree_and_journal_iter_peek(&iter); if (!k.k) break; bch2_bkey_buf_reassemble(&tmp, c, k); bch2_btree_node_prefetch(iter.trans, NULL, tmp.k, iter.journal.btree_id, level - 1); } bch2_bkey_buf_exit(&tmp, c); } struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter) { struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret; size_t iters = 0; if (iter->prefetch && iter->journal.level) btree_and_journal_iter_prefetch(iter); again: if (iter->at_end) return bkey_s_c_null; iters++; if (iters > 20 && iter->fail_if_too_many_whiteouts) return bkey_s_c_null; while ((btree_k = bch2_journal_iter_peek_btree(iter)).k && bpos_lt(btree_k.k->p, iter->pos)) bch2_journal_iter_advance_btree(iter); if (iter->trans->journal_replay_not_finished) while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k && bpos_lt(journal_k.k->p, iter->pos)) bch2_journal_iter_advance(&iter->journal); ret = journal_k.k && (!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p)) ? journal_k : btree_k; if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key)) ret = bkey_s_c_null; if (ret.k) { iter->pos = ret.k->p; if (bkey_deleted(ret.k)) { bch2_btree_and_journal_iter_advance(iter); goto again; } } else { iter->pos = SPOS_MAX; iter->at_end = true; } return ret; } void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter) { bch2_journal_iter_exit(&iter->journal); } void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans, struct btree_and_journal_iter *iter, struct btree *b, struct btree_node_iter node_iter, struct bpos pos) { memset(iter, 0, sizeof(*iter)); iter->trans = trans; iter->b = b; iter->node_iter = node_iter; iter->pos = b->data->min_key; iter->at_end = false; INIT_LIST_HEAD(&iter->journal.list); if (trans->journal_replay_not_finished) { bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos); if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags)) list_add(&iter->journal.list, &trans->c->journal_iters); } } /* * this version is used by btree_gc before filesystem has gone RW and * multithreaded, so uses the journal_iters list: */ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans, struct btree_and_journal_iter *iter, struct btree *b) { struct btree_node_iter node_iter; bch2_btree_node_iter_init_from_start(&node_iter, b); __bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key); } /* sort and dedup all keys in the journal: */ /* * When keys compare equal, oldest compares first: */ static int journal_sort_key_cmp(const void *_l, const void *_r) { const struct journal_key *l = _l; const struct journal_key *r = _r; return journal_key_cmp(l, r) ?: cmp_int(l->journal_seq, r->journal_seq) ?: cmp_int(l->journal_offset, r->journal_offset); } void bch2_journal_keys_put(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; BUG_ON(atomic_read(&keys->ref) <= 0); if (!atomic_dec_and_test(&keys->ref)) return; move_gap(keys, keys->nr); darray_for_each(*keys, i) { if (i->overwritten_range && (i == &darray_last(*keys) || i->overwritten_range != i[1].overwritten_range)) kfree(i->overwritten_range); if (i->allocated) kfree(i->k); } kvfree(keys->data); keys->data = NULL; 
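/* Reset the now-empty key array, and release the raw journal entries as well; nothing may reference either once the last ref is dropped. */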
keys->nr = keys->gap = keys->size = 0; struct journal_replay **i; struct genradix_iter iter; genradix_for_each(&c->journal_entries, iter, i) kvfree(*i); genradix_free(&c->journal_entries); } static void __journal_keys_sort(struct journal_keys *keys) { sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL); cond_resched(); struct journal_key *dst = keys->data; darray_for_each(*keys, src) { /* * We don't accumulate accounting keys here because we have to * compare each individual accounting key against the version in * the btree during replay: */ if (src->k->k.type != KEY_TYPE_accounting && src + 1 < &darray_top(*keys) && !journal_key_cmp(src, src + 1)) continue; *dst++ = *src; } keys->nr = dst - keys->data; } int bch2_journal_keys_sort(struct bch_fs *c) { struct genradix_iter iter; struct journal_replay *i, **_i; struct journal_keys *keys = &c->journal_keys; size_t nr_read = 0; genradix_for_each(&c->journal_entries, iter, _i) { i = *_i; if (journal_replay_ignore(i)) continue; cond_resched(); for_each_jset_key(k, entry, &i->j) { struct journal_key n = (struct journal_key) { .btree_id = entry->btree_id, .level = entry->level, .k = k, .journal_seq = le64_to_cpu(i->j.seq), .journal_offset = k->_data - i->j._data, }; if (darray_push(keys, n)) { __journal_keys_sort(keys); if (keys->nr * 8 > keys->size * 7) { bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu", keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq)); return -BCH_ERR_ENOMEM_journal_keys_sort; } BUG_ON(darray_push(keys, n)); } nr_read++; } } __journal_keys_sort(keys); keys->gap = keys->nr; bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr); return 0; } void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree, unsigned level_min, unsigned level_max, struct bpos start, struct bpos end) { struct journal_keys *keys = &c->journal_keys; size_t dst = 0; move_gap(keys, keys->nr); darray_for_each(*keys, i) if (!(i->btree_id == btree && i->level >= level_min && i->level <= level_max && bpos_ge(i->k->k.p, start) && bpos_le(i->k->k.p, end))) keys->data[dst++] = *i; keys->nr = keys->gap = dst; } void bch2_journal_keys_dump(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; struct printbuf buf = PRINTBUF; pr_info("%zu keys:", keys->nr); move_gap(keys, keys->nr); darray_for_each(*keys, i) { printbuf_reset(&buf); prt_printf(&buf, "btree="); bch2_btree_id_to_text(&buf, i->btree_id); prt_printf(&buf, " l=%u ", i->level); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k)); pr_err("%s", buf.buf); } printbuf_exit(&buf); } void bch2_fs_journal_keys_init(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; atomic_set(&keys->ref, 1); keys->initial_ref_held = true; mutex_init(&keys->overwrite_lock); }
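The idx/pos helper pair at the top of this file implements a gap buffer: logically the keys form a sorted array indexed 0..nr-1, while physically there is a hole of size size - nr at offset gap, so an insert adjacent to the gap is O(1). A self-contained toy sketch of just that index mapping (the toy_ names are illustrative, not the kernel structs):

#include <assert.h>
#include <stddef.h>

/* Toy gap buffer mirroring journal_keys' layout: nr live elements in a
 * buffer of 'size' slots, with the hole ("gap") starting at 'gap'. */
struct toy_keys {
	size_t nr, size, gap;
};

/* Logical index -> physical position: skip over the hole. */
static size_t toy_idx_to_pos(struct toy_keys *k, size_t idx)
{
	size_t gap_size = k->size - k->nr;

	return idx >= k->gap ? idx + gap_size : idx;
}

/* Physical position -> logical index: positions inside the hole are invalid. */
static size_t toy_pos_to_idx(struct toy_keys *k, size_t pos)
{
	size_t gap_size = k->size - k->nr;

	assert(!(pos >= k->gap && pos < k->gap + gap_size));
	return pos >= k->gap ? pos - gap_size : pos;
}

int main(void)
{
	/* 6 live keys in an 8-slot buffer, hole at physical slots 3..4: */
	struct toy_keys k = { .nr = 6, .size = 8, .gap = 3 };

	assert(toy_idx_to_pos(&k, 2) == 2);	/* before the hole */
	assert(toy_idx_to_pos(&k, 3) == 5);	/* after the hole  */
	assert(toy_pos_to_idx(&k, 5) == 3);
	return 0;
}

With this layout, moving the gap only has to shift the elements that lie between the old and new gap positions, which is roughly what move_gap() does before each insert.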
/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright (C) 2018, 2020-2024 Intel Corporation */ #ifndef __NET_WIRELESS_NL80211_H #define __NET_WIRELESS_NL80211_H #include "core.h" int nl80211_init(void); void nl80211_exit(void); void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd); bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr); static inline u64 wdev_id(struct wireless_dev *wdev) { return (u64)wdev->identifier | ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32); } int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_chan_def *chandef); int nl80211_parse_random_mac(struct nlattr **attrs, u8 *mac_addr, u8 *mac_addr_mask); void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd); void nl80211_notify_iface(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_commands cmd); void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, bool aborted); void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, struct sk_buff *msg); void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd); void nl80211_common_reg_change_event(enum nl80211_commands cmd_id, struct regulatory_request *request); static inline void nl80211_send_reg_change_event(struct regulatory_request *request) { nl80211_common_reg_change_event(NL80211_CMD_REG_CHANGE, request); } static inline void nl80211_send_wiphy_reg_change_event(struct regulatory_request *request) { nl80211_common_reg_change_event(NL80211_CMD_WIPHY_REG_CHANGE, request); } void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp); void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const struct cfg80211_rx_assoc_resp_data *data); void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, bool reconnect, gfp_t gfp); void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, bool reconnect, gfp_t gfp); void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp); void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp); void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_connect_resp_params *params, gfp_t gfp); void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_roam_info *info, gfp_t gfp); /* For STA/GC, indicate port authorized with AP/GO bssid. * For GO/AP, use peer GC/STA mac_addr.
*/ void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *peer_addr, const u8 *td_bitmap, u8 td_bitmap_len); void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap); void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp); void nl80211_send_beacon_hint_event(struct wiphy *wiphy, struct ieee80211_channel *channel_before, struct ieee80211_channel *channel_after); void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, gfp_t gfp); int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 nlpid, struct cfg80211_rx_info *info, gfp_t gfp); void nl80211_radar_notify(struct cfg80211_registered_device *rdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, struct net_device *netdev, gfp_t gfp); void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id); void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce); /* peer measurement */ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info); void nl80211_mlo_reconf_add_done(struct net_device *dev, struct cfg80211_mlo_reconf_done_data *data); #endif /* __NET_WIRELESS_NL80211_H */
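wdev_id() above packs a wireless_dev's per-wiphy identifier and the wiphy index into a single u64, with the wiphy index in the high 32 bits. A standalone sketch of the same packing and its inverse (the toy_ names are illustrative, not a cfg80211 API):

#include <assert.h>
#include <stdint.h>

/* Pack in the same shape as wdev_id(): low 32 bits hold the wdev
 * identifier, high 32 bits hold the wiphy index. */
static uint64_t toy_wdev_id(uint32_t wiphy_idx, uint32_t wdev_identifier)
{
	return (uint64_t)wdev_identifier | ((uint64_t)wiphy_idx << 32);
}

int main(void)
{
	uint64_t id = toy_wdev_id(3, 42);

	assert((uint32_t)id == 42);		/* wdev identifier */
	assert((uint32_t)(id >> 32) == 3);	/* wiphy index */
	return 0;
}

Combining both values yields an identifier that is unique across the whole system, not just within one wiphy.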
// SPDX-License-Identifier: GPL-2.0+ /* * USB Compaq iPAQ driver * * Copyright (C) 2001 - 2002 * Ganesh Varadarajan <ganesh@veritas.com> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define KP_RETRIES 100 #define DRIVER_AUTHOR "Ganesh Varadarajan <ganesh@veritas.com>" #define DRIVER_DESC "USB PocketPC PDA driver" static int connect_retries = KP_RETRIES; static int initial_wait; /* Function prototypes for an ipaq */ static int ipaq_open(struct tty_struct *tty, struct usb_serial_port *port); static int ipaq_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds); static int ipaq_startup(struct usb_serial *serial); static const struct usb_device_id ipaq_id_table[] = { { USB_DEVICE(0x0104, 0x00BE) }, /* Socket USB Sync */ { USB_DEVICE(0x03F0, 0x1016) }, /* HP USB Sync */ { USB_DEVICE(0x03F0, 0x1116) }, /* HP USB Sync 1611 */ { USB_DEVICE(0x03F0, 0x1216) }, /* HP USB Sync 1612 */ { USB_DEVICE(0x03F0, 0x2016) }, /* HP USB Sync 1620 */ {
USB_DEVICE(0x03F0, 0x2116) }, /* HP USB Sync 1621 */ { USB_DEVICE(0x03F0, 0x2216) }, /* HP USB Sync 1622 */ { USB_DEVICE(0x03F0, 0x3016) }, /* HP USB Sync 1630 */ { USB_DEVICE(0x03F0, 0x3116) }, /* HP USB Sync 1631 */ { USB_DEVICE(0x03F0, 0x3216) }, /* HP USB Sync 1632 */ { USB_DEVICE(0x03F0, 0x4016) }, /* HP USB Sync 1640 */ { USB_DEVICE(0x03F0, 0x4116) }, /* HP USB Sync 1641 */ { USB_DEVICE(0x03F0, 0x4216) }, /* HP USB Sync 1642 */ { USB_DEVICE(0x03F0, 0x5016) }, /* HP USB Sync 1650 */ { USB_DEVICE(0x03F0, 0x5116) }, /* HP USB Sync 1651 */ { USB_DEVICE(0x03F0, 0x5216) }, /* HP USB Sync 1652 */ { USB_DEVICE(0x0409, 0x00D5) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x00D6) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x00D7) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x8024) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x8025) }, /* NEC USB Sync */ { USB_DEVICE(0x043E, 0x9C01) }, /* LGE USB Sync */ { USB_DEVICE(0x045E, 0x00CE) }, /* Microsoft USB Sync */ { USB_DEVICE(0x045E, 0x0400) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0401) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0402) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0403) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0404) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0405) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0406) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0407) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0408) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0409) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040A) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040B) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040C) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040D) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040E) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040F) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0410) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0411) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0412) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0413) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0414) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0415) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0416) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0417) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0432) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0433) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0434) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0435) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0436) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0437) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0438) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0439) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043F) }, /* 
Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0440) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0441) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0442) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0443) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0444) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0445) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0446) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0447) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0448) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0449) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044F) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0450) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0451) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0452) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0453) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0454) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0455) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0456) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0457) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0458) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0459) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045F) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0460) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0461) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0462) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0463) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0464) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0465) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0466) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0467) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0468) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0469) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046F) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0470) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0471) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0472) }, /* Windows Powered Pocket PC 2003 */ { 
USB_DEVICE(0x045E, 0x0473) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0474) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0475) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0476) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0477) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0478) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0479) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x047A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x047B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x04C8) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04C9) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CA) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CB) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CC) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CD) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CE) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04D7) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04D8) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04D9) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DA) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DB) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DC) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DD) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DE) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DF) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E0) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E1) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E2) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E3) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E4) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E5) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E6) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E7) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E8) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E9) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04EA) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x049F, 0x0003) }, /* Compaq iPAQ USB Sync */ { USB_DEVICE(0x049F, 0x0032) }, /* Compaq iPAQ USB Sync */ { USB_DEVICE(0x04A4, 0x0014) }, /* Hitachi USB Sync */ { USB_DEVICE(0x04AD, 0x0301) }, /* USB Sync 0301 */ { USB_DEVICE(0x04AD, 0x0302) }, /* USB Sync 0302 */ { USB_DEVICE(0x04AD, 0x0303) }, /* USB Sync 0303 */ { USB_DEVICE(0x04AD, 0x0306) }, /* GPS Pocket PC USB Sync */ { USB_DEVICE(0x04B7, 0x0531) }, /* MyGuide 7000 XL USB Sync */ { USB_DEVICE(0x04C5, 0x1058) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */ { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */ { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */ { USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */ { USB_DEVICE(0x04DD, 0x91AC) }, /* SHARP WS011SH USB Modem */ { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB 
Sync */ { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F03) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F04) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x6611) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6613) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6615) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6617) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6619) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x661B) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x662E) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6630) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6632) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04f1, 0x3011) }, /* JVC USB Sync */ { USB_DEVICE(0x04F1, 0x3012) }, /* JVC USB Sync */ { USB_DEVICE(0x0502, 0x1631) }, /* c10 Series */ { USB_DEVICE(0x0502, 0x1632) }, /* c20 Series */ { USB_DEVICE(0x0502, 0x16E1) }, /* Acer n10 Handheld USB Sync */ { USB_DEVICE(0x0502, 0x16E2) }, /* Acer n20 Handheld USB Sync */ { USB_DEVICE(0x0502, 0x16E3) }, /* Acer n30 Handheld USB Sync */ { USB_DEVICE(0x0536, 0x01A0) }, /* HHP PDT */ { USB_DEVICE(0x0543, 0x0ED9) }, /* ViewSonic Color Pocket PC V35 */ { USB_DEVICE(0x0543, 0x1527) }, /* ViewSonic Color Pocket PC V36 */ { USB_DEVICE(0x0543, 0x1529) }, /* ViewSonic Color Pocket PC V37 */ { USB_DEVICE(0x0543, 0x152B) }, /* ViewSonic Color Pocket PC V38 */ { USB_DEVICE(0x0543, 0x152E) }, /* ViewSonic Pocket PC */ { USB_DEVICE(0x0543, 0x1921) }, /* ViewSonic Communicator Pocket PC */ { USB_DEVICE(0x0543, 0x1922) }, /* ViewSonic Smartphone */ { USB_DEVICE(0x0543, 0x1923) }, /* ViewSonic Pocket PC V30 */ { USB_DEVICE(0x05E0, 0x2000) }, /* Symbol USB Sync */ { USB_DEVICE(0x05E0, 0x2001) }, /* Symbol USB Sync 0x2001 */ { USB_DEVICE(0x05E0, 0x2002) }, /* Symbol USB Sync 0x2002 */ { USB_DEVICE(0x05E0, 0x2003) }, /* Symbol USB Sync 0x2003 */ { USB_DEVICE(0x05E0, 0x2004) }, /* Symbol USB Sync 0x2004 */ { USB_DEVICE(0x05E0, 0x2005) }, /* Symbol USB Sync 0x2005 */ { USB_DEVICE(0x05E0, 0x2006) }, /* Symbol USB Sync 0x2006 */ { USB_DEVICE(0x05E0, 0x2007) }, /* Symbol USB Sync 0x2007 */ { USB_DEVICE(0x05E0, 0x2008) }, /* Symbol USB Sync 0x2008 */ { USB_DEVICE(0x05E0, 0x2009) }, /* Symbol USB Sync 0x2009 */ { USB_DEVICE(0x05E0, 0x200A) }, /* Symbol USB Sync 0x200A */ { USB_DEVICE(0x067E, 0x1001) }, /* Intermec Mobile Computer */ { USB_DEVICE(0x07CF, 0x2001) }, /* CASIO USB Sync 2001 */ { USB_DEVICE(0x07CF, 0x2002) }, /* CASIO USB Sync 2002 */ { USB_DEVICE(0x07CF, 0x2003) }, /* CASIO USB Sync 2003 */ { USB_DEVICE(0x0930, 0x0700) }, /* TOSHIBA USB Sync 0700 */ { USB_DEVICE(0x0930, 0x0705) }, /* TOSHIBA Pocket PC e310 */ { USB_DEVICE(0x0930, 0x0706) }, /* TOSHIBA Pocket PC e740 */ { USB_DEVICE(0x0930, 0x0707) }, /* TOSHIBA Pocket PC e330 Series */ { USB_DEVICE(0x0930, 0x0708) }, /* TOSHIBA Pocket PC e350 Series */ { USB_DEVICE(0x0930, 0x0709) }, /* TOSHIBA Pocket PC e750 Series */ { USB_DEVICE(0x0930, 0x070A) }, /* TOSHIBA Pocket PC e400 Series */ { USB_DEVICE(0x0930, 0x070B) }, /* TOSHIBA Pocket PC e800 Series */ { USB_DEVICE(0x094B, 0x0001) }, /* Linkup Systems USB Sync */ { USB_DEVICE(0x0960, 0x0065) }, /* BCOM USB Sync 0065 */ { USB_DEVICE(0x0960, 0x0066) }, /* BCOM USB Sync 0066 */ { USB_DEVICE(0x0960, 0x0067) }, /* BCOM USB Sync 0067 */ { USB_DEVICE(0x0961, 0x0010) }, /* Portatec USB Sync */ { USB_DEVICE(0x099E, 0x0052) }, /* Trimble GeoExplorer */ { USB_DEVICE(0x099E, 0x4000) }, /* TDS Data Collector */ { USB_DEVICE(0x0B05, 0x4200) }, 
/* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x4201) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x4202) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x420F) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x9200) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x9202) }, /* ASUS USB Sync */ { USB_DEVICE(0x0BB4, 0x00CE) }, /* HTC USB Sync */ { USB_DEVICE(0x0BB4, 0x00CF) }, /* HTC USB Modem */ { USB_DEVICE(0x0BB4, 0x0A01) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A02) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A03) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A04) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A05) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A06) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A07) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A08) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A09) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A10) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A11) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A12) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A13) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A14) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A15) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A16) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A17) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A18) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A19) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A20) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A21) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A22) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A23) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A24) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A25) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A26) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A27) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A28) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A29) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A30) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A31) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A32) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A33) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A34) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A35) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A36) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A37) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A38) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A39) }, /* 
PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A40) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A41) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A42) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A43) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A44) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A45) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A46) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A47) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A48) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A49) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A50) }, /* HTC SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A51) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A52) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A53) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A54) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A55) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A56) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A57) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A58) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A59) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A60) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A61) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A62) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A63) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A64) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A65) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A66) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A67) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A68) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A69) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A70) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A71) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A72) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A73) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A74) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A75) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A76) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A77) }, /* 
SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A78) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A79) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A80) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A81) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A82) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A83) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A84) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A85) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A86) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A87) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A88) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A89) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A90) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A91) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A92) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A93) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A94) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A95) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A96) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A97) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A98) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A99) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0BCE) }, /* "High Tech Computer Corp" */ { USB_DEVICE(0x0BF8, 0x1001) }, /* Fujitsu Siemens Computers USB Sync */ { USB_DEVICE(0x0C44, 0x03A2) }, /* Motorola iDEN Smartphone */ { USB_DEVICE(0x0C8E, 0x6000) }, /* Cesscom Luxian Series */ { USB_DEVICE(0x0CAD, 0x9001) }, /* Motorola PowerPad Pocket PC Device */ { USB_DEVICE(0x0F4E, 0x0200) }, /* Freedom Scientific USB Sync */ { USB_DEVICE(0x0F98, 0x0201) }, /* Cyberbank USB Sync */ { USB_DEVICE(0x0FB8, 0x3001) }, /* Wistron USB Sync */ { USB_DEVICE(0x0FB8, 0x3002) }, /* Wistron USB Sync */ { USB_DEVICE(0x0FB8, 0x3003) }, /* Wistron USB Sync */ { USB_DEVICE(0x0FB8, 0x4001) }, /* Wistron USB Sync */ { USB_DEVICE(0x1066, 0x00CE) }, /* E-TEN USB Sync */ { USB_DEVICE(0x1066, 0x0300) }, /* E-TEN P3XX Pocket PC */ { USB_DEVICE(0x1066, 0x0500) }, /* E-TEN P5XX Pocket PC */ { USB_DEVICE(0x1066, 0x0600) }, /* E-TEN P6XX Pocket PC */ { USB_DEVICE(0x1066, 0x0700) }, /* E-TEN P7XX Pocket PC */ { USB_DEVICE(0x1114, 0x0001) }, /* Psion Teklogix Sync 753x */ { USB_DEVICE(0x1114, 0x0004) }, /* Psion Teklogix Sync netBookPro */ { USB_DEVICE(0x1114, 0x0006) }, /* Psion Teklogix Sync 7525 */ { USB_DEVICE(0x1182, 0x1388) }, /* VES USB Sync */ 
{ USB_DEVICE(0x11D9, 0x1002) }, /* Rugged Pocket PC 2003 */ { USB_DEVICE(0x11D9, 0x1003) }, /* Rugged Pocket PC 2003 */ { USB_DEVICE(0x1231, 0xCE01) }, /* USB Sync 03 */ { USB_DEVICE(0x1231, 0xCE02) }, /* USB Sync 03 */ { USB_DEVICE(0x1690, 0x0601) }, /* Askey USB Sync */ { USB_DEVICE(0x22B8, 0x4204) }, /* Motorola MPx200 Smartphone */ { USB_DEVICE(0x22B8, 0x4214) }, /* Motorola MPc GSM */ { USB_DEVICE(0x22B8, 0x4224) }, /* Motorola MPx220 Smartphone */ { USB_DEVICE(0x22B8, 0x4234) }, /* Motorola MPc CDMA */ { USB_DEVICE(0x22B8, 0x4244) }, /* Motorola MPx100 Smartphone */ { USB_DEVICE(0x3340, 0x011C) }, /* Mio DigiWalker PPC StrongARM */ { USB_DEVICE(0x3340, 0x0326) }, /* Mio DigiWalker 338 */ { USB_DEVICE(0x3340, 0x0426) }, /* Mio DigiWalker 338 */ { USB_DEVICE(0x3340, 0x043A) }, /* Mio DigiWalker USB Sync */ { USB_DEVICE(0x3340, 0x051C) }, /* MiTAC USB Sync 528 */ { USB_DEVICE(0x3340, 0x053A) }, /* Mio DigiWalker SmartPhone USB Sync */ { USB_DEVICE(0x3340, 0x071C) }, /* MiTAC USB Sync */ { USB_DEVICE(0x3340, 0x0B1C) }, /* Generic PPC StrongARM */ { USB_DEVICE(0x3340, 0x0E3A) }, /* Generic PPC USB Sync */ { USB_DEVICE(0x3340, 0x0F1C) }, /* Itautec USB Sync */ { USB_DEVICE(0x3340, 0x0F3A) }, /* Generic SmartPhone USB Sync */ { USB_DEVICE(0x3340, 0x1326) }, /* Itautec USB Sync */ { USB_DEVICE(0x3340, 0x191C) }, /* YAKUMO USB Sync */ { USB_DEVICE(0x3340, 0x2326) }, /* Vobis USB Sync */ { USB_DEVICE(0x3340, 0x3326) }, /* MEDION Windows Mobile USB Sync */ { USB_DEVICE(0x3708, 0x20CE) }, /* Legend USB Sync */ { USB_DEVICE(0x3708, 0x21CE) }, /* Lenovo USB Sync */ { USB_DEVICE(0x4113, 0x0210) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x4113, 0x0211) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x4113, 0x0400) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x4113, 0x0410) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x413C, 0x4001) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4002) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4003) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4004) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4005) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4006) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4007) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4008) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4009) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x4505, 0x0010) }, /* Smartphone */ { USB_DEVICE(0x5E04, 0xCE00) }, /* SAGEM Wireless Assistant */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ipaq_id_table); /* All of the device info needed for the Compaq iPAQ */ static struct usb_serial_driver ipaq_device = { .driver = { .name = "ipaq", }, .description = "PocketPC PDA", .id_table = ipaq_id_table, .bulk_in_size = 256, .bulk_out_size = 256, .open = ipaq_open, .attach = ipaq_startup, .calc_num_ports = ipaq_calc_num_ports, }; static struct usb_serial_driver * const serial_drivers[] = { &ipaq_device, NULL }; static int ipaq_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; int result = 0; int retries = connect_retries; msleep(1000*initial_wait); /* * Send out control message observed in win98 sniffs. Not sure what * it does, but from empirical observations, it seems that the device * will start the chat sequence once one of these messages gets * through. Since this has a reasonably high failure rate, we retry * several times.
*/ while (retries) { retries--; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 0x1, 0, NULL, 0, 100); if (!result) break; msleep(1000); } if (!retries && result) { dev_err(&port->dev, "%s - failed doing control urb, error %d\n", __func__, result); return result; } return usb_serial_generic_open(tty, port); } static int ipaq_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { /* * Some of the devices in ipaq_id_table[] are composite, and we * shouldn't bind to all the interfaces. This test will rule out * some obviously invalid possibilities. */ if (epds->num_bulk_in == 0 || epds->num_bulk_out == 0) return -ENODEV; /* * A few devices have four endpoints, seemingly Yakuma devices, and * we need the second pair. */ if (epds->num_bulk_in > 1 && epds->num_bulk_out > 1) { epds->bulk_in[0] = epds->bulk_in[1]; epds->bulk_out[0] = epds->bulk_out[1]; } /* * Other devices have 3 endpoints, but we only use the first bulk in * and out endpoints. */ epds->num_bulk_in = 1; epds->num_bulk_out = 1; return 1; } static int ipaq_startup(struct usb_serial *serial) { if (serial->dev->actconfig->desc.bConfigurationValue != 1) { /* * FIXME: HP iPaq rx3715, possibly others, have 1 config that * is labeled as 2 */ dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); return -ENODEV; } return usb_reset_configuration(serial->dev); } module_usb_serial_driver(serial_drivers, ipaq_id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(connect_retries, int, 0644); MODULE_PARM_DESC(connect_retries, "Maximum number of connect retries (one second each)"); module_param(initial_wait, int, 0644); MODULE_PARM_DESC(initial_wait, "Time to wait before attempting a connection (in seconds)");
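connect_retries and initial_wait are ordinary module parameters (declared with mode 0644, so they are also writable under /sys/module/ipaq/parameters/ at runtime). At load time they could be set with a modprobe configuration entry along these lines (the values are illustrative only):

options ipaq connect_retries=20 initial_wait=2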
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA sequencer MIDI-through client
 * Copyright (c) 1999-2000 by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/initval.h>
#include <sound/asoundef.h>

/*
   Sequencer MIDI-through client

   This gives a simple midi-through client. All the normal input events
   are redirected to the output port immediately.
   The routing can be done via the aconnect program in alsa-utils.

   Each client has a static client number 14 (= SNDRV_SEQ_CLIENT_DUMMY).
   If you want to auto-load this module, you may add the following alias
   in your /etc/conf.modules file.

	alias snd-seq-client-14 snd-seq-dummy

   The module is loaded on demand for client 14, or when /proc/asound/seq/
   is accessed. If you don't need this module to be loaded, alias
   snd-seq-client-14 as "off". This will help modprobe.

   The number of ports to be created can be specified via the module
   parameter "ports". For example, to create four ports, add the
   following option in a configuration file under /etc/modprobe.d/:

	option snd-seq-dummy ports=4

   The module option "duplex=1" enables duplex operation for the port.
   In duplex mode, a pair of ports is created instead of a single port,
   and events are tunneled between the paired ports. For example, input
   to port A is sent to the output port of its pair B and vice versa.
   In duplex mode, each port has the DUPLEX capability.
*/ MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA sequencer MIDI-through client"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-seq-client-" __stringify(SNDRV_SEQ_CLIENT_DUMMY)); static int ports = 1; static bool duplex; module_param(ports, int, 0444); MODULE_PARM_DESC(ports, "number of ports to be created"); module_param(duplex, bool, 0444); MODULE_PARM_DESC(duplex, "create DUPLEX ports"); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) static int ump; module_param(ump, int, 0444); MODULE_PARM_DESC(ump, "UMP conversion (0: no convert, 1: MIDI 1.0, 2: MIDI 2.0)"); #endif struct snd_seq_dummy_port { int client; int port; int duplex; int connect; }; static int my_client = -1; /* * event input callback - just redirect events to subscribers */ static int dummy_input(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { struct snd_seq_dummy_port *p; struct snd_seq_event tmpev; p = private_data; if (ev->source.client == SNDRV_SEQ_CLIENT_SYSTEM || ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR) return 0; /* ignore system messages */ tmpev = *ev; if (p->duplex) tmpev.source.port = p->connect; else tmpev.source.port = p->port; tmpev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; return snd_seq_kernel_client_dispatch(p->client, &tmpev, atomic, hop); } /* * free_private callback */ static void dummy_free(void *private_data) { kfree(private_data); } /* * create a port */ static struct snd_seq_dummy_port __init * create_port(int idx, int type) { struct snd_seq_port_info pinfo; struct snd_seq_port_callback pcb; struct snd_seq_dummy_port *rec; rec = kzalloc(sizeof(*rec), GFP_KERNEL); if (!rec) return NULL; rec->client = my_client; rec->duplex = duplex; rec->connect = 0; memset(&pinfo, 0, sizeof(pinfo)); pinfo.addr.client = my_client; if (duplex) sprintf(pinfo.name, "Midi Through Port-%d:%c", idx, (type ? 
'B' : 'A')); else sprintf(pinfo.name, "Midi Through Port-%d", idx); pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ; pinfo.capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE; if (duplex) pinfo.capability |= SNDRV_SEQ_PORT_CAP_DUPLEX; pinfo.direction = SNDRV_SEQ_PORT_DIR_BIDIRECTION; pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | SNDRV_SEQ_PORT_TYPE_SOFTWARE | SNDRV_SEQ_PORT_TYPE_PORT; memset(&pcb, 0, sizeof(pcb)); pcb.owner = THIS_MODULE; pcb.event_input = dummy_input; pcb.private_free = dummy_free; pcb.private_data = rec; pinfo.kernel = &pcb; if (snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo) < 0) { kfree(rec); return NULL; } rec->port = pinfo.addr.port; return rec; } /* * register client and create ports */ static int __init register_client(void) { struct snd_seq_dummy_port *rec1, *rec2; #if IS_ENABLED(CONFIG_SND_SEQ_UMP) struct snd_seq_client *client; #endif int i; if (ports < 1) { pr_err("ALSA: seq_dummy: invalid number of ports %d\n", ports); return -EINVAL; } /* create client */ my_client = snd_seq_create_kernel_client(NULL, SNDRV_SEQ_CLIENT_DUMMY, "Midi Through"); if (my_client < 0) return my_client; #if IS_ENABLED(CONFIG_SND_SEQ_UMP) client = snd_seq_kernel_client_get(my_client); if (!client) return -EINVAL; switch (ump) { case 1: client->midi_version = SNDRV_SEQ_CLIENT_UMP_MIDI_1_0; break; case 2: client->midi_version = SNDRV_SEQ_CLIENT_UMP_MIDI_2_0; break; default: /* don't convert events but just pass-through */ client->filter = SNDRV_SEQ_FILTER_NO_CONVERT; break; } snd_seq_kernel_client_put(client); #endif /* create ports */ for (i = 0; i < ports; i++) { rec1 = create_port(i, 0); if (rec1 == NULL) { snd_seq_delete_kernel_client(my_client); return -ENOMEM; } if (duplex) { rec2 = create_port(i, 1); if (rec2 == NULL) { snd_seq_delete_kernel_client(my_client); return -ENOMEM; } rec1->connect = rec2->port; rec2->connect = rec1->port; } } return 0; } /* * delete client if exists */ static void __exit delete_client(void) { if (my_client >= 0) snd_seq_delete_kernel_client(my_client); } /* * Init part */ static int __init alsa_seq_dummy_init(void) { return register_client(); } static void __exit alsa_seq_dummy_exit(void) { delete_client(); } module_init(alsa_seq_dummy_init) module_exit(alsa_seq_dummy_exit)
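Because the through client always registers as the static client number 14, userspace can subscribe to it without scanning for it first. A small sketch using alsa-lib (not part of this module); the client name and note values are arbitrary and error handling is trimmed:

#include <alsa/asoundlib.h>

int main(void)
{
	snd_seq_t *seq;
	snd_seq_event_t ev;
	int port;

	if (snd_seq_open(&seq, "default", SND_SEQ_OPEN_OUTPUT, 0) < 0)
		return 1;
	snd_seq_set_client_name(seq, "thru-demo");
	port = snd_seq_create_simple_port(seq, "out",
			SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_SUBS_READ,
			SND_SEQ_PORT_TYPE_MIDI_GENERIC);
	/* client 14 = SNDRV_SEQ_CLIENT_DUMMY, port 0 = "Midi Through Port-0" */
	if (snd_seq_connect_to(seq, port, 14, 0) < 0)
		return 1;

	snd_seq_ev_clear(&ev);
	snd_seq_ev_set_source(&ev, port);
	snd_seq_ev_set_subs(&ev);	/* deliver to subscribers, as dummy_input does */
	snd_seq_ev_set_direct(&ev);
	snd_seq_ev_set_noteon(&ev, 0, 60, 100);	/* middle C, velocity 100 */
	snd_seq_event_output(seq, &ev);
	snd_seq_drain_output(seq);

	snd_seq_close(seq);
	return 0;
}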
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/* Context where printk messages are never suppressed */
static atomic_t force_con;

void printk_force_console_enter(void)
{
	atomic_inc(&force_con);
}

void printk_force_console_exit(void)
{
	atomic_dec(&force_con);
}

bool is_printk_force_console(void)
{
	return atomic_read(&force_con);
}

static DEFINE_PER_CPU(int, printk_context);

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

void __printk_deferred_enter(void)
{
	cant_migrate();
	__printk_safe_enter();
}

void __printk_deferred_exit(void)
{
	cant_migrate();
	__printk_safe_exit();
}

bool is_printk_legacy_deferred(void)
{
	/*
	 * The per-CPU variable @printk_context can be read safely in any
	 * context. CPU migration is always disabled when set.
	 *
	 * A context holding the printk_cpu_sync must not spin waiting for
	 * another CPU. For legacy printing, it could be the console_lock
	 * or the port lock.
	 */
	return (force_legacy_kthread() ||
		this_cpu_read(printk_context) ||
		in_nmi() ||
		is_printk_cpu_sync_owner());
}

asmlinkage int vprintk(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
	/* Allow passing printk() to kdb while avoiding recursion. */
	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif
	return vprintk_default(fmt, args);
}
EXPORT_SYMBOL(vprintk);
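The enter/exit pair above is just a nesting counter: any context that has incremented it defers legacy console output rather than risking recursion or spinning on a lock it may already hold. A userspace analogue of the same pattern, substituting a C11 thread-local counter for the per-CPU one (illustrative only, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Per-thread stand-in for the kernel's per-CPU printk_context. */
static _Thread_local int log_context;

static void log_safe_enter(void) { log_context++; }
static void log_safe_exit(void)  { log_context--; }

/* Defer output instead of emitting it while inside a safe section. */
static void log_emit(const char *msg)
{
	if (log_context)
		puts("(deferred)");	/* would be queued and flushed later */
	else
		puts(msg);		/* safe to emit directly */
}

int main(void)
{
	log_emit("direct path");
	log_safe_enter();
	log_emit("never printed directly here");
	log_safe_exit();
	return 0;
}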
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NFC hardware simulation driver
 * Copyright (c) 2013, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/nfc.h>
#include <net/nfc/nfc.h>
#include <net/nfc/digital.h>

#define NFCSIM_ERR(d, fmt, args...) nfc_err(&d->nfc_digital_dev->nfc_dev->dev, \
					    "%s: " fmt, __func__, ## args)

#define NFCSIM_DBG(d, fmt, args...) \
				    dev_dbg(&d->nfc_digital_dev->nfc_dev->dev, \
					    "%s: " fmt, __func__, ## args)

#define NFCSIM_VERSION "0.2"

#define NFCSIM_MODE_NONE	0
#define NFCSIM_MODE_INITIATOR	1
#define NFCSIM_MODE_TARGET	2

#define NFCSIM_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
			     NFC_DIGITAL_DRV_CAPS_TG_CRC)

struct nfcsim {
	struct nfc_digital_dev *nfc_digital_dev;

	struct work_struct recv_work;
	struct delayed_work send_work;

	struct nfcsim_link *link_in;
	struct nfcsim_link *link_out;

	bool up;
	u8 mode;
	u8 rf_tech;

	u16 recv_timeout;

	nfc_digital_cmd_complete_t cb;
	void *arg;

	u8 dropframe;
};

struct nfcsim_link {
	struct mutex lock;

	u8 rf_tech;
	u8 mode;
	u8 shutdown;

	struct sk_buff *skb;
	wait_queue_head_t recv_wait;
	u8 cond;
};

static struct nfcsim_link *nfcsim_link_new(void)
{
	struct nfcsim_link *link;

	link = kzalloc(sizeof(struct nfcsim_link), GFP_KERNEL);
	if (!link)
		return NULL;

	mutex_init(&link->lock);
	init_waitqueue_head(&link->recv_wait);

	return link;
}

static void nfcsim_link_free(struct nfcsim_link *link)
{
	dev_kfree_skb(link->skb);
	kfree(link);
}

static void nfcsim_link_recv_wake(struct nfcsim_link *link)
{
	link->cond = 1;
	wake_up_interruptible(&link->recv_wait);
}

static void nfcsim_link_set_skb(struct nfcsim_link *link, struct sk_buff *skb,
				u8 rf_tech, u8 mode)
{
	mutex_lock(&link->lock);

	dev_kfree_skb(link->skb);
	link->skb = skb;
	link->rf_tech = rf_tech;
	link->mode = mode;

	mutex_unlock(&link->lock);
}

static void nfcsim_link_recv_cancel(struct nfcsim_link *link)
{
	mutex_lock(&link->lock);

	link->mode = NFCSIM_MODE_NONE;

	mutex_unlock(&link->lock);

	nfcsim_link_recv_wake(link);
}

static void nfcsim_link_shutdown(struct nfcsim_link *link)
{
	mutex_lock(&link->lock);

	link->shutdown = 1;
	link->mode = NFCSIM_MODE_NONE;

	mutex_unlock(&link->lock);

	nfcsim_link_recv_wake(link);
}

static struct sk_buff *nfcsim_link_recv_skb(struct nfcsim_link *link,
					    int timeout, u8 rf_tech, u8 mode)
{
	int rc;
	struct sk_buff *skb;

	rc = wait_event_interruptible_timeout(link->recv_wait,
					      link->cond,
					      msecs_to_jiffies(timeout));

	mutex_lock(&link->lock);

	skb = link->skb;
	link->skb = NULL;

	if (!rc) {
		rc = -ETIMEDOUT;
		goto done;
	}

	if (!skb || link->rf_tech != rf_tech || link->mode == mode) {
		rc = -EINVAL;
		goto done;
	}

	if (link->shutdown) {
		rc = -ENODEV;
		goto done;
	}

done:
	mutex_unlock(&link->lock);

	if (rc < 0) {
		dev_kfree_skb(skb);
		skb = ERR_PTR(rc);
	}

	link->cond = 0;

	return skb;
}

static void nfcsim_send_wq(struct work_struct *work)
{
	struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work);

	/*
	 * To effectively send data, the device just wakes up its link_out,
	 * which is the link_in of the peer device. The exchanged skb has
	 * already been stored in dev->link_out through nfcsim_link_set_skb().
*/ nfcsim_link_recv_wake(dev->link_out); } static void nfcsim_recv_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, recv_work); struct sk_buff *skb; skb = nfcsim_link_recv_skb(dev->link_in, dev->recv_timeout, dev->rf_tech, dev->mode); if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); if (!IS_ERR(skb)) dev_kfree_skb(skb); return; } dev->cb(dev->nfc_digital_dev, dev->arg, skb); } static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); u8 delay; if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); return -ENODEV; } dev->recv_timeout = timeout; dev->cb = cb; dev->arg = arg; schedule_work(&dev->recv_work); if (dev->dropframe) { NFCSIM_DBG(dev, "dropping frame (out of %d)\n", dev->dropframe); dev_kfree_skb(skb); dev->dropframe--; return 0; } if (skb) { nfcsim_link_set_skb(dev->link_out, skb, dev->rf_tech, dev->mode); /* Add random delay (between 3 and 10 ms) before sending data */ get_random_bytes(&delay, 1); delay = 3 + (delay & 0x07); schedule_delayed_work(&dev->send_work, msecs_to_jiffies(delay)); } return 0; } static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev) { const struct nfcsim *dev = nfc_digital_get_drvdata(ddev); nfcsim_link_recv_cancel(dev->link_in); } static int nfcsim_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); dev->up = on; return 0; } static int nfcsim_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_INITIATOR; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_TARGET; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, NULL, timeout, cb, arg); } static const struct nfc_digital_ops nfcsim_digital_ops = { .in_configure_hw = nfcsim_in_configure_hw, .in_send_cmd = nfcsim_in_send_cmd, .tg_listen = nfcsim_tg_listen, .tg_configure_hw = nfcsim_tg_configure_hw, .tg_send_cmd = nfcsim_tg_send_cmd, .abort_cmd = nfcsim_abort_cmd, .switch_rf = nfcsim_switch_rf, }; static struct dentry *nfcsim_debugfs_root; static void nfcsim_debugfs_init(void) { nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL); } static void nfcsim_debugfs_remove(void) { debugfs_remove_recursive(nfcsim_debugfs_root); } static void nfcsim_debugfs_init_dev(struct nfcsim *dev) { struct dentry *dev_dir; char devname[5]; /* nfcX\0 */ u32 idx; int n; if (!nfcsim_debugfs_root) { 
NFCSIM_ERR(dev, "nfcsim debugfs not initialized\n"); return; } idx = dev->nfc_digital_dev->nfc_dev->idx; n = snprintf(devname, sizeof(devname), "nfc%d", idx); if (n >= sizeof(devname)) { NFCSIM_ERR(dev, "Could not compute dev name for dev %d\n", idx); return; } dev_dir = debugfs_create_dir(devname, nfcsim_debugfs_root); debugfs_create_u8("dropframe", 0664, dev_dir, &dev->dropframe); } static struct nfcsim *nfcsim_device_new(struct nfcsim_link *link_in, struct nfcsim_link *link_out) { struct nfcsim *dev; int rc; dev = kzalloc(sizeof(struct nfcsim), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); INIT_DELAYED_WORK(&dev->send_work, nfcsim_send_wq); INIT_WORK(&dev->recv_work, nfcsim_recv_wq); dev->nfc_digital_dev = nfc_digital_allocate_device(&nfcsim_digital_ops, NFC_PROTO_NFC_DEP_MASK, NFCSIM_CAPABILITIES, 0, 0); if (!dev->nfc_digital_dev) { kfree(dev); return ERR_PTR(-ENOMEM); } nfc_digital_set_drvdata(dev->nfc_digital_dev, dev); dev->link_in = link_in; dev->link_out = link_out; rc = nfc_digital_register_device(dev->nfc_digital_dev); if (rc) { pr_err("Could not register digital device (%d)\n", rc); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); return ERR_PTR(rc); } nfcsim_debugfs_init_dev(dev); return dev; } static void nfcsim_device_free(struct nfcsim *dev) { nfc_digital_unregister_device(dev->nfc_digital_dev); dev->up = false; nfcsim_link_shutdown(dev->link_in); cancel_delayed_work_sync(&dev->send_work); cancel_work_sync(&dev->recv_work); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); } static struct nfcsim *dev0; static struct nfcsim *dev1; static int __init nfcsim_init(void) { struct nfcsim_link *link0, *link1; int rc; link0 = nfcsim_link_new(); link1 = nfcsim_link_new(); if (!link0 || !link1) { rc = -ENOMEM; goto exit_err; } nfcsim_debugfs_init(); dev0 = nfcsim_device_new(link0, link1); if (IS_ERR(dev0)) { rc = PTR_ERR(dev0); goto exit_err; } dev1 = nfcsim_device_new(link1, link0); if (IS_ERR(dev1)) { nfcsim_device_free(dev0); rc = PTR_ERR(dev1); goto exit_err; } pr_info("nfcsim " NFCSIM_VERSION " initialized\n"); return 0; exit_err: pr_err("Failed to initialize nfcsim driver (%d)\n", rc); if (link0) nfcsim_link_free(link0); if (link1) nfcsim_link_free(link1); return rc; } static void __exit nfcsim_exit(void) { struct nfcsim_link *link0, *link1; link0 = dev0->link_in; link1 = dev0->link_out; nfcsim_device_free(dev0); nfcsim_device_free(dev1); nfcsim_link_free(link0); nfcsim_link_free(link1); nfcsim_debugfs_remove(); } module_init(nfcsim_init); module_exit(nfcsim_exit); MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION); MODULE_VERSION(NFCSIM_VERSION); MODULE_LICENSE("GPL");
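The per-device "dropframe" attribute created above lets userspace inject frame loss into the simulated link. A hedged sketch of poking it from a test program, assuming debugfs is mounted at /sys/kernel/debug and the simulated device registered as nfc0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* u8 debugfs attribute created by nfcsim_debugfs_init_dev() */
	int fd = open("/sys/kernel/debug/nfcsim/nfc0/dropframe", O_WRONLY);

	if (fd < 0) {
		perror("open dropframe");
		return 1;
	}
	/* nfcsim_send() decrements this once per dropped frame */
	if (write(fd, "3", 1) != 1)
		perror("write dropframe");
	close(fd);
	return 0;
}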
// SPDX-License-Identifier: GPL-2.0
/* Driver for Microtek Scanmaker X6 USB scanner, and possibly others.
 *
 * (C) Copyright 2000 John Fremlin <vii@penguinpowered.com>
 * (C) Copyright 2000 Oliver Neukum <Oliver.Neukum@lrz.uni-muenchen.de>
 *
 * Parts shamelessly stolen from usb-storage and copyright by their
 * authors. Thanks to Matt Dharm for giving us permission!
* * This driver implements a SCSI host controller driver and a USB * device driver. To avoid confusion, all the USB related stuff is * prefixed by mts_usb_ and all the SCSI stuff by mts_scsi_. * * Microtek (www.microtek.com) did not release the specifications for * their USB protocol to us, so we had to reverse engineer them. We * don't know for which models they are valid. * * The X6 USB has three bulk endpoints, one output (0x1) down which * commands and outgoing data are sent, and two input: 0x82 from which * normal data is read from the scanner (in packets of maximum 32 * bytes) and from which the status byte is read, and 0x83 from which * the results of a scan (or preview) are read in up to 64 * 1024 byte * chunks by the Windows driver. We don't know how much it is possible * to read at a time from 0x83. * * It seems possible to read (with URB transfers) everything from 0x82 * in one go, without bothering to read in 32 byte chunks. * * There seems to be an optimisation of a further READ implicit if * you simply read from 0x83. * * Guessed protocol: * * Send raw SCSI command to EP 0x1 * * If there is data to receive: * If the command was READ datatype=image: * Read a lot of data from EP 0x83 * Else: * Read data from EP 0x82 * Else: * If there is data to transmit: * Write it to EP 0x1 * * Read status byte from EP 0x82 * * References: * * The SCSI command set for the scanner is available from * ftp://ftp.microtek.com/microtek/devpack/ * * Microtek NV sent us a more up to date version of the document. If * you want it, just send mail. * * Status: * * Untested with multiple scanners. * Untested on SMP. * Untested on a bigendian machine. * * History: * * 20000417 starting history * 20000417 fixed load oops * 20000417 fixed unload oops * 20000419 fixed READ IMAGE detection * 20000424 started conversion to use URBs * 20000502 handled short transfers as errors * 20000513 rename and organisation of functions (john) * 20000513 added IDs for all products supported by Windows driver (john) * 20000514 Rewrote mts_scsi_queuecommand to use URBs (john) * 20000514 Version 0.0.8j * 20000514 Fix reporting of non-existent devices to SCSI layer (john) * 20000514 Added MTS_DEBUG_INT (john) * 20000514 Changed "usb-microtek" to "microtek" for consistency (john) * 20000514 Stupid bug fixes (john) * 20000514 Version 0.0.9j * 20000515 Put transfer context and URB in mts_desc (john) * 20000515 Added prelim turn off debugging support (john) * 20000515 Version 0.0.10j * 20000515 Fixed up URB allocation (clear URB on alloc) (john) * 20000515 Version 0.0.11j * 20000516 Removed unnecessary spinlock in mts_transfer_context (john) * 20000516 Removed unnecessary up on instance lock in mts_remove_nolock (john) * 20000516 Implemented (badly) scsi_abort (john) * 20000516 Version 0.0.12j * 20000517 Hopefully removed mts_remove_nolock quasideadlock (john) * 20000517 Added mts_debug_dump to print ll USB info (john) * 20000518 Tweaks and documentation updates (john) * 20000518 Version 0.0.13j * 20000518 Cleaned up abort handling (john) * 20000523 Removed scsi_command and various scsi_..._resets (john) * 20000523 Added unlink URB on scsi_abort, now OHCI supports it (john) * 20000523 Fixed last tiresome compile warning (john) * 20000523 Version 0.0.14j (though version 0.1 has come out?) 
* 20000602 Added primitive reset * 20000602 Version 0.2.0 * 20000603 various cosmetic changes * 20000603 Version 0.2.1 * 20000620 minor cosmetic changes * 20000620 Version 0.2.2 * 20000822 Hopefully fixed deadlock in mts_remove_nolock() * 20000822 Fixed minor race in mts_transfer_cleanup() * 20000822 Fixed deadlock on submission error in queuecommand * 20000822 Version 0.2.3 * 20000913 Reduced module size if debugging is off * 20000913 Version 0.2.4 * 20010210 New abort logic * 20010210 Version 0.3.0 * 20010217 Merged scatter/gather * 20010218 Version 0.4.0 * 20010218 Cosmetic fixes * 20010218 Version 0.4.1 * 20010306 Abort while using scatter/gather * 20010306 Version 0.4.2 * 20010311 Remove all timeouts and tidy up generally (john) * 20010320 check return value of scsi_register() * 20010320 Version 0.4.3 * 20010408 Identify version on module load. * 20011003 Fix multiple requests */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/proc_fs.h> #include <linux/atomic.h> #include <linux/blkdev.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "microtek.h" #define DRIVER_AUTHOR "John Fremlin <vii@penguinpowered.com>, Oliver Neukum <Oliver.Neukum@lrz.uni-muenchen.de>" #define DRIVER_DESC "Microtek Scanmaker X6 USB scanner driver" /* Should we do debugging? */ //#define MTS_DO_DEBUG /* USB layer driver interface */ static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id); static void mts_usb_disconnect(struct usb_interface *intf); static const struct usb_device_id mts_usb_ids[]; static struct usb_driver mts_usb_driver = { .name = "microtekX6", .probe = mts_usb_probe, .disconnect = mts_usb_disconnect, .id_table = mts_usb_ids, }; /* Internal driver stuff */ #define MTS_VERSION "0.4.3" #define MTS_NAME "microtek usb (rev " MTS_VERSION "): " #define MTS_WARNING(x...) \ printk( KERN_WARNING MTS_NAME x ) #define MTS_ERROR(x...) \ printk( KERN_ERR MTS_NAME x ) #define MTS_INT_ERROR(x...) \ MTS_ERROR(x) #define MTS_MESSAGE(x...) \ printk( KERN_INFO MTS_NAME x ) #if defined MTS_DO_DEBUG #define MTS_DEBUG(x...) \ printk( KERN_DEBUG MTS_NAME x ) #define MTS_DEBUG_GOT_HERE() \ MTS_DEBUG("got to %s:%d (%s)\n", __FILE__, (int)__LINE__, __func__ ) #define MTS_DEBUG_INT() \ do { MTS_DEBUG_GOT_HERE(); \ MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ mts_debug_dump(context->instance);\ } while(0) #else #define MTS_NUL_STATEMENT do { } while(0) #define MTS_DEBUG(x...) 
MTS_NUL_STATEMENT #define MTS_DEBUG_GOT_HERE() MTS_NUL_STATEMENT #define MTS_DEBUG_INT() MTS_NUL_STATEMENT #endif #define MTS_INT_INIT()\ struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \ MTS_DEBUG_INT();\ #ifdef MTS_DO_DEBUG static inline void mts_debug_dump(struct mts_desc* desc) { MTS_DEBUG("desc at 0x%x: toggle = %02x%02x\n", (int)desc, (int)desc->usb_dev->toggle[1],(int)desc->usb_dev->toggle[0] ); MTS_DEBUG("ep_out=%x ep_response=%x ep_image=%x\n", usb_sndbulkpipe(desc->usb_dev,desc->ep_out), usb_rcvbulkpipe(desc->usb_dev,desc->ep_response), usb_rcvbulkpipe(desc->usb_dev,desc->ep_image) ); } static inline void mts_show_command(struct scsi_cmnd *srb) { char *what = NULL; switch (srb->cmnd[0]) { case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break; case REZERO_UNIT: what = "REZERO_UNIT"; break; case REQUEST_SENSE: what = "REQUEST_SENSE"; break; case FORMAT_UNIT: what = "FORMAT_UNIT"; break; case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break; case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break; case READ_6: what = "READ_6"; break; case WRITE_6: what = "WRITE_6"; break; case SEEK_6: what = "SEEK_6"; break; case READ_REVERSE: what = "READ_REVERSE"; break; case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break; case SPACE: what = "SPACE"; break; case INQUIRY: what = "INQUIRY"; break; case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break; case MODE_SELECT: what = "MODE_SELECT"; break; case RESERVE: what = "RESERVE"; break; case RELEASE: what = "RELEASE"; break; case COPY: what = "COPY"; break; case ERASE: what = "ERASE"; break; case MODE_SENSE: what = "MODE_SENSE"; break; case START_STOP: what = "START_STOP"; break; case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break; case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break; case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break; case SET_WINDOW: what = "SET_WINDOW"; break; case READ_CAPACITY: what = "READ_CAPACITY"; break; case READ_10: what = "READ_10"; break; case WRITE_10: what = "WRITE_10"; break; case SEEK_10: what = "SEEK_10"; break; case WRITE_VERIFY: what = "WRITE_VERIFY"; break; case VERIFY: what = "VERIFY"; break; case SEARCH_HIGH: what = "SEARCH_HIGH"; break; case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break; case SEARCH_LOW: what = "SEARCH_LOW"; break; case SET_LIMITS: what = "SET_LIMITS"; break; case READ_POSITION: what = "READ_POSITION"; break; case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break; case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break; case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break; case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break; case COMPARE: what = "COMPARE"; break; case COPY_VERIFY: what = "COPY_VERIFY"; break; case WRITE_BUFFER: what = "WRITE_BUFFER"; break; case READ_BUFFER: what = "READ_BUFFER"; break; case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break; case READ_LONG: what = "READ_LONG"; break; case WRITE_LONG: what = "WRITE_LONG"; break; case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break; case WRITE_SAME: what = "WRITE_SAME"; break; case READ_TOC: what = "READ_TOC"; break; case LOG_SELECT: what = "LOG_SELECT"; break; case LOG_SENSE: what = "LOG_SENSE"; break; case MODE_SELECT_10: what = "MODE_SELECT_10"; break; case MODE_SENSE_10: what = "MODE_SENSE_10"; break; case MOVE_MEDIUM: what = "MOVE_MEDIUM"; break; case READ_12: what = "READ_12"; break; case WRITE_12: what = "WRITE_12"; break; case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break; case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break; case 
SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break; case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break; case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break; case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break; case WRITE_LONG_2: what = "WRITE_LONG_2"; break; default: MTS_DEBUG("can't decode command\n"); goto out; break; } MTS_DEBUG( "Command %s (%d bytes)\n", what, srb->cmd_len); out: MTS_DEBUG( " %10ph\n", srb->cmnd); } #else static inline void mts_show_command(struct scsi_cmnd * dummy) { } static inline void mts_debug_dump(struct mts_desc* dummy) { } #endif static inline void mts_urb_abort(struct mts_desc* desc) { MTS_DEBUG_GOT_HERE(); mts_debug_dump(desc); usb_kill_urb( desc->urb ); } static int mts_sdev_init (struct scsi_device *s) { s->inquiry_len = 0x24; return 0; } static int mts_scsi_abort(struct scsi_cmnd *srb) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); MTS_DEBUG_GOT_HERE(); mts_urb_abort(desc); return FAILED; } static int mts_scsi_host_reset(struct scsi_cmnd *srb) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int result; MTS_DEBUG_GOT_HERE(); mts_debug_dump(desc); result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf); if (result == 0) { result = usb_reset_device(desc->usb_dev); usb_unlock_device(desc->usb_dev); } return result ? FAILED : SUCCESS; } static int mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb); static void mts_transfer_cleanup( struct urb *transfer ); static void mts_do_sg(struct urb * transfer); static inline void mts_int_submit_urb (struct urb* transfer, int pipe, void* data, unsigned length, usb_complete_t callback ) /* Interrupt context! */ /* Holding transfer->context->lock! */ { int res; MTS_INT_INIT(); usb_fill_bulk_urb(transfer, context->instance->usb_dev, pipe, data, length, callback, context ); res = usb_submit_urb( transfer, GFP_ATOMIC ); if ( unlikely(res) ) { MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res ); set_host_byte(context->srb, DID_ERROR); mts_transfer_cleanup(transfer); } } static void mts_transfer_cleanup( struct urb *transfer ) /* Interrupt context! */ { MTS_INT_INIT(); if ( likely(context->final_callback != NULL) ) context->final_callback(context->srb); } static void mts_transfer_done( struct urb *transfer ) { MTS_INT_INIT(); context->srb->result &= MTS_SCSI_ERR_MASK; context->srb->result |= (unsigned)(*context->scsi_status)<<1; mts_transfer_cleanup(transfer); } static void mts_get_status( struct urb *transfer ) /* Interrupt context! */ { MTS_INT_INIT(); mts_int_submit_urb(transfer, usb_rcvbulkpipe(context->instance->usb_dev, context->instance->ep_response), context->scsi_status, 1, mts_transfer_done ); } static void mts_data_done( struct urb* transfer ) /* Interrupt context! */ { int status = transfer->status; MTS_INT_INIT(); if ( context->data_length != transfer->actual_length ) { scsi_set_resid(context->srb, context->data_length - transfer->actual_length); } else if ( unlikely(status) ) { set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR)); } mts_get_status(transfer); } static void mts_command_done( struct urb *transfer ) /* Interrupt context! 
*/ { int status = transfer->status; MTS_INT_INIT(); if ( unlikely(status) ) { if (status == -ENOENT) { /* We are being killed */ MTS_DEBUG_GOT_HERE(); set_host_byte(context->srb, DID_ABORT); } else { /* A genuine error has occurred */ MTS_DEBUG_GOT_HERE(); set_host_byte(context->srb, DID_ERROR); } mts_transfer_cleanup(transfer); return; } if (context->srb->cmnd[0] == REQUEST_SENSE) { mts_int_submit_urb(transfer, context->data_pipe, context->srb->sense_buffer, context->data_length, mts_data_done); } else { if ( context->data ) { mts_int_submit_urb(transfer, context->data_pipe, context->data, context->data_length, scsi_sg_count(context->srb) > 1 ? mts_do_sg : mts_data_done); } else { mts_get_status(transfer); } } } static void mts_do_sg (struct urb* transfer) { int status = transfer->status; MTS_INT_INIT(); MTS_DEBUG("Processing fragment %d of %d\n", context->fragment, scsi_sg_count(context->srb)); if (unlikely(status)) { set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR)); mts_transfer_cleanup(transfer); } context->curr_sg = sg_next(context->curr_sg); mts_int_submit_urb(transfer, context->data_pipe, sg_virt(context->curr_sg), context->curr_sg->length, sg_is_last(context->curr_sg) ? mts_data_done : mts_do_sg); } static const u8 mts_read_image_sig[] = { 0x28, 00, 00, 00 }; static const u8 mts_read_image_sig_len = 4; static const unsigned char mts_direction[256/8] = { 0x28, 0x81, 0x14, 0x14, 0x20, 0x01, 0x90, 0x77, 0x0C, 0x20, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; #define MTS_DIRECTION_IS_IN(x) ((mts_direction[x>>3] >> (x & 7)) & 1) static void mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) { int pipe; MTS_DEBUG_GOT_HERE(); desc->context.instance = desc; desc->context.srb = srb; if (!scsi_bufflen(srb)) { desc->context.data = NULL; desc->context.data_length = 0; return; } else { desc->context.curr_sg = scsi_sglist(srb); desc->context.data = sg_virt(desc->context.curr_sg); desc->context.data_length = desc->context.curr_sg->length; } /* can't rely on srb->sc_data_direction */ /* Brutally ripped from usb-storage */ if ( !memcmp( srb->cmnd, mts_read_image_sig, mts_read_image_sig_len ) ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_image); MTS_DEBUG( "transferring from desc->ep_image == %d\n", (int)desc->ep_image ); } else if ( MTS_DIRECTION_IS_IN(srb->cmnd[0]) ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_response); MTS_DEBUG( "transferring from desc->ep_response == %d\n", (int)desc->ep_response); } else { MTS_DEBUG("transferring to desc->ep_out == %d\n", (int)desc->ep_out); pipe = usb_sndbulkpipe(desc->usb_dev,desc->ep_out); } desc->context.data_pipe = pipe; } static int mts_scsi_queuecommand_lck(struct scsi_cmnd *srb) { mts_scsi_cmnd_callback callback = scsi_done; struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int res; MTS_DEBUG_GOT_HERE(); mts_show_command(srb); mts_debug_dump(desc); if ( srb->device->lun || srb->device->id || srb->device->channel ) { MTS_DEBUG("Command to LUN=%d ID=%d CHANNEL=%d from SCSI layer\n",(int)srb->device->lun,(int)srb->device->id, (int)srb->device->channel ); MTS_DEBUG("this device doesn't exist\n"); set_host_byte(srb, DID_BAD_TARGET); if(likely(callback != NULL)) callback(srb); goto out; } usb_fill_bulk_urb(desc->urb, desc->usb_dev, usb_sndbulkpipe(desc->usb_dev,desc->ep_out), srb->cmnd, srb->cmd_len, mts_command_done, &desc->context ); mts_build_transfer_context( srb, desc ); 
	desc->context.final_callback = callback;

	/* here we need ATOMIC as we are called with the iolock */
	res = usb_submit_urb(desc->urb, GFP_ATOMIC);
	if (unlikely(res)) {
		MTS_ERROR("error %d submitting URB\n", (int)res);
		set_host_byte(srb, DID_ERROR);
		if (likely(callback != NULL))
			callback(srb);
	}
out:
	return 0;
}

static DEF_SCSI_QCMD(mts_scsi_queuecommand)

static const struct scsi_host_template mts_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "microtekX6",
	.proc_name		= "microtekX6",
	.queuecommand		= mts_scsi_queuecommand,
	.eh_abort_handler	= mts_scsi_abort,
	.eh_host_reset_handler	= mts_scsi_host_reset,
	.sg_tablesize		= SG_ALL,
	.can_queue		= 1,
	.this_id		= -1,
	.emulated		= 1,
	.dma_alignment		= 511,
	.sdev_init		= mts_sdev_init,
	.max_sectors		= 256, /* 128 K */
};

/* The entries of microtek_table must correspond, line-by-line,
   to the entries of mts_supported_products[]. */
static const struct usb_device_id mts_usb_ids[] = {
	{ USB_DEVICE(0x4ce, 0x0300) },
	{ USB_DEVICE(0x5da, 0x0094) },
	{ USB_DEVICE(0x5da, 0x0099) },
	{ USB_DEVICE(0x5da, 0x009a) },
	{ USB_DEVICE(0x5da, 0x00a0) },
	{ USB_DEVICE(0x5da, 0x00a3) },
	{ USB_DEVICE(0x5da, 0x80a3) },
	{ USB_DEVICE(0x5da, 0x80ac) },
	{ USB_DEVICE(0x5da, 0x00b6) },
	{ } /* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, mts_usb_ids);

static int mts_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int i;
	int ep_out = -1;
	int ep_in_set[3]; /* this will break if we have more than three
			     endpoints which is why we check */
	int *ep_in_current = ep_in_set;
	int err_retval = -ENOMEM;

	struct mts_desc *new_desc;
	struct usb_device *dev = interface_to_usbdev(intf);

	/* the current altsetting on the interface we're probing */
	struct usb_host_interface *altsetting;

	MTS_DEBUG_GOT_HERE();
	MTS_DEBUG("usb-device descriptor at %x\n", (int)dev);

	MTS_DEBUG("product id = 0x%x, vendor id = 0x%x\n",
		  le16_to_cpu(dev->descriptor.idProduct),
		  le16_to_cpu(dev->descriptor.idVendor));

	MTS_DEBUG_GOT_HERE();

	/* the current altsetting on the interface we're probing */
	altsetting = intf->cur_altsetting;

	/* Check if the config is sane */

	if (altsetting->desc.bNumEndpoints != MTS_EP_TOTAL) {
		MTS_WARNING("expecting %d got %d endpoints! Bailing out.\n",
			    (int)MTS_EP_TOTAL,
			    (int)altsetting->desc.bNumEndpoints);
		return -ENODEV;
	}

	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
		if ((altsetting->endpoint[i].desc.bmAttributes &
		     USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_BULK) {
			MTS_WARNING("can only deal with bulk endpoints; endpoint %d is not bulk.\n",
				    (int)altsetting->endpoint[i].desc.bEndpointAddress);
		} else {
			if (altsetting->endpoint[i].desc.bEndpointAddress &
			    USB_DIR_IN)
				*ep_in_current++ =
					altsetting->endpoint[i].desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK;
			else {
				if (ep_out != -1) {
					MTS_WARNING("can only deal with one output endpoint. Bailing out.");
					return -ENODEV;
				}

				ep_out = altsetting->endpoint[i].desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK;
			}
		}
	}

	if (ep_in_current != &ep_in_set[2]) {
		MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
		return -ENODEV;
	}

	if (ep_out == -1) {
		MTS_WARNING("couldn't find an output bulk endpoint.
Bailing out.\n" ); return -ENODEV; } new_desc = kzalloc(sizeof(struct mts_desc), GFP_KERNEL); if (!new_desc) goto out; new_desc->urb = usb_alloc_urb(0, GFP_KERNEL); if (!new_desc->urb) goto out_kfree; new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL); if (!new_desc->context.scsi_status) goto out_free_urb; new_desc->usb_dev = dev; new_desc->usb_intf = intf; /* endpoints */ new_desc->ep_out = ep_out; new_desc->ep_response = ep_in_set[0]; new_desc->ep_image = ep_in_set[1]; if ( new_desc->ep_out != MTS_EP_OUT ) MTS_WARNING( "will this work? Command EP is not usually %d\n", (int)new_desc->ep_out ); if ( new_desc->ep_response != MTS_EP_RESPONSE ) MTS_WARNING( "will this work? Response EP is not usually %d\n", (int)new_desc->ep_response ); if ( new_desc->ep_image != MTS_EP_IMAGE ) MTS_WARNING( "will this work? Image data EP is not usually %d\n", (int)new_desc->ep_image ); new_desc->host = scsi_host_alloc(&mts_scsi_host_template, sizeof(new_desc)); if (!new_desc->host) goto out_kfree2; new_desc->host->hostdata[0] = (unsigned long)new_desc; if (scsi_add_host(new_desc->host, &dev->dev)) { err_retval = -EIO; goto out_host_put; } scsi_scan_host(new_desc->host); usb_set_intfdata(intf, new_desc); return 0; out_host_put: scsi_host_put(new_desc->host); out_kfree2: kfree(new_desc->context.scsi_status); out_free_urb: usb_free_urb(new_desc->urb); out_kfree: kfree(new_desc); out: return err_retval; } static void mts_usb_disconnect (struct usb_interface *intf) { struct mts_desc *desc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); usb_kill_urb(desc->urb); scsi_remove_host(desc->host); scsi_host_put(desc->host); usb_free_urb(desc->urb); kfree(desc->context.scsi_status); kfree(desc); } module_usb_driver(mts_usb_driver); MODULE_AUTHOR( DRIVER_AUTHOR ); MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_LICENSE("GPL");
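The "guessed protocol" in the header comment boils down to three bulk phases: the raw SCSI command out on EP 0x01, an optional data phase (EP 0x83 for image reads, EP 0x82 for other reads, EP 0x01 for writes), then a single status byte from EP 0x82. A userspace sketch of that sequence, assuming libusb-1.0 and the usual endpoint layout; it is derived only from the comment above, not from the driver internals:

#include <libusb-1.0/libusb.h>

/* One command/data/status round trip as described in the driver header. */
static int x6_exchange(libusb_device_handle *h,
		       unsigned char *cmd, int cmd_len,
		       unsigned char *data, int data_len,
		       int data_in, int image_read)
{
	unsigned char status;
	int done, rc;

	/* 1: raw SCSI command to the bulk-out endpoint 0x01 */
	rc = libusb_bulk_transfer(h, 0x01, cmd, cmd_len, &done, 1000);
	if (rc)
		return rc;

	/* 2: optional data phase */
	if (data_len) {
		unsigned char ep = data_in ? (image_read ? 0x83 : 0x82)
					   : 0x01;

		rc = libusb_bulk_transfer(h, ep, data, data_len, &done, 5000);
		if (rc)
			return rc;
	}

	/* 3: the single status byte always comes back on 0x82 */
	return libusb_bulk_transfer(h, 0x82, &status, 1, &done, 1000);
}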
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Integrity Measurement Architecture
 *
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Reiner Sailer <sailer@watson.ibm.com>
 * Serge Hallyn <serue@us.ibm.com>
 * Kylene Hall <kylene@us.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_main.c
 *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
 *	and ima_file_check.
*/ #include <linux/module.h> #include <linux/file.h> #include <linux/binfmts.h> #include <linux/kernel_read_file.h> #include <linux/mount.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/xattr.h> #include <linux/ima.h> #include <linux/fs.h> #include <linux/iversion.h> #include <linux/evm.h> #include "ima.h" #ifdef CONFIG_IMA_APPRAISE int ima_appraise = IMA_APPRAISE_ENFORCE; #else int ima_appraise; #endif int __ro_after_init ima_hash_algo = HASH_ALGO_SHA1; static int hash_setup_done; static struct notifier_block ima_lsm_policy_notifier = { .notifier_call = ima_lsm_policy_change, }; static int __init hash_setup(char *str) { struct ima_template_desc *template_desc = ima_template_desc_current(); int i; if (hash_setup_done) return 1; if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { if (strncmp(str, "sha1", 4) == 0) { ima_hash_algo = HASH_ALGO_SHA1; } else if (strncmp(str, "md5", 3) == 0) { ima_hash_algo = HASH_ALGO_MD5; } else { pr_err("invalid hash algorithm \"%s\" for template \"%s\"", str, IMA_TEMPLATE_IMA_NAME); return 1; } goto out; } i = match_string(hash_algo_name, HASH_ALGO__LAST, str); if (i < 0) { pr_err("invalid hash algorithm \"%s\"", str); return 1; } ima_hash_algo = i; out: hash_setup_done = 1; return 1; } __setup("ima_hash=", hash_setup); enum hash_algo ima_get_current_hash_algo(void) { return ima_hash_algo; } /* Prevent mmap'ing a file execute that is already mmap'ed write */ static int mmap_violation_check(enum ima_hooks func, struct file *file, char **pathbuf, const char **pathname, char *filename) { struct inode *inode; int rc = 0; if ((func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) && mapping_writably_mapped(file->f_mapping)) { rc = -ETXTBSY; inode = file_inode(file); if (!*pathbuf) /* ima_rdwr_violation possibly pre-fetched */ *pathname = ima_d_path(&file->f_path, pathbuf, filename); integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, *pathname, "mmap_file", "mmapped_writers", rc, 0); } return rc; } /* * ima_rdwr_violation_check * * Only invalidate the PCR for measured files: * - Opening a file for write when already open for read, * results in a time of measure, time of use (ToMToU) error. * - Opening a file for read when already open for write, * could result in a file measurement error. 
* */ static void ima_rdwr_violation_check(struct file *file, struct ima_iint_cache *iint, int must_measure, char **pathbuf, const char **pathname, char *filename) { struct inode *inode = file_inode(file); fmode_t mode = file->f_mode; bool send_tomtou = false, send_writers = false; if (mode & FMODE_WRITE) { if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { if (!iint) iint = ima_iint_find(inode); /* IMA_MEASURE is set from reader side */ if (iint && test_bit(IMA_MUST_MEASURE, &iint->atomic_flags)) send_tomtou = true; } } else { if (must_measure) set_bit(IMA_MUST_MEASURE, &iint->atomic_flags); if (inode_is_open_for_write(inode) && must_measure) send_writers = true; } if (!send_tomtou && !send_writers) return; *pathname = ima_d_path(&file->f_path, pathbuf, filename); if (send_tomtou) ima_add_violation(file, *pathname, iint, "invalid_pcr", "ToMToU"); if (send_writers) ima_add_violation(file, *pathname, iint, "invalid_pcr", "open_writers"); } static void ima_check_last_writer(struct ima_iint_cache *iint, struct inode *inode, struct file *file) { fmode_t mode = file->f_mode; bool update; if (!(mode & FMODE_WRITE)) return; mutex_lock(&iint->mutex); if (atomic_read(&inode->i_writecount) == 1) { struct kstat stat; update = test_and_clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); if ((iint->flags & IMA_NEW_FILE) || vfs_getattr_nosec(&file->f_path, &stat, STATX_CHANGE_COOKIE, AT_STATX_SYNC_AS_STAT) || !(stat.result_mask & STATX_CHANGE_COOKIE) || stat.change_cookie != iint->real_inode.version) { iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); iint->measured_pcrs = 0; if (update) ima_update_xattr(iint, file); } } mutex_unlock(&iint->mutex); } /** * ima_file_free - called on __fput() * @file: pointer to file structure being freed * * Flag files that changed, based on i_version */ static void ima_file_free(struct file *file) { struct inode *inode = file_inode(file); struct ima_iint_cache *iint; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find(inode); if (!iint) return; ima_check_last_writer(iint, inode, file); } static int process_measurement(struct file *file, const struct cred *cred, struct lsm_prop *prop, char *buf, loff_t size, int mask, enum ima_hooks func) { struct inode *real_inode, *inode = file_inode(file); struct ima_iint_cache *iint = NULL; struct ima_template_desc *template_desc = NULL; struct inode *metadata_inode; char *pathbuf = NULL; char filename[NAME_MAX]; const char *pathname = NULL; int rc = 0, action, must_appraise = 0; int pcr = CONFIG_IMA_MEASURE_PCR_IDX; struct evm_ima_xattr_data *xattr_value = NULL; struct modsig *modsig = NULL; int xattr_len = 0; bool violation_check; enum hash_algo hash_algo; unsigned int allowed_algos = 0; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return 0; /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action * bitmask based on the appraise/audit/measurement policy. * Included is the appraise submask. */ action = ima_get_action(file_mnt_idmap(file), inode, cred, prop, mask, func, &pcr, &template_desc, NULL, &allowed_algos); violation_check = ((func == FILE_CHECK || func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) && (ima_policy_flag & IMA_MEASURE)); if (!action && !violation_check) return 0; must_appraise = action & IMA_APPRAISE; /* Is the appraise rule hook specific? 
*/ if (action & IMA_FILE_APPRAISE) func = FILE_CHECK; inode_lock(inode); if (action) { iint = ima_inode_get(inode); if (!iint) rc = -ENOMEM; } if (!rc && violation_check) ima_rdwr_violation_check(file, iint, action & IMA_MEASURE, &pathbuf, &pathname, filename); inode_unlock(inode); if (rc) goto out; if (!action) goto out; mutex_lock(&iint->mutex); if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags)) /* reset appraisal flags if ima_inode_post_setattr was called */ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | IMA_NONACTION_FLAGS); /* * Re-evaulate the file if either the xattr has changed or the * kernel has no way of detecting file change on the filesystem. * (Limited to privileged mounted filesystems.) */ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags) || ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) && !(inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) && !(action & IMA_FAIL_UNVERIFIABLE_SIGS))) { iint->flags &= ~IMA_DONE_MASK; iint->measured_pcrs = 0; } /* * On stacked filesystems, detect and re-evaluate file data and * metadata changes. */ real_inode = d_real_inode(file_dentry(file)); if (real_inode != inode && (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) { if (!IS_I_VERSION(real_inode) || integrity_inode_attrs_changed(&iint->real_inode, real_inode)) { iint->flags &= ~IMA_DONE_MASK; iint->measured_pcrs = 0; } /* * Reset the EVM status when metadata changed. */ metadata_inode = d_inode(d_real(file_dentry(file), D_REAL_METADATA)); if (evm_metadata_changed(inode, metadata_inode)) iint->flags &= ~(IMA_APPRAISED | IMA_APPRAISED_SUBMASK); } /* Determine if already appraised/measured based on bitmask * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED, * IMA_AUDIT, IMA_AUDITED) */ iint->flags |= action; action &= IMA_DO_MASK; action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1); /* If target pcr is already measured, unset IMA_MEASURE action */ if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr))) action ^= IMA_MEASURE; /* HASH sets the digital signature and update flags, nothing else */ if ((action & IMA_HASH) && !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) { xattr_len = ima_read_xattr(file_dentry(file), &xattr_value, xattr_len); if ((xattr_value && xattr_len > 2) && (xattr_value->type == EVM_IMA_XATTR_DIGSIG)) set_bit(IMA_DIGSIG, &iint->atomic_flags); iint->flags |= IMA_HASHED; action ^= IMA_HASH; set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); } /* Nothing to do, just return existing appraised status */ if (!action) { if (must_appraise) { rc = mmap_violation_check(func, file, &pathbuf, &pathname, filename); if (!rc) rc = ima_get_cache_status(iint, func); } goto out_locked; } if ((action & IMA_APPRAISE_SUBMASK) || strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) { /* read 'security.ima' */ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value, xattr_len); /* * Read the appended modsig if allowed by the policy, and allow * an additional measurement list entry, if needed, based on the * template format and whether the file was already measured. 
*/ if (iint->flags & IMA_MODSIG_ALLOWED) { rc = ima_read_modsig(func, buf, size, &modsig); if (!rc && ima_template_has_modsig(template_desc) && iint->flags & IMA_MEASURED) action |= IMA_MEASURE; } } hash_algo = ima_get_hash_algo(xattr_value, xattr_len); rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig); if (rc != 0 && rc != -EBADF && rc != -EINVAL) goto out_locked; if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */ pathname = ima_d_path(&file->f_path, &pathbuf, filename); if (action & IMA_MEASURE) ima_store_measurement(iint, file, pathname, xattr_value, xattr_len, modsig, pcr, template_desc); if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) { rc = ima_check_blacklist(iint, modsig, pcr); if (rc != -EPERM) { inode_lock(inode); rc = ima_appraise_measurement(func, iint, file, pathname, xattr_value, xattr_len, modsig); inode_unlock(inode); } if (!rc) rc = mmap_violation_check(func, file, &pathbuf, &pathname, filename); } if (action & IMA_AUDIT) ima_audit_measurement(iint, pathname); if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO)) rc = 0; /* Ensure the digest was generated using an allowed algorithm */ if (rc == 0 && must_appraise && allowed_algos != 0 && (allowed_algos & (1U << hash_algo)) == 0) { rc = -EACCES; integrity_audit_msg(AUDIT_INTEGRITY_DATA, file_inode(file), pathname, "collect_data", "denied-hash-algorithm", rc, 0); } out_locked: if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) && !(iint->flags & IMA_NEW_FILE)) rc = -EACCES; mutex_unlock(&iint->mutex); kfree(xattr_value); ima_free_modsig(modsig); out: if (pathbuf) __putname(pathbuf); if (must_appraise) { if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE)) return -EACCES; if (file->f_mode & FMODE_WRITE) set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); } return 0; } /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) * @reqprot: protection requested by the application * @prot: protection that will be applied by the kernel * @flags: operational flags * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_file_mmap(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) { struct lsm_prop prop; int ret; if (!file) return 0; security_current_getlsmprop_subj(&prop); if (reqprot & PROT_EXEC) { ret = process_measurement(file, current_cred(), &prop, NULL, 0, MAY_EXEC, MMAP_CHECK_REQPROT); if (ret) return ret; } if (prot & PROT_EXEC) return process_measurement(file, current_cred(), &prop, NULL, 0, MAY_EXEC, MMAP_CHECK); return 0; } /** * ima_file_mprotect - based on policy, limit mprotect change * @vma: vm_area_struct protection is set to * @reqprot: protection requested by the application * @prot: protection that will be applied by the kernel * * Files can be mmap'ed read/write and later changed to execute to circumvent * IMA's mmap appraisal policy rules. Due to locking issues (mmap semaphore * would be taken before i_mutex), files cannot be measured or appraised at * this point. Eliminate this integrity gap by denying the mprotect * PROT_EXEC change, if an mmap appraise policy rule exists. * * On mprotect change success, return 0. On failure, return -EACCES. 
*/ static int ima_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { struct ima_template_desc *template = NULL; struct file *file; char filename[NAME_MAX]; char *pathbuf = NULL; const char *pathname = NULL; struct inode *inode; struct lsm_prop prop; int result = 0; int action; int pcr; /* Is mprotect making an mmap'ed file executable? */ if (!(ima_policy_flag & IMA_APPRAISE) || !vma->vm_file || !(prot & PROT_EXEC) || (vma->vm_flags & VM_EXEC)) return 0; security_current_getlsmprop_subj(&prop); inode = file_inode(vma->vm_file); action = ima_get_action(file_mnt_idmap(vma->vm_file), inode, current_cred(), &prop, MAY_EXEC, MMAP_CHECK, &pcr, &template, NULL, NULL); action |= ima_get_action(file_mnt_idmap(vma->vm_file), inode, current_cred(), &prop, MAY_EXEC, MMAP_CHECK_REQPROT, &pcr, &template, NULL, NULL); /* Is the mmap'ed file in policy? */ if (!(action & (IMA_MEASURE | IMA_APPRAISE_SUBMASK))) return 0; if (action & IMA_APPRAISE_SUBMASK) result = -EPERM; file = vma->vm_file; pathname = ima_d_path(&file->f_path, &pathbuf, filename); integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, pathname, "collect_data", "failed-mprotect", result, 0); if (pathbuf) __putname(pathbuf); return result; } /** * ima_bprm_check - based on policy, collect/store measurement. * @bprm: contains the linux_binprm structure * * The OS protects against an executable file, already open for write, * from being executed in deny_write_access() and an executable file, * already open for execute, from being modified in get_write_access(). * So we can be certain that what we verify and measure here is actually * what is being executed. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_bprm_check(struct linux_binprm *bprm) { int ret; struct lsm_prop prop; security_current_getlsmprop_subj(&prop); ret = process_measurement(bprm->file, current_cred(), &prop, NULL, 0, MAY_EXEC, BPRM_CHECK); if (ret) return ret; security_cred_getlsmprop(bprm->cred, &prop); return process_measurement(bprm->file, bprm->cred, &prop, NULL, 0, MAY_EXEC, CREDS_CHECK); } /** * ima_bprm_creds_for_exec - collect/store/appraise measurement. * @bprm: contains the linux_binprm structure * * Based on the IMA policy and the execveat(2) AT_EXECVE_CHECK flag, measure * and appraise the integrity of a file to be executed by script interpreters. * Unlike any of the other LSM hooks where the kernel enforces file integrity, * enforcing file integrity is left up to the discretion of the script * interpreter (userspace). * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_bprm_creds_for_exec(struct linux_binprm *bprm) { /* * As security_bprm_check() is called multiple times, both * the script and the shebang interpreter are measured, appraised, * and audited. Limit usage of this LSM hook to just measuring, * appraising, and auditing the indirect script execution * (e.g. ./sh example.sh). */ if (!bprm->is_check) return 0; return ima_bprm_check(bprm); } /** * ima_file_check - based on policy, collect/store measurement. * @file: pointer to the file to be measured * @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND * * Measure files based on the ima_must_measure() policy decision. * * On success return 0. 
On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_file_check(struct file *file, int mask) { struct lsm_prop prop; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, NULL, 0, mask & (MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND), FILE_CHECK); } static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf, size_t buf_size) { struct ima_iint_cache *iint = NULL, tmp_iint; int rc, hash_algo; if (ima_policy_flag) { iint = ima_iint_find(inode); if (iint) mutex_lock(&iint->mutex); } if ((!iint || !(iint->flags & IMA_COLLECTED)) && file) { if (iint) mutex_unlock(&iint->mutex); memset(&tmp_iint, 0, sizeof(tmp_iint)); mutex_init(&tmp_iint.mutex); rc = ima_collect_measurement(&tmp_iint, file, NULL, 0, ima_hash_algo, NULL); if (rc < 0) { /* ima_hash could be allocated in case of failure. */ if (rc != -ENOMEM) kfree(tmp_iint.ima_hash); return -EOPNOTSUPP; } iint = &tmp_iint; mutex_lock(&iint->mutex); } if (!iint) return -EOPNOTSUPP; /* * ima_file_hash can be called when ima_collect_measurement has not yet * been called, so we might not always have a hash. */ if (!iint->ima_hash || !(iint->flags & IMA_COLLECTED)) { mutex_unlock(&iint->mutex); return -EOPNOTSUPP; } if (buf) { size_t copied_size; copied_size = min_t(size_t, iint->ima_hash->length, buf_size); memcpy(buf, iint->ima_hash->digest, copied_size); } hash_algo = iint->ima_hash->algo; mutex_unlock(&iint->mutex); if (iint == &tmp_iint) kfree(iint->ima_hash); return hash_algo; } /** * ima_file_hash - return a measurement of the file * @file: pointer to the file * @buf: buffer in which to store the hash * @buf_size: length of the buffer * * On success, return the hash algorithm (as defined in the enum hash_algo). * If buf is not NULL, this function also outputs the hash into buf. * If the hash is larger than buf_size, then only buf_size bytes will be copied. * It generally just makes sense to pass a buffer capable of holding the largest * possible hash: IMA_MAX_DIGEST_SIZE. * The file hash returned is based on the entire file, including the appended * signature. * * If the measurement cannot be performed, return -EOPNOTSUPP. * If the parameters are incorrect, return -EINVAL. */ int ima_file_hash(struct file *file, char *buf, size_t buf_size) { if (!file) return -EINVAL; return __ima_inode_hash(file_inode(file), file, buf, buf_size); } EXPORT_SYMBOL_GPL(ima_file_hash); /** * ima_inode_hash - return the stored measurement if the inode has been hashed * and is in the iint cache. * @inode: pointer to the inode * @buf: buffer in which to store the hash * @buf_size: length of the buffer * * On success, return the hash algorithm (as defined in the enum hash_algo). * If buf is not NULL, this function also outputs the hash into buf. * If the hash is larger than buf_size, then only buf_size bytes will be copied. * It generally just makes sense to pass a buffer capable of holding the largest * possible hash: IMA_MAX_DIGEST_SIZE. * The hash returned is based on the entire contents, including the appended * signature. * * If IMA is disabled or if no measurement is available, return -EOPNOTSUPP. * If the parameters are incorrect, return -EINVAL. 
*/ int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) { if (!inode) return -EINVAL; return __ima_inode_hash(inode, NULL, buf, buf_size); } EXPORT_SYMBOL_GPL(ima_inode_hash); /** * ima_post_create_tmpfile - mark newly created tmpfile as new * @idmap: idmap of the mount the inode was found from * @inode: inode of the newly created tmpfile * * No measuring, appraising or auditing of newly created tmpfiles is needed. * Skip calling process_measurement(), but indicate which newly created * tmpfiles are in policy. */ static void ima_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode) { struct ima_iint_cache *iint; int must_appraise; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return; must_appraise = ima_must_appraise(idmap, inode, MAY_ACCESS, FILE_CHECK); if (!must_appraise) return; /* Nothing to do if we can't allocate memory */ iint = ima_inode_get(inode); if (!iint) return; /* needed for writing the security xattrs */ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); iint->ima_file_status = INTEGRITY_PASS; } /** * ima_post_path_mknod - mark as a new inode * @idmap: idmap of the mount the inode was found from * @dentry: newly created dentry * * Mark files created via the mknodat syscall as new, so that the * file data can be written later. */ static void ima_post_path_mknod(struct mnt_idmap *idmap, struct dentry *dentry) { struct ima_iint_cache *iint; struct inode *inode = dentry->d_inode; int must_appraise; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return; must_appraise = ima_must_appraise(idmap, inode, MAY_ACCESS, FILE_CHECK); if (!must_appraise) return; /* Nothing to do if we can't allocate memory */ iint = ima_inode_get(inode); if (!iint) return; /* needed for re-opening empty files */ iint->flags |= IMA_NEW_FILE; } /** * ima_read_file - pre-measure/appraise hook decision based on policy * @file: pointer to the file to be measured/appraised/audited * @read_id: caller identifier * @contents: whether a subsequent call will be made to ima_post_read_file() * * Permit reading a file based on policy. The policy rules are written * in terms of the policy identifier. Appraising the integrity of * a file requires a file descriptor. * * For permission return 0, otherwise return -EACCES. */ static int ima_read_file(struct file *file, enum kernel_read_file_id read_id, bool contents) { enum ima_hooks func; struct lsm_prop prop; /* * Do devices using pre-allocated memory run the risk of the * firmware being accessible to the device prior to the completion * of IMA's signature verification any more than when using two * buffers? It may be desirable to include the buffer address * in this API and walk all the dma_map_single() mappings to check. */ /* * There will be a call made to ima_post_read_file() with * a filled buffer, so we don't need to perform an extra * read early here. */ if (contents) return 0; /* Read entire file for all partial reads. 
*/ func = read_idmap[read_id] ?: FILE_CHECK; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, NULL, 0, MAY_READ, func); } const int read_idmap[READING_MAX_ID] = { [READING_FIRMWARE] = FIRMWARE_CHECK, [READING_MODULE] = MODULE_CHECK, [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK, [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK, [READING_POLICY] = POLICY_CHECK }; /** * ima_post_read_file - in memory collect/appraise/audit measurement * @file: pointer to the file to be measured/appraised/audited * @buf: pointer to in memory file contents * @size: size of in memory file contents * @read_id: caller identifier * * Measure/appraise/audit in memory file based on policy. Policy rules * are written in terms of a policy identifier. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id read_id) { enum ima_hooks func; struct lsm_prop prop; /* permit signed certs */ if (!file && read_id == READING_X509_CERTIFICATE) return 0; if (!file || !buf || size == 0) { /* should never happen */ if (ima_appraise & IMA_APPRAISE_ENFORCE) return -EACCES; return 0; } func = read_idmap[read_id] ?: FILE_CHECK; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, buf, size, MAY_READ, func); } /** * ima_load_data - appraise decision based on policy * @id: kernel load data caller identifier * @contents: whether the full contents will be available in a later * call to ima_post_load_data(). * * Callers of this LSM hook cannot measure, appraise, or audit the * data provided by userspace. Enforce policy rules requiring a file * signature (e.g. kexec'ed kernel image). * * For permission return 0, otherwise return -EACCES. */ static int ima_load_data(enum kernel_load_data_id id, bool contents) { bool ima_enforce, sig_enforce; ima_enforce = (ima_appraise & IMA_APPRAISE_ENFORCE) == IMA_APPRAISE_ENFORCE; switch (id) { case LOADING_KEXEC_IMAGE: if (IS_ENABLED(CONFIG_KEXEC_SIG) && arch_ima_get_secureboot()) { pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n"); return -EACCES; } if (ima_enforce && (ima_appraise & IMA_APPRAISE_KEXEC)) { pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } break; case LOADING_FIRMWARE: if (ima_enforce && (ima_appraise & IMA_APPRAISE_FIRMWARE) && !contents) { pr_err("Prevent firmware sysfs fallback loading.\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } break; case LOADING_MODULE: sig_enforce = is_module_sig_enforced(); if (ima_enforce && (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES))) { pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } break; default: break; } return 0; } /** * ima_post_load_data - appraise decision based on policy * @buf: pointer to in memory file contents * @size: size of in memory file contents * @load_id: kernel load data caller identifier * @description: @load_id-specific description of contents * * Measure/appraise/audit in memory buffer based on policy. Policy rules * are written in terms of a policy identifier. * * On success return 0. 
On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_post_load_data(char *buf, loff_t size, enum kernel_load_data_id load_id, char *description) { if (load_id == LOADING_FIRMWARE) { if ((ima_appraise & IMA_APPRAISE_FIRMWARE) && (ima_appraise & IMA_APPRAISE_ENFORCE)) { pr_err("Prevent firmware loading_store.\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } return 0; } /* * Measure the init_module syscall buffer containing the ELF image. */ if (load_id == LOADING_MODULE) ima_measure_critical_data("modules", "init_module", buf, size, true, NULL, 0); return 0; } /** * process_buffer_measurement - Measure the buffer or the buffer data hash * @idmap: idmap of the mount the inode was found from * @inode: inode associated with the object being measured (NULL for KEY_CHECK) * @buf: pointer to the buffer that needs to be added to the log. * @size: size of buffer (in bytes). * @eventname: event name to be used for the buffer entry. * @func: IMA hook * @pcr: pcr to extend the measurement * @func_data: func specific data, may be NULL * @buf_hash: measure buffer data hash * @digest: buffer digest will be written to * @digest_len: buffer length * * Based on policy, either the buffer data or buffer data hash is measured. * * Return: 0 if the buffer has been successfully measured, 1 if the digest * has been written to the passed location but not added to a measurement entry, * a negative value otherwise. */ int process_buffer_measurement(struct mnt_idmap *idmap, struct inode *inode, const void *buf, int size, const char *eventname, enum ima_hooks func, int pcr, const char *func_data, bool buf_hash, u8 *digest, size_t digest_len) { int ret = 0; const char *audit_cause = "ENOMEM"; struct ima_template_entry *entry = NULL; struct ima_iint_cache iint = {}; struct ima_event_data event_data = {.iint = &iint, .filename = eventname, .buf = buf, .buf_len = size}; struct ima_template_desc *template; struct ima_max_digest_data hash; struct ima_digest_data *hash_hdr = container_of(&hash.hdr, struct ima_digest_data, hdr); char digest_hash[IMA_MAX_DIGEST_SIZE]; int digest_hash_len = hash_digest_size[ima_hash_algo]; int violation = 0; int action = 0; struct lsm_prop prop; if (digest && digest_len < digest_hash_len) return -EINVAL; if (!ima_policy_flag && !digest) return -ENOENT; template = ima_template_desc_buf(); if (!template) { ret = -EINVAL; audit_cause = "ima_template_desc_buf"; goto out; } /* * Both LSM hooks and auxiliary based buffer measurements are * based on policy. To avoid code duplication, differentiate * between the LSM hooks and auxiliary buffer measurements, * retrieving the policy rule information only for the LSM hook * buffer measurements. 
*/ if (func) { security_current_getlsmprop_subj(&prop); action = ima_get_action(idmap, inode, current_cred(), &prop, 0, func, &pcr, &template, func_data, NULL); if (!(action & IMA_MEASURE) && !digest) return -ENOENT; } if (!pcr) pcr = CONFIG_IMA_MEASURE_PCR_IDX; iint.ima_hash = hash_hdr; iint.ima_hash->algo = ima_hash_algo; iint.ima_hash->length = hash_digest_size[ima_hash_algo]; ret = ima_calc_buffer_hash(buf, size, iint.ima_hash); if (ret < 0) { audit_cause = "hashing_error"; goto out; } if (buf_hash) { memcpy(digest_hash, hash_hdr->digest, digest_hash_len); ret = ima_calc_buffer_hash(digest_hash, digest_hash_len, iint.ima_hash); if (ret < 0) { audit_cause = "hashing_error"; goto out; } event_data.buf = digest_hash; event_data.buf_len = digest_hash_len; } if (digest) memcpy(digest, iint.ima_hash->digest, digest_hash_len); if (!ima_policy_flag || (func && !(action & IMA_MEASURE))) return 1; ret = ima_alloc_init_template(&event_data, &entry, template); if (ret < 0) { audit_cause = "alloc_entry"; goto out; } ret = ima_store_template(entry, violation, NULL, event_data.buf, pcr); if (ret < 0) { audit_cause = "store_entry"; ima_free_template_entry(entry); } out: if (ret < 0) integrity_audit_message(AUDIT_INTEGRITY_PCR, NULL, eventname, func_measure_str(func), audit_cause, ret, 0, ret); return ret; } /** * ima_kexec_cmdline - measure kexec cmdline boot args * @kernel_fd: file descriptor of the kexec kernel being loaded * @buf: pointer to buffer * @size: size of buffer * * Buffers can only be measured, not appraised. */ void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) { if (!buf || !size) return; CLASS(fd, f)(kernel_fd); if (fd_empty(f)) return; process_buffer_measurement(file_mnt_idmap(fd_file(f)), file_inode(fd_file(f)), buf, size, "kexec-cmdline", KEXEC_CMDLINE, 0, NULL, false, NULL, 0); } /** * ima_measure_critical_data - measure kernel integrity critical data * @event_label: unique event label for grouping and limiting critical data * @event_name: event name for the record in the IMA measurement list * @buf: pointer to buffer data * @buf_len: length of buffer data (in bytes) * @hash: measure buffer data hash * @digest: buffer digest will be written to * @digest_len: buffer length * * Measure data critical to the integrity of the kernel into the IMA log * and extend the pcr. Examples of critical data could be various data * structures, policies, and states stored in kernel memory that can * impact the integrity of the system. * * Return: 0 if the buffer has been successfully measured, 1 if the digest * has been written to the passed location but not added to a measurement entry, * a negative value otherwise. */ int ima_measure_critical_data(const char *event_label, const char *event_name, const void *buf, size_t buf_len, bool hash, u8 *digest, size_t digest_len) { if (!event_name || !event_label || !buf || !buf_len) return -ENOPARAM; return process_buffer_measurement(&nop_mnt_idmap, NULL, buf, buf_len, event_name, CRITICAL_DATA, 0, event_label, hash, digest, digest_len); } EXPORT_SYMBOL_GPL(ima_measure_critical_data); #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS /** * ima_kernel_module_request - Prevent crypto-pkcs1(rsa,*) requests * @kmod_name: kernel module name * * Avoid a verification loop where verifying the signature of the modprobe * binary requires executing modprobe itself. 
Since the modprobe iint->mutex * is already held when the signature verification is performed, a deadlock * occurs as soon as modprobe is executed within the critical region, since * the same lock cannot be taken again. * * This happens when public_key_verify_signature(), in case of RSA algorithm, * uses alg_name to store internal information in order to construct an * algorithm on the fly, but crypto_larval_lookup() will try to use alg_name * in order to load a kernel module with the same name. * * Since we don't have any real "crypto-pkcs1(rsa,*)" kernel modules, * we are safe to fail such a module request from crypto_larval_lookup(), and * avoid the verification loop. * * Return: Zero if it is safe to load the kernel module, -EINVAL otherwise. */ static int ima_kernel_module_request(char *kmod_name) { if (strncmp(kmod_name, "crypto-pkcs1(rsa,", 17) == 0) return -EINVAL; return 0; } #endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */ static int __init init_ima(void) { int error; ima_appraise_parse_cmdline(); ima_init_template_list(); hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); if (error && strcmp(hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH) != 0) { pr_info("Allocating %s failed, going to use default hash algorithm %s\n", hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH); hash_setup_done = 0; hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); } if (error) return error; error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier); if (error) pr_warn("Couldn't register LSM notifier, error %d\n", error); if (!error) ima_update_policy_flags(); return error; } static struct security_hook_list ima_hooks[] __ro_after_init = { LSM_HOOK_INIT(bprm_check_security, ima_bprm_check), LSM_HOOK_INIT(bprm_creds_for_exec, ima_bprm_creds_for_exec), LSM_HOOK_INIT(file_post_open, ima_file_check), LSM_HOOK_INIT(inode_post_create_tmpfile, ima_post_create_tmpfile), LSM_HOOK_INIT(file_release, ima_file_free), LSM_HOOK_INIT(mmap_file, ima_file_mmap), LSM_HOOK_INIT(file_mprotect, ima_file_mprotect), LSM_HOOK_INIT(kernel_load_data, ima_load_data), LSM_HOOK_INIT(kernel_post_load_data, ima_post_load_data), LSM_HOOK_INIT(kernel_read_file, ima_read_file), LSM_HOOK_INIT(kernel_post_read_file, ima_post_read_file), LSM_HOOK_INIT(path_post_mknod, ima_post_path_mknod), #ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS LSM_HOOK_INIT(key_post_create_or_update, ima_post_key_create_or_update), #endif #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS LSM_HOOK_INIT(kernel_module_request, ima_kernel_module_request), #endif LSM_HOOK_INIT(inode_free_security_rcu, ima_inode_free_rcu), }; static const struct lsm_id ima_lsmid = { .name = "ima", .id = LSM_ID_IMA, }; static int __init init_ima_lsm(void) { ima_iintcache_init(); security_add_hooks(ima_hooks, ARRAY_SIZE(ima_hooks), &ima_lsmid); init_ima_appraise_lsm(&ima_lsmid); return 0; } struct lsm_blob_sizes ima_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct ima_iint_cache *), }; DEFINE_LSM(ima) = { .name = "ima", .init = init_ima_lsm, .order = LSM_ORDER_LAST, .blobs = &ima_blob_sizes, }; late_initcall(init_ima); /* Start IMA after the TPM is available */
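As a usage illustration for the exported helpers above: below is a minimal sketch (not part of this file) of a kernel module that queries ima_file_hash() for a file's collected measurement. The module name, the hard-coded path, and the 64-byte digest buffer (sized to match IMA_MAX_DIGEST_SIZE) are illustrative assumptions; as documented above, the call returns the hash algorithm on success and -EOPNOTSUPP when no measurement is available.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash_info.h>

static int __init ima_hash_demo_init(void)
{
	struct file *file;
	char digest[64];	/* large enough for any hash IMA supports */
	int algo;

	file = filp_open("/etc/hostname", O_RDONLY, 0);	/* illustrative path */
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* On success the return value is the hash algorithm (enum hash_algo) */
	algo = ima_file_hash(file, digest, sizeof(digest));
	if (algo >= 0)
		pr_info("ima-demo: algo %s, first digest byte %02x\n",
			hash_algo_name[algo], (u8)digest[0]);
	else
		pr_info("ima-demo: no measurement available (%d)\n", algo);

	filp_close(file, NULL);
	return 0;
}

static void __exit ima_hash_demo_exit(void)
{
}

module_init(ima_hash_demo_init);
module_exit(ima_hash_demo_exit);
MODULE_LICENSE("GPL");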
/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/writeback.h */ #ifndef WRITEBACK_H #define WRITEBACK_H #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/fs.h> #include <linux/flex_proportions.h> #include <linux/backing-dev-defs.h> #include <linux/blk_types.h> #include <linux/pagevec.h> struct bio; DECLARE_PER_CPU(int, dirty_throttle_leaks); /* * The global dirty threshold is normally equal to the global dirty limit, * except when the system suddenly allocates a lot of anonymous memory and * knocks down the global dirty threshold quickly, in which case the global * dirty limit will follow down slowly to prevent livelocking all dirtier tasks. */ #define DIRTY_SCOPE 8 struct backing_dev_info; /* * fs/fs-writeback.c */ enum writeback_sync_modes { WB_SYNC_NONE, /* Don't wait on anything */ WB_SYNC_ALL, /* Wait on every mapping */ }; /* * A control structure which tells the writeback code what to do. These are * always on the stack, and hence need no locking. They are always initialised * in a manner such that unspecified fields are set to zero. */ struct writeback_control { /* public fields that can be set and/or consumed by the caller: */ long nr_to_write; /* Write this many pages, and decrement this for each page written */ long pages_skipped; /* Pages which were not written */ /* * For a_ops->writepages(): if start or end are non-zero then this is * a hint that the filesystem need only write out the pages inside that * byterange. The byte at `end' is included in the writeout request. 
*/ loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned for_kupdate:1; /* A kupdate writeback */ unsigned for_background:1; /* A background writeback */ unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */ /* * When writeback IOs are bounced through async layers, only the * initial synchronous phase should be accounted towards inode * cgroup ownership arbitration to avoid confusion. Later stages * can set the following flag to disable the accounting. */ unsigned no_cgroup_owner:1; /* To enable batching of swap writes to non-block-device backends, * "plug" can be set to point to a 'struct swap_iocb *'. When all swap * writes have been submitted, if the swap_iocb is not NULL, * swap_write_unplug() should be called. */ struct swap_iocb **swap_plug; /* Target list for splitting a large folio */ struct list_head *list; /* internal fields used by the ->writepages implementation: */ struct folio_batch fbatch; pgoff_t index; int saved_err; #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb; /* wb this writeback is issued under */ struct inode *inode; /* inode being written out */ /* foreign inode detection, see wbc_detach_inode() */ int wb_id; /* current wb id */ int wb_lcand_id; /* last foreign candidate wb id */ int wb_tcand_id; /* this foreign candidate wb id */ size_t wb_bytes; /* bytes written by current wb */ size_t wb_lcand_bytes; /* bytes written by last candidate */ size_t wb_tcand_bytes; /* bytes written by this candidate */ #endif }; static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc) { blk_opf_t flags = 0; if (wbc->sync_mode == WB_SYNC_ALL) flags |= REQ_SYNC; else if (wbc->for_kupdate || wbc->for_background) flags |= REQ_BACKGROUND; return flags; } #ifdef CONFIG_CGROUP_WRITEBACK #define wbc_blkcg_css(wbc) \ ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css) #else #define wbc_blkcg_css(wbc) (blkcg_root_css) #endif /* CONFIG_CGROUP_WRITEBACK */ /* * A wb_domain represents a domain that wb's (bdi_writeback's) belong to * and are measured against each other in. There always is one global * domain, global_wb_domain, that every wb in the system is a member of. * This allows measuring the relative bandwidth of each wb to distribute * dirtyable memory accordingly. */ struct wb_domain { spinlock_t lock; /* * Scale the writeback cache size proportional to the relative * writeout speed. * * We do this by keeping a floating proportion between BDIs, based * on page writeback completions [end_page_writeback()]. Those * devices that write out pages fastest will get the larger share, * while the slower will get a smaller share. * * We use page writeout completions because we are interested in * getting rid of dirty pages. Having them written out is the * primary goal. * * We introduce a concept of time, a period over which we measure * these events, because demand can/will vary over time. The length * of this period itself is measured in page writeback completions. */ struct fprop_global completions; struct timer_list period_timer; /* timer for aging of completions */ unsigned long period_time; /* * The dirtyable memory and dirty threshold could be suddenly * knocked down by a large amount (e.g. on the startup of KVM in a * swapless system). 
This may throw the system into deep dirty * exceeded state and throttle heavy/light dirtiers alike. To * retain good responsiveness, maintain global_dirty_limit for * tracking slowly down to the knocked down dirty threshold. * * Both fields are protected by ->lock. */ unsigned long dirty_limit_tstamp; unsigned long dirty_limit; }; /** * wb_domain_size_changed - memory available to a wb_domain has changed * @dom: wb_domain of interest * * This function should be called when the amount of memory available to * @dom has changed. It resets @dom's dirty limit parameters to prevent * the past values which don't match the current configuration from skewing * dirty throttling. Without this, when memory size of a wb_domain is * greatly reduced, the dirty throttling logic may allow too many pages to * be dirtied leading to consecutive unnecessary OOMs and may get stuck in * that situation. */ static inline void wb_domain_size_changed(struct wb_domain *dom) { spin_lock(&dom->lock); dom->dirty_limit_tstamp = jiffies; dom->dirty_limit = 0; spin_unlock(&dom->lock); } /* * fs/fs-writeback.c */ struct bdi_writeback; void writeback_inodes_sb(struct super_block *, enum wb_reason reason); void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, enum wb_reason reason); void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason); void sync_inodes_sb(struct super_block *); void wakeup_flusher_threads(enum wb_reason reason); void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi, enum wb_reason reason); void inode_wait_for_writeback(struct inode *inode); void inode_io_list_del(struct inode *inode); /* writeback.h requires fs.h; it, too, is not included from here. */ static inline void wait_on_inode(struct inode *inode) { wait_var_event(inode_state_wait_address(inode, __I_NEW), !(READ_ONCE(inode->i_state) & I_NEW)); } #ifdef CONFIG_CGROUP_WRITEBACK #include <linux/cgroup.h> #include <linux/bio.h> void __inode_attach_wb(struct inode *inode, struct folio *folio); void wbc_detach_inode(struct writeback_control *wbc); void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio, size_t bytes); int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, enum wb_reason reason, struct wb_completion *done); void cgroup_writeback_umount(struct super_block *sb); bool cleanup_offline_cgwb(struct bdi_writeback *wb); /** * inode_attach_wb - associate an inode with its wb * @inode: inode of interest * @folio: folio being dirtied (may be NULL) * * If @inode doesn't have its wb, associate it with the wb matching the * memcg of @folio or, if @folio is NULL, %current. May be called w/ or w/o * @inode->i_lock. */ static inline void inode_attach_wb(struct inode *inode, struct folio *folio) { if (!inode->i_wb) __inode_attach_wb(inode, folio); } /** * inode_detach_wb - disassociate an inode from its wb * @inode: inode of interest * * @inode is being freed. Detach from its wb. */ static inline void inode_detach_wb(struct inode *inode) { if (inode->i_wb) { WARN_ON_ONCE(!(inode->i_state & I_CLEAR)); wb_put(inode->i_wb); inode->i_wb = NULL; } } void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, struct inode *inode); /** * wbc_init_bio - writeback specific initialization of bio * @wbc: writeback_control for the writeback in progress * @bio: bio to be initialized * * @bio is a part of the writeback in progress controlled by @wbc. Perform * writeback specific initialization. This is used to apply the cgroup * writeback context. 
Must be called after the bio has been associated with * a device. */ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { /* * pageout() path doesn't attach @wbc to the inode being written * out. This is intentional as we don't want the function to block * behind a slow cgroup. Ultimately, we want pageout() to kick off * regular writeback instead of writing things out itself. */ if (wbc->wb) bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css); } #else /* CONFIG_CGROUP_WRITEBACK */ static inline void inode_attach_wb(struct inode *inode, struct folio *folio) { } static inline void inode_detach_wb(struct inode *inode) { } static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, struct inode *inode) { } static inline void wbc_detach_inode(struct writeback_control *wbc) { } static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { } static inline void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio, size_t bytes) { } static inline void cgroup_writeback_umount(struct super_block *sb) { } #endif /* CONFIG_CGROUP_WRITEBACK */ /* * mm/page-writeback.c */ void laptop_io_completion(struct backing_dev_info *info); void laptop_sync_completion(void); void laptop_mode_timer_fn(struct timer_list *t); bool node_dirty_ok(struct pglist_data *pgdat); int wb_domain_init(struct wb_domain *dom, gfp_t gfp); #ifdef CONFIG_CGROUP_WRITEBACK void wb_domain_exit(struct wb_domain *dom); #endif extern struct wb_domain global_wb_domain; /* These are exported to sysctl. */ extern unsigned int dirty_writeback_interval; extern unsigned int dirty_expire_interval; extern unsigned int dirtytime_expire_interval; extern int laptop_mode; int dirtytime_interval_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); unsigned long cgwb_calc_thresh(struct bdi_writeback *wb); void wb_update_bandwidth(struct bdi_writeback *wb); /* Invoke balance dirty pages in async mode. */ #define BDP_ASYNC 0x0001 void balance_dirty_pages_ratelimited(struct address_space *mapping); int balance_dirty_pages_ratelimited_flags(struct address_space *mapping, unsigned int flags); bool wb_over_bg_thresh(struct bdi_writeback *wb); struct folio *writeback_iter(struct address_space *mapping, struct writeback_control *wbc, struct folio *folio, int *error); typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc, void *data); int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); void writeback_set_ratelimit(void); void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end); bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio); bool folio_redirty_for_writepage(struct writeback_control *, struct folio *); bool redirty_page_for_writepage(struct writeback_control *, struct page *); void sb_mark_inode_writeback(struct inode *inode); void sb_clear_inode_writeback(struct inode *inode); #endif /* WRITEBACK_H */
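To show how the writeback_iter()/write_cache_pages() interface declared above is meant to be consumed, here is a hedged sketch of a filesystem's ->writepages method built on the iterator, modeled on the loop inside write_cache_pages(); myfs_writepages and myfs_write_one_folio are hypothetical names, and the actual folio writeout is elided.

static int myfs_write_one_folio(struct folio *folio,
				struct writeback_control *wbc);	/* hypothetical */

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	/*
	 * writeback_iter() hands back each dirty folio locked, honours
	 * wbc->range_start/range_end and wbc->nr_to_write, and ends the
	 * loop once the range or budget is exhausted; the body must write
	 * out (or redirty) and unlock each folio it is given.
	 */
	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = myfs_write_one_folio(folio, wbc);

	return error;
}

The same loop underlies write_cache_pages(), which simply forwards each folio to the writepage_t callback.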
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Checksumming functions for IP, TCP, UDP and so on * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Borrows very liberally from tcp.c and ip.c, see those * files for more names. */ #ifndef _CHECKSUM_H #define _CHECKSUM_H #include <linux/errno.h> #include <asm/types.h> #include <asm/byteorder.h> #include <asm/checksum.h> #if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER) #include <linux/uaccess.h> #endif #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER static __always_inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len) { if (copy_from_user(dst, src, len)) return 0; return csum_partial(dst, len, ~0U); } #endif #ifndef HAVE_CSUM_COPY_USER static __always_inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, int len) { __wsum sum = csum_partial(src, len, ~0U); if (copy_to_user(dst, src, len) == 0) return sum; return 0; } #endif #ifndef _HAVE_ARCH_CSUM_AND_COPY static __always_inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { memcpy(dst, src, len); return csum_partial(dst, len, 0); } #endif #ifndef HAVE_ARCH_CSUM_ADD static __always_inline __wsum csum_add(__wsum csum, __wsum addend) { u32 res = (__force u32)csum; res += (__force u32)addend; return (__force __wsum)(res + (res < (__force u32)addend)); } #endif static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) { return csum_add(csum, ~addend); } static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend) { u16 res = (__force u16)csum; res += (__force u16)addend; return (__force __sum16)(res + (res < (__force u16)addend)); } static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) { return csum16_add(csum, ~addend); } #ifndef HAVE_ARCH_CSUM_SHIFT static __always_inline __wsum csum_shift(__wsum sum, int offset) { /* rotate sum to align it with a 16b boundary */ if (offset & 1) return (__force __wsum)ror32((__force u32)sum, 8); return sum; } #endif static __always_inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { return csum_add(csum, csum_shift(csum2, offset)); } static __always_inline __wsum csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) { return csum_block_add(csum, csum2, offset); } static __always_inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { return csum_block_add(csum, ~csum2, offset); } static __always_inline __wsum csum_unfold(__sum16 n) { return (__force __wsum)n; } static __always_inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) 
{ return csum_partial(buff, len, sum); } #define CSUM_MANGLED_0 ((__force __sum16)0xffff) static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) { *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); } static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); *sum = csum_fold(csum_add(tmp, (__force __wsum)to)); } /* Implements RFC 1624 (Incremental Internet Checksum) * 3. Discussion states : * HC' = ~(~HC + ~m + m') * m : old value of a 16bit field * m' : new value of a 16bit field */ static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) { *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) { *csum = csum_add(csum_sub(*csum, old), new); } static inline unsigned short csum_from32to16(unsigned int sum) { sum += (sum >> 16) | (sum << 16); return (unsigned short)(sum >> 16); } struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, bool pseudohdr); void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, __wsum diff, bool pseudohdr); static __always_inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, bool pseudohdr) { inet_proto_csum_replace4(sum, skb, (__force __be32)from, (__force __be32)to, pseudohdr); } static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum, int start, int offset) { __sum16 *psum = (__sum16 *)(ptr + offset); __wsum delta; /* Subtract out checksum up to start */ csum = csum_sub(csum, csum_partial(ptr, start, 0)); /* Set derived checksum in packet */ delta = csum_sub((__force __wsum)csum_fold(csum), (__force __wsum)*psum); *psum = csum_fold(csum); return delta; } static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } static __always_inline __wsum wsum_negate(__wsum val) { return (__force __wsum)-((__force u32)val); } #endif
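As a worked example of the incremental-update helpers above: the classic NAT-style rewrite of an IPv4 destination address, where csum_replace4() patches the header checksum per RFC 1624 instead of recomputing it over the whole header. The function name is illustrative; for TCP or UDP payloads the pseudo-header checksum would additionally need inet_proto_csum_replace4(), declared above.

#include <linux/ip.h>
#include <net/checksum.h>

/* Rewrite iph->daddr in place while keeping iph->check consistent. */
static inline void nat_rewrite_daddr(struct iphdr *iph, __be32 new_daddr)
{
	csum_replace4(&iph->check, iph->daddr, new_daddr);
	iph->daddr = new_daddr;
}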
3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 
4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions for the 'struct sk_buff' memory handlers. * * Authors: * Alan Cox, <gw4pts@gw4pts.ampr.org> * Florian La Roche, <rzsfl@rz.uni-sb.de> */ #ifndef _LINUX_SKBUFF_H #define _LINUX_SKBUFF_H #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/time.h> #include <linux/bug.h> #include <linux/bvec.h> #include <linux/cache.h> #include <linux/rbtree.h> #include <linux/socket.h> #include <linux/refcount.h> #include <linux/atomic.h> #include <asm/types.h> #include <linux/spinlock.h> #include <net/checksum.h> #include <linux/rcupdate.h> #include <linux/dma-mapping.h> #include <linux/netdev_features.h> #include <net/flow_dissector.h> #include <linux/in6.h> #include <linux/if_packet.h> #include <linux/llist.h> #include <linux/page_frag_cache.h> #include <net/flow.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_common.h> #endif #include <net/net_debug.h> #include <net/dropreason-core.h> #include <net/netmem.h> /** * DOC: skb checksums * * The interface for checksum offload between the stack and networking drivers * is as follows... * * IP checksum related features * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Drivers advertise checksum offload capabilities in the features of a device. * From the stack's point of view these are capabilities offered by the driver. * A driver typically only advertises features that it is capable of offloading * to its device. * * .. flat-table:: Checksum related device features * :widths: 1 10 * * * - %NETIF_F_HW_CSUM * - The driver (or its device) is able to compute one * IP (one's complement) checksum for any combination * of protocols or protocol layering. The checksum is * computed and set in a packet per the CHECKSUM_PARTIAL * interface (see below). * * * - %NETIF_F_IP_CSUM * - Driver (device) is only able to checksum plain * TCP or UDP packets over IPv4. These are specifically * unencapsulated packets of the form IPv4|TCP or * IPv4|UDP where the Protocol field in the IPv4 header * is TCP or UDP. The IPv4 header may contain IP options. * This feature cannot be set in features for a device * with NETIF_F_HW_CSUM also set. This feature is being * DEPRECATED (see below).
* * * - %NETIF_F_IPV6_CSUM * - Driver (device) is only able to checksum plain * TCP or UDP packets over IPv6. These are specifically * unencapsulated packets of the form IPv6|TCP or * IPv6|UDP where the Next Header field in the IPv6 * header is either TCP or UDP. IPv6 extension headers * are not supported with this feature. This feature * cannot be set in features for a device with * NETIF_F_HW_CSUM also set. This feature is being * DEPRECATED (see below). * * * - %NETIF_F_RXCSUM * - Driver (device) performs receive checksum offload. * This flag is only used to disable the RX checksum * feature for a device. The stack will accept receive * checksum indication in packets received on a device * regardless of whether NETIF_F_RXCSUM is set. * * Checksumming of received packets by device * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Indication of checksum verification is set in &sk_buff.ip_summed. * Possible values are: * * - %CHECKSUM_NONE * * Device did not checksum this packet e.g. due to lack of capabilities. * The packet contains full (though not verified) checksum in packet but * not in skb->csum. Thus, skb->csum is undefined in this case. * * - %CHECKSUM_UNNECESSARY * * The hardware you're dealing with doesn't calculate the full checksum * (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums * for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY * if their checksums are okay. &sk_buff.csum is still undefined in this case * though. A driver or device must never modify the checksum field in the * packet even if checksum is verified. * * %CHECKSUM_UNNECESSARY is applicable to following protocols: * * - TCP: IPv6 and IPv4. * - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a * zero UDP checksum for either IPv4 or IPv6, the networking stack * may perform further validation in this case. * - GRE: only if the checksum is present in the header. * - SCTP: indicates the CRC in SCTP header has been validated. * - FCOE: indicates the CRC in FC frame has been validated. * * &sk_buff.csum_level indicates the number of consecutive checksums found in * the packet minus one that have been verified as %CHECKSUM_UNNECESSARY. * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet * and a device is able to verify the checksums for UDP (possibly zero), * GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to * two. If the device were only able to verify the UDP checksum and not * GRE, either because it doesn't support GRE checksum or because GRE * checksum is bad, skb->csum_level would be set to zero (TCP checksum is * not considered in this case). * * - %CHECKSUM_COMPLETE * * This is the most generic way. The device supplied checksum of the _whole_ * packet as seen by netif_rx() and fills in &sk_buff.csum. This means the * hardware doesn't need to parse L3/L4 headers to implement this. * * Notes: * * - Even if device supports only some protocols, but is able to produce * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols. * * - %CHECKSUM_PARTIAL * * A checksum is set up to be offloaded to a device as described in the * output description for CHECKSUM_PARTIAL. This may occur on a packet * received directly from another Linux OS, e.g., a virtualized Linux kernel * on the same host, or it may be set in the input path in GRO or remote * checksum offload. 
For the purposes of checksum verification, the checksum * referred to by skb->csum_start + skb->csum_offset and any preceding * checksums in the packet are considered verified. Any checksums in the * packet that are after the checksum being offloaded are not considered to * be verified. * * Checksumming on transmit for non-GSO * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * The stack requests checksum offload in the &sk_buff.ip_summed for a packet. * Values are: * * - %CHECKSUM_PARTIAL * * The driver is required to checksum the packet as seen by hard_start_xmit() * from &sk_buff.csum_start up to the end, and to record/write the checksum at * offset &sk_buff.csum_start + &sk_buff.csum_offset. * A driver may verify that the * csum_start and csum_offset values are valid values given the length and * offset of the packet, but it should not attempt to validate that the * checksum refers to a legitimate transport layer checksum -- it is the * purview of the stack to validate that csum_start and csum_offset are set * correctly. * * When the stack requests checksum offload for a packet, the driver MUST * ensure that the checksum is set correctly. A driver can either offload the * checksum calculation to the device, or call skb_checksum_help (in the case * that the device does not support offload for a particular checksum). * * %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of * %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate * checksum offload capability. * skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based * on network device checksumming capabilities: if a packet does not match * them, skb_checksum_help() or skb_crc32c_help() (depending on the value of * &sk_buff.csum_not_inet, see :ref:`crc`) * is called to resolve the checksum. * * - %CHECKSUM_NONE * * The skb was already checksummed by the protocol, or a checksum is not * required. * * - %CHECKSUM_UNNECESSARY * * This has the same meaning as CHECKSUM_NONE for checksum offload on * output. * * - %CHECKSUM_COMPLETE * * Not used in checksum output. If a driver observes a packet with this value * set in skbuff, it should treat the packet as if %CHECKSUM_NONE were set. * * .. _crc: * * Non-IP checksum (CRC) offloads * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * .. flat-table:: * :widths: 1 10 * * * - %NETIF_F_SCTP_CRC * - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack * will set csum_start and csum_offset accordingly, set ip_summed to * %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication * in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c. * A driver that supports both IP checksum offload and SCTP CRC32c offload * must verify which offload is configured for a packet by testing the * value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to * resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1. * * * - %NETIF_F_FCOE_CRC * - This feature indicates that a device is capable of offloading the FCOE * CRC in a packet. To perform this offload the stack will set ip_summed * to %CHECKSUM_PARTIAL and set csum_start and csum_offset * accordingly. Note that there is no indication in the skbuff that the * %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports * both IP checksum offload and FCOE CRC offload must verify which offload * is configured for a packet, presumably by inspecting packet headers. 
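 *
 * A hedged illustration (not part of the original text) of the software
 * fallback described above for IP checksums: a driver whose device cannot
 * offload a given packet resolves %CHECKSUM_PARTIAL in its xmit path.
 * my_dev_can_csum() is a hypothetical capability check; a CRC32c offload
 * would use skb_crc32c_csum_help() instead::
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_dev_can_csum(dev, skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;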
* * Checksumming on output with GSO * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * In the case of a GSO packet (skb_is_gso() is true), checksum offload * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the * gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as * part of the GSO operation is implied. If a checksum is being offloaded * with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and * csum_offset are set to refer to the outermost checksum being offloaded * (two offloaded checksums are possible with UDP encapsulation). */ /* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 #define CHECKSUM_UNNECESSARY 1 #define CHECKSUM_COMPLETE 2 #define CHECKSUM_PARTIAL 3 /* Maximum value in skb->csum_level */ #define SKB_MAX_CSUM_LEVEL 3 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) #define SKB_WITH_OVERHEAD(X) \ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) /* For X bytes available in skb->head, what is the minimal * allocation needed, knowing struct skb_shared_info needs * to be aligned. */ #define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define SKB_MAX_ORDER(X, ORDER) \ SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) /* return minimum truesize of one skb containing X bytes of data */ #define SKB_TRUESIZE(X) ((X) + \ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) struct ahash_request; struct net_device; struct scatterlist; struct pipe_inode_info; struct iov_iter; struct napi_struct; struct bpf_prog; union bpf_attr; struct skb_ext; struct ts_config; #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { enum { BRNF_PROTO_UNCHANGED, BRNF_PROTO_8021Q, BRNF_PROTO_PPPOE } orig_proto:8; u8 pkt_otherhost:1; u8 in_prerouting:1; u8 bridged_dnat:1; u8 sabotage_in_done:1; __u16 frag_max_size; int physinif; /* always valid & non-NULL from FORWARD on, for physdev match */ struct net_device *physoutdev; union { /* prerouting: detect dnat in orig/reply direction */ __be32 ipv4_daddr; struct in6_addr ipv6_daddr; /* after prerouting + nat detected: store original source * mac since neigh resolution overwrites it, only used while * skb is out in neigh layer. */ char neigh_header[8]; }; }; #endif #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) /* Chain in tc_skb_ext will be used to share the tc chain with * ovs recirc_id. It will be set to the current chain by tc * and read by ovs to recirc_id. */ struct tc_skb_ext { union { u64 act_miss_cookie; __u32 chain; }; __u16 mru; __u16 zone; u8 post_ct:1; u8 post_ct_snat:1; u8 post_ct_dnat:1; u8 act_miss:1; /* Set if act_miss_cookie is used */ u8 l2_miss:1; /* Set by bridge upon FDB or MDB miss */ }; #endif struct sk_buff_head { /* These two members must be first to match sk_buff. */ struct_group_tagged(sk_buff_list, list, struct sk_buff *next; struct sk_buff *prev; ); __u32 qlen; spinlock_t lock; }; struct sk_buff; #ifndef CONFIG_MAX_SKB_FRAGS # define CONFIG_MAX_SKB_FRAGS 17 #endif #define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to * segment using its current segmentation instead. 
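 *
 * An illustrative note (not part of the original comment): e.g. SCTP sets
 *
 *	skb_shinfo(skb)->gso_size = GSO_BY_FRAGS;
 *
 * so that skb_segment() cuts along the skb's existing fragment
 * boundaries rather than at a fixed gso_size.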
*/ #define GSO_BY_FRAGS 0xFFFF typedef struct skb_frag { netmem_ref netmem; unsigned int len; unsigned int offset; } skb_frag_t; /** * skb_frag_size() - Returns the size of a skb fragment * @frag: skb fragment */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { return frag->len; } /** * skb_frag_size_set() - Sets the size of a skb fragment * @frag: skb fragment * @size: size of fragment */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { frag->len = size; } /** * skb_frag_size_add() - Increments the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to add */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { frag->len += delta; } /** * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to subtract */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { frag->len -= delta; } /** * skb_frag_must_loop - Test if %p is a high memory page * @p: fragment's page */ static inline bool skb_frag_must_loop(struct page *p) { #if defined(CONFIG_HIGHMEM) if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p)) return true; #endif return false; } /** * skb_frag_foreach_page - loop over pages in a fragment * * @f: skb frag to operate on * @f_off: offset from start of f->netmem * @f_len: length from f_off to loop over * @p: (temp var) current page * @p_off: (temp var) offset from start of current page, * non-zero only on first page. * @p_len: (temp var) length in current page, * < PAGE_SIZE only on first and last page. * @copied: (temp var) length so far, excluding current p_len. * * A fragment can hold a compound page, in which case per-page * operations, notably kmap_atomic, must be called for each * regular page. */ #define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \ for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \ p_off = (f_off) & (PAGE_SIZE - 1), \ p_len = skb_frag_must_loop(p) ? \ min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \ copied = 0; \ copied < f_len; \ copied += p_len, p++, p_off = 0, \ p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \ /** * struct skb_shared_hwtstamps - hardware time stamps * @hwtstamp: hardware time stamp transformed into duration * since arbitrary point in time * @netdev_data: address/cookie of network device driver used as * reference to actual hardware time stamp * * Software time stamps generated by ktime_get_real() are stored in * skb->tstamp. * * hwtstamps can only be compared against other hwtstamps from * the same device. * * This structure is attached to packets as part of the * &skb_shared_info. Use skb_hwtstamps() to get a pointer. 
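 *
 * A minimal sketch (not part of the original comment) of a driver
 * reporting an RX hardware stamp, assuming ns holds the raw nanosecond
 * value read from the NIC::
 *
 *	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);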
*/ struct skb_shared_hwtstamps { union { ktime_t hwtstamp; void *netdev_data; }; }; /* Definitions for tx_flags in struct skb_shared_info */ enum { /* generate hardware time stamp */ SKBTX_HW_TSTAMP = 1 << 0, /* generate software time stamp when queueing packet to NIC */ SKBTX_SW_TSTAMP = 1 << 1, /* device driver is going to provide hardware time stamp */ SKBTX_IN_PROGRESS = 1 << 2, /* generate hardware time stamp based on cycles if supported */ SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3, /* generate wifi status information (where possible) */ SKBTX_WIFI_STATUS = 1 << 4, /* determine hardware time stamp based on time or cycles */ SKBTX_HW_TSTAMP_NETDEV = 1 << 5, /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, }; #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \ SKBTX_HW_TSTAMP_USE_CYCLES | \ SKBTX_ANY_SW_TSTAMP) /* Definitions for flags in struct skb_shared_info */ enum { /* use zcopy routines */ SKBFL_ZEROCOPY_ENABLE = BIT(0), /* This indicates at least one fragment might be overwritten * (as in vmsplice(), sendfile() ...) * If we need to compute a TX checksum, we'll need to copy * all frags to avoid possible bad checksum */ SKBFL_SHARED_FRAG = BIT(1), /* segment contains only zerocopy data and should not be * charged to the kernel memory. */ SKBFL_PURE_ZEROCOPY = BIT(2), SKBFL_DONT_ORPHAN = BIT(3), /* page references are managed by the ubuf_info, so it's safe to * use frags only up until ubuf_info is released */ SKBFL_MANAGED_FRAG_REFS = BIT(4), }; #define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) #define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS) struct ubuf_info_ops { void (*complete)(struct sk_buff *, struct ubuf_info *, bool zerocopy_success); /* has to be compatible with skb_zcopy_set() */ int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg); }; /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. * The zerocopy_success argument is true if zero copy transmit occurred, * false on data copy or out of memory error caused by data copy attempt. * The ctx field is used to track device context. * The desc field is used to track userspace buffer index. */ struct ubuf_info { const struct ubuf_info_ops *ops; refcount_t refcnt; u8 flags; }; struct ubuf_info_msgzc { struct ubuf_info ubuf; union { struct { unsigned long desc; void *ctx; }; struct { u32 id; u16 len; u16 zerocopy:1; u32 bytelen; }; }; struct mmpin { struct user_struct *user; unsigned int num_pg; } mmp; }; #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) #define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \ ubuf) int mm_account_pinned_pages(struct mmpin *mmp, size_t size); void mm_unaccount_pinned_pages(struct mmpin *mmp); /* Preserve some data across TX submission and completion. * * Note, this state is stored in the driver. Extending the layout * might need some special care. */ struct xsk_tx_metadata_compl { __u64 *tx_timestamp; }; /* This data is invariant across clones and lives at * the end of the header data, ie. at skb->end. */ struct skb_shared_info { __u8 flags; __u8 meta_len; __u8 nr_frags; __u8 tx_flags; unsigned short gso_size; /* Warning: this field is not always filled in (UFO)! 
*/ unsigned short gso_segs; struct sk_buff *frag_list; union { struct skb_shared_hwtstamps hwtstamps; struct xsk_tx_metadata_compl xsk_meta; }; unsigned int gso_type; u32 tskey; /* * Warning : all fields before dataref are cleared in __alloc_skb() */ atomic_t dataref; union { struct { u32 xdp_frags_size; u32 xdp_frags_truesize; }; /* * Intermediate layers must ensure that destructor_arg * remains valid until skb destructor. */ void *destructor_arg; }; /* must be last field, see pskb_expand_head() */ skb_frag_t frags[MAX_SKB_FRAGS]; }; /** * DOC: dataref and headerless skbs * * Transport layers send out clones of payload skbs they hold for * retransmissions. To allow lower layers of the stack to prepend their headers * we split &skb_shared_info.dataref into two halves. * The lower 16 bits count the overall number of references. * The higher 16 bits indicate how many of the references are payload-only. * skb_header_cloned() checks if skb is allowed to add / write the headers. * * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr * (via __skb_header_release()). Any clone created from marked skb will get * &sk_buff.hdr_len populated with the available headroom. * If there's the only clone in existence it's able to modify the headroom * at will. The sequence of calls inside the transport layer is:: * * <alloc skb> * skb_reserve() * __skb_header_release() * skb_clone() * // send the clone down the stack * * This is not a very generic construct and it depends on the transport layers * doing the right thing. In practice there's usually only one payload-only skb. * Having multiple payload-only skbs with different lengths of hdr_len is not * possible. The payload-only skbs should never leave their owner. */ #define SKB_DATAREF_SHIFT 16 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) enum { SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ }; enum { SKB_GSO_TCPV4 = 1 << 0, /* This indicates the skb is from an untrusted source. */ SKB_GSO_DODGY = 1 << 1, /* This indicates the tcp segment has CWR set. */ SKB_GSO_TCP_ECN = 1 << 2, SKB_GSO_TCP_FIXEDID = 1 << 3, SKB_GSO_TCPV6 = 1 << 4, SKB_GSO_FCOE = 1 << 5, SKB_GSO_GRE = 1 << 6, SKB_GSO_GRE_CSUM = 1 << 7, SKB_GSO_IPXIP4 = 1 << 8, SKB_GSO_IPXIP6 = 1 << 9, SKB_GSO_UDP_TUNNEL = 1 << 10, SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, SKB_GSO_PARTIAL = 1 << 12, SKB_GSO_TUNNEL_REMCSUM = 1 << 13, SKB_GSO_SCTP = 1 << 14, SKB_GSO_ESP = 1 << 15, SKB_GSO_UDP = 1 << 16, SKB_GSO_UDP_L4 = 1 << 17, SKB_GSO_FRAGLIST = 1 << 18, }; #if BITS_PER_LONG > 32 #define NET_SKBUFF_DATA_USES_OFFSET 1 #endif #ifdef NET_SKBUFF_DATA_USES_OFFSET typedef unsigned int sk_buff_data_t; #else typedef unsigned char *sk_buff_data_t; #endif enum skb_tstamp_type { SKB_CLOCK_REALTIME, SKB_CLOCK_MONOTONIC, SKB_CLOCK_TAI, __SKB_CLOCK_MAX = SKB_CLOCK_TAI, }; /** * DOC: Basic sk_buff geometry * * struct sk_buff itself is a metadata structure and does not hold any packet * data. All the data is held in associated buffers. * * &sk_buff.head points to the main "head" buffer. The head buffer is divided * into two parts: * * - data buffer, containing headers and sometimes payload; * this is the part of the skb operated on by the common helpers * such as skb_put() or skb_pull(); * - shared info (struct skb_shared_info) which holds an array of pointers * to read-only data in the (page, offset, length) format. 
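 *
 * An illustrative sketch (not part of the original text) of carving up
 * the head buffer when building a packet: reserve headroom first, extend
 * the data area over the payload, then prepend headers into the reserved
 * space (hlen and dlen are assumed header and payload lengths)::
 *
 *	skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	skb_reserve(skb, hlen);
 *	skb_put(skb, dlen);
 *	skb_push(skb, sizeof(struct udphdr));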
* * Optionally &skb_shared_info.frag_list may point to another skb. * * Basic diagram may look like this:: * * --------------- * | sk_buff | * --------------- * ,--------------------------- + head * / ,----------------- + data * / / ,----------- + tail * | | | , + end * | | | | * v v v v * ----------------------------------------------- * | headroom | data | tailroom | skb_shared_info | * ----------------------------------------------- * + [page frag] * + [page frag] * + [page frag] * + [page frag] --------- * + frag_list --> | sk_buff | * --------- * */ /** * struct sk_buff - socket buffer * @next: Next buffer in list * @prev: Previous buffer in list * @tstamp: Time we arrived/left * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point * for retransmit timer * @rbnode: RB tree node, alternative to next/prev for netem/tcp * @list: queue head * @ll_node: anchor in an llist (eg socket defer_list) * @sk: Socket we are owned by * @dev: Device we arrived on/are leaving by * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL * @cb: Control buffer. Free for use by every layer. Put private vars here * @_skb_refdst: destination entry (with norefcount bit) * @len: Length of actual data * @data_len: Data length * @mac_len: Length of link layer header * @hdr_len: writable header length of cloned skb * @csum: Checksum (must include start/offset pair) * @csum_start: Offset from skb->head where checksumming should start * @csum_offset: Offset from csum_start where checksum should be stored * @priority: Packet queueing priority * @ignore_df: allow local fragmentation * @cloned: Head may be cloned (check refcnt to be sure) * @ip_summed: Driver fed us an IP checksum * @nohdr: Payload reference only, must not modify header * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs * @inner_protocol_type: whether the inner protocol is * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO * @remcsum_offload: remote checksum offload is enabled * @offload_fwd_mark: Packet was L2-forwarded in hardware * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware * @tc_skip_classify: do not classify packet. set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress * @redirected: packet was redirected by packet classifier * @from_ingress: packet was redirected from the ingress path * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag * @protocol: Packet protocol from driver * @destructor: Destruct function * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue) * @_sk_redir: socket redirection information for skmsg * @_nfct: Associated connection, if any (with nfctinfo bits) * @skb_iif: ifindex of device we arrived on * @tc_index: Traffic control index * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @head_frag: skb was allocated from page fragments, * not allocated by kmalloc() or vmalloc(). * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves * @pp_recycle: mark the packet for recycling instead of freeing (implies * page_pool support on driver) * @active_extensions: active extensions (skb_ext_id types) * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport * ports. 
* @sw_hash: indicates hash was computed in software stack * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @encapsulation: indicates the inner headers in the skbuff are valid * @encap_hdr_csum: software checksum is needed * @csum_valid: checksum is already valid * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @csum_complete_sw: checksum was completed by software * @csum_level: indicates the number of consecutive checksums found in * the packet minus one that have been verified as * CHECKSUM_UNNECESSARY (max 3) * @unreadable: indicates that at least 1 of the fragments in this skb is * unreadable. * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required * @tstamp_type: When set, skb->tstamp has the * delivery_time clock base of skb->tstamp. * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @alloc_cpu: CPU which did the skb allocation. * @secmark: security marking * @mark: Generic packet mark * @reserved_tailroom: (aka @mark) number of bytes of free space available * at the tail of an sk_buff * @vlan_all: vlan fields (proto & tci) * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information * @inner_protocol: Protocol (encapsulation) * @inner_ipproto: (aka @inner_protocol) stores ipproto when * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO; * @inner_transport_header: Inner transport layer header (encapsulation) * @inner_network_header: Network layer header (encapsulation) * @inner_mac_header: Link layer header (encapsulation) * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header * @kcov_handle: KCOV remote handle for remote coverage collection * @tail: Tail pointer * @end: End pointer * @head: Head of buffer * @data: Data head pointer * @truesize: Buffer size * @users: User count - see {datagram,tcp}.c * @extensions: allocated extensions, valid if active_extensions is nonzero */ struct sk_buff { union { struct { /* These two members must be first to match sk_buff_head. */ struct sk_buff *next; struct sk_buff *prev; union { struct net_device *dev; /* Some protocols might use this space to store information, * while device pointer would be NULL. * UDP receive path is one user. */ unsigned long dev_scratch; }; }; struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ struct list_head list; struct llist_node ll_node; }; struct sock *sk; union { ktime_t tstamp; u64 skb_mstamp_ns; /* earliest departure time */ }; /* * This is the control buffer. It is free to use for every * layer. Please put your private variables there. If you * want to keep them across layers you have to do a skb_clone() * first. This is owned by whoever has the skb queued ATM. */ char cb[48] __aligned(8); union { struct { unsigned long _skb_refdst; void (*destructor)(struct sk_buff *skb); }; struct list_head tcp_tsorted_anchor; #ifdef CONFIG_NET_SOCK_MSG unsigned long _sk_redir; #endif }; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) unsigned long _nfct; #endif unsigned int len, data_len; __u16 mac_len, hdr_len; /* Following fields are _not_ copied in __copy_skb_header() * Note that queue_mapping is here mostly to fill a hole. 
*/ __u16 queue_mapping; /* if you move cloned around you also must adapt those constants */ #ifdef __BIG_ENDIAN_BITFIELD #define CLONED_MASK (1 << 7) #else #define CLONED_MASK 1 #endif #define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset) /* private: */ __u8 __cloned_offset[0]; /* public: */ __u8 cloned:1, nohdr:1, fclone:2, peeked:1, head_frag:1, pfmemalloc:1, pp_recycle:1; /* page_pool recycle indicator */ #ifdef CONFIG_SKB_EXTENSIONS __u8 active_extensions; #endif /* Fields enclosed in headers group are copied * using a single memcpy() in __copy_skb_header() */ struct_group(headers, /* private: */ __u8 __pkt_type_offset[0]; /* public: */ __u8 pkt_type:3; /* see PKT_TYPE_MAX */ __u8 ignore_df:1; __u8 dst_pending_confirm:1; __u8 ip_summed:2; __u8 ooo_okay:1; /* private: */ __u8 __mono_tc_offset[0]; /* public: */ __u8 tstamp_type:2; /* See skb_tstamp_type */ #ifdef CONFIG_NET_XGRESS __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ __u8 tc_skip_classify:1; #endif __u8 remcsum_offload:1; __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 inner_protocol_type:1; __u8 l4_hash:1; __u8 sw_hash:1; #ifdef CONFIG_WIRELESS __u8 wifi_acked_valid:1; __u8 wifi_acked:1; #endif __u8 no_fcs:1; /* Indicates the inner headers are valid in the skbuff. */ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif #if IS_ENABLED(CONFIG_IP_VS) __u8 ipvs_property:1; #endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) __u8 nf_trace:1; #endif #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; __u8 offload_l3_fwd_mark:1; #endif __u8 redirected:1; #ifdef CONFIG_NET_REDIRECT __u8 from_ingress:1; #endif #ifdef CONFIG_NETFILTER_SKIP_EGRESS __u8 nf_skip_egress:1; #endif #ifdef CONFIG_SKB_DECRYPTED __u8 decrypted:1; #endif __u8 slow_gro:1; #if IS_ENABLED(CONFIG_IP_SCTP) __u8 csum_not_inet:1; #endif __u8 unreadable:1; #if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS) __u16 tc_index; /* traffic control index */ #endif u16 alloc_cpu; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; union { u32 vlan_all; struct { __be16 vlan_proto; __u16 vlan_tci; }; }; #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS) union { unsigned int napi_id; unsigned int sender_cpu; }; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; #endif union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; #ifdef CONFIG_KCOV u64 kcov_handle; #endif ); /* end headers group */ /* These elements must be at the end, see alloc_skb() for details. */ sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head, *data; unsigned int truesize; refcount_t users; #ifdef CONFIG_SKB_EXTENSIONS /* only usable after checking ->active_extensions != 0 */ struct skb_ext *extensions; #endif }; /* if you move pkt_type around you also must adapt those constants */ #ifdef __BIG_ENDIAN_BITFIELD #define PKT_TYPE_MAX (7 << 5) #else #define PKT_TYPE_MAX 7 #endif #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset) /* if you move tc_at_ingress or tstamp_type * around, you also must adapt these constants. 
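 *
 * (An illustrative note, not part of the original comment: code that
 * reads the byte at SKB_BF_MONO_TC_OFFSET, defined below, extracts the
 * tstamp type on big endian as
 *
 *	(byte & SKB_TSTAMP_TYPE_MASK) >> SKB_TSTAMP_TYPE_RSHIFT
 *
 * while on little endian the two bits already sit at the low end and
 * need only the mask.)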
*/ #ifdef __BIG_ENDIAN_BITFIELD #define SKB_TSTAMP_TYPE_MASK (3 << 6) #define SKB_TSTAMP_TYPE_RSHIFT (6) #define TC_AT_INGRESS_MASK (1 << 5) #else #define SKB_TSTAMP_TYPE_MASK (3) #define TC_AT_INGRESS_MASK (1 << 2) #endif #define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset) #ifdef __KERNEL__ /* * Handling routines are only of interest to the kernel */ #define SKB_ALLOC_FCLONE 0x01 #define SKB_ALLOC_RX 0x02 #define SKB_ALLOC_NAPI 0x04 /** * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves * @skb: buffer */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) { return unlikely(skb->pfmemalloc); } /* * skb might have a dst pointer attached, refcounted or not. * _skb_refdst low order bit is set if refcount was _not_ taken */ #define SKB_DST_NOREF 1UL #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) /** * skb_dst - returns skb dst_entry * @skb: buffer * * Returns: skb dst_entry, regardless of reference taken or not. */ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) { /* If refdst was not refcounted, check we still are in a * rcu_read_lock section */ WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && !rcu_read_lock_held() && !rcu_read_lock_bh_held()); return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); } /** * skb_dst_set - sets skb dst * @skb: buffer * @dst: dst entry * * Sets skb dst, assuming a reference was taken on dst and should * be released by skb_dst_drop() */ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) { skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst; } /** * skb_dst_set_noref - sets skb dst, hopefully, without taking reference * @skb: buffer * @dst: dst entry * * Sets skb dst, assuming a reference was not taken on dst. * If dst entry is cached, we do not take reference and dst_release * will be avoided by refdst_drop. If dst entry is not cached, we take * reference, so that last dst_release can destroy the dst immediately. */ static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } /** * skb_dst_is_noref - Test if skb dst isn't refcounted * @skb: buffer */ static inline bool skb_dst_is_noref(const struct sk_buff *skb) { return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); } /* For mangling skb->pkt_type from user space side from applications * such as nft, tc, etc, we only allow a conservative subset of * possible pkt_types to be set. */ static inline bool skb_pkt_type_ok(u32 ptype) { return ptype <= PACKET_OTHERHOST; } /** * skb_napi_id - Returns the skb's NAPI id * @skb: buffer */ static inline unsigned int skb_napi_id(const struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL return skb->napi_id; #else return 0; #endif } static inline bool skb_wifi_acked_valid(const struct sk_buff *skb) { #ifdef CONFIG_WIRELESS return skb->wifi_acked_valid; #else return 0; #endif } /** * skb_unref - decrement the skb's reference count * @skb: buffer * * Returns: true if we can free the skb. */ static inline bool skb_unref(struct sk_buff *skb) { if (unlikely(!skb)) return false; if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1)) smp_rmb(); else if (likely(!refcount_dec_and_test(&skb->users))) return false; return true; } static inline bool skb_data_unref(const struct sk_buff *skb, struct skb_shared_info *shinfo) { int bias; if (!skb->cloned) return true; bias = skb->nohdr ? 
(1 << SKB_DATAREF_SHIFT) + 1 : 1; if (atomic_read(&shinfo->dataref) == bias) smp_rmb(); else if (atomic_sub_return(bias, &shinfo->dataref)) return false; return true; } void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason); static inline void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) { sk_skb_reason_drop(NULL, skb, reason); } /** * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason * @skb: buffer to free */ static inline void kfree_skb(struct sk_buff *skb) { kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); } void skb_release_head_state(struct sk_buff *skb); void kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); static inline void kfree_skb_list(struct sk_buff *segs) { kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED); } #ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); #else static inline void consume_skb(struct sk_buff *skb) { return kfree_skb(skb); } #endif void __consume_stateless_skb(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize); struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, int node); struct sk_buff *__build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb_around(struct sk_buff *skb, void *data, unsigned int frag_size); void skb_attempt_defer_free(struct sk_buff *skb); struct sk_buff *napi_build_skb(void *data, unsigned int frag_size); struct sk_buff *slab_build_skb(void *data); /** * alloc_skb - allocate a network buffer * @size: size to allocate * @priority: allocation mask * * This function is a convenient wrapper around __alloc_skb(). */ static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { return __alloc_skb(size, priority, 0, NUMA_NO_NODE); } struct sk_buff *alloc_skb_with_frags(unsigned long header_len, unsigned long data_len, int max_page_order, int *errcode, gfp_t gfp_mask); struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); /* Layout of fast clones : [skb1][skb2][fclone_ref] */ struct sk_buff_fclones { struct sk_buff skb1; struct sk_buff skb2; refcount_t fclone_ref; }; /** * skb_fclone_busy - check if fclone is busy * @sk: socket * @skb: buffer * * Returns: true if skb is a fast clone, and its clone is not freed. * Some drivers call skb_orphan() in their ndo_start_xmit(), * so we also check that didn't happen. */ static inline bool skb_fclone_busy(const struct sock *sk, const struct sk_buff *skb) { const struct sk_buff_fclones *fclones; fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && refcount_read(&fclones->fclone_ref) > 1 && READ_ONCE(fclones->skb2.sk) == sk; } /** * alloc_skb_fclone - allocate a network buffer from fclone cache * @size: size to allocate * @priority: allocation mask * * This function is a convenient wrapper around __alloc_skb(). 
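 *
 * A hedged usage sketch (not part of the original comment), as a
 * transport might allocate a buffer it expects to clone for
 * retransmission (MAX_TCP_HEADER is from net/tcp.h)::
 *
 *	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, sk->sk_allocation);
 *	if (!skb)
 *		return -ENOBUFS;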
*/ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); } struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); void skb_headers_offset_update(struct sk_buff *skb, int off); int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, gfp_t gfp_mask, bool fclone); static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) { return __pskb_copy_fclone(skb, headroom, gfp_mask, false); } int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, int offset, int len); int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error); /** * skb_pad - zero pad the tail of an skb * @skb: buffer to pad * @pad: space to pad * * Ensure that a buffer is followed by a padding area that is zero * filled. Used by network drivers which may DMA or transfer data * beyond the buffer end onto the wire. * * May return error in out of memory cases. The skb is freed on error. */ static inline int skb_pad(struct sk_buff *skb, int pad) { return __skb_pad(skb, pad, true); } #define dev_kfree_skb(a) consume_skb(a) int skb_append_pagefrags(struct sk_buff *skb, struct page *page, int offset, size_t size, size_t max_frags); struct skb_seq_state { __u32 lower_offset; __u32 upper_offset; __u32 frag_idx; __u32 stepped_offset; struct sk_buff *root_skb; struct sk_buff *cur_skb; __u8 *frag_data; __u32 frag_off; }; void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, unsigned int to, struct skb_seq_state *st); unsigned int skb_seq_read(unsigned int consumed, const u8 **data, struct skb_seq_state *st); void skb_abort_seq_read(struct skb_seq_state *st); int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len); unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config); /* * Packet hash types specify the type of hash in skb_set_hash. * * Hash types refer to the protocol layer addresses which are used to * construct a packet's hash. The hashes are used to differentiate or identify * flows of the protocol layer for the hash type. Hash types are either * layer-2 (L2), layer-3 (L3), or layer-4 (L4). * * Properties of hashes: * * 1) Two packets in different flows have different hash values * 2) Two packets in the same flow should have the same hash value * * A hash at a higher layer is considered to be more specific. A driver should * set the most specific hash possible. * * A driver cannot indicate a more specific hash than the layer at which a hash * was computed. For instance an L3 hash cannot be set as an L4 hash. 
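 *
 * An illustrative example (not part of the original text): a NIC that
 * hashed the full 4-tuple reports
 *
 *	skb_set_hash(skb, le32_to_cpu(rx_desc->hash), PKT_HASH_TYPE_L4);
 *
 * in its receive path (rx_desc being a hypothetical descriptor);
 * reporting the same value as PKT_HASH_TYPE_L3 would be permitted,
 * while upgrading an L3 hash to L4 would not.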
* * A driver may indicate a hash level which is less specific than the * actual layer the hash was computed on. For instance, a hash computed * at L4 may be considered an L3 hash. This should only be done if the * driver can't unambiguously determine that the HW computed the hash at * the higher layer. Note that the "should" in the second property above * permits this. */ enum pkt_hash_types { PKT_HASH_TYPE_NONE, /* Undefined type */ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ }; static inline void skb_clear_hash(struct sk_buff *skb) { skb->hash = 0; skb->sw_hash = 0; skb->l4_hash = 0; } static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) { if (!skb->l4_hash) skb_clear_hash(skb); } static inline void __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4) { skb->l4_hash = is_l4; skb->sw_hash = is_sw; skb->hash = hash; } static inline void skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) { /* Used by drivers to set hash from HW */ __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4); } static inline void __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) { __skb_set_hash(skb, hash, true, is_l4); } u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb); static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb) { return __skb_get_hash_symmetric_net(NULL, skb); } void __skb_get_hash_net(const struct net *net, struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, const void *data, const struct flow_keys_basic *keys, int hlen); __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, const void *data, int hlen_proto); static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) { return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); } void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); struct bpf_flow_dissector; u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, __be16 proto, int nhoff, int hlen, unsigned int flags); bool __skb_flow_dissect(const struct net *net, const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, const void *data, __be16 proto, int nhoff, int hlen, unsigned int flags); static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, unsigned int flags) { return __skb_flow_dissect(NULL, skb, flow_dissector, target_container, NULL, 0, 0, 0, flags); } static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, struct flow_keys *flow, unsigned int flags) { memset(flow, 0, sizeof(*flow)); return __skb_flow_dissect(NULL, skb, &flow_keys_dissector, flow, NULL, 0, 0, 0, flags); } static inline bool skb_flow_dissect_flow_keys_basic(const struct net *net, const struct sk_buff *skb, struct flow_keys_basic *flow, const void *data, __be16 proto, int nhoff, int hlen, unsigned int flags) { memset(flow, 0, sizeof(*flow)); return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, data, proto, nhoff, hlen, flags); } void skb_flow_dissect_meta(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); /* Gets a skb connection tracking info, ctinfo map should be a * map of mapsize to translate enum 
ip_conntrack_info states * to user states. */ void skb_flow_dissect_ct(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, u16 *ctinfo_map, size_t mapsize, bool post_ct, u16 zone); void skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); void skb_flow_dissect_hash(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) __skb_get_hash_net(net, skb); return skb->hash; } static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) __skb_get_hash_net(NULL, skb); return skb->hash; } static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) { if (!skb->l4_hash && !skb->sw_hash) { struct flow_keys keys; __u32 hash = __get_hash_from_flowi6(fl6, &keys); __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); } return skb->hash; } __u32 skb_get_hash_perturb(const struct sk_buff *skb, const siphash_key_t *perturb); static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) { return skb->hash; } static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) { to->hash = from->hash; to->sw_hash = from->sw_hash; to->l4_hash = from->l4_hash; }; static inline int skb_cmp_decrypted(const struct sk_buff *skb1, const struct sk_buff *skb2) { #ifdef CONFIG_SKB_DECRYPTED return skb2->decrypted - skb1->decrypted; #else return 0; #endif } static inline bool skb_is_decrypted(const struct sk_buff *skb) { #ifdef CONFIG_SKB_DECRYPTED return skb->decrypted; #else return false; #endif } static inline void skb_copy_decrypted(struct sk_buff *to, const struct sk_buff *from) { #ifdef CONFIG_SKB_DECRYPTED to->decrypted = from->decrypted; #endif } #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { return skb->head + skb->end; } static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end; } static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) { skb->end = offset; } #else static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { return skb->end; } static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end - skb->head; } static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) { skb->end = skb->head + offset; } #endif extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops; struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, struct ubuf_info *uarg); void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, struct iov_iter *from, size_t length); int zerocopy_fill_skb_from_iter(struct sk_buff *skb, struct iov_iter *from, size_t length); static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) { return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); } int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg); /* Internal */ #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) { return &skb_shinfo(skb)->hwtstamps; } static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) { bool is_zcopy = 
skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; return is_zcopy ? skb_uarg(skb) : NULL; } static inline bool skb_zcopy_pure(const struct sk_buff *skb) { return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; } static inline bool skb_zcopy_managed(const struct sk_buff *skb) { return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; } static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1, const struct sk_buff *skb2) { return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2); } static inline void net_zcopy_get(struct ubuf_info *uarg) { refcount_inc(&uarg->refcnt); } static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg) { skb_shinfo(skb)->destructor_arg = uarg; skb_shinfo(skb)->flags |= uarg->flags; } static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, bool *have_ref) { if (skb && uarg && !skb_zcopy(skb)) { if (unlikely(have_ref && *have_ref)) *have_ref = false; else net_zcopy_get(uarg); skb_zcopy_init(skb, uarg); } } static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) { skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; } static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) { return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; } static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) { return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); } static inline void net_zcopy_put(struct ubuf_info *uarg) { if (uarg) uarg->ops->complete(NULL, uarg, true); } static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref) { if (uarg) { if (uarg->ops == &msg_zerocopy_ubuf_ops) msg_zerocopy_put_abort(uarg, have_uref); else if (have_uref) net_zcopy_put(uarg); } } /* Release a reference on a zerocopy structure */ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) { struct ubuf_info *uarg = skb_zcopy(skb); if (uarg) { if (!skb_zcopy_is_nouarg(skb)) uarg->ops->complete(skb, uarg, zerocopy_success); skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; } } void __skb_zcopy_downgrade_managed(struct sk_buff *skb); static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) { if (unlikely(skb_zcopy_managed(skb))) __skb_zcopy_downgrade_managed(skb); } /* Return true if frags in this skb are readable by the host. */ static inline bool skb_frags_readable(const struct sk_buff *skb) { return !skb->unreadable; } static inline void skb_mark_not_on_list(struct sk_buff *skb) { skb->next = NULL; } static inline void skb_poison_list(struct sk_buff *skb) { #ifdef CONFIG_DEBUG_NET skb->next = SKB_LIST_POISON_NEXT; #endif } /* Iterate through singly-linked GSO fragments of an skb. */ #define skb_list_walk_safe(first, skb, next_skb) \ for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL) static inline void skb_list_del_init(struct sk_buff *skb) { __list_del_entry(&skb->list); skb_mark_not_on_list(skb); } /** * skb_queue_empty - check if a queue is empty * @list: queue head * * Returns true if the queue is empty, false otherwise. */ static inline int skb_queue_empty(const struct sk_buff_head *list) { return list->next == (const struct sk_buff *) list; } /** * skb_queue_empty_lockless - check if a queue is empty * @list: queue head * * Returns true if the queue is empty, false otherwise. * This variant can be used in lockless contexts. 
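 *
 * Illustrative use, a sketch of the kind of check a datagram poll()
 * implementation can make without taking the queue lock:
 *
 *	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		mask |= EPOLLIN | EPOLLRDNORM;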
*/ static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list) { return READ_ONCE(list->next) == (const struct sk_buff *) list; } /** * skb_queue_is_last - check if skb is the last entry in the queue * @list: queue head * @skb: buffer * * Returns true if @skb is the last buffer on the list. */ static inline bool skb_queue_is_last(const struct sk_buff_head *list, const struct sk_buff *skb) { return skb->next == (const struct sk_buff *) list; } /** * skb_queue_is_first - check if skb is the first entry in the queue * @list: queue head * @skb: buffer * * Returns true if @skb is the first buffer on the list. */ static inline bool skb_queue_is_first(const struct sk_buff_head *list, const struct sk_buff *skb) { return skb->prev == (const struct sk_buff *) list; } /** * skb_queue_next - return the next packet in the queue * @list: queue head * @skb: current buffer * * Return the next packet in @list after @skb. It is only valid to * call this if skb_queue_is_last() evaluates to false. */ static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, const struct sk_buff *skb) { /* This BUG_ON may seem severe, but if we just return then we * are going to dereference garbage. */ BUG_ON(skb_queue_is_last(list, skb)); return skb->next; } /** * skb_queue_prev - return the prev packet in the queue * @list: queue head * @skb: current buffer * * Return the prev packet in @list before @skb. It is only valid to * call this if skb_queue_is_first() evaluates to false. */ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, const struct sk_buff *skb) { /* This BUG_ON may seem severe, but if we just return then we * are going to dereference garbage. */ BUG_ON(skb_queue_is_first(list, skb)); return skb->prev; } /** * skb_get - reference buffer * @skb: buffer to reference * * Makes another reference to a socket buffer and returns a pointer * to the buffer. */ static inline struct sk_buff *skb_get(struct sk_buff *skb) { refcount_inc(&skb->users); return skb; } /* * If users == 1, we are the only owner and can avoid redundant atomic changes. */ /** * skb_cloned - is the buffer a clone * @skb: buffer to check * * Returns true if the buffer was generated with skb_clone() and is * one of multiple shared copies of the buffer. Cloned buffers are * shared data so must not be written to under normal circumstances. */ static inline int skb_cloned(const struct sk_buff *skb) { return skb->cloned && (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; } static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) return pskb_expand_head(skb, 0, 0, pri); return 0; } /* This variant of skb_unclone() makes sure skb->truesize * and skb_end_offset() are not changed, whenever a new skb->head is needed. * * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X)) * when various debugging features are in place. */ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri); static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) return __skb_unclone_keeptruesize(skb, pri); return 0; } /** * skb_header_cloned - is the header a clone * @skb: buffer to check * * Returns true if modifying the header part of the buffer requires * the data to be copied. 
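 *
 * A common pattern (an illustrative sketch; skb_header_unclone() below
 * wraps essentially this) is to pair the check with pskb_expand_head()
 * before rewriting header fields:
 *
 *	if (skb_header_cloned(skb) &&
 *	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 *		goto drop;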
*/ static inline int skb_header_cloned(const struct sk_buff *skb) { int dataref; if (!skb->cloned) return 0; dataref = atomic_read(&skb_shinfo(skb)->dataref); dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); return dataref != 1; } static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_header_cloned(skb)) return pskb_expand_head(skb, 0, 0, pri); return 0; } /** * __skb_header_release() - allow clones to use the headroom * @skb: buffer to operate on * * See "DOC: dataref and headerless skbs". */ static inline void __skb_header_release(struct sk_buff *skb) { skb->nohdr = 1; atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); } /** * skb_shared - is the buffer shared * @skb: buffer to check * * Returns true if more than one person has a reference to this * buffer. */ static inline int skb_shared(const struct sk_buff *skb) { return refcount_read(&skb->users) != 1; } /** * skb_share_check - check if buffer is shared and if so clone it * @skb: buffer to check * @pri: priority for memory allocation * * If the buffer is shared the buffer is cloned and the old copy * drops a reference. A new clone with a single reference is returned. * If the buffer is not shared the original buffer is returned. When * being called from interrupt status or with spinlocks held pri must * be GFP_ATOMIC. * * NULL is returned on a memory allocation failure. */ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, pri); if (likely(nskb)) consume_skb(skb); else kfree_skb(skb); skb = nskb; } return skb; } /* * Copy shared buffers into a new sk_buff. We effectively do COW on * packets to handle cases where we have a local reader and forward * and a couple of other messy ones. The normal one is tcpdumping * a packet that's being forwarded. */ /** * skb_unshare - make a copy of a shared buffer * @skb: buffer to check * @pri: priority for memory allocation * * If the socket buffer is a clone then this function creates a new * copy of the data, drops a reference count on the old copy and returns * the new copy with the reference count at 1. If the buffer is not a clone * the original buffer is returned. When called with a spinlock held or * from interrupt state @pri must be %GFP_ATOMIC * * %NULL is returned on a memory allocation failure. */ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, pri); /* Free our shared copy */ if (likely(nskb)) consume_skb(skb); else kfree_skb(skb); skb = nskb; } return skb; } /** * skb_peek - peek at the head of an &sk_buff_head * @list_: list to peek at * * Peek an &sk_buff. Unlike most other operations you _MUST_ * be careful with this one. A peek leaves the buffer on the * list and someone else may run off with it. You must hold * the appropriate locks or have a private queue to do this. * * Returns %NULL for an empty list or a pointer to the head element. * The reference count is not incremented and the reference is therefore * volatile. Use with caution. 
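 *
 * An illustrative sketch of a safe peek, with the queue lock held so
 * the buffer cannot be unlinked while it is examined (the skb stays on
 * the list throughout):
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb = skb_peek(list);
 *	if (skb)
 *		len = skb->len;
 *	spin_unlock_irqrestore(&list->lock, flags);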
*/ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) { struct sk_buff *skb = list_->next; if (skb == (struct sk_buff *)list_) skb = NULL; return skb; } /** * __skb_peek - peek at the head of a non-empty &sk_buff_head * @list_: list to peek at * * Like skb_peek(), but the caller knows that the list is not empty. */ static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) { return list_->next; } /** * skb_peek_next - peek skb following the given one from a queue * @skb: skb to start from * @list_: list to peek at * * Returns %NULL when the end of the list is met or a pointer to the * next element. The reference count is not incremented and the * reference is therefore volatile. Use with caution. */ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, const struct sk_buff_head *list_) { struct sk_buff *next = skb->next; if (next == (struct sk_buff *)list_) next = NULL; return next; } /** * skb_peek_tail - peek at the tail of an &sk_buff_head * @list_: list to peek at * * Peek an &sk_buff. Unlike most other operations you _MUST_ * be careful with this one. A peek leaves the buffer on the * list and someone else may run off with it. You must hold * the appropriate locks or have a private queue to do this. * * Returns %NULL for an empty list or a pointer to the tail element. * The reference count is not incremented and the reference is therefore * volatile. Use with caution. */ static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) { struct sk_buff *skb = READ_ONCE(list_->prev); if (skb == (struct sk_buff *)list_) skb = NULL; return skb; } /** * skb_queue_len - get queue length * @list_: list to measure * * Return the length of an &sk_buff queue. */ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) { return list_->qlen; } /** * skb_queue_len_lockless - get queue length * @list_: list to measure * * Return the length of an &sk_buff queue. * This variant can be used in lockless contexts. */ static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) { return READ_ONCE(list_->qlen); } /** * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head * @list: queue to initialize * * This initializes only the list and queue length aspects of * an sk_buff_head object. This allows to initialize the list * aspects of an sk_buff_head without reinitializing things like * the spinlock. It can also be used for on-stack sk_buff_head * objects where the spinlock is known to not be used. */ static inline void __skb_queue_head_init(struct sk_buff_head *list) { list->prev = list->next = (struct sk_buff *)list; list->qlen = 0; } /* * This function creates a split out lock class for each invocation; * this is needed for now since a whole lot of users of the skb-queue * infrastructure in drivers have different locking usage (in hardirq) * than the networking core (in softirq only). In the long run either the * network layer or drivers should need annotation to consolidate the * main types of usage into 3 classes. */ static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); __skb_queue_head_init(list); } static inline void skb_queue_head_init_class(struct sk_buff_head *list, struct lock_class_key *class) { skb_queue_head_init(list); lockdep_set_class(&list->lock, class); } /* * Insert an sk_buff on a list. * * The "__skb_xxxx()" functions are the non-atomic ones that * can only be called with interrupts disabled. 
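 *
 * Illustrative use with an on-stack queue that is private to the
 * caller, where no locking is needed at all:
 *
 *	struct sk_buff_head tmp;
 *
 *	__skb_queue_head_init(&tmp);
 *	__skb_queue_tail(&tmp, skb);
 *	...
 *	__skb_queue_purge(&tmp);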
*/ static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) { /* See skb_queue_empty_lockless() and skb_peek_tail() * for the opposite READ_ONCE() */ WRITE_ONCE(newsk->next, next); WRITE_ONCE(newsk->prev, prev); WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk); WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk); WRITE_ONCE(list->qlen, list->qlen + 1); } static inline void __skb_queue_splice(const struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *next) { struct sk_buff *first = list->next; struct sk_buff *last = list->prev; WRITE_ONCE(first->prev, prev); WRITE_ONCE(prev->next, first); WRITE_ONCE(last->next, next); WRITE_ONCE(next->prev, last); } /** * skb_queue_splice - join two skb lists; this is designed for stacks * @list: the new list to add * @head: the place to add it in the first list */ static inline void skb_queue_splice(const struct sk_buff_head *list, struct sk_buff_head *head) { if (!skb_queue_empty(list)) { __skb_queue_splice(list, (struct sk_buff *) head, head->next); head->qlen += list->qlen; } } /** * skb_queue_splice_init - join two skb lists and reinitialise the emptied list * @list: the new list to add * @head: the place to add it in the first list * * The list at @list is reinitialised */ static inline void skb_queue_splice_init(struct sk_buff_head *list, struct sk_buff_head *head) { if (!skb_queue_empty(list)) { __skb_queue_splice(list, (struct sk_buff *) head, head->next); head->qlen += list->qlen; __skb_queue_head_init(list); } } /** * skb_queue_splice_tail - join two skb lists, each list being a queue * @list: the new list to add * @head: the place to add it in the first list */ static inline void skb_queue_splice_tail(const struct sk_buff_head *list, struct sk_buff_head *head) { if (!skb_queue_empty(list)) { __skb_queue_splice(list, head->prev, (struct sk_buff *) head); head->qlen += list->qlen; } } /** * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list * @list: the new list to add * @head: the place to add it in the first list * * Each of the lists is a queue. * The list at @list is reinitialised */ static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, struct sk_buff_head *head) { if (!skb_queue_empty(list)) { __skb_queue_splice(list, head->prev, (struct sk_buff *) head); head->qlen += list->qlen; __skb_queue_head_init(list); } } /** * __skb_queue_after - queue a buffer after a given buffer in the list * @list: list to use * @prev: place after this buffer * @newsk: buffer to queue * * Queue a buffer in the middle of a list. This function takes no locks * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time. */ static inline void __skb_queue_after(struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *newsk) { __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list); } void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); static inline void __skb_queue_before(struct sk_buff_head *list, struct sk_buff *next, struct sk_buff *newsk) { __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list); } /** * __skb_queue_head - queue a buffer at the list head * @list: list to use * @newsk: buffer to queue * * Queue a buffer at the start of a list. This function takes no locks * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time.
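 *
 * Illustrative requeue at the head of a shared queue, which is
 * essentially what the locked skb_queue_head() declared below does:
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_head(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);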
*/ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) { __skb_queue_after(list, (struct sk_buff *)list, newsk); } void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); /** * __skb_queue_tail - queue a buffer at the list tail * @list: list to use * @newsk: buffer to queue * * Queue a buffer at the end of a list. This function takes no locks * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time. */ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) { __skb_queue_before(list, (struct sk_buff *)list, newsk); } void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); /* * Remove an sk_buff from a list. _Must_ be called atomically, and with * the list known. */ void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next, *prev; WRITE_ONCE(list->qlen, list->qlen - 1); next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; WRITE_ONCE(next->prev, prev); WRITE_ONCE(prev->next, next); } /** * __skb_dequeue - remove from the head of the queue * @list: list to dequeue from * * Remove the head of the list. This function does not take any locks * so must be used with appropriate locks held only. The head item is * returned or %NULL if the list is empty. */ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) { struct sk_buff *skb = skb_peek(list); if (skb) __skb_unlink(skb, list); return skb; } struct sk_buff *skb_dequeue(struct sk_buff_head *list); /** * __skb_dequeue_tail - remove from the tail of the queue * @list: list to dequeue from * * Remove the tail of the list. This function does not take any locks * so must be used with appropriate locks held only. The tail item is * returned or %NULL if the list is empty.
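 *
 * Illustrative drain of a caller-private queue using the head variant
 * above (callers wanting drop accounting would use
 * __skb_queue_purge_reason() further below instead):
 *
 *	while ((skb = __skb_dequeue(list)) != NULL)
 *		kfree_skb(skb);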
*/ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) { struct sk_buff *skb = skb_peek_tail(list); if (skb) __skb_unlink(skb, list); return skb; } struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); static inline bool skb_is_nonlinear(const struct sk_buff *skb) { return skb->data_len; } static inline unsigned int skb_headlen(const struct sk_buff *skb) { return skb->len - skb->data_len; } static inline unsigned int __skb_pagelen(const struct sk_buff *skb) { unsigned int i, len = 0; for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) len += skb_frag_size(&skb_shinfo(skb)->frags[i]); return len; } static inline unsigned int skb_pagelen(const struct sk_buff *skb) { return skb_headlen(skb) + __skb_pagelen(skb); } static inline void skb_frag_fill_netmem_desc(skb_frag_t *frag, netmem_ref netmem, int off, int size) { frag->netmem = netmem; frag->offset = off; skb_frag_size_set(frag, size); } static inline void skb_frag_fill_page_desc(skb_frag_t *frag, struct page *page, int off, int size) { skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size); } static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo, int i, netmem_ref netmem, int off, int size) { skb_frag_t *frag = &shinfo->frags[i]; skb_frag_fill_netmem_desc(frag, netmem, off, size); } static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, int i, struct page *page, int off, int size) { __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off, size); } /** * skb_len_add - adds a number to len fields of skb * @skb: buffer to add len to * @delta: number of bytes to add */ static inline void skb_len_add(struct sk_buff *skb, int delta) { skb->len += delta; skb->data_len += delta; skb->truesize += delta; } /** * __skb_fill_netmem_desc - initialise a fragment in an skb * @skb: buffer containing fragment to be initialised * @i: fragment index to initialise * @netmem: the netmem to use for this fragment * @off: the offset to the data within @netmem * @size: the length of the data * * Initialises the @i'th fragment of @skb to point to @size bytes at * offset @off within @netmem. * * Does not take any additional reference on the fragment. */ static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i, netmem_ref netmem, int off, int size) { struct page *page; __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size); if (netmem_is_net_iov(netmem)) { skb->unreadable = true; return; } page = netmem_to_page(netmem); /* Propagate page pfmemalloc to the skb if we can. The problem is * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm).
*/ page = compound_head(page); if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; } static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size) { __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size); } static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i, netmem_ref netmem, int off, int size) { __skb_fill_netmem_desc(skb, i, netmem, off, size); skb_shinfo(skb)->nr_frags = i + 1; } /** * skb_fill_page_desc - initialise a paged fragment in an skb * @skb: buffer containing fragment to be initialised * @i: paged fragment index to initialise * @page: the page to use for this fragment * @off: the offset to the data within @page * @size: the length of the data * * As per __skb_fill_page_desc() -- initialises the @i'th fragment of * @skb to point to @size bytes at offset @off within @page. In * addition updates @skb such that @i is the last fragment. * * Does not take any additional reference on the fragment. */ static inline void skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size) { skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size); } /** * skb_fill_page_desc_noacc - initialise a paged fragment in an skb * @skb: buffer containing fragment to be initialised * @i: paged fragment index to initialise * @page: the page to use for this fragment * @off: the offset to the data within @page * @size: the length of the data * * Variant of skb_fill_page_desc() which does not handle * pfmemalloc propagation; use it when the page is not owned by us. */ static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i, struct page *page, int off, int size) { struct skb_shared_info *shinfo = skb_shinfo(skb); __skb_fill_page_desc_noacc(shinfo, i, page, off, size); shinfo->nr_frags = i + 1; } void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, int off, int size, unsigned int truesize); static inline void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, int size, unsigned int truesize) { skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size, truesize); } void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, unsigned int truesize); #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) { return skb->head + skb->tail; } static inline void skb_reset_tail_pointer(struct sk_buff *skb) { skb->tail = skb->data - skb->head; } static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) { skb_reset_tail_pointer(skb); skb->tail += offset; } #else /* NET_SKBUFF_DATA_USES_OFFSET */ static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) { return skb->tail; } static inline void skb_reset_tail_pointer(struct sk_buff *skb) { skb->tail = skb->data; } static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) { skb->tail = skb->data + offset; } #endif /* NET_SKBUFF_DATA_USES_OFFSET */ static inline void skb_assert_len(struct sk_buff *skb) { #ifdef CONFIG_DEBUG_NET if (WARN_ONCE(!skb->len, "%s\n", __func__)) DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); #endif /* CONFIG_DEBUG_NET */ } #if defined(CONFIG_FAIL_SKB_REALLOC) void skb_might_realloc(struct sk_buff *skb); #else static inline void skb_might_realloc(struct sk_buff *skb) {} #endif /* * Add data to an sk_buff */ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); void *skb_put(struct sk_buff *skb, unsigned int len); static inline void
*__skb_put(struct sk_buff *skb, unsigned int len) { void *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; return tmp; } static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) { void *tmp = __skb_put(skb, len); memset(tmp, 0, len); return tmp; } static inline void *__skb_put_data(struct sk_buff *skb, const void *data, unsigned int len) { void *tmp = __skb_put(skb, len); memcpy(tmp, data, len); return tmp; } static inline void __skb_put_u8(struct sk_buff *skb, u8 val) { *(u8 *)__skb_put(skb, 1) = val; } static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) { void *tmp = skb_put(skb, len); memset(tmp, 0, len); return tmp; } static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned int len) { void *tmp = skb_put(skb, len); memcpy(tmp, data, len); return tmp; } static inline void skb_put_u8(struct sk_buff *skb, u8 val) { *(u8 *)skb_put(skb, 1) = val; } void *skb_push(struct sk_buff *skb, unsigned int len); static inline void *__skb_push(struct sk_buff *skb, unsigned int len) { DEBUG_NET_WARN_ON_ONCE(len > INT_MAX); skb->data -= len; skb->len += len; return skb->data; } void *skb_pull(struct sk_buff *skb, unsigned int len); static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) { DEBUG_NET_WARN_ON_ONCE(len > INT_MAX); skb->len -= len; if (unlikely(skb->len < skb->data_len)) { #if defined(CONFIG_DEBUG_NET) skb->len += len; pr_err("__skb_pull(len=%u)\n", len); skb_dump(KERN_ERR, skb, false); #endif BUG(); } return skb->data += len; } static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) { return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); } void *skb_pull_data(struct sk_buff *skb, size_t len); void *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline enum skb_drop_reason pskb_may_pull_reason(struct sk_buff *skb, unsigned int len) { DEBUG_NET_WARN_ON_ONCE(len > INT_MAX); skb_might_realloc(skb); if (likely(len <= skb_headlen(skb))) return SKB_NOT_DROPPED_YET; if (unlikely(len > skb->len)) return SKB_DROP_REASON_PKT_TOO_SMALL; if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb)))) return SKB_DROP_REASON_NOMEM; return SKB_NOT_DROPPED_YET; } static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) { return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; } static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) { if (!pskb_may_pull(skb, len)) return NULL; skb->len -= len; return skb->data += len; } void skb_condense(struct sk_buff *skb); /** * skb_headroom - bytes at buffer head * @skb: buffer to check * * Return the number of bytes of free space at the head of an &sk_buff. */ static inline unsigned int skb_headroom(const struct sk_buff *skb) { return skb->data - skb->head; } /** * skb_tailroom - bytes at buffer end * @skb: buffer to check * * Return the number of bytes of free space at the tail of an sk_buff */ static inline int skb_tailroom(const struct sk_buff *skb) { return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; } /** * skb_availroom - bytes at buffer end * @skb: buffer to check * * Return the number of bytes of free space at the tail of an sk_buff * allocated by sk_stream_alloc() */ static inline int skb_availroom(const struct sk_buff *skb) { if (skb_is_nonlinear(skb)) return 0; return skb->end - skb->tail - skb->reserved_tailroom; } /** * skb_reserve - adjust headroom * @skb: buffer to alter * @len: bytes to move * * Increase the headroom of an empty &sk_buff by reducing the tail * room. 
This is only allowed for an empty buffer. */ static inline void skb_reserve(struct sk_buff *skb, int len) { skb->data += len; skb->tail += len; } /** * skb_tailroom_reserve - adjust reserved_tailroom * @skb: buffer to alter * @mtu: maximum amount of headlen permitted * @needed_tailroom: minimum amount of reserved_tailroom * * Set reserved_tailroom so that headlen can be as large as possible but * not larger than mtu and tailroom cannot be smaller than * needed_tailroom. * The required headroom should already have been reserved before using * this function. */ static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, unsigned int needed_tailroom) { SKB_LINEAR_ASSERT(skb); if (mtu < skb_tailroom(skb) - needed_tailroom) /* use at most mtu */ skb->reserved_tailroom = skb_tailroom(skb) - mtu; else /* use up to all available space */ skb->reserved_tailroom = needed_tailroom; } #define ENCAP_TYPE_ETHER 0 #define ENCAP_TYPE_IPPROTO 1 static inline void skb_set_inner_protocol(struct sk_buff *skb, __be16 protocol) { skb->inner_protocol = protocol; skb->inner_protocol_type = ENCAP_TYPE_ETHER; } static inline void skb_set_inner_ipproto(struct sk_buff *skb, __u8 ipproto) { skb->inner_ipproto = ipproto; skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; } static inline void skb_reset_inner_headers(struct sk_buff *skb) { skb->inner_mac_header = skb->mac_header; skb->inner_network_header = skb->network_header; skb->inner_transport_header = skb->transport_header; } static inline int skb_mac_header_was_set(const struct sk_buff *skb) { return skb->mac_header != (typeof(skb->mac_header))~0U; } static inline void skb_reset_mac_len(struct sk_buff *skb) { if (!skb_mac_header_was_set(skb)) { DEBUG_NET_WARN_ON_ONCE(1); skb->mac_len = 0; } else { skb->mac_len = skb->network_header - skb->mac_header; } } static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb) { return skb->head + skb->inner_transport_header; } static inline int skb_inner_transport_offset(const struct sk_buff *skb) { return skb_inner_transport_header(skb) - skb->data; } static inline void skb_reset_inner_transport_header(struct sk_buff *skb) { long offset = skb->data - skb->head; DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset); skb->inner_transport_header = offset; } static inline void skb_set_inner_transport_header(struct sk_buff *skb, const int offset) { skb_reset_inner_transport_header(skb); skb->inner_transport_header += offset; } static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) { return skb->head + skb->inner_network_header; } static inline void skb_reset_inner_network_header(struct sk_buff *skb) { long offset = skb->data - skb->head; DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset); skb->inner_network_header = offset; } static inline void skb_set_inner_network_header(struct sk_buff *skb, const int offset) { skb_reset_inner_network_header(skb); skb->inner_network_header += offset; } static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb) { return skb->inner_network_header > 0; } static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) { return skb->head + skb->inner_mac_header; } static inline void skb_reset_inner_mac_header(struct sk_buff *skb) { long offset = skb->data - skb->head; DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset); skb->inner_mac_header = offset; } static inline void skb_set_inner_mac_header(struct sk_buff *skb, const int offset) { 
skb_reset_inner_mac_header(skb); skb->inner_mac_header += offset; } static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { return skb->transport_header != (typeof(skb->transport_header))~0U; } static inline unsigned char *skb_transport_header(const struct sk_buff *skb) { DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); return skb->head + skb->transport_header; } static inline void skb_reset_transport_header(struct sk_buff *skb) { long offset = skb->data - skb->head; DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset); skb->transport_header = offset; } static inline void skb_set_transport_header(struct sk_buff *skb, const int offset) { skb_reset_transport_header(skb); skb->transport_header += offset; } static inline unsigned char *skb_network_header(const struct sk_buff *skb) { return skb->head + skb->network_header; } static inline void skb_reset_network_header(struct sk_buff *skb) { long offset = skb->data - skb->head; DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset); skb->network_header = offset; } static inline void skb_set_network_header(struct sk_buff *skb, const int offset) { skb_reset_network_header(skb); skb->network_header += offset; } static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); return skb->head + skb->mac_header; } static inline int skb_mac_offset(const struct sk_buff *skb) { return skb_mac_header(skb) - skb->data; } static inline u32 skb_mac_header_len(const struct sk_buff *skb) { DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); return skb->network_header - skb->mac_header; } static inline void skb_unset_mac_header(struct sk_buff *skb) { skb->mac_header = (typeof(skb->mac_header))~0U; } static inline void skb_reset_mac_header(struct sk_buff *skb) { long offset = skb->data - skb->head; DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset); skb->mac_header = offset; } static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) { skb_reset_mac_header(skb); skb->mac_header += offset; } static inline void skb_pop_mac_header(struct sk_buff *skb) { skb->mac_header = skb->network_header; } static inline void skb_probe_transport_header(struct sk_buff *skb) { struct flow_keys_basic keys; if (skb_transport_header_was_set(skb)) return; if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, NULL, 0, 0, 0, 0)) skb_set_transport_header(skb, keys.control.thoff); } static inline void skb_mac_header_rebuild(struct sk_buff *skb) { if (skb_mac_header_was_set(skb)) { const unsigned char *old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); memmove(skb_mac_header(skb), old_mac, skb->mac_len); } } /* Move the full mac header up to current network_header. * Leaves skb->data pointing at offset skb->mac_len into the mac_header. * Must be provided the complete mac header length. 
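 *
 * For orientation, an illustrative receive-path sequence that
 * establishes the offsets these helpers rely on (a sketch assuming an
 * IPv4 frame, with ip_hdrlen() from <net/ip.h> and ETH_HLEN from
 * <linux/if_ether.h>):
 *
 *	skb_reset_mac_header(skb);
 *	__skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);
 *	skb_set_transport_header(skb, ip_hdrlen(skb));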
*/ static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len) { if (skb_mac_header_was_set(skb)) { const unsigned char *old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -full_mac_len); memmove(skb_mac_header(skb), old_mac, full_mac_len); __skb_push(skb, full_mac_len - skb->mac_len); } } static inline int skb_checksum_start_offset(const struct sk_buff *skb) { return skb->csum_start - skb_headroom(skb); } static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) { return skb->head + skb->csum_start; } static inline int skb_transport_offset(const struct sk_buff *skb) { return skb_transport_header(skb) - skb->data; } static inline u32 skb_network_header_len(const struct sk_buff *skb) { DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); return skb->transport_header - skb->network_header; } static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) { return skb->inner_transport_header - skb->inner_network_header; } static inline int skb_network_offset(const struct sk_buff *skb) { return skb_network_header(skb) - skb->data; } static inline int skb_inner_network_offset(const struct sk_buff *skb) { return skb_inner_network_header(skb) - skb->data; } static inline enum skb_drop_reason pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len) { return pskb_may_pull_reason(skb, skb_network_offset(skb) + len); } static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) { return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; } /* * CPUs often take a performance hit when accessing unaligned memory * locations. The actual performance hit varies, it can be small if the * hardware handles it or large if we have to take an exception and fix it * in software. * * Since an ethernet header is 14 bytes network drivers often end up with * the IP header at an unaligned offset. The IP header can be aligned by * shifting the start of the packet by 2 bytes. Drivers should do this * with: * * skb_reserve(skb, NET_IP_ALIGN); * * The downside to this alignment of the IP header is that the DMA is now * unaligned. On some architectures the cost of an unaligned DMA is high * and this cost outweighs the gains made by aligning the IP header. * * Since this trade off varies between architectures, we allow NET_IP_ALIGN * to be overridden. */ #ifndef NET_IP_ALIGN #define NET_IP_ALIGN 2 #endif /* * The networking layer reserves some headroom in skb data (via * dev_alloc_skb). This is used to avoid having to reallocate skb data when * the header has to grow. In the default case, if the header has to grow * 32 bytes or less we avoid the reallocation. * * Unfortunately this headroom changes the DMA alignment of the resulting * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive * on some architectures. An architecture can override this value, * perhaps setting it to a cacheline in size (since that will maintain * cacheline alignment of the DMA). It must be a power of 2. * * Various parts of the networking layer expect at least 32 bytes of * headroom, you should not reduce this. * * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) * to reduce average number of cache lines per packet. 
* get_rps_cpu() for example only access one 64 bytes aligned block : * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD #define NET_SKB_PAD max(32, L1_CACHE_BYTES) #endif int ___pskb_trim(struct sk_buff *skb, unsigned int len); static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) { if (WARN_ON(skb_is_nonlinear(skb))) return; skb->len = len; skb_set_tail_pointer(skb, len); } static inline void __skb_trim(struct sk_buff *skb, unsigned int len) { __skb_set_length(skb, len); } void skb_trim(struct sk_buff *skb, unsigned int len); static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) { if (skb->data_len) return ___pskb_trim(skb, len); __skb_trim(skb, len); return 0; } static inline int pskb_trim(struct sk_buff *skb, unsigned int len) { skb_might_realloc(skb); return (len < skb->len) ? __pskb_trim(skb, len) : 0; } /** * pskb_trim_unique - remove end from a paged unique (not cloned) buffer * @skb: buffer to alter * @len: new length * * This is identical to pskb_trim except that the caller knows that * the skb is not cloned so we should never get an error due to out- * of-memory. */ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) { int err = pskb_trim(skb, len); BUG_ON(err); } static inline int __skb_grow(struct sk_buff *skb, unsigned int len) { unsigned int diff = len - skb->len; if (skb_tailroom(skb) < diff) { int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), GFP_ATOMIC); if (ret) return ret; } __skb_set_length(skb, len); return 0; } /** * skb_orphan - orphan a buffer * @skb: buffer to orphan * * If a buffer currently has an owner then we call the owner's * destructor function and make the @skb unowned. The buffer continues * to exist but is no longer charged to its former owner. */ static inline void skb_orphan(struct sk_buff *skb) { if (skb->destructor) { skb->destructor(skb); skb->destructor = NULL; skb->sk = NULL; } else { BUG_ON(skb->sk); } } /** * skb_orphan_frags - orphan the frags contained in a buffer * @skb: buffer to orphan frags from * @gfp_mask: allocation mask for replacement pages * * For each frag in the SKB which needs a destructor (i.e. has an * owner) create a copy of that frag and release the original * page by calling the destructor. */ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) { if (likely(!skb_zcopy(skb))) return 0; if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) return 0; return skb_copy_ubufs(skb, gfp_mask); } /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) { if (likely(!skb_zcopy(skb))) return 0; return skb_copy_ubufs(skb, gfp_mask); } /** * __skb_queue_purge_reason - empty a list * @list: list to empty * @reason: drop reason * * Delete all buffers on an &sk_buff list. Each buffer is removed from * the list and one reference dropped. This function does not take the * list lock and the caller must hold the relevant locks to use it. 
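 *
 * Illustrative teardown of a caller-private queue, such as the on-stack
 * sk_buff_head used in the earlier sketches:
 *
 *	__skb_queue_purge_reason(&tmp, SKB_DROP_REASON_QUEUE_PURGE);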
*/ static inline void __skb_queue_purge_reason(struct sk_buff_head *list, enum skb_drop_reason reason) { struct sk_buff *skb; while ((skb = __skb_dequeue(list)) != NULL) kfree_skb_reason(skb, reason); } static inline void __skb_queue_purge(struct sk_buff_head *list) { __skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE); } void skb_queue_purge_reason(struct sk_buff_head *list, enum skb_drop_reason reason); static inline void skb_queue_purge(struct sk_buff_head *list) { skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE); } unsigned int skb_rbtree_purge(struct rb_root *root); void skb_errqueue_purge(struct sk_buff_head *list); void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); /** * netdev_alloc_frag - allocate a page fragment * @fragsz: fragment size * * Allocates a frag from a page for receive buffer. * Uses GFP_ATOMIC allocations. */ static inline void *netdev_alloc_frag(unsigned int fragsz) { return __netdev_alloc_frag_align(fragsz, ~0u); } static inline void *netdev_alloc_frag_align(unsigned int fragsz, unsigned int align) { WARN_ON_ONCE(!is_power_of_2(align)); return __netdev_alloc_frag_align(fragsz, -align); } struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask); /** * netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on * @length: length to allocate * * Allocate a new &sk_buff and assign it a usage count of one. The * buffer has unspecified headroom built in. Users should allocate * the headroom they think they need without accounting for the * built in space. The built in space is used for optimisations. * * %NULL is returned if there is no free memory. Although this function * allocates memory it can be called from an interrupt. */ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, unsigned int length) { return __netdev_alloc_skb(dev, length, GFP_ATOMIC); } /* legacy helper around __netdev_alloc_skb() */ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask) { return __netdev_alloc_skb(NULL, length, gfp_mask); } /* legacy helper around netdev_alloc_skb() */ static inline struct sk_buff *dev_alloc_skb(unsigned int length) { return netdev_alloc_skb(NULL, length); } static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length, gfp_t gfp) { struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); if (NET_IP_ALIGN && skb) skb_reserve(skb, NET_IP_ALIGN); return skb; } static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length) { return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } static inline void skb_free_frag(void *addr) { page_frag_free(addr); } void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); static inline void *napi_alloc_frag(unsigned int fragsz) { return __napi_alloc_frag_align(fragsz, ~0u); } static inline void *napi_alloc_frag_align(unsigned int fragsz, unsigned int align) { WARN_ON_ONCE(!is_power_of_2(align)); return __napi_alloc_frag_align(fragsz, -align); } struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length); void napi_consume_skb(struct sk_buff *skb, int budget); void napi_skb_free_stolen_head(struct sk_buff *skb); void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason); /** * __dev_alloc_pages - allocate page for network Rx * @gfp_mask: allocation priority. 
Set __GFP_NOMEMALLOC if not for network Rx * @order: size of the allocation * * Allocate a new page. * * %NULL is returned if there is no free memory. */ static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { /* This piece of code contains several assumptions. * 1. This is for device Rx, therefore a cold page is preferred. * 2. The expectation is the user wants a compound page. * 3. If requesting a order 0 page it will not be compound * due to the check to see if order has a value in prep_new_page * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to * code in gfp_to_alloc_flags that should be enforcing this. */ gfp_mask |= __GFP_COMP | __GFP_MEMALLOC; return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order); } #define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__)) /* * This specialized allocator has to be a macro for its allocations to be * accounted separately (to have a separate alloc_tag). */ #define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order) /** * __dev_alloc_page - allocate a page for network Rx * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * * Allocate a new page. * * %NULL is returned if there is no free memory. */ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask) { return __dev_alloc_pages_noprof(gfp_mask, 0); } #define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__)) /* * This specialized allocator has to be a macro for its allocations to be * accounted separately (to have a separate alloc_tag). */ #define dev_alloc_page() dev_alloc_pages(0) /** * dev_page_is_reusable - check whether a page can be reused for network Rx * @page: the page to test * * A page shouldn't be considered for reusing/recycling if it was allocated * under memory pressure or at a distant memory node. * * Returns: false if this page should be returned to page allocator, true * otherwise. */ static inline bool dev_page_is_reusable(const struct page *page) { return likely(page_to_nid(page) == numa_mem_id() && !page_is_pfmemalloc(page)); } /** * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page * @page: The page that was allocated from skb_alloc_page * @skb: The skb that may need pfmemalloc set */ static inline void skb_propagate_pfmemalloc(const struct page *page, struct sk_buff *skb) { if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; } /** * skb_frag_off() - Returns the offset of a skb fragment * @frag: the paged fragment */ static inline unsigned int skb_frag_off(const skb_frag_t *frag) { return frag->offset; } /** * skb_frag_off_add() - Increments the offset of a skb fragment by @delta * @frag: skb fragment * @delta: value to add */ static inline void skb_frag_off_add(skb_frag_t *frag, int delta) { frag->offset += delta; } /** * skb_frag_off_set() - Sets the offset of a skb fragment * @frag: skb fragment * @offset: offset of fragment */ static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) { frag->offset = offset; } /** * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment * @fragto: skb fragment where offset is set * @fragfrom: skb fragment offset is copied from */ static inline void skb_frag_off_copy(skb_frag_t *fragto, const skb_frag_t *fragfrom) { fragto->offset = fragfrom->offset; } /* Return: true if the skb_frag contains a net_iov. 
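 *
 * Illustrative walk over an skb's paged fragments using the frag
 * accessors below (a sketch; i and total are the caller's, and
 * net_iov-backed frags have no struct page behind them to touch):
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *
 *		if (skb_frag_is_net_iov(frag))
 *			continue;
 *		total += skb_frag_size(frag);
 *	}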
*/ static inline bool skb_frag_is_net_iov(const skb_frag_t *frag) { return netmem_is_net_iov(frag->netmem); } /** * skb_frag_net_iov - retrieve the net_iov referred to by fragment * @frag: the fragment * * Return: the &struct net_iov associated with @frag. Returns NULL if this * frag has no associated net_iov. */ static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag) { if (!skb_frag_is_net_iov(frag)) return NULL; return netmem_to_net_iov(frag->netmem); } /** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment * * Return: the &struct page associated with @frag. Returns NULL if this frag * has no associated page. */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { if (skb_frag_is_net_iov(frag)) return NULL; return netmem_to_page(frag->netmem); } /** * skb_frag_netmem - retrieve the netmem referred to by a fragment * @frag: the fragment * * Return: the &netmem_ref associated with @frag. */ static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag) { return frag->netmem; } int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom); int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, const struct bpf_prog *prog); /** * skb_frag_address - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer * * Returns: the address of the data within @frag. The page must already * be mapped. */ static inline void *skb_frag_address(const skb_frag_t *frag) { if (!skb_frag_page(frag)) return NULL; return page_address(skb_frag_page(frag)) + skb_frag_off(frag); } /** * skb_frag_address_safe - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer * * Returns: the address of the data within @frag. Checks that the page * is mapped and returns %NULL otherwise. */ static inline void *skb_frag_address_safe(const skb_frag_t *frag) { void *ptr = page_address(skb_frag_page(frag)); if (unlikely(!ptr)) return NULL; return ptr + skb_frag_off(frag); } /** * skb_frag_page_copy() - sets the page in a fragment from another fragment * @fragto: skb fragment where page is set * @fragfrom: skb fragment page is copied from */ static inline void skb_frag_page_copy(skb_frag_t *fragto, const skb_frag_t *fragfrom) { fragto->netmem = fragfrom->netmem; } bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); /** * __skb_frag_dma_map - maps a paged fragment via the DMA API * @dev: the device to map the fragment to * @frag: the paged fragment to map * @offset: the offset within the fragment (starting at the * fragment's own offset) * @size: the number of bytes to map * @dir: the direction of the mapping (``PCI_DMA_*``) * * Maps the page associated with @frag to @device. */ static inline dma_addr_t __skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, size_t offset, size_t size, enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), skb_frag_off(frag) + offset, size, dir); } #define skb_frag_dma_map(dev, frag, ...) 
\ CONCATENATE(_skb_frag_dma_map, \ COUNT_ARGS(__VA_ARGS__))(dev, frag, ##__VA_ARGS__) #define __skb_frag_dma_map1(dev, frag, offset, uf, uo) ({ \ const skb_frag_t *uf = (frag); \ size_t uo = (offset); \ \ __skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \ DMA_TO_DEVICE); \ }) #define _skb_frag_dma_map1(dev, frag, offset) \ __skb_frag_dma_map1(dev, frag, offset, __UNIQUE_ID(frag_), \ __UNIQUE_ID(offset_)) #define _skb_frag_dma_map0(dev, frag) \ _skb_frag_dma_map1(dev, frag, 0) #define _skb_frag_dma_map2(dev, frag, offset, size) \ __skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE) #define _skb_frag_dma_map3(dev, frag, offset, size, dir) \ __skb_frag_dma_map(dev, frag, offset, size, dir) static inline struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) { return __pskb_copy(skb, skb_headroom(skb), gfp_mask); } static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, gfp_t gfp_mask) { return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); } /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check * @len: length up to which to write * * Returns true if modifying the header part of the cloned buffer * does not require the data to be copied. */ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) { return !skb_header_cloned(skb) && skb_headroom(skb) + len <= skb->hdr_len; } static inline int skb_try_make_writable(struct sk_buff *skb, unsigned int write_len) { return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned) { int delta = 0; if (headroom > skb_headroom(skb)) delta = headroom - skb_headroom(skb); if (delta || cloned) return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, GFP_ATOMIC); return 0; } /** * skb_cow - copy header of skb when it is required * @skb: buffer to cow * @headroom: needed headroom * * If the skb passed lacks sufficient headroom or its data part * is shared, data is reallocated. If reallocation fails, an error * is returned and original skb is not changed. * * The result is skb with writable area skb->head...skb->tail * and at least @headroom of space at head. */ static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) { return __skb_cow(skb, headroom, skb_cloned(skb)); } /** * skb_cow_head - skb_cow but only making the head writable * @skb: buffer to cow * @headroom: needed headroom * * This function is identical to skb_cow except that we replace the * skb_cloned check by skb_header_cloned. It should be used when * you only need to push on some header and do not need to modify * the data. */ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) { return __skb_cow(skb, headroom, skb_header_cloned(skb)); } /** * skb_padto - pad an skbuff up to a minimal size * @skb: buffer to pad * @len: minimal length * * Pads up a buffer to ensure the trailing bytes exist and are * blanked. If the buffer already contains sufficient data it * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error.
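 *
 * Illustrative use in a transmit path that must hand at least ETH_ZLEN
 * bytes to the hardware (on failure the skb has already been freed, so
 * the sketch just reports the frame as consumed):
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;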
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 * @free_on_error: free buffer on error
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error if @free_on_error is true.
 */
static inline int __must_check __skb_put_padto(struct sk_buff *skb,
					       unsigned int len,
					       bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 * skb_put_padto - increase size and pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}

bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i) __must_check;

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;

		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == skb_frag_off(frag) + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return: true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = wsum_negate(csum_partial(start, len,
						     wsum_negate(skb->csum)));
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 * skb_postpush_rcsum - update checksum for received skb after push
 * @skb: buffer to update
 * @start: start of data after push
 * @len: length of data pushed
 *
 * After doing a push on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * skb_push_rcsum - push skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pushed
 *
 * This function performs an skb_push on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_push unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksums of received packets are still valid after the operation.
 * It can change skb pointers.
*/ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { skb_might_realloc(skb); if (likely(len >= skb->len)) return 0; return pskb_trim_rcsum_slow(skb, len); } static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) { if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; __skb_trim(skb, len); return 0; } static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) { if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; return __skb_grow(skb, len); } #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) #define skb_rb_first(root) rb_to_skb(rb_first(root)) #define skb_rb_last(root) rb_to_skb(rb_last(root)) #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ skb = skb->next) #define skb_queue_walk_safe(queue, skb, tmp) \ for (skb = (queue)->next, tmp = skb->next; \ skb != (struct sk_buff *)(queue); \ skb = tmp, tmp = skb->next) #define skb_queue_walk_from(queue, skb) \ for (; skb != (struct sk_buff *)(queue); \ skb = skb->next) #define skb_rbtree_walk(skb, root) \ for (skb = skb_rb_first(root); skb != NULL; \ skb = skb_rb_next(skb)) #define skb_rbtree_walk_from(skb) \ for (; skb != NULL; \ skb = skb_rb_next(skb)) #define skb_rbtree_walk_from_safe(skb, tmp) \ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \ skb = tmp) #define skb_queue_walk_from_safe(queue, skb, tmp) \ for (tmp = skb->next; \ skb != (struct sk_buff *)(queue); \ skb = tmp, tmp = skb->next) #define skb_queue_reverse_walk(queue, skb) \ for (skb = (queue)->prev; \ skb != (struct sk_buff *)(queue); \ skb = skb->prev) #define skb_queue_reverse_walk_safe(queue, skb, tmp) \ for (skb = (queue)->prev, tmp = skb->prev; \ skb != (struct sk_buff *)(queue); \ skb = tmp, tmp = skb->prev) #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ for (tmp = skb->prev; \ skb != (struct sk_buff *)(queue); \ skb = tmp, tmp = skb->prev) static inline bool skb_has_frag_list(const struct sk_buff *skb) { return skb_shinfo(skb)->frag_list != NULL; } static inline void skb_frag_list_init(struct sk_buff *skb) { skb_shinfo(skb)->frag_list = NULL; } #define skb_walk_frags(skb, iter) \ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, int *err, long *timeo_p, const struct sk_buff *skb); struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, struct sk_buff_head *queue, unsigned int flags, int *off, int *err, struct sk_buff **last); struct sk_buff *__skb_try_recv_datagram(struct sock *sk, struct sk_buff_head *queue, unsigned int flags, int *off, int *err, struct sk_buff **last); struct sk_buff *__skb_recv_datagram(struct sock *sk, struct sk_buff_head *sk_queue, unsigned int flags, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err); __poll_t datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int skb_copy_datagram_iter(const struct sk_buff *from, int offset, struct iov_iter *to, int size); static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, struct msghdr *msg, int size) { return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); } int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, struct msghdr *msg); int skb_copy_and_hash_datagram_iter(const struct 
sk_buff *skb, int offset, struct iov_iter *to, int len, struct ahash_request *hash); int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len); int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, struct pipe_inode_info *pipe, unsigned int len, unsigned int flags); int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len); int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen); void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, unsigned int offset); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len); int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); int skb_eth_pop(struct sk_buff *skb); int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, const unsigned char *src); int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, int mac_len, bool ethernet); int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, bool ethernet); int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); int skb_mpls_dec_ttl(struct sk_buff *skb); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT; } static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) { return copy_to_iter(data, len, &msg->msg_iter) == len ? 
0 : -EFAULT; } struct skb_checksum_ops { __wsum (*update)(const void *mem, int len, __wsum wsum); __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); }; extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); static inline void * __must_check __skb_header_pointer(const struct sk_buff *skb, int offset, int len, const void *data, int hlen, void *buffer) { if (likely(hlen - offset >= len)) return (void *)data + offset; if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) return NULL; return buffer; } static inline void * __must_check skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) { return __skb_header_pointer(skb, offset, len, skb->data, skb_headlen(skb), buffer); } static inline void * __must_check skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) { if (likely(skb_headlen(skb) - offset >= len)) return skb->data + offset; return NULL; } /** * skb_needs_linearize - check if we need to linearize a given skb * depending on the given device features. * @skb: socket buffer to check * @features: net device features * * Returns true if either: * 1. skb has frag_list and the device doesn't support FRAGLIST, or * 2. skb is fragmented and the device does not support SG. */ static inline bool skb_needs_linearize(struct sk_buff *skb, netdev_features_t features) { return skb_is_nonlinear(skb) && ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); } static inline void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len) { memcpy(to, skb->data, len); } static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, const int offset, void *to, const unsigned int len) { memcpy(to, skb->data + offset, len); } static inline void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len) { memcpy(skb->data, from, len); } static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, const int offset, const void *from, const unsigned int len) { memcpy(skb->data + offset, from, len); } void skb_init(void); static inline ktime_t skb_get_ktime(const struct sk_buff *skb) { return skb->tstamp; } /** * skb_get_timestamp - get timestamp from a skb * @skb: skb to get stamp from * @stamp: pointer to struct __kernel_old_timeval to store stamp in * * Timestamps are stored in the skb as offsets to a base timestamp. * This function converts the offset back to a struct timeval and stores * it in stamp. 
*/ static inline void skb_get_timestamp(const struct sk_buff *skb, struct __kernel_old_timeval *stamp) { *stamp = ns_to_kernel_old_timeval(skb->tstamp); } static inline void skb_get_new_timestamp(const struct sk_buff *skb, struct __kernel_sock_timeval *stamp) { struct timespec64 ts = ktime_to_timespec64(skb->tstamp); stamp->tv_sec = ts.tv_sec; stamp->tv_usec = ts.tv_nsec / 1000; } static inline void skb_get_timestampns(const struct sk_buff *skb, struct __kernel_old_timespec *stamp) { struct timespec64 ts = ktime_to_timespec64(skb->tstamp); stamp->tv_sec = ts.tv_sec; stamp->tv_nsec = ts.tv_nsec; } static inline void skb_get_new_timestampns(const struct sk_buff *skb, struct __kernel_timespec *stamp) { struct timespec64 ts = ktime_to_timespec64(skb->tstamp); stamp->tv_sec = ts.tv_sec; stamp->tv_nsec = ts.tv_nsec; } static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); skb->tstamp_type = SKB_CLOCK_REALTIME; } static inline ktime_t net_timedelta(ktime_t t) { return ktime_sub(ktime_get_real(), t); } static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, u8 tstamp_type) { skb->tstamp = kt; if (kt) skb->tstamp_type = tstamp_type; else skb->tstamp_type = SKB_CLOCK_REALTIME; } static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb, ktime_t kt, clockid_t clockid) { u8 tstamp_type = SKB_CLOCK_REALTIME; switch (clockid) { case CLOCK_REALTIME: break; case CLOCK_MONOTONIC: tstamp_type = SKB_CLOCK_MONOTONIC; break; case CLOCK_TAI: tstamp_type = SKB_CLOCK_TAI; break; default: WARN_ON_ONCE(1); kt = 0; } skb_set_delivery_time(skb, kt, tstamp_type); } DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); /* It is used in the ingress path to clear the delivery_time. * If needed, set the skb->tstamp to the (rcv) timestamp. */ static inline void skb_clear_delivery_time(struct sk_buff *skb) { if (skb->tstamp_type) { skb->tstamp_type = SKB_CLOCK_REALTIME; if (static_branch_unlikely(&netstamp_needed_key)) skb->tstamp = ktime_get_real(); else skb->tstamp = 0; } } static inline void skb_clear_tstamp(struct sk_buff *skb) { if (skb->tstamp_type) return; skb->tstamp = 0; } static inline ktime_t skb_tstamp(const struct sk_buff *skb) { if (skb->tstamp_type) return 0; return skb->tstamp; } static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) { if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp) return skb->tstamp; if (static_branch_unlikely(&netstamp_needed_key) || cond) return ktime_get_real(); return 0; } static inline u8 skb_metadata_len(const struct sk_buff *skb) { return skb_shinfo(skb)->meta_len; } static inline void *skb_metadata_end(const struct sk_buff *skb) { return skb_mac_header(skb); } static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, const struct sk_buff *skb_b, u8 meta_len) { const void *a = skb_metadata_end(skb_a); const void *b = skb_metadata_end(skb_b); u64 diffs = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || BITS_PER_LONG != 64) goto slow; /* Using more efficient variant than plain call to memcmp(). 
*/ switch (meta_len) { #define __it(x, op) (x -= sizeof(u##op)) #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) case 32: diffs |= __it_diff(a, b, 64); fallthrough; case 24: diffs |= __it_diff(a, b, 64); fallthrough; case 16: diffs |= __it_diff(a, b, 64); fallthrough; case 8: diffs |= __it_diff(a, b, 64); break; case 28: diffs |= __it_diff(a, b, 64); fallthrough; case 20: diffs |= __it_diff(a, b, 64); fallthrough; case 12: diffs |= __it_diff(a, b, 64); fallthrough; case 4: diffs |= __it_diff(a, b, 32); break; default: slow: return memcmp(a - meta_len, b - meta_len, meta_len); } return diffs; } static inline bool skb_metadata_differs(const struct sk_buff *skb_a, const struct sk_buff *skb_b) { u8 len_a = skb_metadata_len(skb_a); u8 len_b = skb_metadata_len(skb_b); if (!(len_a | len_b)) return false; return len_a != len_b ? true : __skb_metadata_differs(skb_a, skb_b, len_a); } static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) { skb_shinfo(skb)->meta_len = meta_len; } static inline void skb_metadata_clear(struct sk_buff *skb) { skb_metadata_set(skb, 0); } struct sk_buff *skb_clone_sk(struct sk_buff *skb); #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING void skb_clone_tx_timestamp(struct sk_buff *skb); bool skb_defer_rx_timestamp(struct sk_buff *skb); #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ static inline void skb_clone_tx_timestamp(struct sk_buff *skb) { } static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) { return false; } #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ /** * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps * * PHY drivers may accept clones of transmitted packets for * timestamping via their phy_driver.txtstamp method. These drivers * must call this function to return the skb back to the stack with a * timestamp. * * @skb: clone of the original outgoing packet * @hwtstamps: hardware time stamps * */ void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps); void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, struct skb_shared_hwtstamps *hwtstamps, struct sock *sk, int tstype); /** * skb_tstamp_tx - queue clone of skb with send time stamps * @orig_skb: the original outgoing packet * @hwtstamps: hardware time stamps, may be NULL if not available * * If the skb has a socket associated, then this function clones the * skb (thus sharing the actual data and optional structures), stores * the optional hardware time stamping information (if non NULL) or * generates a software time stamp (otherwise), then queues the clone * to the error queue of the socket. Errors are silently ignored. */ void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); /** * skb_tx_timestamp() - Driver hook for transmit timestamping * * Ethernet MAC Drivers should call this function in their hard_xmit() * function immediately before giving the sk_buff to the MAC hardware. * * Specifically, one should make absolutely sure that this function is * called before TX completion of this packet can trigger. Otherwise * the packet could potentially already be freed. * * @skb: A socket buffer. 
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns: true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum was validated, or validation was deferred to
 *	__skb_checksum_complete. In the latter case ip_summed will not be
 *	CHECKSUM_UNNECESSARY and the pseudo checksum is stored in skb->csum
 *	for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro because the
 * pseudo header is produced by the compute_pseudo input function, which we
 * only want to call if it is actually needed. First we try to validate
 * without any computation (checksum unnecessary) and then calculate based
 * on checksum complete, calling the function to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, compute_pseudo)		\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update the skbuff and packet to reflect the remote checksum offload
 * operation. When called, ptr indicates the starting point for skb->csum
 * when ip_summed is CHECKSUM_COMPLETE. If we need to create a checksum
 * complete value here, skb_postpull_rcsum is done so that skb->csum starts
 * at ptr.
*/ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, int start, int offset, bool nopartial) { __wsum delta; if (!nopartial) { skb_remcsum_adjust_partial(skb, ptr, start, offset); return; } if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { __skb_checksum_complete(skb); skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); } delta = remcsum_adjust(ptr, skb->csum, start, offset); /* Adjust skb->csum since we changed the packet */ skb->csum = csum_add(skb->csum, delta); } static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) return (void *)(skb->_nfct & NFCT_PTRMASK); #else return NULL; #endif } static inline unsigned long skb_get_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) return skb->_nfct; #else return 0UL; #endif } static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) skb->slow_gro |= !!nfct; skb->_nfct = nfct; #endif } #ifdef CONFIG_SKB_EXTENSIONS enum skb_ext_id { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) SKB_EXT_BRIDGE_NF, #endif #ifdef CONFIG_XFRM SKB_EXT_SEC_PATH, #endif #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) TC_SKB_EXT, #endif #if IS_ENABLED(CONFIG_MPTCP) SKB_EXT_MPTCP, #endif #if IS_ENABLED(CONFIG_MCTP_FLOWS) SKB_EXT_MCTP, #endif SKB_EXT_NUM, /* must be last */ }; /** * struct skb_ext - sk_buff extensions * @refcnt: 1 on allocation, deallocated on 0 * @offset: offset to add to @data to obtain extension address * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units * @data: start of extension data, variable sized * * Note: offsets/lengths are stored in chunks of 8 bytes, this allows * to use 'u8' types while allowing up to 2kb worth of extension data. */ struct skb_ext { refcount_t refcnt; u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ u8 chunks; /* same */ char data[] __aligned(8); }; struct skb_ext *__skb_ext_alloc(gfp_t flags); void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, struct skb_ext *ext); void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); void __skb_ext_put(struct skb_ext *ext); static inline void skb_ext_put(struct sk_buff *skb) { if (skb->active_extensions) __skb_ext_put(skb->extensions); } static inline void __skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) { dst->active_extensions = src->active_extensions; if (src->active_extensions) { struct skb_ext *ext = src->extensions; refcount_inc(&ext->refcnt); dst->extensions = ext; } } static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) { skb_ext_put(dst); __skb_ext_copy(dst, src); } static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) { return !!ext->offset[i]; } static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) { return skb->active_extensions & (1 << id); } static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) { if (skb_ext_exist(skb, id)) __skb_ext_del(skb, id); } static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) { if (skb_ext_exist(skb, id)) { struct skb_ext *ext = skb->extensions; return (void *)ext + (ext->offset[id] << 3); } return NULL; } static inline void skb_ext_reset(struct sk_buff *skb) { if (unlikely(skb->active_extensions)) { __skb_ext_put(skb->extensions); skb->active_extensions = 0; } } static inline bool skb_has_extensions(struct sk_buff *skb) { return unlikely(skb->active_extensions); } #else 
static inline void skb_ext_put(struct sk_buff *skb) {} static inline void skb_ext_reset(struct sk_buff *skb) {} static inline void skb_ext_del(struct sk_buff *skb, int unused) {} static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } #endif /* CONFIG_SKB_EXTENSIONS */ static inline void nf_reset_ct(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(skb)); skb->_nfct = 0; #endif } static inline void nf_reset_trace(struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) skb->nf_trace = 0; #endif } static inline void ipvs_reset(struct sk_buff *skb) { #if IS_ENABLED(CONFIG_IP_VS) skb->ipvs_property = 0; #endif } /* Note: This doesn't put any conntrack info in dst. */ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) dst->_nfct = src->_nfct; nf_conntrack_get(skb_nfct(src)); #endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) if (copy) dst->nf_trace = src->nf_trace; #endif } static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(dst)); #endif dst->slow_gro = src->slow_gro; __nf_copy(dst, src, true); } #ifdef CONFIG_NETWORK_SECMARK static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) { to->secmark = from->secmark; } static inline void skb_init_secmark(struct sk_buff *skb) { skb->secmark = 0; } #else static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) { } static inline void skb_init_secmark(struct sk_buff *skb) { } #endif static inline int secpath_exists(const struct sk_buff *skb) { #ifdef CONFIG_XFRM return skb_ext_exist(skb, SKB_EXT_SEC_PATH); #else return 0; #endif } static inline bool skb_irq_freeable(const struct sk_buff *skb) { return !skb->destructor && !secpath_exists(skb) && !skb_nfct(skb) && !skb->_skb_refdst && !skb_has_frag_list(skb); } static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) { skb->queue_mapping = queue_mapping; } static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) { return skb->queue_mapping; } static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) { to->queue_mapping = from->queue_mapping; } static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) { skb->queue_mapping = rx_queue + 1; } static inline u16 skb_get_rx_queue(const struct sk_buff *skb) { return skb->queue_mapping - 1; } static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) { return skb->queue_mapping != 0; } static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) { skb->dst_pending_confirm = val; } static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) { return skb->dst_pending_confirm != 0; } static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) { #ifdef CONFIG_XFRM return skb_ext_find(skb, SKB_EXT_SEC_PATH); #else return NULL; #endif } static inline bool skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; } /* Note: Should be called only if skb_is_gso(skb) is true */ static inline bool 
skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one. Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
*/ return csum_partial(l4_hdr, csum_start - l4_hdr, partial); } static inline bool skb_is_redirected(const struct sk_buff *skb) { return skb->redirected; } static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) { skb->redirected = 1; #ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; if (skb->from_ingress) skb_clear_tstamp(skb); #endif } static inline void skb_reset_redirect(struct sk_buff *skb) { skb->redirected = 0; } static inline void skb_set_redirected_noclear(struct sk_buff *skb, bool from_ingress) { skb->redirected = 1; #ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; #endif } static inline bool skb_csum_is_sctp(struct sk_buff *skb) { #if IS_ENABLED(CONFIG_IP_SCTP) return skb->csum_not_inet; #else return 0; #endif } static inline void skb_reset_csum_not_inet(struct sk_buff *skb) { skb->ip_summed = CHECKSUM_NONE; #if IS_ENABLED(CONFIG_IP_SCTP) skb->csum_not_inet = 0; #endif } static inline void skb_set_kcov_handle(struct sk_buff *skb, const u64 kcov_handle) { #ifdef CONFIG_KCOV skb->kcov_handle = kcov_handle; #endif } static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { #ifdef CONFIG_KCOV return skb->kcov_handle; #else return 0; #endif } static inline void skb_mark_for_recycle(struct sk_buff *skb) { #ifdef CONFIG_PAGE_POOL skb->pp_recycle = 1; #endif } ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, ssize_t maxsize, gfp_t gfp); #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */
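/*
 * Illustrative usage sketch (added for this listing, not part of the
 * header above): one way a driver transmit path might combine
 * skb_cow_head(), skb_push() and skb_padto() as documented earlier.
 * The function name and the 4-byte tag are hypothetical; only the
 * skb/netdev helpers are existing kernel API.
 */
static netdev_tx_t example_tag_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	/* Make the header writable, with room for a 4-byte tag. */
	if (skb_cow_head(skb, 4)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memset(skb_push(skb, 4), 0, 4);	/* hypothetical tag contents */

	/*
	 * Enforce the minimum Ethernet frame length; on failure the
	 * skb has already been freed for us (see skb_padto() above).
	 */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* ...hand the skb to the hardware queue here... */
	return NETDEV_TX_OK;
}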
// SPDX-License-Identifier: GPL-2.0-only
/***************************************************************************
 *   Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> *
 *                                                                         *
 *   Based on Logitech G13 driver (v0.4)                                   *
 *     Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu>   *
 *                                                                         *
 ***************************************************************************/

#include <linux/hid.h>
#include <linux/hid-debug.h>
#include <linux/input.h>
#include "hid-ids.h"

#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/backlight.h>
#include <linux/lcd.h>

#include <linux/leds.h>

#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <linux/completion.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <media/rc-core.h>

#include "hid-picolcd.h"

int picolcd_raw_cir(struct picolcd_data *data,
		    struct hid_report *report, u8 *raw_data, int size)
{
	unsigned long flags;
	int i, w, sz;
	struct ir_raw_event rawir = {};

	/* ignore if rc_dev is NULL or status is shunned */
	spin_lock_irqsave(&data->lock, flags);
	if (!data->rc_dev || (data->status & PICOLCD_CIR_SHUN)) {
		spin_unlock_irqrestore(&data->lock, flags);
		return 1;
	}
	spin_unlock_irqrestore(&data->lock, flags);

	/* PicoLCD USB packets contain 16-bit intervals in network order,
	 * with value negated for pulse. Intervals are in microseconds.
	 *
	 * Note: some userspace LIRC code for PicoLCD says negated values
	 * for space - is it a matter of IR chip? (pulse for my TSOP2236)
	 *
	 * In addition, the first interval seems to be around 15000 + base
	 * interval for non-first report of IR data - thus the quirk below
	 * to get RC_CODE to understand Sony and JVC remotes I have at hand
	 */
	sz = size > 0 ? min((int)raw_data[0], size-1) : 0;
	for (i = 0; i+1 < sz; i += 2) {
		w = (raw_data[i] << 8) | (raw_data[i+1]);
		rawir.pulse = !!(w & 0x8000);
		rawir.duration = rawir.pulse ? (65536 - w) : w;
		/* Quirk!!
- see above */ if (i == 0 && rawir.duration > 15000) rawir.duration -= 15000; ir_raw_event_store(data->rc_dev, &rawir); } ir_raw_event_handle(data->rc_dev); return 1; } static int picolcd_cir_open(struct rc_dev *dev) { struct picolcd_data *data = dev->priv; unsigned long flags; spin_lock_irqsave(&data->lock, flags); data->status &= ~PICOLCD_CIR_SHUN; spin_unlock_irqrestore(&data->lock, flags); return 0; } static void picolcd_cir_close(struct rc_dev *dev) { struct picolcd_data *data = dev->priv; unsigned long flags; spin_lock_irqsave(&data->lock, flags); data->status |= PICOLCD_CIR_SHUN; spin_unlock_irqrestore(&data->lock, flags); } /* initialize CIR input device */ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report) { struct rc_dev *rdev; int ret = 0; rdev = rc_allocate_device(RC_DRIVER_IR_RAW); if (!rdev) return -ENOMEM; rdev->priv = data; rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->open = picolcd_cir_open; rdev->close = picolcd_cir_close; rdev->device_name = data->hdev->name; rdev->input_phys = data->hdev->phys; rdev->input_id.bustype = data->hdev->bus; rdev->input_id.vendor = data->hdev->vendor; rdev->input_id.product = data->hdev->product; rdev->input_id.version = data->hdev->version; rdev->dev.parent = &data->hdev->dev; rdev->driver_name = PICOLCD_NAME; rdev->map_name = RC_MAP_RC6_MCE; rdev->timeout = MS_TO_US(100); rdev->rx_resolution = 1; ret = rc_register_device(rdev); if (ret) goto err; data->rc_dev = rdev; return 0; err: rc_free_device(rdev); return ret; } void picolcd_exit_cir(struct picolcd_data *data) { struct rc_dev *rdev = data->rc_dev; data->rc_dev = NULL; rc_unregister_device(rdev); }
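/*
 * Worked example (added for this listing) of the interval encoding that
 * picolcd_raw_cir() above decodes: 16-bit big-endian words, where a set
 * top bit means the value is negated and denotes a pulse.  The helper
 * and the sample words are made up for illustration.
 */
static void example_decode_cir_word(u16 w)
{
	bool pulse = !!(w & 0x8000);
	u32 duration_us = pulse ? (65536 - w) : w;

	/*
	 * e.g. w = 0xfdaa -> pulse of 65536 - 64938 = 598 us
	 *      w = 0x0258 -> space of 600 us
	 */
	pr_debug("%s %u us\n", pulse ? "pulse" : "space", duration_us);
}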
#ifndef _LINUX_JHASH_H
#define _LINUX_JHASH_H

/* jhash.h: Jenkins hash support.
 *
 * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
 *
 * https://burtleburtle.net/bob/hash/
 *
 * These are the credits from Bob's sources:
 *
 * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
 *
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
 * are externally useful functions. Routines to test the hash are included
 * if SELF_TEST is defined. You can use this free for any purpose. It's in
 * the public domain. It has no warranty.
 *
 * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org)
 *
 * I've modified Bob's hash to be useful in the Linux kernel, and
 * any bugs present are my fault.
 * Jozsef
 */
#include <linux/bitops.h>
#include <linux/unaligned/packed_struct.h>

/* The best hash sizes are powers of two */
#define jhash_size(n)	((u32)1<<(n))
/* Mask the hash value, i.e. (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n)	(jhash_size(n)-1)

/* __jhash_mix - mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c)			\
{						\
	a -= c;  a ^= rol32(c, 4);  c += b;	\
	b -= a;  b ^= rol32(a, 6);  a += c;	\
	c -= b;  c ^= rol32(b, 8);  b += a;	\
	a -= c;  a ^= rol32(c, 16); c += b;	\
	b -= a;  b ^= rol32(a, 19); a += c;	\
	c -= b;  c ^= rol32(b, 4);  b += a;	\
}

/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
#define __jhash_final(a, b, c)			\
{						\
	c ^= b; c -= rol32(b, 14);		\
	a ^= c; a -= rol32(c, 11);		\
	b ^= a; b -= rol32(a, 25);		\
	c ^= b; c -= rol32(b, 16);		\
	a ^= c; a -= rol32(c, 4);		\
	b ^= a; b -= rol32(a, 14);		\
	c ^= b; c -= rol32(b, 24);		\
}

/* An arbitrary initial parameter */
#define JHASH_INITVAL		0xdeadbeef

/* jhash - hash an arbitrary key
 * @k: sequence of bytes as key
 * @length: the length of the key
 * @initval: the previous hash, or an arbitrary value
 *
 * The generic version, hashes an arbitrary sequence of bytes.
 * No alignment or length assumptions are made about the input key.
 *
 * Returns the hash value of the key. The result depends on endianness.
*/ static inline u32 jhash(const void *key, u32 length, u32 initval) { u32 a, b, c; const u8 *k = key; /* Set up the internal state */ a = b = c = JHASH_INITVAL + length + initval; /* All but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += __get_unaligned_cpu32(k); b += __get_unaligned_cpu32(k + 4); c += __get_unaligned_cpu32(k + 8); __jhash_mix(a, b, c); length -= 12; k += 12; } /* Last block: affect all 32 bits of (c) */ switch (length) { case 12: c += (u32)k[11]<<24; fallthrough; case 11: c += (u32)k[10]<<16; fallthrough; case 10: c += (u32)k[9]<<8; fallthrough; case 9: c += k[8]; fallthrough; case 8: b += (u32)k[7]<<24; fallthrough; case 7: b += (u32)k[6]<<16; fallthrough; case 6: b += (u32)k[5]<<8; fallthrough; case 5: b += k[4]; fallthrough; case 4: a += (u32)k[3]<<24; fallthrough; case 3: a += (u32)k[2]<<16; fallthrough; case 2: a += (u32)k[1]<<8; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); break; case 0: /* Nothing left to add */ break; } return c; } /* jhash2 - hash an array of u32's * @k: the key which must be an array of u32's * @length: the number of u32's in the key * @initval: the previous hash, or an arbitrary value * * Returns the hash value of the key. */ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) { u32 a, b, c; /* Set up the internal state */ a = b = c = JHASH_INITVAL + (length<<2) + initval; /* Handle most of the key */ while (length > 3) { a += k[0]; b += k[1]; c += k[2]; __jhash_mix(a, b, c); length -= 3; k += 3; } /* Handle the last 3 u32's */ switch (length) { case 3: c += k[2]; fallthrough; case 2: b += k[1]; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); break; case 0: /* Nothing left to add */ break; } return c; } /* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { a += initval; b += initval; c += initval; __jhash_final(a, b, c); return c; } static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) { return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); } static inline u32 jhash_2words(u32 a, u32 b, u32 initval) { return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); } static inline u32 jhash_1word(u32 a, u32 initval) { return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); } #endif /* _LINUX_JHASH_H */
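/*
 * Illustrative sketch (added for this listing): bucketing a hypothetical
 * flow key with the helpers above.  Note the use of jhash_mask() instead
 * of a modulo, as recommended near the top of this header.  The struct,
 * helper and table size are made up.
 */
#define EXAMPLE_HASH_BITS	8	/* 2^8 = 256 buckets */

struct example_flow_key {
	u32 saddr;
	u32 daddr;
	u32 ports;	/* sport << 16 | dport */
};

static inline u32 example_flow_bucket(const struct example_flow_key *key,
				      u32 seed)
{
	u32 h = jhash_3words(key->saddr, key->daddr, key->ports, seed);

	return h & jhash_mask(EXAMPLE_HASH_BITS);
}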
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * inode.c
 */

/*
 * This file implements code to create and read inodes from disk.
 *
 * Inodes in Squashfs are identified by a 48-bit inode which encodes the
 * location of the compressed metadata block containing the inode, and the byte
 * offset into that block where the inode is placed (<block, offset>).
 *
 * To maximise compression there are different inodes for each file type
 * (regular file, directory, device, etc.), the inode contents and length
 * varying with the type.
 *
 * To further maximise compression, two types of regular file inode and
 * directory inode are defined: inodes optimised for frequently occurring
 * regular files and directories, and extended types where extra
 * information has to be stored.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/xattr.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"

/*
 * Initialise VFS inode with the base inode information common to all
 * Squashfs inode types. Sqsh_ino contains the unswapped base inode
 * off disk.
*/ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, struct squashfs_base_inode *sqsh_ino) { uid_t i_uid; gid_t i_gid; int err; inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); if (inode->i_ino == 0) return -EINVAL; err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid); if (err) return err; err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid); if (err) return err; i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0); inode_set_atime(inode, inode_get_mtime_sec(inode), 0); inode_set_ctime(inode, inode_get_mtime_sec(inode), 0); inode->i_mode = le16_to_cpu(sqsh_ino->mode); inode->i_size = 0; return err; } struct inode *squashfs_iget(struct super_block *sb, long long ino, unsigned int ino_number) { struct inode *inode = iget_locked(sb, ino_number); int err; TRACE("Entered squashfs_iget\n"); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; err = squashfs_read_inode(inode, ino); if (err) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } /* * Initialise VFS inode by reading inode from inode table (compressed * metadata). The format and amount of data read depends on type. */ int squashfs_read_inode(struct inode *inode, long long ino) { struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; int err, type, offset = SQUASHFS_INODE_OFFSET(ino); union squashfs_inode squashfs_ino; struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base; int xattr_id = SQUASHFS_INVALID_XATTR; TRACE("Entered squashfs_read_inode\n"); /* * Read inode base common to all inode types. */ err = squashfs_read_metadata(sb, sqshb_ino, &block, &offset, sizeof(*sqshb_ino)); if (err < 0) goto failed_read; err = squashfs_new_inode(sb, inode, sqshb_ino); if (err) goto failed_read; block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; offset = SQUASHFS_INODE_OFFSET(ino); type = le16_to_cpu(sqshb_ino->inode_type); switch (type) { case SQUASHFS_REG_TYPE: { unsigned int frag_offset, frag; int frag_size; u64 frag_blk; struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { err = frag_size; goto failed_read; } } else { frag_blk = SQUASHFS_INVALID_BLK; frag_size = 0; frag_offset = 0; } set_nlink(inode, 1); inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; squashfs_i(inode)->fragment_block = frag_blk; squashfs_i(inode)->fragment_size = frag_size; squashfs_i(inode)->fragment_offset = frag_offset; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, block, offset); break; } case SQUASHFS_LREG_TYPE: { unsigned int frag_offset, frag; int frag_size; u64 frag_blk; struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) 
goto failed_read; frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { err = frag_size; goto failed_read; } } else { frag_blk = SQUASHFS_INVALID_BLK; frag_size = 0; frag_offset = 0; } xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le64_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_inode_ops; inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = (inode->i_size - le64_to_cpu(sqsh_ino->sparse) + 511) >> 9; squashfs_i(inode)->fragment_block = frag_blk; squashfs_i(inode)->fragment_size = frag_size; squashfs_i(inode)->fragment_offset = frag_offset; squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, block, offset); break; } case SQUASHFS_DIR_TYPE: { struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le16_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_dir_inode_ops; inode->i_fop = &squashfs_dir_ops; inode->i_mode |= S_IFDIR; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); squashfs_i(inode)->dir_idx_cnt = 0; squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); TRACE("Directory inode %x:%x, start_block %llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, le16_to_cpu(sqsh_ino->offset)); break; } case SQUASHFS_LDIR_TYPE: { struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_dir_inode_ops; inode->i_fop = &squashfs_dir_ops; inode->i_mode |= S_IFDIR; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); squashfs_i(inode)->dir_idx_start = block; squashfs_i(inode)->dir_idx_offset = offset; squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count); squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); TRACE("Long directory inode %x:%x, start_block %llx, offset " "%x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, le16_to_cpu(sqsh_ino->offset)); break; } case SQUASHFS_SYMLINK_TYPE: case SQUASHFS_LSYMLINK_TYPE: { struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); if (inode->i_size > PAGE_SIZE) { ERROR("Corrupted symlink\n"); return -EINVAL; } set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_op = &squashfs_symlink_inode_ops; inode_nohighmem(inode); inode->i_data.a_ops = &squashfs_symlink_aops; inode->i_mode |= S_IFLNK; squashfs_i(inode)->start = block; squashfs_i(inode)->offset = offset; if (type == SQUASHFS_LSYMLINK_TYPE) { __le32 xattr; err = 
squashfs_read_metadata(sb, NULL, &block, &offset, inode->i_size); if (err < 0) goto failed_read; err = squashfs_read_metadata(sb, &xattr, &block, &offset, sizeof(xattr)); if (err < 0) goto failed_read; xattr_id = le32_to_cpu(xattr); } TRACE("Symbolic link inode %x:%x, start_block %llx, offset " "%x\n", SQUASHFS_INODE_BLK(ino), offset, block, offset); break; } case SQUASHFS_BLKDEV_TYPE: case SQUASHFS_CHRDEV_TYPE: { struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev; unsigned int rdev; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_CHRDEV_TYPE) inode->i_mode |= S_IFCHR; else inode->i_mode |= S_IFBLK; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); break; } case SQUASHFS_LBLKDEV_TYPE: case SQUASHFS_LCHRDEV_TYPE: { struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev; unsigned int rdev; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_LCHRDEV_TYPE) inode->i_mode |= S_IFCHR; else inode->i_mode |= S_IFBLK; xattr_id = le32_to_cpu(sqsh_ino->xattr); inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); break; } case SQUASHFS_FIFO_TYPE: case SQUASHFS_SOCKET_TYPE: { struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_FIFO_TYPE) inode->i_mode |= S_IFIFO; else inode->i_mode |= S_IFSOCK; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); break; } case SQUASHFS_LFIFO_TYPE: case SQUASHFS_LSOCKET_TYPE: { struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_LFIFO_TYPE) inode->i_mode |= S_IFIFO; else inode->i_mode |= S_IFSOCK; xattr_id = le32_to_cpu(sqsh_ino->xattr); inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); break; } default: ERROR("Unknown inode type %d in squashfs_iget!\n", type); return -EINVAL; } if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) { err = squashfs_xattr_lookup(sb, xattr_id, &squashfs_i(inode)->xattr_count, &squashfs_i(inode)->xattr_size, &squashfs_i(inode)->xattr); if (err < 0) goto failed_read; inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9) + 1; } else squashfs_i(inode)->xattr_count = 0; return 0; failed_read: ERROR("Unable to read inode 0x%llx\n", ino); return err; } const struct inode_operations squashfs_inode_ops = { .listxattr = squashfs_listxattr };
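/*
 * A minimal userspace sketch (not SquashFS code) of the inode-reference
 * packing that squashfs_read_inode() above unpacks with
 * SQUASHFS_INODE_BLK() and SQUASHFS_INODE_OFFSET(): one 64-bit value
 * carries the metadata-block address in the upper bits and the offset
 * into the uncompressed metadata block in the low 16 bits. The 16-bit
 * split is an assumption for illustration; the values are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t make_inode_ref(uint32_t blk, uint16_t offset)
{
	return ((uint64_t)blk << 16) | offset;
}

int main(void)
{
	uint64_t ino = make_inode_ref(0x1234, 0x56);

	/* Recover the two halves the same way the filesystem does. */
	printf("block %#x offset %#x\n",
	       (unsigned int)(ino >> 16), (unsigned int)(ino & 0xffff));
	return 0;
}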
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_INODE_ITEM_H__
#define __XFS_INODE_ITEM_H__

/* kernel only definitions */

struct xfs_buf;
struct xfs_bmbt_rec;
struct xfs_inode;
struct xfs_mount;

struct xfs_inode_log_item {
	struct xfs_log_item	ili_item;	  /* common portion */
	struct xfs_inode	*ili_inode;	  /* inode ptr */
	unsigned short		ili_lock_flags;	  /* inode lock flags */
	unsigned int		ili_dirty_flags;  /* dirty in current tx */
	/*
	 * The ili_lock protects the interactions between the dirty state and
	 * the flush state of the inode log item. This allows us to do atomic
	 * modifications of multiple state fields without having to hold a
	 * specific inode lock to serialise them.
	 *
	 * We need atomic changes between inode dirtying, inode flushing and
	 * inode completion, but these all hold different combinations of
	 * ILOCK and IFLUSHING and hence we need some other method of
	 * serialising updates to the flush state.
	 */
	spinlock_t		ili_lock;	  /* flush state lock */
	unsigned int		ili_last_fields;  /* fields when flushed */
	unsigned int		ili_fields;	  /* fields to be logged */
	unsigned int		ili_fsync_fields; /* logged since last fsync */
	xfs_lsn_t		ili_flush_lsn;	  /* lsn at last flush */
	xfs_csn_t		ili_commit_seq;	  /* last transaction commit */
};

static inline int xfs_inode_clean(struct xfs_inode *ip)
{
	return !ip->i_itemp || !(ip->i_itemp->ili_fields & XFS_ILOG_ALL);
}

extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_abort(struct xfs_inode *);
extern void xfs_iflush_shutdown_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
					 struct xfs_inode_log_format *);

extern struct kmem_cache *xfs_ili_cache;

#endif	/* __XFS_INODE_ITEM_H__ */
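/*
 * A generic userspace sketch of the locking pattern the ili_lock comment
 * above describes: a dedicated spinlock that makes updates to a group of
 * related state fields atomic, without requiring the caller to hold any
 * of the object's heavier locks. struct item_state and the functions
 * below are hypothetical stand-ins, not XFS code.
 */
#include <pthread.h>

struct item_state {
	pthread_spinlock_t lock;   /* serialises the three fields below */
	unsigned int dirty_fields; /* fields to be logged */
	unsigned int last_fields;  /* fields when last flushed */
	long flush_seq;            /* sequence number at last flush */
};

static void item_state_init(struct item_state *st)
{
	pthread_spin_init(&st->lock, PTHREAD_PROCESS_PRIVATE);
	st->dirty_fields = 0;
	st->last_fields = 0;
	st->flush_seq = 0;
}

static void item_mark_flushed(struct item_state *st, long seq)
{
	/* All three fields change together or not at all. */
	pthread_spin_lock(&st->lock);
	st->last_fields = st->dirty_fields;
	st->dirty_fields = 0;
	st->flush_seq = seq;
	pthread_spin_unlock(&st->lock);
}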
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the s5k83a sensor * * Copyright (C) 2008 Erik Andrén * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
* Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kthread.h> #include "m5602_s5k83a.h" static int s5k83a_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops s5k83a_ctrl_ops = { .s_ctrl = s5k83a_s_ctrl, }; static struct v4l2_pix_format s5k83a_modes[] = { { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 640 * 480, .bytesperline = 640, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 } }; static const unsigned char preinit_s5k83a[][4] = { {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00}, {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00}, {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00}, }; /* This could probably be considerably shortened. 
I don't have the hardware to experiment with it, patches welcome */ static const unsigned char init_s5k83a[][4] = { /* The following sequence is useless after a clean boot but is necessary after resume from suspend */ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00}, {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00}, {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00}, {SENSOR, 0xaf, 0x01, 0x00}, {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00}, {SENSOR, 0x7b, 0xff, 0x00}, {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00}, {SENSOR, 0x01, 0x50, 0x00}, {SENSOR, 0x12, 0x20, 0x00}, {SENSOR, 0x17, 0x40, 0x00}, {SENSOR, 0x1c, 0x00, 0x00}, {SENSOR, 0x02, 0x70, 0x00}, {SENSOR, 0x03, 0x0b, 0x00}, {SENSOR, 0x04, 0xf0, 0x00}, {SENSOR, 0x05, 0x0b, 0x00}, {SENSOR, 0x06, 0x71, 0x00}, {SENSOR, 0x07, 0xe8, 0x00}, /* 488 */ {SENSOR, 0x08, 0x02, 0x00}, {SENSOR, 0x09, 0x88, 0x00}, /* 648 */ {SENSOR, 0x14, 0x00, 0x00}, {SENSOR, 0x15, 0x20, 0x00}, /* 32 */ {SENSOR, 0x19, 0x00, 0x00}, {SENSOR, 0x1a, 0x98, 0x00}, /* 152 */ {SENSOR, 0x0f, 0x02, 0x00}, {SENSOR, 0x10, 0xe5, 0x00}, /* 741 */ /* normal colors (this is value after boot, but after tries can be different) */ {SENSOR, 0x00, 0x06, 0x00}, }; static const unsigned char start_s5k83a[][4] = { {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00}, {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, /* 484 */ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, /* 639 */ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, }; static void s5k83a_dump_registers(struct sd *sd); static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data); static int s5k83a_set_led_indication(struct sd *sd, u8 val); static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev, __s32 vflip, __s32 hflip); int s5k83a_probe(struct sd *sd) { u8 prod_id = 0, ver_id = 
0; int i, err = 0; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { if (force_sensor == S5K83A_SENSOR) { pr_info("Forcing a %s sensor\n", s5k83a.name); goto sensor_found; } /* If we want to force another sensor, don't try to probe this * one */ return -ENODEV; } gspca_dbg(gspca_dev, D_PROBE, "Probing for a s5k83a sensor\n"); /* Preinit the sensor */ for (i = 0; i < ARRAY_SIZE(preinit_s5k83a) && !err; i++) { u8 data[2] = {preinit_s5k83a[i][2], preinit_s5k83a[i][3]}; if (preinit_s5k83a[i][0] == SENSOR) err = m5602_write_sensor(sd, preinit_s5k83a[i][1], data, 2); else err = m5602_write_bridge(sd, preinit_s5k83a[i][1], data[0]); } /* We don't know what register (if any) that contain the product id * Just pick the first addresses that seem to produce the same results * on multiple machines */ if (m5602_read_sensor(sd, 0x00, &prod_id, 1)) return -ENODEV; if (m5602_read_sensor(sd, 0x01, &ver_id, 1)) return -ENODEV; if ((prod_id == 0xff) || (ver_id == 0xff)) return -ENODEV; else pr_info("Detected a s5k83a sensor\n"); sensor_found: sd->gspca_dev.cam.cam_mode = s5k83a_modes; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k83a_modes); /* null the pointer! thread is't running now */ sd->rotation_thread = NULL; return 0; } int s5k83a_init(struct sd *sd) { int i, err = 0; for (i = 0; i < ARRAY_SIZE(init_s5k83a) && !err; i++) { u8 data[2] = {0x00, 0x00}; switch (init_s5k83a[i][0]) { case BRIDGE: err = m5602_write_bridge(sd, init_s5k83a[i][1], init_s5k83a[i][2]); break; case SENSOR: data[0] = init_s5k83a[i][2]; err = m5602_write_sensor(sd, init_s5k83a[i][1], data, 1); break; case SENSOR_LONG: data[0] = init_s5k83a[i][2]; data[1] = init_s5k83a[i][3]; err = m5602_write_sensor(sd, init_s5k83a[i][1], data, 2); break; default: pr_info("Invalid stream command, exiting init\n"); return -EINVAL; } } if (dump_sensor) s5k83a_dump_registers(sd); return err; } int s5k83a_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; sd->gspca_dev.vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 6); v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, S5K83A_DEFAULT_BRIGHTNESS); v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_EXPOSURE, 0, S5K83A_MAXIMUM_EXPOSURE, 1, S5K83A_DEFAULT_EXPOSURE); v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, S5K83A_DEFAULT_GAIN); sd->hflip = v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->hflip); return 0; } static int rotation_thread_function(void *data) { struct sd *sd = (struct sd *) data; u8 reg, previous_rotation = 0; __s32 vflip, hflip; set_current_state(TASK_INTERRUPTIBLE); while (!schedule_timeout(msecs_to_jiffies(100))) { if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock)) break; s5k83a_get_rotation(sd, &reg); if (previous_rotation != reg) { previous_rotation = reg; pr_info("Camera was flipped\n"); hflip = sd->hflip->val; vflip = sd->vflip->val; if (reg) { vflip = !vflip; hflip = !hflip; } s5k83a_set_flip_real((struct gspca_dev *) sd, vflip, hflip); } mutex_unlock(&sd->gspca_dev.usb_lock); set_current_state(TASK_INTERRUPTIBLE); } /* return to "front" flip */ if (previous_rotation) { hflip = sd->hflip->val; vflip = sd->vflip->val; s5k83a_set_flip_real((struct gspca_dev *) sd, vflip, hflip); } sd->rotation_thread = NULL; return 0; } int s5k83a_start(struct sd *sd) { int i, err = 
0; /* Create another thread, polling the GPIO ports of the camera to check if it got rotated. This is how the windows driver does it so we have to assume that there is no better way of accomplishing this */ sd->rotation_thread = kthread_run(rotation_thread_function, sd, "rotation thread"); if (IS_ERR(sd->rotation_thread)) { err = PTR_ERR(sd->rotation_thread); sd->rotation_thread = NULL; return err; } /* Preinit the sensor */ for (i = 0; i < ARRAY_SIZE(start_s5k83a) && !err; i++) { u8 data[2] = {start_s5k83a[i][2], start_s5k83a[i][3]}; if (start_s5k83a[i][0] == SENSOR) err = m5602_write_sensor(sd, start_s5k83a[i][1], data, 2); else err = m5602_write_bridge(sd, start_s5k83a[i][1], data[0]); } if (err < 0) return err; return s5k83a_set_led_indication(sd, 1); } int s5k83a_stop(struct sd *sd) { if (sd->rotation_thread) kthread_stop(sd->rotation_thread); return s5k83a_set_led_indication(sd, 0); } void s5k83a_disconnect(struct sd *sd) { s5k83a_stop(sd); sd->sensor = NULL; } static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 data[2]; struct sd *sd = (struct sd *) gspca_dev; data[0] = 0x00; data[1] = 0x20; err = m5602_write_sensor(sd, 0x14, data, 2); if (err < 0) return err; data[0] = 0x01; data[1] = 0x00; err = m5602_write_sensor(sd, 0x0d, data, 2); if (err < 0) return err; /* FIXME: This is not sane, we need to figure out the composition of these registers */ data[0] = val >> 3; /* gain, high 5 bits */ data[1] = val >> 1; /* gain, high 7 bits */ err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2); return err; } static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val) { u8 data[1]; struct sd *sd = (struct sd *) gspca_dev; data[0] = val; return m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 1); } static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { u8 data[2]; struct sd *sd = (struct sd *) gspca_dev; data[0] = 0; data[1] = val; return m5602_write_sensor(sd, S5K83A_EXPOSURE, data, 2); } static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev, __s32 vflip, __s32 hflip) { int err; u8 data[1]; struct sd *sd = (struct sd *) gspca_dev; data[0] = 0x05; err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); if (err < 0) return err; /* six bit is vflip, seven is hflip */ data[0] = S5K83A_FLIP_MASK; data[0] = (vflip) ? data[0] | 0x40 : data[0]; data[0] = (hflip) ? data[0] | 0x80 : data[0]; err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1); if (err < 0) return err; data[0] = (vflip) ? 0x0b : 0x0a; err = m5602_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1); if (err < 0) return err; data[0] = (hflip) ? 
0x0a : 0x0b; err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1); return err; } static int s5k83a_set_hvflip(struct gspca_dev *gspca_dev) { int err; u8 reg; struct sd *sd = (struct sd *) gspca_dev; int hflip = sd->hflip->val; int vflip = sd->vflip->val; err = s5k83a_get_rotation(sd, &reg); if (err < 0) return err; if (reg) { hflip = !hflip; vflip = !vflip; } err = s5k83a_set_flip_real(gspca_dev, vflip, hflip); return err; } static int s5k83a_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); int err; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: err = s5k83a_set_brightness(gspca_dev, ctrl->val); break; case V4L2_CID_EXPOSURE: err = s5k83a_set_exposure(gspca_dev, ctrl->val); break; case V4L2_CID_GAIN: err = s5k83a_set_gain(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: err = s5k83a_set_hvflip(gspca_dev); break; default: return -EINVAL; } return err; } static int s5k83a_set_led_indication(struct sd *sd, u8 val) { int err = 0; u8 data[1]; err = m5602_read_bridge(sd, M5602_XB_GPIO_DAT, data); if (err < 0) return err; if (val) data[0] = data[0] | S5K83A_GPIO_LED_MASK; else data[0] = data[0] & ~S5K83A_GPIO_LED_MASK; err = m5602_write_bridge(sd, M5602_XB_GPIO_DAT, data[0]); return err; } /* Get camera rotation on Acer notebooks */ static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data) { int err = m5602_read_bridge(sd, M5602_XB_GPIO_DAT, reg_data); *reg_data = (*reg_data & S5K83A_GPIO_ROTATION_MASK) ? 0 : 1; return err; } static void s5k83a_dump_registers(struct sd *sd) { int address; u8 page, old_page; m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1); for (page = 0; page < 16; page++) { m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); pr_info("Dumping the s5k83a register state for page 0x%x\n", page); for (address = 0; address <= 0xff; address++) { u8 val = 0; m5602_read_sensor(sd, address, &val, 1); pr_info("register 0x%x contains 0x%x\n", address, val); } } pr_info("s5k83a register state dump complete\n"); for (page = 0; page < 16; page++) { m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); pr_info("Probing for which registers that are read/write for page 0x%x\n", page); for (address = 0; address <= 0xff; address++) { u8 old_val, ctrl_val, test_val = 0xff; m5602_read_sensor(sd, address, &old_val, 1); m5602_write_sensor(sd, address, &test_val, 1); m5602_read_sensor(sd, address, &ctrl_val, 1); if (ctrl_val == test_val) pr_info("register 0x%x is writeable\n", address); else pr_info("register 0x%x is read only\n", address); /* Restore original val */ m5602_write_sensor(sd, address, &old_val, 1); } } pr_info("Read/write register probing complete\n"); m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1); }
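/*
 * A minimal sketch of the table-driven register programming used by
 * preinit_s5k83a/init_s5k83a/start_s5k83a above: each row names a target
 * (bridge or sensor), a register and a value, and a single loop replays
 * the table, stopping at the first failed write just like the driver's
 * loops do. write_bridge() and write_sensor() are hypothetical stand-ins
 * for m5602_write_bridge()/m5602_write_sensor().
 */
enum reg_target { TGT_BRIDGE, TGT_SENSOR };

struct reg_cmd {
	enum reg_target tgt;
	unsigned char reg;
	unsigned char val;
};

static int write_bridge(unsigned char reg, unsigned char val)
{
	(void)reg; (void)val;
	return 0; /* pretend the I/O succeeded */
}

static int write_sensor(unsigned char reg, unsigned char val)
{
	(void)reg; (void)val;
	return 0;
}

static int replay_table(const struct reg_cmd *tbl, int n)
{
	int i, err = 0;

	for (i = 0; i < n && !err; i++)
		err = tbl[i].tgt == TGT_BRIDGE ?
			write_bridge(tbl[i].reg, tbl[i].val) :
			write_sensor(tbl[i].reg, tbl[i].val);
	return err;
}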
// SPDX-License-Identifier: GPL-2.0-only /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <net/ipv6.h> #include <net/icmp.h> #include <net/udp.h> #include <net/tcp.h> #include <net/route.h> #include <linux/netfilter.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/xt_LOG.h> #include <net/netfilter/nf_log.h> static const struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_NOTICE, .logflags = NF_LOG_DEFAULT_MASK, }, }, }; struct arppayload { unsigned char mac_src[ETH_ALEN]; unsigned char ip_src[4]; unsigned char mac_dst[ETH_ALEN]; unsigned char ip_dst[4]; }; /* Guard against containers flooding syslog. */ static bool nf_log_allowed(const struct net *net) { return net_eq(net, &init_net) || sysctl_nf_log_all_netns; } static void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb) { u16 vid; if (!skb_vlan_tag_present(skb)) return; vid = skb_vlan_tag_get(skb); nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid); } static void noinline_for_stack dump_arp_packet(struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int nhoff) { const struct arppayload *ap; struct arppayload _arpp; const struct arphdr *ah; unsigned int logflags; struct arphdr _arph; ah = skb_header_pointer(skb, nhoff, sizeof(_arph), &_arph); if (!ah) { nf_log_buf_add(m, "TRUNCATED"); return; } if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_DEFAULT_MASK; if (logflags & NF_LOG_MACDECODE) { nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); nf_log_dump_vlan(m, skb); nf_log_buf_add(m, "MACPROTO=%04x ", ntohs(eth_hdr(skb)->h_proto)); } nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d", ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op)); /* If it's for Ethernet and the lengths are OK, then log the ARP * payload.
*/ if (ah->ar_hrd != htons(ARPHRD_ETHER) || ah->ar_hln != ETH_ALEN || ah->ar_pln != sizeof(__be32)) return; ap = skb_header_pointer(skb, nhoff + sizeof(_arph), sizeof(_arpp), &_arpp); if (!ap) { nf_log_buf_add(m, " INCOMPLETE [%zu bytes]", skb->len - sizeof(_arph)); return; } nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4", ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); } static void nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix, struct net *net) { const struct net_device *physoutdev __maybe_unused; const struct net_device *physindev __maybe_unused; nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ", '0' + loginfo->u.log.level, prefix, in ? in->name : "", out ? out->name : ""); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) physindev = nf_bridge_get_physindev(skb, net); if (physindev && in != physindev) nf_log_buf_add(m, "PHYSIN=%s ", physindev->name); physoutdev = nf_bridge_get_physoutdev(skb); if (physoutdev && out != physoutdev) nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name); #endif } static void nf_log_arp_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); dump_arp_packet(m, loginfo, skb, skb_network_offset(skb)); nf_log_buf_close(m); } static struct nf_logger nf_arp_logger __read_mostly = { .name = "nf_log_arp", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_arp_packet, .me = THIS_MODULE, }; static void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, struct sock *sk) { if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk))) return; read_lock_bh(&sk->sk_callback_lock); if (sk->sk_socket && sk->sk_socket->file) { const struct cred *cred = sk->sk_socket->file->f_cred; nf_log_buf_add(m, "UID=%u GID=%u ", from_kuid_munged(&init_user_ns, cred->fsuid), from_kgid_munged(&init_user_ns, cred->fsgid)); } read_unlock_bh(&sk->sk_callback_lock); } static noinline_for_stack int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset, unsigned int logflags) { struct tcphdr _tcph; const struct tcphdr *th; /* Max length: 10 "PROTO=TCP " */ nf_log_buf_add(m, "PROTO=TCP "); if (fragment) return 0; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); if (!th) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset); return 1; } /* Max length: 20 "SPT=65535 DPT=65535 " */ nf_log_buf_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest)); /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */ if (logflags & NF_LOG_TCPSEQ) { nf_log_buf_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq)); } /* Max length: 13 "WINDOW=65535 " */ nf_log_buf_add(m, "WINDOW=%u ", ntohs(th->window)); /* Max length: 9 "RES=0x3C " */ nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22)); /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */ if (th->cwr) nf_log_buf_add(m, "CWR "); if (th->ece) nf_log_buf_add(m, "ECE "); if (th->urg) nf_log_buf_add(m, "URG "); if (th->ack) nf_log_buf_add(m, "ACK "); if (th->psh) 
nf_log_buf_add(m, "PSH "); if (th->rst) nf_log_buf_add(m, "RST "); if (th->syn) nf_log_buf_add(m, "SYN "); if (th->fin) nf_log_buf_add(m, "FIN "); /* Max length: 11 "URGP=65535 " */ nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr)); if ((logflags & NF_LOG_TCPOPT) && th->doff * 4 > sizeof(struct tcphdr)) { unsigned int optsize = th->doff * 4 - sizeof(struct tcphdr); u8 _opt[60 - sizeof(struct tcphdr)]; unsigned int i; const u8 *op; op = skb_header_pointer(skb, offset + sizeof(struct tcphdr), optsize, _opt); if (!op) { nf_log_buf_add(m, "OPT (TRUNCATED)"); return 1; } /* Max length: 127 "OPT (" 15*4*2chars ") " */ nf_log_buf_add(m, "OPT ("); for (i = 0; i < optsize; i++) nf_log_buf_add(m, "%02X", op[i]); nf_log_buf_add(m, ") "); } return 0; } static noinline_for_stack int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset) { struct udphdr _udph; const struct udphdr *uh; if (proto == IPPROTO_UDP) /* Max length: 10 "PROTO=UDP " */ nf_log_buf_add(m, "PROTO=UDP "); else /* Max length: 14 "PROTO=UDPLITE " */ nf_log_buf_add(m, "PROTO=UDPLITE "); if (fragment) goto out; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); if (!uh) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset); return 1; } /* Max length: 20 "SPT=65535 DPT=65535 " */ nf_log_buf_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len)); out: return 0; } /* One level of recursion won't kill us */ static noinline_for_stack void dump_ipv4_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int iphoff) { const struct iphdr *ih; unsigned int logflags; struct iphdr _iph; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_DEFAULT_MASK; ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph); if (!ih) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Important fields: * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. 
* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ nf_log_buf_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr); /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", iph_totlen(skb, ih), ih->tos & IPTOS_TOS_MASK, ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id)); /* Max length: 6 "CE DF MF " */ if (ntohs(ih->frag_off) & IP_CE) nf_log_buf_add(m, "CE "); if (ntohs(ih->frag_off) & IP_DF) nf_log_buf_add(m, "DF "); if (ntohs(ih->frag_off) & IP_MF) nf_log_buf_add(m, "MF "); /* Max length: 11 "FRAG:65535 " */ if (ntohs(ih->frag_off) & IP_OFFSET) nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET); if ((logflags & NF_LOG_IPOPT) && ih->ihl * 4 > sizeof(struct iphdr)) { unsigned char _opt[4 * 15 - sizeof(struct iphdr)]; const unsigned char *op; unsigned int i, optsize; optsize = ih->ihl * 4 - sizeof(struct iphdr); op = skb_header_pointer(skb, iphoff + sizeof(_iph), optsize, _opt); if (!op) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Max length: 127 "OPT (" 15*4*2chars ") " */ nf_log_buf_add(m, "OPT ("); for (i = 0; i < optsize; i++) nf_log_buf_add(m, "%02X", op[i]); nf_log_buf_add(m, ") "); } switch (ih->protocol) { case IPPROTO_TCP: if (nf_log_dump_tcp_header(m, skb, ih->protocol, ntohs(ih->frag_off) & IP_OFFSET, iphoff + ih->ihl * 4, logflags)) return; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: if (nf_log_dump_udp_header(m, skb, ih->protocol, ntohs(ih->frag_off) & IP_OFFSET, iphoff + ih->ihl * 4)) return; break; case IPPROTO_ICMP: { static const size_t required_len[NR_ICMP_TYPES + 1] = { [ICMP_ECHOREPLY] = 4, [ICMP_DEST_UNREACH] = 8 + sizeof(struct iphdr), [ICMP_SOURCE_QUENCH] = 8 + sizeof(struct iphdr), [ICMP_REDIRECT] = 8 + sizeof(struct iphdr), [ICMP_ECHO] = 4, [ICMP_TIME_EXCEEDED] = 8 + sizeof(struct iphdr), [ICMP_PARAMETERPROB] = 8 + sizeof(struct iphdr), [ICMP_TIMESTAMP] = 20, [ICMP_TIMESTAMPREPLY] = 20, [ICMP_ADDRESS] = 12, [ICMP_ADDRESSREPLY] = 12 }; const struct icmphdr *ich; struct icmphdr _icmph; /* Max length: 11 "PROTO=ICMP " */ nf_log_buf_add(m, "PROTO=ICMP "); if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ich = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_icmph), &_icmph); if (!ich) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } /* Max length: 18 "TYPE=255 CODE=255 " */ nf_log_buf_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code); /* Max length: 25 "INCOMPLETE [65535 bytes] " */ if (ich->type <= NR_ICMP_TYPES && required_len[ich->type] && skb->len - iphoff - ih->ihl * 4 < required_len[ich->type]) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } switch (ich->type) { case ICMP_ECHOREPLY: case ICMP_ECHO: /* Max length: 19 "ID=65535 SEQ=65535 " */ nf_log_buf_add(m, "ID=%u SEQ=%u ", ntohs(ich->un.echo.id), ntohs(ich->un.echo.sequence)); break; case ICMP_PARAMETERPROB: /* Max length: 14 "PARAMETER=255 " */ nf_log_buf_add(m, "PARAMETER=%u ", ntohl(ich->un.gateway) >> 24); break; case ICMP_REDIRECT: /* Max length: 24 "GATEWAY=255.255.255.255 " */ nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway); fallthrough; case ICMP_DEST_UNREACH: case ICMP_SOURCE_QUENCH: case ICMP_TIME_EXCEEDED: /* Max length: 3+maxlen */ if (!iphoff) { /* Only recurse once. 
*/ nf_log_buf_add(m, "["); dump_ipv4_packet(net, m, info, skb, iphoff + ih->ihl * 4 + sizeof(_icmph)); nf_log_buf_add(m, "] "); } /* Max length: 10 "MTU=65535 " */ if (ich->type == ICMP_DEST_UNREACH && ich->code == ICMP_FRAG_NEEDED) { nf_log_buf_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu)); } } break; } /* Max Length */ case IPPROTO_AH: { const struct ip_auth_hdr *ah; struct ip_auth_hdr _ahdr; if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 9 "PROTO=AH " */ nf_log_buf_add(m, "PROTO=AH "); /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ah = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_ahdr), &_ahdr); if (!ah) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } /* Length: 15 "SPI=0xF1234567 " */ nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi)); break; } case IPPROTO_ESP: { const struct ip_esp_hdr *eh; struct ip_esp_hdr _esph; /* Max length: 10 "PROTO=ESP " */ nf_log_buf_add(m, "PROTO=ESP "); if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ eh = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_esph), &_esph); if (!eh) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } /* Length: 15 "SPI=0xF1234567 " */ nf_log_buf_add(m, "SPI=0x%x ", ntohl(eh->spi)); break; } /* Max length: 10 "PROTO 255 " */ default: nf_log_buf_add(m, "PROTO=%u ", ih->protocol); } /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && !iphoff) nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!iphoff && skb->mark) nf_log_buf_add(m, "MARK=0x%x ", skb->mark); /* Proto Max log string length */ /* IP: 40+46+6+11+127 = 230 */ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ /* UDP: 10+max(25,20) = 35 */ /* UDPLITE: 14+max(25,20) = 39 */ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */ /* ESP: 10+max(25)+15 = 50 */ /* AH: 9+max(25)+15 = 49 */ /* unknown: 10 */ /* (ICMP allows recursion one level deep) */ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */ /* maxlen = 230+ 91 + 230 + 252 = 803 */ } static noinline_for_stack void dump_ipv6_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int ip6hoff, int recurse) { const struct ipv6hdr *ih; unsigned int hdrlen = 0; unsigned int logflags; struct ipv6hdr _ip6h; unsigned int ptr; u8 currenthdr; int fragment; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_DEFAULT_MASK; ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h); if (!ih) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */ nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr); /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ nf_log_buf_add(m, "LEN=%zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", ntohs(ih->payload_len) + sizeof(struct ipv6hdr), (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20, ih->hop_limit, (ntohl(*(__be32 *)ih) & 0x000fffff)); fragment = 0; ptr = ip6hoff + sizeof(struct ipv6hdr); currenthdr = ih->nexthdr; while (currenthdr != NEXTHDR_NONE && nf_ip6_ext_hdr(currenthdr)) { struct ipv6_opt_hdr _hdr; const struct ipv6_opt_hdr *hp; hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); if (!hp) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Max length: 48 "OPT (...) 
" */ if (logflags & NF_LOG_IPOPT) nf_log_buf_add(m, "OPT ( "); switch (currenthdr) { case IPPROTO_FRAGMENT: { struct frag_hdr _fhdr; const struct frag_hdr *fh; nf_log_buf_add(m, "FRAG:"); fh = skb_header_pointer(skb, ptr, sizeof(_fhdr), &_fhdr); if (!fh) { nf_log_buf_add(m, "TRUNCATED "); return; } /* Max length: 6 "65535 " */ nf_log_buf_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8); /* Max length: 11 "INCOMPLETE " */ if (fh->frag_off & htons(0x0001)) nf_log_buf_add(m, "INCOMPLETE "); nf_log_buf_add(m, "ID:%08x ", ntohl(fh->identification)); if (ntohs(fh->frag_off) & 0xFFF8) fragment = 1; hdrlen = 8; break; } case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: case IPPROTO_HOPOPTS: if (fragment) { if (logflags & NF_LOG_IPOPT) nf_log_buf_add(m, ")"); return; } hdrlen = ipv6_optlen(hp); break; /* Max Length */ case IPPROTO_AH: if (logflags & NF_LOG_IPOPT) { struct ip_auth_hdr _ahdr; const struct ip_auth_hdr *ah; /* Max length: 3 "AH " */ nf_log_buf_add(m, "AH "); if (fragment) { nf_log_buf_add(m, ")"); return; } ah = skb_header_pointer(skb, ptr, sizeof(_ahdr), &_ahdr); if (!ah) { /* Max length: 26 "INCOMPLETE [65535 bytes] )" */ nf_log_buf_add(m, "INCOMPLETE [%u bytes] )", skb->len - ptr); return; } /* Length: 15 "SPI=0xF1234567 */ nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi)); } hdrlen = ipv6_authlen(hp); break; case IPPROTO_ESP: if (logflags & NF_LOG_IPOPT) { struct ip_esp_hdr _esph; const struct ip_esp_hdr *eh; /* Max length: 4 "ESP " */ nf_log_buf_add(m, "ESP "); if (fragment) { nf_log_buf_add(m, ")"); return; } /* Max length: 26 "INCOMPLETE [65535 bytes] )" */ eh = skb_header_pointer(skb, ptr, sizeof(_esph), &_esph); if (!eh) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] )", skb->len - ptr); return; } /* Length: 16 "SPI=0xF1234567 )" */ nf_log_buf_add(m, "SPI=0x%x )", ntohl(eh->spi)); } return; default: /* Max length: 20 "Unknown Ext Hdr 255" */ nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr); return; } if (logflags & NF_LOG_IPOPT) nf_log_buf_add(m, ") "); currenthdr = hp->nexthdr; ptr += hdrlen; } switch (currenthdr) { case IPPROTO_TCP: if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment, ptr, logflags)) return; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr)) return; break; case IPPROTO_ICMPV6: { struct icmp6hdr _icmp6h; const struct icmp6hdr *ic; /* Max length: 13 "PROTO=ICMPv6 " */ nf_log_buf_add(m, "PROTO=ICMPv6 "); if (fragment) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h); if (!ic) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr); return; } /* Max length: 18 "TYPE=255 CODE=255 " */ nf_log_buf_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code); switch (ic->icmp6_type) { case ICMPV6_ECHO_REQUEST: case ICMPV6_ECHO_REPLY: /* Max length: 19 "ID=65535 SEQ=65535 " */ nf_log_buf_add(m, "ID=%u SEQ=%u ", ntohs(ic->icmp6_identifier), ntohs(ic->icmp6_sequence)); break; case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: break; case ICMPV6_PARAMPROB: /* Max length: 17 "POINTER=ffffffff " */ nf_log_buf_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer)); fallthrough; case ICMPV6_DEST_UNREACH: case ICMPV6_PKT_TOOBIG: case ICMPV6_TIME_EXCEED: /* Max length: 3+maxlen */ if (recurse) { nf_log_buf_add(m, "["); dump_ipv6_packet(net, m, info, skb, ptr + sizeof(_icmp6h), 0); nf_log_buf_add(m, "] "); } /* Max length: 10 "MTU=65535 " */ if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) { nf_log_buf_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu)); } } 
break; } /* Max length: 10 "PROTO=255 " */ default: nf_log_buf_add(m, "PROTO=%u ", currenthdr); } /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && recurse) nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (recurse && skb->mark) nf_log_buf_add(m, "MARK=0x%x ", skb->mark); } static void dump_mac_header(struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb) { struct net_device *dev = skb->dev; unsigned int logflags = 0; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; if (!(logflags & NF_LOG_MACDECODE)) goto fallback; switch (dev->type) { case ARPHRD_ETHER: nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); nf_log_dump_vlan(m, skb); nf_log_buf_add(m, "MACPROTO=%04x ", ntohs(eth_hdr(skb)->h_proto)); return; default: break; } fallback: nf_log_buf_add(m, "MAC="); if (dev->hard_header_len && skb->mac_header != skb->network_header) { const unsigned char *p = skb_mac_header(skb); unsigned int i; if (dev->type == ARPHRD_SIT) { p -= ETH_HLEN; if (p < skb->head) p = NULL; } if (p) { nf_log_buf_add(m, "%02x", *p++); for (i = 1; i < dev->hard_header_len; i++) nf_log_buf_add(m, ":%02x", *p++); } if (dev->type == ARPHRD_SIT) { const struct iphdr *iph = (struct iphdr *)skb_mac_header(skb); nf_log_buf_add(m, " TUNNEL=%pI4->%pI4", &iph->saddr, &iph->daddr); } } nf_log_buf_add(m, " "); } static void nf_log_ip_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); if (in) dump_mac_header(m, loginfo, skb); dump_ipv4_packet(net, m, loginfo, skb, skb_network_offset(skb)); nf_log_buf_close(m); } static struct nf_logger nf_ip_logger __read_mostly = { .name = "nf_log_ipv4", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_ip_packet, .me = THIS_MODULE, }; static void nf_log_ip6_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); if (in) dump_mac_header(m, loginfo, skb); dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1); nf_log_buf_close(m); } static struct nf_logger nf_ip6_logger __read_mostly = { .name = "nf_log_ipv6", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_ip6_packet, .me = THIS_MODULE, }; static void nf_log_unknown_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); dump_mac_header(m, loginfo, skb); nf_log_buf_close(m); } static void nf_log_netdev_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) 
{ switch (skb->protocol) { case htons(ETH_P_IP): nf_log_ip_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; case htons(ETH_P_IPV6): nf_log_ip6_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; case htons(ETH_P_ARP): case htons(ETH_P_RARP): nf_log_arp_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; default: nf_log_unknown_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; } } static struct nf_logger nf_netdev_logger __read_mostly = { .name = "nf_log_netdev", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_netdev_packet, .me = THIS_MODULE, }; static struct nf_logger nf_bridge_logger __read_mostly = { .name = "nf_log_bridge", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_netdev_packet, .me = THIS_MODULE, }; static int __net_init nf_log_syslog_net_init(struct net *net) { int ret = nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger); if (ret) return ret; ret = nf_log_set(net, NFPROTO_ARP, &nf_arp_logger); if (ret) goto err1; ret = nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger); if (ret) goto err2; ret = nf_log_set(net, NFPROTO_NETDEV, &nf_netdev_logger); if (ret) goto err3; ret = nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger); if (ret) goto err4; return 0; err4: nf_log_unset(net, &nf_netdev_logger); err3: nf_log_unset(net, &nf_ip6_logger); err2: nf_log_unset(net, &nf_arp_logger); err1: nf_log_unset(net, &nf_ip_logger); return ret; } static void __net_exit nf_log_syslog_net_exit(struct net *net) { nf_log_unset(net, &nf_ip_logger); nf_log_unset(net, &nf_arp_logger); nf_log_unset(net, &nf_ip6_logger); nf_log_unset(net, &nf_netdev_logger); nf_log_unset(net, &nf_bridge_logger); } static struct pernet_operations nf_log_syslog_net_ops = { .init = nf_log_syslog_net_init, .exit = nf_log_syslog_net_exit, }; static int __init nf_log_syslog_init(void) { int ret; ret = register_pernet_subsys(&nf_log_syslog_net_ops); if (ret < 0) return ret; ret = nf_log_register(NFPROTO_IPV4, &nf_ip_logger); if (ret < 0) goto err1; ret = nf_log_register(NFPROTO_ARP, &nf_arp_logger); if (ret < 0) goto err2; ret = nf_log_register(NFPROTO_IPV6, &nf_ip6_logger); if (ret < 0) goto err3; ret = nf_log_register(NFPROTO_NETDEV, &nf_netdev_logger); if (ret < 0) goto err4; ret = nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger); if (ret < 0) goto err5; return 0; err5: nf_log_unregister(&nf_netdev_logger); err4: nf_log_unregister(&nf_ip6_logger); err3: nf_log_unregister(&nf_arp_logger); err2: nf_log_unregister(&nf_ip_logger); err1: pr_err("failed to register logger\n"); unregister_pernet_subsys(&nf_log_syslog_net_ops); return ret; } static void __exit nf_log_syslog_exit(void) { unregister_pernet_subsys(&nf_log_syslog_net_ops); nf_log_unregister(&nf_ip_logger); nf_log_unregister(&nf_arp_logger); nf_log_unregister(&nf_ip6_logger); nf_log_unregister(&nf_netdev_logger); nf_log_unregister(&nf_bridge_logger); } module_init(nf_log_syslog_init); module_exit(nf_log_syslog_exit); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("Netfilter syslog packet logging"); MODULE_LICENSE("GPL"); MODULE_ALIAS("nf_log_arp"); MODULE_ALIAS("nf_log_bridge"); MODULE_ALIAS("nf_log_ipv4"); MODULE_ALIAS("nf_log_ipv6"); MODULE_ALIAS("nf_log_netdev"); MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0); MODULE_ALIAS_NF_LOGGER(AF_INET, 0); MODULE_ALIAS_NF_LOGGER(3, 0); MODULE_ALIAS_NF_LOGGER(5, 0); /* NFPROTO_NETDEV */ MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);
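/*
 * A condensed sketch of the goto-unwind error ladder used by
 * nf_log_syslog_init() above: every registration step that succeeds gains
 * exactly one matching label on the error path, so a failure at step N
 * unwinds steps N-1..1 in reverse order and nothing is leaked.
 * register_a/b/c and unregister_a/b are hypothetical stand-ins.
 */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; } /* pretend the last step fails */
static void unregister_a(void) { }
static void unregister_b(void) { }

static int setup_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;
	ret = register_b();
	if (ret)
		goto err_a;
	ret = register_c();
	if (ret)
		goto err_b; /* undoes b, then a */
	return 0;

err_b:
	unregister_b();
err_a:
	unregister_a();
	return ret;
}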
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/cdrom.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/usb_usual.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-realtek" MODULE_DESCRIPTION("Driver for Realtek USB Card Reader"); MODULE_AUTHOR("wwang <wei_wang@realsil.com.cn>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); static int auto_delink_en = 1; module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])"); #ifdef CONFIG_REALTEK_AUTOPM static int ss_en = 1; module_param(ss_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ss_en, "enable selective suspend"); static int ss_delay = 50; module_param(ss_delay, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ss_delay, "seconds to delay before entering selective suspend"); enum RTS51X_STAT { RTS51X_STAT_INIT, RTS51X_STAT_IDLE, RTS51X_STAT_RUN, RTS51X_STAT_SS }; #define POLLING_INTERVAL 50 #define rts51x_set_stat(chip, stat) \ ((chip)->state = (enum RTS51X_STAT)(stat)) #define rts51x_get_stat(chip) ((chip)->state) #define SET_LUN_READY(chip, lun) ((chip)->lun_ready |= ((u8)1 << (lun))) #define CLR_LUN_READY(chip, lun) ((chip)->lun_ready &= ~((u8)1 << (lun))) #define TST_LUN_READY(chip, lun) ((chip)->lun_ready & ((u8)1 << (lun))) #endif struct rts51x_status { u16 vid; u16 pid; u8 cur_lun; u8 card_type; u8 total_lun; u16 fw_ver; u8 phy_exist; u8 multi_flag; u8 multi_card; u8 log_exist; union { u8 detailed_type1; u8 detailed_type2; } detailed_type; u8 function[2]; }; struct rts51x_chip { u16 vendor_id; u16 product_id; char max_lun; struct rts51x_status *status; int status_len; u32 flag; struct us_data *us; #ifdef CONFIG_REALTEK_AUTOPM struct timer_list rts51x_suspend_timer; unsigned long timer_expires; int pwr_state; u8 lun_ready; enum RTS51X_STAT state; int support_auto_delink; #endif /* used to back up the protocol chosen in probe1 phase */ proto_cmnd proto_handler_backup; }; /* flag definition */ #define FLIDX_AUTO_DELINK 0x01 #define SCSI_LUN(srb) ((srb)->device->lun) /* Bit Operation */ #define SET_BIT(data, idx) ((data) |= 1 << (idx)) #define CLR_BIT(data, idx) ((data) &= ~(1 << (idx))) #define CHK_BIT(data, idx) ((data) & (1 << (idx))) #define SET_AUTO_DELINK(chip) ((chip)->flag |= FLIDX_AUTO_DELINK) #define CLR_AUTO_DELINK(chip) ((chip)->flag &=
~FLIDX_AUTO_DELINK) #define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK) #define RTS51X_GET_VID(chip) ((chip)->vendor_id) #define RTS51X_GET_PID(chip) ((chip)->product_id) #define VENDOR_ID(chip) ((chip)->status[0].vid) #define PRODUCT_ID(chip) ((chip)->status[0].pid) #define FW_VERSION(chip) ((chip)->status[0].fw_ver) #define STATUS_LEN(chip) ((chip)->status_len) #define STATUS_SUCCESS 0 #define STATUS_FAIL 1 /* Check card reader function */ #define SUPPORT_DETAILED_TYPE1(chip) \ CHK_BIT((chip)->status[0].function[0], 1) #define SUPPORT_OT(chip) \ CHK_BIT((chip)->status[0].function[0], 2) #define SUPPORT_OC(chip) \ CHK_BIT((chip)->status[0].function[0], 3) #define SUPPORT_AUTO_DELINK(chip) \ CHK_BIT((chip)->status[0].function[0], 4) #define SUPPORT_SDIO(chip) \ CHK_BIT((chip)->status[0].function[1], 0) #define SUPPORT_DETAILED_TYPE2(chip) \ CHK_BIT((chip)->status[0].function[1], 1) #define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid)) #define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver)) #define CHECK_ID(chip, pid, fw_ver) \ (CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver))) static int init_realtek_cr(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ {\ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) \ } static const struct usb_device_id realtek_cr_ids[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, realtek_cr_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev realtek_cr_unusual_dev_list[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; #undef UNUSUAL_DEV static int rts51x_bulk_transport(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf; int result; unsigned int residue; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : US_BULK_FLAG_OUT; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* check bulk status */ if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) { usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n", le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN); return USB_STOR_TRANSPORT_ERROR; } residue = bcs->Residue; if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; /* * try to compute the actual residue, based on how much data * was really transferred and what the device tells us */ if (residue) residue = residue < buf_len ? residue : buf_len; if (act_len) *act_len = buf_len - residue; /* based on the status code, we report good or bad */ switch (bcs->Status) { case US_BULK_STAT_OK: /* command good -- note that data could be short */ return USB_STOR_TRANSPORT_GOOD; case US_BULK_STAT_FAIL: /* command failed */ return USB_STOR_TRANSPORT_FAILED; case US_BULK_STAT_PHASE: /* * phase error -- note that a transport reset will be * invoked by the invoke_transport() function */ return USB_STOR_TRANSPORT_ERROR; } /* we should never get here, but if we do, we're in trouble */ return USB_STOR_TRANSPORT_ERROR; } static int rts51x_bulk_transport_special(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf; int result; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : US_BULK_FLAG_OUT; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen, 250); return result; } /* Determine what the maximum LUN supported is */ static int rts51x_get_max_lun(struct us_data *us) { int result; /* issue the command */ us->iobuf[0] = 0; result = usb_stor_control_msg(us, us->recv_ctrl_pipe, US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, us->ifnum, us->iobuf, 1, 10 * HZ); usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); /* if we have a successful request, return the result */ if (result > 0) return us->iobuf[0]; return 0; } static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return -ENOMEM; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0D; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_FROM_DEVICE, NULL); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(data, buf, len); kfree(buf); return 0; } static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmemdup(data, len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) return -EIO; return 0; } static int rts51x_read_status(struct us_data *us, u8 lun, u8 *status, int len, int *actlen) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "lun = %d\n", lun); cmnd[0] = 0xF0; cmnd[1] = 0x09; retval = rts51x_bulk_transport(us, lun, cmnd, 12, buf, len, DMA_FROM_DEVICE, actlen); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(status, buf, len); kfree(buf); return 0; } static int rts51x_check_status(struct us_data *us, u8 lun) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[16]; retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) return -EIO; usb_stor_dbg(us, "chip->status_len = %d\n", chip->status_len); chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1]; chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3]; chip->status[lun].cur_lun = buf[4]; chip->status[lun].card_type = buf[5]; chip->status[lun].total_lun = buf[6]; chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8]; chip->status[lun].phy_exist = buf[9]; chip->status[lun].multi_flag = buf[10]; chip->status[lun].multi_card = buf[11]; chip->status[lun].log_exist = buf[12]; if (chip->status_len == 16) { chip->status[lun].detailed_type.detailed_type1 = buf[13]; chip->status[lun].function[0] = buf[14]; chip->status[lun].function[1] = buf[15]; } return 0; } static int enable_oscillator(struct us_data *us) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; value |= 0x04; retval = 
rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (!(value & 0x04)) return -EIO; return 0; } static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len) { int retval; u8 cmnd[12] = {0}; u8 *buf; usb_stor_dbg(us, "addr = 0xfe47, len = %d\n", len); buf = kmemdup(data, len, GFP_NOIO); if (!buf) return USB_STOR_TRANSPORT_ERROR; cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = 0xfe; cmnd[3] = 0x47; cmnd[4] = (u8)(len >> 8); cmnd[5] = (u8)len; retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) { return -EIO; } return 0; } static int do_config_autodelink(struct us_data *us, int enable, int force) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (enable) { if (force) value |= 0x03; else value |= 0x01; } else { value &= ~0x03; } usb_stor_dbg(us, "set 0xfe47 to 0x%x\n", value); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; return 0; } static int config_autodelink_after_power_on(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (auto_delink_en) { CLR_BIT(value, 0); CLR_BIT(value, 1); SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); SET_BIT(value, 7); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; retval = enable_oscillator(us); if (retval == 0) (void)do_config_autodelink(us, 1, 0); } else { /* Autodelink controlled by firmware */ SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { CLR_BIT(value, 0); CLR_BIT(value, 7); } /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0xFF; retval = rts51x_write_mem(us, 0xFE79, &value, 1); if (retval < 0) return -EIO; value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } #ifdef CONFIG_PM static int config_autodelink_before_power_down(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; if (auto_delink_en) { retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 0); if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; } else { if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880) || CHECK_ID(chip, 0x0138, 0x3882)) { retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { SET_BIT(value, 0); SET_BIT(value, 7); } if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); /* 
retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; } if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } static void fw5895_init(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 val; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, &val, 1); if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) { val = 0x1F; retval = rts51x_write_mem(us, 0xFD70, &val, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail, OR (val & 0x1F) != 0\n"); } } } #endif #ifdef CONFIG_REALTEK_AUTOPM static void fw5895_set_mmc_wp(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[13]; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, buf, 1); if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) { /* SD Exist and SD WP */ retval = rts51x_read_mem(us, 0xD04E, buf, 1); if (retval == STATUS_SUCCESS) { buf[0] |= 0x04; retval = rts51x_write_mem(us, 0xFD70, buf, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail\n"); } } else { usb_stor_dbg(us, "Read memory fail, OR (buf[0]&0x24)!=0x24\n"); } } } static void rts51x_modi_suspend_timer(struct rts51x_chip *chip) { struct us_data *us = chip->us; usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay); mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires); } static void rts51x_suspend_timer_fn(struct timer_list *t) { struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer); struct us_data *us = chip->us; switch (rts51x_get_stat(chip)) { case RTS51X_STAT_INIT: case RTS51X_STAT_RUN: rts51x_modi_suspend_timer(chip); break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) { usb_stor_dbg(us, "Ready to enter SS state\n"); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); } break; default: usb_stor_dbg(us, "Unknown state !!!\n"); break; } } static inline int working_scsi(struct scsi_cmnd *srb) { if ((srb->cmnd[0] == TEST_UNIT_READY) || (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) { return 0; } return 1; } static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); static int card_first_show = 1; static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 }; static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 }; int ret; if (working_scsi(srb)) { usb_stor_dbg(us, "working scsi, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); if 
(atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) { ret = usb_autopm_get_interface(us->pusb_intf); usb_stor_dbg(us, "working scsi, ret=%d\n", ret); } if (rts51x_get_stat(chip) != RTS51X_STAT_RUN) rts51x_set_stat(chip, RTS51X_STAT_RUN); chip->proto_handler_backup(srb, us); } else { if (rts51x_get_stat(chip) == RTS51X_STAT_SS) { usb_stor_dbg(us, "NOT working scsi\n"); if ((srb->cmnd[0] == TEST_UNIT_READY) && (chip->pwr_state == US_SUSPEND)) { if (TST_LUN_READY(chip, srb->device->lun)) { srb->result = SAM_STAT_GOOD; } else { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, media_not_present, US_SENSE_SIZE); } usb_stor_dbg(us, "TEST_UNIT_READY\n"); goto out; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { int prevent = srb->cmnd[4] & 0x1; if (prevent) { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, invalid_cmd_field, US_SENSE_SIZE); } else { srb->result = SAM_STAT_GOOD; } usb_stor_dbg(us, "ALLOW_MEDIUM_REMOVAL\n"); goto out; } } else { usb_stor_dbg(us, "NOT working scsi, not SS\n"); chip->proto_handler_backup(srb, us); /* Check whether card is plugged in */ if (srb->cmnd[0] == TEST_UNIT_READY) { if (srb->result == SAM_STAT_GOOD) { SET_LUN_READY(chip, srb->device->lun); if (card_first_show) { card_first_show = 0; fw5895_set_mmc_wp(us); } } else { CLR_LUN_READY(chip, srb->device->lun); card_first_show = 1; } } if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE) rts51x_set_stat(chip, RTS51X_STAT_IDLE); } } out: usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); if (rts51x_get_stat(chip) == RTS51X_STAT_RUN) rts51x_modi_suspend_timer(chip); } static int realtek_cr_autosuspend_setup(struct us_data *us) { struct rts51x_chip *chip; struct rts51x_status *status = NULL; u8 buf[16]; int retval; chip = (struct rts51x_chip *)us->extra; chip->support_auto_delink = 0; chip->pwr_state = US_RESUME; chip->lun_ready = 0; rts51x_set_stat(chip, RTS51X_STAT_INIT); retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) { usb_stor_dbg(us, "Read status fail\n"); return -EIO; } status = chip->status; status->vid = ((u16) buf[0] << 8) | buf[1]; status->pid = ((u16) buf[2] << 8) | buf[3]; status->cur_lun = buf[4]; status->card_type = buf[5]; status->total_lun = buf[6]; status->fw_ver = ((u16) buf[7] << 8) | buf[8]; status->phy_exist = buf[9]; status->multi_flag = buf[10]; status->multi_card = buf[11]; status->log_exist = buf[12]; if (chip->status_len == 16) { status->detailed_type.detailed_type1 = buf[13]; status->function[0] = buf[14]; status->function[1] = buf[15]; } /* back up the proto_handler in us->extra */ chip = (struct rts51x_chip *)(us->extra); chip->proto_handler_backup = us->proto_handler; /* Set the autosuspend_delay to 0 */ pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0); /* override us->proto_handler set in get_protocol() */ us->proto_handler = rts51x_invoke_transport; chip->timer_expires = 0; timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0); fw5895_init(us); /* enable autosuspend function of the usb device */ usb_enable_autosuspend(us->pusb_dev); return 0; } #endif static void realtek_cr_destructor(void *extra) { struct rts51x_chip *chip = extra; if (!chip) return; #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) { del_timer(&chip->rts51x_suspend_timer); chip->timer_expires = 0; } #endif kfree(chip->status); } #ifdef CONFIG_PM static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message) { struct us_data *us = usb_get_intfdata(iface); /* wait until no command is running */
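/*
 * us->dev_mutex is held by the usb-storage control thread for the whole
 * of each SCSI command, so taking it here should ensure the delink
 * register writes below cannot race an in-flight command (an editorial
 * reading of the usb-storage locking scheme, not stated in this file).
 */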
mutex_lock(&us->dev_mutex); config_autodelink_before_power_down(us); mutex_unlock(&us->dev_mutex); return 0; } static int realtek_cr_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); fw5895_init(us); config_autodelink_after_power_on(us); return 0; } #else #define realtek_cr_suspend NULL #define realtek_cr_resume NULL #endif static int init_realtek_cr(struct us_data *us) { struct rts51x_chip *chip; int size, i, retval; chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL); if (!chip) return -ENOMEM; us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = rts51x_get_max_lun(us); chip->us = us; usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); size = (chip->max_lun + 1) * sizeof(struct rts51x_status); chip->status = kzalloc(size, GFP_KERNEL); if (!chip->status) goto INIT_FAIL; for (i = 0; i <= (int)(chip->max_lun); i++) { retval = rts51x_check_status(us, (u8) i); if (retval < 0) goto INIT_FAIL; } if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) || CHECK_PID(chip, 0x0159)) { if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); if (STATUS_LEN(chip) == 16) { if (SUPPORT_AUTO_DELINK(chip)) SET_AUTO_DELINK(chip); } } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) realtek_cr_autosuspend_setup(us); #endif usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); (void)config_autodelink_after_power_on(us); return 0; INIT_FAIL: if (us->extra) { kfree(chip->status); kfree(us->extra); us->extra = NULL; } return -EIO; } static struct scsi_host_template realtek_cr_host_template; static int realtek_cr_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; dev_dbg(&intf->dev, "Probe Realtek Card Reader!\n"); result = usb_stor_probe1(&us, intf, id, (id - realtek_cr_ids) + realtek_cr_unusual_dev_list, &realtek_cr_host_template); if (result) return result; result = usb_stor_probe2(us); return result; } static struct usb_driver realtek_cr_driver = { .name = DRV_NAME, .probe = realtek_cr_probe, .disconnect = usb_stor_disconnect, /* .suspend = usb_stor_suspend, */ /* .resume = usb_stor_resume, */ .reset_resume = usb_stor_reset_resume, .suspend = realtek_cr_suspend, .resume = realtek_cr_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = realtek_cr_ids, .soft_unbind = 1, .supports_autosuspend = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(realtek_cr_driver, realtek_cr_host_template, DRV_NAME);
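The vendor commands used above (0xF0/0x0D memory read, 0xF0/0x0E memory write, 0xF0/0x09 status) all ride inside the standard 31-byte Bulk-Only Transport command block that rts51x_bulk_transport() assembles. Below is a minimal, illustrative user-space sketch of that framing, written with plain byte offsets instead of the kernel's struct bulk_cb_wrap; the helper name build_read_mem_cbw is hypothetical.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: lay out the 31-byte Bulk-Only Transport CBW that
 * carries the vendor "read memory" command, mirroring what
 * rts51x_read_mem() + rts51x_bulk_transport() send. Offsets follow the
 * USB Mass Storage BOT spec; the CBW header fields are little-endian.
 */
static void build_read_mem_cbw(uint8_t cbw[31], uint32_t tag,
			       uint16_t addr, uint16_t len)
{
	memset(cbw, 0, 31);
	cbw[0] = 'U'; cbw[1] = 'S'; cbw[2] = 'B'; cbw[3] = 'C';	/* dCBWSignature */
	cbw[4] = tag & 0xff;		/* dCBWTag, echoed back in the CSW */
	cbw[5] = (tag >> 8) & 0xff;
	cbw[6] = (tag >> 16) & 0xff;
	cbw[7] = (tag >> 24) & 0xff;
	cbw[8] = len & 0xff;		/* dCBWDataTransferLength */
	cbw[9] = len >> 8;
	cbw[12] = 0x80;			/* bmCBWFlags: device-to-host */
	cbw[13] = 0;			/* bCBWLUN */
	cbw[14] = 12;			/* bCBWCBLength: 12-byte vendor CDB */
	cbw[15] = 0xF0;			/* vendor opcode, as in rts51x_read_mem() */
	cbw[16] = 0x0D;
	cbw[17] = addr >> 8;		/* register address, big-endian in the CDB */
	cbw[18] = addr & 0xff;
	cbw[19] = len >> 8;		/* payload length, big-endian in the CDB */
	cbw[20] = len & 0xff;
}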
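For completeness, a hedged usage note on the three module parameters declared at the top of the Realtek driver; the values shown are only an example.

/*
 * Example invocation (hypothetical values): load with firmware-controlled
 * delink and selective suspend kicking in after 50 s of idleness:
 *
 *	modprobe ums-realtek auto_delink_en=0 ss_en=1 ss_delay=50
 *
 * auto_delink_en picks firmware (0) vs software (1) delink handling;
 * ss_en and ss_delay only take effect when CONFIG_REALTEK_AUTOPM is set.
 */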
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for Valve Steam Controller * * Copyright (c) 2018 Rodrigo Rivas Costa <rodrigorivascosta@gmail.com> * Copyright (c) 2022 Valve Software * * Supports both the wired and wireless interfaces. * * This controller has a built-in emulation of mouse and keyboard: the right pad * can be used as a mouse, the shoulder buttons are mouse buttons, A and B * buttons are ENTER and ESCAPE, and so on. This is implemented as additional * HID interfaces. * * This is known as the "lizard mode", because apparently lizards like to use * the computer from the couch, without a proper mouse and keyboard. * * This driver will disable the lizard mode when the input device is opened * and re-enable it when the input device is closed, so as not to break user * mode behaviour. The lizard_mode parameter can be used to change that. * * There are a few user space applications (notably Steam Client) that use * the hidraw interface directly to create input devices (XTest, uinput...). * In order to avoid breaking them, this driver creates a layered hidraw device, * so it can detect when the client is running and then: * - it will not send any command to the controller. * - this input device will be removed, to avoid double input of the same * user action. * When the client is closed, this input device will be created again.
* * For additional functions, such as changing the right-pad margin or switching * the led, you can use the user-space tool at: * * https://github.com/rodrigorc/steamctrl */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/delay.h> #include <linux/power_supply.h> #include "hid-ids.h" MODULE_DESCRIPTION("HID driver for Valve Steam Controller"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rodrigo Rivas Costa <rodrigorivascosta@gmail.com>"); static bool lizard_mode = true; static DEFINE_MUTEX(steam_devices_lock); static LIST_HEAD(steam_devices); #define STEAM_QUIRK_WIRELESS BIT(0) #define STEAM_QUIRK_DECK BIT(1) /* Touch pads are 40 mm in diameter and 65535 units */ #define STEAM_PAD_RESOLUTION 1638 /* Trigger runs are about 5 mm and 256 units */ #define STEAM_TRIGGER_RESOLUTION 51 /* Joystick runs are about 5 mm and 256 units */ #define STEAM_JOYSTICK_RESOLUTION 51 /* Trigger runs are about 6 mm and 32768 units */ #define STEAM_DECK_TRIGGER_RESOLUTION 5461 /* Joystick runs are about 5 mm and 32768 units */ #define STEAM_DECK_JOYSTICK_RESOLUTION 6553 /* Accelerometer has 16 bit resolution and a range of +/- 2g */ #define STEAM_DECK_ACCEL_RES_PER_G 16384 #define STEAM_DECK_ACCEL_RANGE 32768 #define STEAM_DECK_ACCEL_FUZZ 32 /* Gyroscope has 16 bit resolution and a range of +/- 2000 dps */ #define STEAM_DECK_GYRO_RES_PER_DPS 16 #define STEAM_DECK_GYRO_RANGE 32768 #define STEAM_DECK_GYRO_FUZZ 1 #define STEAM_PAD_FUZZ 256 /* * Commands that can be sent in a feature report. * Thanks to Valve and SDL for the names. */ enum { ID_SET_DIGITAL_MAPPINGS = 0x80, ID_CLEAR_DIGITAL_MAPPINGS = 0x81, ID_GET_DIGITAL_MAPPINGS = 0x82, ID_GET_ATTRIBUTES_VALUES = 0x83, ID_GET_ATTRIBUTE_LABEL = 0x84, ID_SET_DEFAULT_DIGITAL_MAPPINGS = 0x85, ID_FACTORY_RESET = 0x86, ID_SET_SETTINGS_VALUES = 0x87, ID_CLEAR_SETTINGS_VALUES = 0x88, ID_GET_SETTINGS_VALUES = 0x89, ID_GET_SETTING_LABEL = 0x8A, ID_GET_SETTINGS_MAXS = 0x8B, ID_GET_SETTINGS_DEFAULTS = 0x8C, ID_SET_CONTROLLER_MODE = 0x8D, ID_LOAD_DEFAULT_SETTINGS = 0x8E, ID_TRIGGER_HAPTIC_PULSE = 0x8F, ID_TURN_OFF_CONTROLLER = 0x9F, ID_GET_DEVICE_INFO = 0xA1, ID_CALIBRATE_TRACKPADS = 0xA7, ID_RESERVED_0 = 0xA8, ID_SET_SERIAL_NUMBER = 0xA9, ID_GET_TRACKPAD_CALIBRATION = 0xAA, ID_GET_TRACKPAD_FACTORY_CALIBRATION = 0xAB, ID_GET_TRACKPAD_RAW_DATA = 0xAC, ID_ENABLE_PAIRING = 0xAD, ID_GET_STRING_ATTRIBUTE = 0xAE, ID_RADIO_ERASE_RECORDS = 0xAF, ID_RADIO_WRITE_RECORD = 0xB0, ID_SET_DONGLE_SETTING = 0xB1, ID_DONGLE_DISCONNECT_DEVICE = 0xB2, ID_DONGLE_COMMIT_DEVICE = 0xB3, ID_DONGLE_GET_WIRELESS_STATE = 0xB4, ID_CALIBRATE_GYRO = 0xB5, ID_PLAY_AUDIO = 0xB6, ID_AUDIO_UPDATE_START = 0xB7, ID_AUDIO_UPDATE_DATA = 0xB8, ID_AUDIO_UPDATE_COMPLETE = 0xB9, ID_GET_CHIPID = 0xBA, ID_CALIBRATE_JOYSTICK = 0xBF, ID_CALIBRATE_ANALOG_TRIGGERS = 0xC0, ID_SET_AUDIO_MAPPING = 0xC1, ID_CHECK_GYRO_FW_LOAD = 0xC2, ID_CALIBRATE_ANALOG = 0xC3, ID_DONGLE_GET_CONNECTED_SLOTS = 0xC4, ID_RESET_IMU = 0xCE, ID_TRIGGER_HAPTIC_CMD = 0xEA, ID_TRIGGER_RUMBLE_CMD = 0xEB, }; /* Settings IDs */ enum { /* 0 */ SETTING_MOUSE_SENSITIVITY, SETTING_MOUSE_ACCELERATION, SETTING_TRACKBALL_ROTATION_ANGLE, SETTING_HAPTIC_INTENSITY_UNUSED, SETTING_LEFT_GAMEPAD_STICK_ENABLED, SETTING_RIGHT_GAMEPAD_STICK_ENABLED, SETTING_USB_DEBUG_MODE, SETTING_LEFT_TRACKPAD_MODE, SETTING_RIGHT_TRACKPAD_MODE, SETTING_MOUSE_POINTER_ENABLED, /* 10 */ SETTING_DPAD_DEADZONE, SETTING_MINIMUM_MOMENTUM_VEL, 
SETTING_MOMENTUM_DECAY_AMMOUNT, SETTING_TRACKPAD_RELATIVE_MODE_TICKS_PER_PIXEL, SETTING_HAPTIC_INCREMENT, SETTING_DPAD_ANGLE_SIN, SETTING_DPAD_ANGLE_COS, SETTING_MOMENTUM_VERTICAL_DIVISOR, SETTING_MOMENTUM_MAXIMUM_VELOCITY, SETTING_TRACKPAD_Z_ON, /* 20 */ SETTING_TRACKPAD_Z_OFF, SETTING_SENSITIVY_SCALE_AMMOUNT, SETTING_LEFT_TRACKPAD_SECONDARY_MODE, SETTING_RIGHT_TRACKPAD_SECONDARY_MODE, SETTING_SMOOTH_ABSOLUTE_MOUSE, SETTING_STEAMBUTTON_POWEROFF_TIME, SETTING_UNUSED_1, SETTING_TRACKPAD_OUTER_RADIUS, SETTING_TRACKPAD_Z_ON_LEFT, SETTING_TRACKPAD_Z_OFF_LEFT, /* 30 */ SETTING_TRACKPAD_OUTER_SPIN_VEL, SETTING_TRACKPAD_OUTER_SPIN_RADIUS, SETTING_TRACKPAD_OUTER_SPIN_HORIZONTAL_ONLY, SETTING_TRACKPAD_RELATIVE_MODE_DEADZONE, SETTING_TRACKPAD_RELATIVE_MODE_MAX_VEL, SETTING_TRACKPAD_RELATIVE_MODE_INVERT_Y, SETTING_TRACKPAD_DOUBLE_TAP_BEEP_ENABLED, SETTING_TRACKPAD_DOUBLE_TAP_BEEP_PERIOD, SETTING_TRACKPAD_DOUBLE_TAP_BEEP_COUNT, SETTING_TRACKPAD_OUTER_RADIUS_RELEASE_ON_TRANSITION, /* 40 */ SETTING_RADIAL_MODE_ANGLE, SETTING_HAPTIC_INTENSITY_MOUSE_MODE, SETTING_LEFT_DPAD_REQUIRES_CLICK, SETTING_RIGHT_DPAD_REQUIRES_CLICK, SETTING_LED_BASELINE_BRIGHTNESS, SETTING_LED_USER_BRIGHTNESS, SETTING_ENABLE_RAW_JOYSTICK, SETTING_ENABLE_FAST_SCAN, SETTING_IMU_MODE, SETTING_WIRELESS_PACKET_VERSION, /* 50 */ SETTING_SLEEP_INACTIVITY_TIMEOUT, SETTING_TRACKPAD_NOISE_THRESHOLD, SETTING_LEFT_TRACKPAD_CLICK_PRESSURE, SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE, SETTING_LEFT_BUMPER_CLICK_PRESSURE, SETTING_RIGHT_BUMPER_CLICK_PRESSURE, SETTING_LEFT_GRIP_CLICK_PRESSURE, SETTING_RIGHT_GRIP_CLICK_PRESSURE, SETTING_LEFT_GRIP2_CLICK_PRESSURE, SETTING_RIGHT_GRIP2_CLICK_PRESSURE, /* 60 */ SETTING_PRESSURE_MODE, SETTING_CONTROLLER_TEST_MODE, SETTING_TRIGGER_MODE, SETTING_TRACKPAD_Z_THRESHOLD, SETTING_FRAME_RATE, SETTING_TRACKPAD_FILT_CTRL, SETTING_TRACKPAD_CLIP, SETTING_DEBUG_OUTPUT_SELECT, SETTING_TRIGGER_THRESHOLD_PERCENT, SETTING_TRACKPAD_FREQUENCY_HOPPING, /* 70 */ SETTING_HAPTICS_ENABLED, SETTING_STEAM_WATCHDOG_ENABLE, SETTING_TIMP_TOUCH_THRESHOLD_ON, SETTING_TIMP_TOUCH_THRESHOLD_OFF, SETTING_FREQ_HOPPING, SETTING_TEST_CONTROL, SETTING_HAPTIC_MASTER_GAIN_DB, SETTING_THUMB_TOUCH_THRESH, SETTING_DEVICE_POWER_STATUS, SETTING_HAPTIC_INTENSITY, /* 80 */ SETTING_STABILIZER_ENABLED, SETTING_TIMP_MODE_MTE, }; /* Input report identifiers */ enum { ID_CONTROLLER_STATE = 1, ID_CONTROLLER_DEBUG = 2, ID_CONTROLLER_WIRELESS = 3, ID_CONTROLLER_STATUS = 4, ID_CONTROLLER_DEBUG2 = 5, ID_CONTROLLER_SECONDARY_STATE = 6, ID_CONTROLLER_BLE_STATE = 7, ID_CONTROLLER_DECK_STATE = 9 }; /* String attribute identifiers */ enum { ATTRIB_STR_BOARD_SERIAL, ATTRIB_STR_UNIT_SERIAL, }; /* Values for GYRO_MODE (bitmask) */ enum { SETTING_GYRO_MODE_OFF = 0, SETTING_GYRO_MODE_STEERING = BIT(0), SETTING_GYRO_MODE_TILT = BIT(1), SETTING_GYRO_MODE_SEND_ORIENTATION = BIT(2), SETTING_GYRO_MODE_SEND_RAW_ACCEL = BIT(3), SETTING_GYRO_MODE_SEND_RAW_GYRO = BIT(4), }; /* Trackpad modes */ enum { TRACKPAD_ABSOLUTE_MOUSE, TRACKPAD_RELATIVE_MOUSE, TRACKPAD_DPAD_FOUR_WAY_DISCRETE, TRACKPAD_DPAD_FOUR_WAY_OVERLAP, TRACKPAD_DPAD_EIGHT_WAY, TRACKPAD_RADIAL_MODE, TRACKPAD_ABSOLUTE_DPAD, TRACKPAD_NONE, TRACKPAD_GESTURE_KEYBOARD, }; /* Pad identifiers for the deck */ #define STEAM_PAD_LEFT 0 #define STEAM_PAD_RIGHT 1 #define STEAM_PAD_BOTH 2 /* Other random constants */ #define STEAM_SERIAL_LEN 0x15 struct steam_device { struct list_head list; spinlock_t lock; struct hid_device *hdev, *client_hdev; struct mutex report_mutex; unsigned long client_opened; struct input_dev __rcu *input; struct 
input_dev __rcu *sensors; unsigned long quirks; struct work_struct work_connect; bool connected; char serial_no[STEAM_SERIAL_LEN + 1]; struct power_supply_desc battery_desc; struct power_supply __rcu *battery; u8 battery_charge; u16 voltage; struct delayed_work mode_switch; bool did_mode_switch; bool gamepad_mode; struct work_struct rumble_work; u16 rumble_left; u16 rumble_right; unsigned int sensor_timestamp_us; }; static int steam_recv_report(struct steam_device *steam, u8 *data, int size) { struct hid_report *r; u8 *buf; int ret; r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0]; if (!r) { hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n"); return -EINVAL; } if (hid_report_len(r) < 64) return -EINVAL; buf = hid_alloc_report_buf(r, GFP_KERNEL); if (!buf) return -ENOMEM; /* * The report ID is always 0, so strip the first byte from the output. * hid_report_len() is not counting the report ID, so +1 to the length * or else we get an EOVERFLOW. We are safe from a buffer overflow * because hid_alloc_report_buf() allocates +7 bytes. */ ret = hid_hw_raw_request(steam->hdev, 0x00, buf, hid_report_len(r) + 1, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret > 0) memcpy(data, buf + 1, min(size, ret - 1)); kfree(buf); return ret; } static int steam_send_report(struct steam_device *steam, u8 *cmd, int size) { struct hid_report *r; u8 *buf; unsigned int retries = 50; int ret; r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0]; if (!r) { hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to write\n"); return -EINVAL; } if (hid_report_len(r) < 64) return -EINVAL; buf = hid_alloc_report_buf(r, GFP_KERNEL); if (!buf) return -ENOMEM; /* The report ID is always 0 */ memcpy(buf + 1, cmd, size); /* * Sometimes the wireless controller fails with EPIPE * when sending a feature report. * Doing a HID_REQ_GET_REPORT and waiting for a while * seems to fix that. */ do { ret = hid_hw_raw_request(steam->hdev, 0, buf, max(size, 64) + 1, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret != -EPIPE) break; msleep(20); } while (--retries); kfree(buf); if (ret < 0) hid_err(steam->hdev, "%s: error %d (%*ph)\n", __func__, ret, size, cmd); return ret; } static inline int steam_send_report_byte(struct steam_device *steam, u8 cmd) { return steam_send_report(steam, &cmd, 1); } static int steam_write_settings(struct steam_device *steam, /* u8 reg, u16 val */...)
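/*
 * Wire-format sketch: for a hypothetical call
 * steam_write_settings(steam, SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, 0)
 * the function below emits the 5-byte feature report
 *
 *	0x87 0x03 0x07 0x07 0x00
 *	cmd  len  reg  val-lo val-hi
 *
 * (SETTING_LEFT_TRACKPAD_MODE and TRACKPAD_NONE are both 7.) Note that
 * the argument list is terminated by register 0, so setting id 0
 * (SETTING_MOUSE_SENSITIVITY) cannot be written through this helper.
 */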
{ /* Send: 0x87 len (reg valLo valHi)* */ u8 reg; u16 val; u8 cmd[64] = {ID_SET_SETTINGS_VALUES, 0x00}; int ret; va_list args; va_start(args, steam); for (;;) { reg = va_arg(args, int); if (reg == 0) break; val = va_arg(args, int); cmd[cmd[1] + 2] = reg; cmd[cmd[1] + 3] = val & 0xff; cmd[cmd[1] + 4] = val >> 8; cmd[1] += 3; } va_end(args); ret = steam_send_report(steam, cmd, 2 + cmd[1]); if (ret < 0) return ret; /* * Sometimes a lingering report for this command can * get read back instead of the last set report if * this isn't explicitly queried */ return steam_recv_report(steam, cmd, 2 + cmd[1]); } static int steam_get_serial(struct steam_device *steam) { /* * Send: 0xae 0x15 0x01 * Recv: 0xae 0x15 0x01 serialnumber */ int ret = 0; u8 cmd[] = {ID_GET_STRING_ATTRIBUTE, sizeof(steam->serial_no), ATTRIB_STR_UNIT_SERIAL}; u8 reply[3 + STEAM_SERIAL_LEN + 1]; mutex_lock(&steam->report_mutex); ret = steam_send_report(steam, cmd, sizeof(cmd)); if (ret < 0) goto out; ret = steam_recv_report(steam, reply, sizeof(reply)); if (ret < 0) goto out; if (reply[0] != ID_GET_STRING_ATTRIBUTE || reply[1] < 1 || reply[1] > sizeof(steam->serial_no) || reply[2] != ATTRIB_STR_UNIT_SERIAL) { ret = -EIO; goto out; } reply[3 + STEAM_SERIAL_LEN] = 0; strscpy(steam->serial_no, reply + 3, reply[1]); out: mutex_unlock(&steam->report_mutex); return ret; } /* * This command requests the wireless adaptor to post an event * with the connection status. Useful if this driver is loaded when * the controller is already connected. */ static inline int steam_request_conn_status(struct steam_device *steam) { int ret; mutex_lock(&steam->report_mutex); ret = steam_send_report_byte(steam, ID_DONGLE_GET_WIRELESS_STATE); mutex_unlock(&steam->report_mutex); return ret; } /* * Send a haptic pulse to the trackpads * Duration and interval are measured in microseconds, count is the number * of pulses to send for duration time with interval microseconds between them * and gain is measured in decibels, ranging from -24 to +6 */ static inline int steam_haptic_pulse(struct steam_device *steam, u8 pad, u16 duration, u16 interval, u16 count, u8 gain) { int ret; u8 report[10] = {ID_TRIGGER_HAPTIC_PULSE, 8}; /* Left and right are swapped on this report for legacy reasons */ if (pad < STEAM_PAD_BOTH) pad ^= 1; report[2] = pad; report[3] = duration & 0xFF; report[4] = duration >> 8; report[5] = interval & 0xFF; report[6] = interval >> 8; report[7] = count & 0xFF; report[8] = count >> 8; report[9] = gain; mutex_lock(&steam->report_mutex); ret = steam_send_report(steam, report, sizeof(report)); mutex_unlock(&steam->report_mutex); return ret; } static inline int steam_haptic_rumble(struct steam_device *steam, u16 intensity, u16 left_speed, u16 right_speed, u8 left_gain, u8 right_gain) { int ret; u8 report[11] = {ID_TRIGGER_RUMBLE_CMD, 9}; report[3] = intensity & 0xFF; report[4] = intensity >> 8; report[5] = left_speed & 0xFF; report[6] = left_speed >> 8; report[7] = right_speed & 0xFF; report[8] = right_speed >> 8; report[9] = left_gain; report[10] = right_gain; mutex_lock(&steam->report_mutex); ret = steam_send_report(steam, report, sizeof(report)); mutex_unlock(&steam->report_mutex); return ret; } static void steam_haptic_rumble_cb(struct work_struct *work) { struct steam_device *steam = container_of(work, struct steam_device, rumble_work); steam_haptic_rumble(steam, 0, steam->rumble_left, steam->rumble_right, 2, 0); } #ifdef CONFIG_STEAM_FF static int steam_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct 
steam_device *steam = input_get_drvdata(dev); steam->rumble_left = effect->u.rumble.strong_magnitude; steam->rumble_right = effect->u.rumble.weak_magnitude; return schedule_work(&steam->rumble_work); } #endif static void steam_set_lizard_mode(struct steam_device *steam, bool enable) { if (steam->gamepad_mode) enable = false; if (enable) { mutex_lock(&steam->report_mutex); /* enable esc, enter, cursors */ steam_send_report_byte(steam, ID_SET_DEFAULT_DIGITAL_MAPPINGS); /* reset settings */ steam_send_report_byte(steam, ID_LOAD_DEFAULT_SETTINGS); mutex_unlock(&steam->report_mutex); } else { mutex_lock(&steam->report_mutex); /* disable esc, enter, cursor */ steam_send_report_byte(steam, ID_CLEAR_DIGITAL_MAPPINGS); if (steam->quirks & STEAM_QUIRK_DECK) { steam_write_settings(steam, SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */ SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */ SETTING_LEFT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */ SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */ SETTING_STEAM_WATCHDOG_ENABLE, 0, /* disable watchdog that tests if Steam is active */ 0); mutex_unlock(&steam->report_mutex); } else { steam_write_settings(steam, SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */ SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */ 0); mutex_unlock(&steam->report_mutex); } } } static int steam_input_open(struct input_dev *dev) { struct steam_device *steam = input_get_drvdata(dev); unsigned long flags; bool set_lizard_mode; /* * Disabling lizard mode automatically is only done on the Steam * Controller. On the Steam Deck, this is toggled manually by holding * the options button instead, handled by steam_mode_switch_cb. */ if (!(steam->quirks & STEAM_QUIRK_DECK)) { spin_lock_irqsave(&steam->lock, flags); set_lizard_mode = !steam->client_opened && lizard_mode; spin_unlock_irqrestore(&steam->lock, flags); if (set_lizard_mode) steam_set_lizard_mode(steam, false); } return 0; } static void steam_input_close(struct input_dev *dev) { struct steam_device *steam = input_get_drvdata(dev); unsigned long flags; bool set_lizard_mode; if (!(steam->quirks & STEAM_QUIRK_DECK)) { spin_lock_irqsave(&steam->lock, flags); set_lizard_mode = !steam->client_opened && lizard_mode; spin_unlock_irqrestore(&steam->lock, flags); if (set_lizard_mode) steam_set_lizard_mode(steam, true); } } static enum power_supply_property steam_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CAPACITY, }; static int steam_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct steam_device *steam = power_supply_get_drvdata(psy); unsigned long flags; s16 volts; u8 batt; int ret = 0; spin_lock_irqsave(&steam->lock, flags); volts = steam->voltage; batt = steam->battery_charge; spin_unlock_irqrestore(&steam->lock, flags); switch (psp) { case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = volts * 1000; /* mV -> uV */ break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = batt; break; default: ret = -EINVAL; break; } return ret; } static int steam_battery_register(struct steam_device *steam) { struct power_supply *battery; struct power_supply_config battery_cfg = { .drv_data = steam, }; unsigned long flags; int ret; steam->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; 
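/*
 * The supply name built below embeds the controller serial number
 * ("steam-controller-<serial>-battery"), so several wireless
 * controllers paired to one dongle each register a distinct battery.
 */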
steam->battery_desc.properties = steam_battery_props; steam->battery_desc.num_properties = ARRAY_SIZE(steam_battery_props); steam->battery_desc.get_property = steam_battery_get_property; steam->battery_desc.name = devm_kasprintf(&steam->hdev->dev, GFP_KERNEL, "steam-controller-%s-battery", steam->serial_no); if (!steam->battery_desc.name) return -ENOMEM; /* avoid the warning of 0% battery while waiting for the first info */ spin_lock_irqsave(&steam->lock, flags); steam->voltage = 3000; steam->battery_charge = 100; spin_unlock_irqrestore(&steam->lock, flags); battery = power_supply_register(&steam->hdev->dev, &steam->battery_desc, &battery_cfg); if (IS_ERR(battery)) { ret = PTR_ERR(battery); hid_err(steam->hdev, "%s:power_supply_register failed with error %d\n", __func__, ret); return ret; } rcu_assign_pointer(steam->battery, battery); power_supply_powers(battery, &steam->hdev->dev); return 0; } static int steam_input_register(struct steam_device *steam) { struct hid_device *hdev = steam->hdev; struct input_dev *input; int ret; rcu_read_lock(); input = rcu_dereference(steam->input); rcu_read_unlock(); if (input) { dbg_hid("%s: already connected\n", __func__); return 0; } input = input_allocate_device(); if (!input) return -ENOMEM; input_set_drvdata(input, steam); input->dev.parent = &hdev->dev; input->open = steam_input_open; input->close = steam_input_close; input->name = (steam->quirks & STEAM_QUIRK_WIRELESS) ? "Wireless Steam Controller" : (steam->quirks & STEAM_QUIRK_DECK) ? "Steam Deck" : "Steam Controller"; input->phys = hdev->phys; input->uniq = steam->serial_no; input->id.bustype = hdev->bus; input->id.vendor = hdev->vendor; input->id.product = hdev->product; input->id.version = hdev->version; input_set_capability(input, EV_KEY, BTN_TR2); input_set_capability(input, EV_KEY, BTN_TL2); input_set_capability(input, EV_KEY, BTN_TR); input_set_capability(input, EV_KEY, BTN_TL); input_set_capability(input, EV_KEY, BTN_Y); input_set_capability(input, EV_KEY, BTN_B); input_set_capability(input, EV_KEY, BTN_X); input_set_capability(input, EV_KEY, BTN_A); input_set_capability(input, EV_KEY, BTN_DPAD_UP); input_set_capability(input, EV_KEY, BTN_DPAD_RIGHT); input_set_capability(input, EV_KEY, BTN_DPAD_LEFT); input_set_capability(input, EV_KEY, BTN_DPAD_DOWN); input_set_capability(input, EV_KEY, BTN_SELECT); input_set_capability(input, EV_KEY, BTN_MODE); input_set_capability(input, EV_KEY, BTN_START); input_set_capability(input, EV_KEY, BTN_THUMBR); input_set_capability(input, EV_KEY, BTN_THUMBL); input_set_capability(input, EV_KEY, BTN_THUMB); input_set_capability(input, EV_KEY, BTN_THUMB2); if (steam->quirks & STEAM_QUIRK_DECK) { input_set_capability(input, EV_KEY, BTN_BASE); input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY1); input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY2); input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY3); input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY4); } else { input_set_capability(input, EV_KEY, BTN_GEAR_DOWN); input_set_capability(input, EV_KEY, BTN_GEAR_UP); } input_set_abs_params(input, ABS_X, -32767, 32767, 0, 0); input_set_abs_params(input, ABS_Y, -32767, 32767, 0, 0); input_set_abs_params(input, ABS_HAT0X, -32767, 32767, STEAM_PAD_FUZZ, 0); input_set_abs_params(input, ABS_HAT0Y, -32767, 32767, STEAM_PAD_FUZZ, 0); if (steam->quirks & STEAM_QUIRK_DECK) { input_set_abs_params(input, ABS_HAT2Y, 0, 32767, 0, 0); input_set_abs_params(input, ABS_HAT2X, 0, 32767, 0, 0); input_set_abs_params(input, ABS_RX, -32767, 32767, 0, 0); 
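/*
 * input_abs_set_res() expects units per millimetre, which is where the
 * resolution constants defined at the top of this file come from:
 * 65535 units across a 40 mm pad gives 65535 / 40 ~= 1638
 * (STEAM_PAD_RESOLUTION), and 32768 units over roughly 5 mm of Deck
 * stick travel gives 32768 / 5 ~= 6553 (STEAM_DECK_JOYSTICK_RESOLUTION).
 */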
input_set_abs_params(input, ABS_RY, -32767, 32767, 0, 0); input_set_abs_params(input, ABS_HAT1X, -32767, 32767, STEAM_PAD_FUZZ, 0); input_set_abs_params(input, ABS_HAT1Y, -32767, 32767, STEAM_PAD_FUZZ, 0); input_abs_set_res(input, ABS_X, STEAM_DECK_JOYSTICK_RESOLUTION); input_abs_set_res(input, ABS_Y, STEAM_DECK_JOYSTICK_RESOLUTION); input_abs_set_res(input, ABS_RX, STEAM_DECK_JOYSTICK_RESOLUTION); input_abs_set_res(input, ABS_RY, STEAM_DECK_JOYSTICK_RESOLUTION); input_abs_set_res(input, ABS_HAT1X, STEAM_PAD_RESOLUTION); input_abs_set_res(input, ABS_HAT1Y, STEAM_PAD_RESOLUTION); input_abs_set_res(input, ABS_HAT2Y, STEAM_DECK_TRIGGER_RESOLUTION); input_abs_set_res(input, ABS_HAT2X, STEAM_DECK_TRIGGER_RESOLUTION); } else { input_set_abs_params(input, ABS_HAT2Y, 0, 255, 0, 0); input_set_abs_params(input, ABS_HAT2X, 0, 255, 0, 0); input_set_abs_params(input, ABS_RX, -32767, 32767, STEAM_PAD_FUZZ, 0); input_set_abs_params(input, ABS_RY, -32767, 32767, STEAM_PAD_FUZZ, 0); input_abs_set_res(input, ABS_X, STEAM_JOYSTICK_RESOLUTION); input_abs_set_res(input, ABS_Y, STEAM_JOYSTICK_RESOLUTION); input_abs_set_res(input, ABS_RX, STEAM_PAD_RESOLUTION); input_abs_set_res(input, ABS_RY, STEAM_PAD_RESOLUTION); input_abs_set_res(input, ABS_HAT2Y, STEAM_TRIGGER_RESOLUTION); input_abs_set_res(input, ABS_HAT2X, STEAM_TRIGGER_RESOLUTION); } input_abs_set_res(input, ABS_HAT0X, STEAM_PAD_RESOLUTION); input_abs_set_res(input, ABS_HAT0Y, STEAM_PAD_RESOLUTION); #ifdef CONFIG_STEAM_FF if (steam->quirks & STEAM_QUIRK_DECK) { input_set_capability(input, EV_FF, FF_RUMBLE); ret = input_ff_create_memless(input, NULL, steam_play_effect); if (ret) goto input_register_fail; } #endif ret = input_register_device(input); if (ret) goto input_register_fail; rcu_assign_pointer(steam->input, input); return 0; input_register_fail: input_free_device(input); return ret; } static int steam_sensors_register(struct steam_device *steam) { struct hid_device *hdev = steam->hdev; struct input_dev *sensors; int ret; if (!(steam->quirks & STEAM_QUIRK_DECK)) return 0; rcu_read_lock(); sensors = rcu_dereference(steam->sensors); rcu_read_unlock(); if (sensors) { dbg_hid("%s: already connected\n", __func__); return 0; } sensors = input_allocate_device(); if (!sensors) return -ENOMEM; input_set_drvdata(sensors, steam); sensors->dev.parent = &hdev->dev; sensors->name = "Steam Deck Motion Sensors"; sensors->phys = hdev->phys; sensors->uniq = steam->serial_no; sensors->id.bustype = hdev->bus; sensors->id.vendor = hdev->vendor; sensors->id.product = hdev->product; sensors->id.version = hdev->version; __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit); __set_bit(EV_MSC, sensors->evbit); __set_bit(MSC_TIMESTAMP, sensors->mscbit); input_set_abs_params(sensors, ABS_X, -STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0); input_set_abs_params(sensors, ABS_Y, -STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0); input_set_abs_params(sensors, ABS_Z, -STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0); input_abs_set_res(sensors, ABS_X, STEAM_DECK_ACCEL_RES_PER_G); input_abs_set_res(sensors, ABS_Y, STEAM_DECK_ACCEL_RES_PER_G); input_abs_set_res(sensors, ABS_Z, STEAM_DECK_ACCEL_RES_PER_G); input_set_abs_params(sensors, ABS_RX, -STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0); input_set_abs_params(sensors, ABS_RY, -STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0); input_set_abs_params(sensors, ABS_RZ, -STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_RANGE, 
STEAM_DECK_GYRO_FUZZ, 0); input_abs_set_res(sensors, ABS_RX, STEAM_DECK_GYRO_RES_PER_DPS); input_abs_set_res(sensors, ABS_RY, STEAM_DECK_GYRO_RES_PER_DPS); input_abs_set_res(sensors, ABS_RZ, STEAM_DECK_GYRO_RES_PER_DPS); ret = input_register_device(sensors); if (ret) goto sensors_register_fail; rcu_assign_pointer(steam->sensors, sensors); return 0; sensors_register_fail: input_free_device(sensors); return ret; } static void steam_input_unregister(struct steam_device *steam) { struct input_dev *input; rcu_read_lock(); input = rcu_dereference(steam->input); rcu_read_unlock(); if (!input) return; RCU_INIT_POINTER(steam->input, NULL); synchronize_rcu(); input_unregister_device(input); } static void steam_sensors_unregister(struct steam_device *steam) { struct input_dev *sensors; if (!(steam->quirks & STEAM_QUIRK_DECK)) return; rcu_read_lock(); sensors = rcu_dereference(steam->sensors); rcu_read_unlock(); if (!sensors) return; RCU_INIT_POINTER(steam->sensors, NULL); synchronize_rcu(); input_unregister_device(sensors); } static void steam_battery_unregister(struct steam_device *steam) { struct power_supply *battery; rcu_read_lock(); battery = rcu_dereference(steam->battery); rcu_read_unlock(); if (!battery) return; RCU_INIT_POINTER(steam->battery, NULL); synchronize_rcu(); power_supply_unregister(battery); } static int steam_register(struct steam_device *steam) { int ret; unsigned long client_opened; unsigned long flags; /* * This function can be called several times in a row with the * wireless adaptor, without steam_unregister() between them, because * another client sends a get_connection_status command, for example. * The battery and serial number are set just once per device. */ if (!steam->serial_no[0]) { /* * Unlikely, but getting the serial could fail, and it is not so * important, so make up a serial number and go on.
*/ if (steam_get_serial(steam) < 0) strscpy(steam->serial_no, "XXXXXXXXXX", sizeof(steam->serial_no)); hid_info(steam->hdev, "Steam Controller '%s' connected", steam->serial_no); /* ignore battery errors, we can live without it */ if (steam->quirks & STEAM_QUIRK_WIRELESS) steam_battery_register(steam); mutex_lock(&steam_devices_lock); if (list_empty(&steam->list)) list_add(&steam->list, &steam_devices); mutex_unlock(&steam_devices_lock); } spin_lock_irqsave(&steam->lock, flags); client_opened = steam->client_opened; spin_unlock_irqrestore(&steam->lock, flags); if (!client_opened) { steam_set_lizard_mode(steam, lizard_mode); ret = steam_input_register(steam); if (ret != 0) goto steam_register_input_fail; ret = steam_sensors_register(steam); if (ret != 0) goto steam_register_sensors_fail; } return 0; steam_register_sensors_fail: steam_input_unregister(steam); steam_register_input_fail: return ret; } static void steam_unregister(struct steam_device *steam) { steam_battery_unregister(steam); steam_sensors_unregister(steam); steam_input_unregister(steam); if (steam->serial_no[0]) { hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); mutex_lock(&steam_devices_lock); list_del_init(&steam->list); mutex_unlock(&steam_devices_lock); steam->serial_no[0] = 0; } } static void steam_work_connect_cb(struct work_struct *work) { struct steam_device *steam = container_of(work, struct steam_device, work_connect); unsigned long flags; bool connected; int ret; spin_lock_irqsave(&steam->lock, flags); connected = steam->connected; spin_unlock_irqrestore(&steam->lock, flags); if (connected) { ret = steam_register(steam); if (ret) { hid_err(steam->hdev, "%s:steam_register failed with error %d\n", __func__, ret); } } else { steam_unregister(steam); } } static void steam_mode_switch_cb(struct work_struct *work) { struct steam_device *steam = container_of(to_delayed_work(work), struct steam_device, mode_switch); unsigned long flags; bool client_opened; steam->gamepad_mode = !steam->gamepad_mode; if (!lizard_mode) return; if (steam->gamepad_mode) steam_set_lizard_mode(steam, false); else { spin_lock_irqsave(&steam->lock, flags); client_opened = steam->client_opened; spin_unlock_irqrestore(&steam->lock, flags); if (!client_opened) steam_set_lizard_mode(steam, lizard_mode); } steam_haptic_pulse(steam, STEAM_PAD_RIGHT, 0x190, 0, 1, 0); if (steam->gamepad_mode) { steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x14D, 0x14D, 0x2D, 0); } else { steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x1F4, 0x1F4, 0x1E, 0); } } static bool steam_is_valve_interface(struct hid_device *hdev) { struct hid_report_enum *rep_enum; /* * The wired device creates 3 interfaces: * 0: emulated mouse. * 1: emulated keyboard. * 2: the real game pad. * The wireless device creates 5 interfaces: * 0: emulated keyboard. * 1-4: slots where up to 4 real game pads can be connected. * We know which ones are the real gamepad interfaces because they are * the only ones with a feature report. 
*/ rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; return !list_empty(&rep_enum->report_list); } static int steam_client_ll_parse(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; return hid_parse_report(hdev, steam->hdev->dev_rdesc, steam->hdev->dev_rsize); } static int steam_client_ll_start(struct hid_device *hdev) { return 0; } static void steam_client_ll_stop(struct hid_device *hdev) { } static int steam_client_ll_open(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; unsigned long flags; spin_lock_irqsave(&steam->lock, flags); steam->client_opened++; spin_unlock_irqrestore(&steam->lock, flags); steam_sensors_unregister(steam); steam_input_unregister(steam); return 0; } static void steam_client_ll_close(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; unsigned long flags; bool connected; spin_lock_irqsave(&steam->lock, flags); steam->client_opened--; connected = steam->connected && !steam->client_opened; spin_unlock_irqrestore(&steam->lock, flags); if (connected) { steam_set_lizard_mode(steam, lizard_mode); steam_input_register(steam); steam_sensors_register(steam); } } static int steam_client_ll_raw_request(struct hid_device *hdev, unsigned char reportnum, u8 *buf, size_t count, unsigned char report_type, int reqtype) { struct steam_device *steam = hdev->driver_data; return hid_hw_raw_request(steam->hdev, reportnum, buf, count, report_type, reqtype); } static const struct hid_ll_driver steam_client_ll_driver = { .parse = steam_client_ll_parse, .start = steam_client_ll_start, .stop = steam_client_ll_stop, .open = steam_client_ll_open, .close = steam_client_ll_close, .raw_request = steam_client_ll_raw_request, }; static struct hid_device *steam_create_client_hid(struct hid_device *hdev) { struct hid_device *client_hdev; client_hdev = hid_allocate_device(); if (IS_ERR(client_hdev)) return client_hdev; client_hdev->ll_driver = &steam_client_ll_driver; client_hdev->dev.parent = hdev->dev.parent; client_hdev->bus = hdev->bus; client_hdev->vendor = hdev->vendor; client_hdev->product = hdev->product; client_hdev->version = hdev->version; client_hdev->type = hdev->type; client_hdev->country = hdev->country; strscpy(client_hdev->name, hdev->name, sizeof(client_hdev->name)); strscpy(client_hdev->phys, hdev->phys, sizeof(client_hdev->phys)); /* * Since we use the same device info as the real interface to * trick userspace, we will be calling steam_probe recursively. * We need to recognize the client interface somehow. */ client_hdev->group = HID_GROUP_STEAM; return client_hdev; } static int steam_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct steam_device *steam; int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "%s:parse of hid interface failed\n", __func__); return ret; } /* * The virtual client_dev is only used for hidraw. * Also avoid the recursive probe. */ if (hdev->group == HID_GROUP_STEAM) return hid_hw_start(hdev, HID_CONNECT_HIDRAW); /* * The non-valve interfaces (mouse and keyboard emulation) are * connected without changes. 
*/ if (!steam_is_valve_interface(hdev)) return hid_hw_start(hdev, HID_CONNECT_DEFAULT); steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL); if (!steam) return -ENOMEM; steam->hdev = hdev; hid_set_drvdata(hdev, steam); spin_lock_init(&steam->lock); mutex_init(&steam->report_mutex); steam->quirks = id->driver_data; INIT_WORK(&steam->work_connect, steam_work_connect_cb); INIT_DELAYED_WORK(&steam->mode_switch, steam_mode_switch_cb); INIT_LIST_HEAD(&steam->list); INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb); steam->sensor_timestamp_us = 0; /* * With the real steam controller interface, do not connect hidraw. * Instead, create the client_hid and connect that. */ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW); if (ret) goto err_cancel_work; ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "%s:hid_hw_open\n", __func__); goto err_hw_stop; } if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); /* If using a wireless adaptor ask for connection status */ steam->connected = false; steam_request_conn_status(steam); } else { /* A wired connection is always present */ steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, "%s:steam_register failed with error %d\n", __func__, ret); goto err_hw_close; } } steam->client_hdev = steam_create_client_hid(hdev); if (IS_ERR(steam->client_hdev)) { ret = PTR_ERR(steam->client_hdev); goto err_steam_unregister; } steam->client_hdev->driver_data = steam; ret = hid_add_device(steam->client_hdev); if (ret) goto err_destroy; return 0; err_destroy: hid_destroy_device(steam->client_hdev); err_steam_unregister: if (steam->connected) steam_unregister(steam); err_hw_close: hid_hw_close(hdev); err_hw_stop: hid_hw_stop(hdev); err_cancel_work: cancel_work_sync(&steam->work_connect); cancel_delayed_work_sync(&steam->mode_switch); cancel_work_sync(&steam->rumble_work); return ret; } static void steam_remove(struct hid_device *hdev) { struct steam_device *steam = hid_get_drvdata(hdev); if (!steam || hdev->group == HID_GROUP_STEAM) { hid_hw_stop(hdev); return; } cancel_delayed_work_sync(&steam->mode_switch); cancel_work_sync(&steam->work_connect); cancel_work_sync(&steam->rumble_work); hid_destroy_device(steam->client_hdev); steam->client_hdev = NULL; steam->client_opened = 0; if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver disconnected"); } hid_hw_close(hdev); hid_hw_stop(hdev); steam_unregister(steam); } static void steam_do_connect_event(struct steam_device *steam, bool connected) { unsigned long flags; bool changed; spin_lock_irqsave(&steam->lock, flags); changed = steam->connected != connected; steam->connected = connected; spin_unlock_irqrestore(&steam->lock, flags); if (changed && schedule_work(&steam->work_connect) == 0) dbg_hid("%s: connected=%d event already queued\n", __func__, connected); } /* * Some input data in the protocol has the opposite sign. * Clamp the values to -32767..32767 so that the range is * symmetrical and can be negated safely. */ static inline s16 steam_le16(u8 *data) { s16 x = (s16) le16_to_cpup((__le16 *)data); return x == -32768 ? -32767 : x; } /* * The size for this message payload is 60. 
* The known values are: * (* values are not sent through wireless) * (* accelerator/gyro is disabled by default) * Offset| Type | Mapped to |Meaning * -------+-------+-----------+-------------------------- * 4-7 | u32 | -- | sequence number * 8-10 | 24bit | see below | buttons * 11 | u8 | ABS_HAT2Y | left trigger * 12 | u8 | ABS_HAT2X | right trigger * 13-15 | -- | -- | always 0 * 16-17 | s16 | ABS_X/ABS_HAT0X | X value * 18-19 | s16 | ABS_Y/ABS_HAT0Y | Y value * 20-21 | s16 | ABS_RX | right-pad X value * 22-23 | s16 | ABS_RY | right-pad Y value * 24-25 | s16 | -- | * left trigger * 26-27 | s16 | -- | * right trigger * 28-29 | s16 | -- | * accelerometer X value * 30-31 | s16 | -- | * accelerometer Y value * 32-33 | s16 | -- | * accelerometer Z value * 34-35 | s16 | -- | gyro X value * 36-37 | s16 | -- | gyro Y value * 38-39 | s16 | -- | gyro Z value * 40-41 | s16 | -- | quaternion W value * 42-43 | s16 | -- | quaternion X value * 44-45 | s16 | -- | quaternion Y value * 46-47 | s16 | -- | quaternion Z value * 48-49 | -- | -- | always 0 * 50-51 | s16 | -- | * left trigger (uncalibrated) * 52-53 | s16 | -- | * right trigger (uncalibrated) * 54-55 | s16 | -- | * joystick X value (uncalibrated) * 56-57 | s16 | -- | * joystick Y value (uncalibrated) * 58-59 | s16 | -- | * left-pad X value * 60-61 | s16 | -- | * left-pad Y value * 62-63 | u16 | -- | * battery voltage * * The buttons are: * Bit | Mapped to | Description * ------+------------+-------------------------------- * 8.0 | BTN_TR2 | right trigger fully pressed * 8.1 | BTN_TL2 | left trigger fully pressed * 8.2 | BTN_TR | right shoulder * 8.3 | BTN_TL | left shoulder * 8.4 | BTN_Y | button Y * 8.5 | BTN_B | button B * 8.6 | BTN_X | button X * 8.7 | BTN_A | button A * 9.0 | BTN_DPAD_UP | left-pad up * 9.1 | BTN_DPAD_RIGHT | left-pad right * 9.2 | BTN_DPAD_LEFT | left-pad left * 9.3 | BTN_DPAD_DOWN | left-pad down * 9.4 | BTN_SELECT | menu left * 9.5 | BTN_MODE | steam logo * 9.6 | BTN_START | menu right * 9.7 | BTN_GEAR_DOWN | left back lever * 10.0 | BTN_GEAR_UP | right back lever * 10.1 | -- | left-pad clicked * 10.2 | BTN_THUMBR | right-pad clicked * 10.3 | BTN_THUMB | left-pad touched (but see explanation below) * 10.4 | BTN_THUMB2 | right-pad touched * 10.5 | -- | unknown * 10.6 | BTN_THUMBL | joystick clicked * 10.7 | -- | lpad_and_joy */ static void steam_do_input_event(struct steam_device *steam, struct input_dev *input, u8 *data) { /* 24 bits of buttons */ u8 b8, b9, b10; s16 x, y; bool lpad_touched, lpad_and_joy; b8 = data[8]; b9 = data[9]; b10 = data[10]; input_report_abs(input, ABS_HAT2Y, data[11]); input_report_abs(input, ABS_HAT2X, data[12]); /* * These two bits tell how to interpret the values X and Y. * lpad_and_joy tells that the joystick and the lpad are used at the * same time. * lpad_touched tells whether X/Y are to be read as lpad coord or * joystick values. * (lpad_touched || lpad_and_joy) tells if the lpad is really touched. */ lpad_touched = b10 & BIT(3); lpad_and_joy = b10 & BIT(7); x = steam_le16(data + 16); y = -steam_le16(data + 18); input_report_abs(input, lpad_touched ? ABS_HAT0X : ABS_X, x); input_report_abs(input, lpad_touched ? 
ABS_HAT0Y : ABS_Y, y); /* Check if joystick is centered */ if (lpad_touched && !lpad_and_joy) { input_report_abs(input, ABS_X, 0); input_report_abs(input, ABS_Y, 0); } /* Check if lpad is untouched */ if (!(lpad_touched || lpad_and_joy)) { input_report_abs(input, ABS_HAT0X, 0); input_report_abs(input, ABS_HAT0Y, 0); } input_report_abs(input, ABS_RX, steam_le16(data + 20)); input_report_abs(input, ABS_RY, -steam_le16(data + 22)); input_event(input, EV_KEY, BTN_TR2, !!(b8 & BIT(0))); input_event(input, EV_KEY, BTN_TL2, !!(b8 & BIT(1))); input_event(input, EV_KEY, BTN_TR, !!(b8 & BIT(2))); input_event(input, EV_KEY, BTN_TL, !!(b8 & BIT(3))); input_event(input, EV_KEY, BTN_Y, !!(b8 & BIT(4))); input_event(input, EV_KEY, BTN_B, !!(b8 & BIT(5))); input_event(input, EV_KEY, BTN_X, !!(b8 & BIT(6))); input_event(input, EV_KEY, BTN_A, !!(b8 & BIT(7))); input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4))); input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5))); input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6))); input_event(input, EV_KEY, BTN_GEAR_DOWN, !!(b9 & BIT(7))); input_event(input, EV_KEY, BTN_GEAR_UP, !!(b10 & BIT(0))); input_event(input, EV_KEY, BTN_THUMBR, !!(b10 & BIT(2))); input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6))); input_event(input, EV_KEY, BTN_THUMB, lpad_touched || lpad_and_joy); input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(4))); input_event(input, EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0))); input_event(input, EV_KEY, BTN_DPAD_RIGHT, !!(b9 & BIT(1))); input_event(input, EV_KEY, BTN_DPAD_LEFT, !!(b9 & BIT(2))); input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3))); input_sync(input); } /* * The size for this message payload is 56. * The known values are: * Offset| Type | Mapped to |Meaning * -------+-------+-----------+-------------------------- * 4-7 | u32 | -- | sequence number * 8-15 | u64 | see below | buttons * 16-17 | s16 | ABS_HAT0X | left-pad X value * 18-19 | s16 | ABS_HAT0Y | left-pad Y value * 20-21 | s16 | ABS_HAT1X | right-pad X value * 22-23 | s16 | ABS_HAT1Y | right-pad Y value * 24-25 | s16 | IMU ABS_X | accelerometer X value * 26-27 | s16 | IMU ABS_Z | accelerometer Y value * 28-29 | s16 | IMU ABS_Y | accelerometer Z value * 30-31 | s16 | IMU ABS_RX | gyro X value * 32-33 | s16 | IMU ABS_RZ | gyro Y value * 34-35 | s16 | IMU ABS_RY | gyro Z value * 36-37 | s16 | -- | quaternion W value * 38-39 | s16 | -- | quaternion X value * 40-41 | s16 | -- | quaternion Y value * 42-43 | s16 | -- | quaternion Z value * 44-45 | u16 | ABS_HAT2Y | left trigger (uncalibrated) * 46-47 | u16 | ABS_HAT2X | right trigger (uncalibrated) * 48-49 | s16 | ABS_X | left joystick X * 50-51 | s16 | ABS_Y | left joystick Y * 52-53 | s16 | ABS_RX | right joystick X * 54-55 | s16 | ABS_RY | right joystick Y * 56-57 | u16 | -- | left pad pressure * 58-59 | u16 | -- | right pad pressure * * The buttons are: * Bit | Mapped to | Description * ------+------------+-------------------------------- * 8.0 | BTN_TR2 | right trigger fully pressed * 8.1 | BTN_TL2 | left trigger fully pressed * 8.2 | BTN_TR | right shoulder * 8.3 | BTN_TL | left shoulder * 8.4 | BTN_Y | button Y * 8.5 | BTN_B | button B * 8.6 | BTN_X | button X * 8.7 | BTN_A | button A * 9.0 | BTN_DPAD_UP | left-pad up * 9.1 | BTN_DPAD_RIGHT | left-pad right * 9.2 | BTN_DPAD_LEFT | left-pad left * 9.3 | BTN_DPAD_DOWN | left-pad down * 9.4 | BTN_SELECT | menu left * 9.5 | BTN_MODE | steam logo * 9.6 | BTN_START | menu right * 9.7 | BTN_TRIGGER_HAPPY3 | left bottom grip button * 10.0 | BTN_TRIGGER_HAPPY4 | right bottom 
grip button * 10.1 | BTN_THUMB | left pad pressed * 10.2 | BTN_THUMB2 | right pad pressed * 10.3 | -- | left pad touched * 10.4 | -- | right pad touched * 10.5 | -- | unknown * 10.6 | BTN_THUMBL | left joystick clicked * 10.7 | -- | unknown * 11.0 | -- | unknown * 11.1 | -- | unknown * 11.2 | BTN_THUMBR | right joystick clicked * 11.3 | -- | unknown * 11.4 | -- | unknown * 11.5 | -- | unknown * 11.6 | -- | unknown * 11.7 | -- | unknown * 12.0 | -- | unknown * 12.1 | -- | unknown * 12.2 | -- | unknown * 12.3 | -- | unknown * 12.4 | -- | unknown * 12.5 | -- | unknown * 12.6 | -- | unknown * 12.7 | -- | unknown * 13.0 | -- | unknown * 13.1 | BTN_TRIGGER_HAPPY1 | left top grip button * 13.2 | BTN_TRIGGER_HAPPY2 | right top grip button * 13.3 | -- | unknown * 13.4 | -- | unknown * 13.5 | -- | unknown * 13.6 | -- | left joystick touched * 13.7 | -- | right joystick touched * 14.0 | -- | unknown * 14.1 | -- | unknown * 14.2 | BTN_BASE | quick access button * 14.3 | -- | unknown * 14.4 | -- | unknown * 14.5 | -- | unknown * 14.6 | -- | unknown * 14.7 | -- | unknown * 15.0 | -- | unknown * 15.1 | -- | unknown * 15.2 | -- | unknown * 15.3 | -- | unknown * 15.4 | -- | unknown * 15.5 | -- | unknown * 15.6 | -- | unknown * 15.7 | -- | unknown */ static void steam_do_deck_input_event(struct steam_device *steam, struct input_dev *input, u8 *data) { u8 b8, b9, b10, b11, b13, b14; bool lpad_touched, rpad_touched; b8 = data[8]; b9 = data[9]; b10 = data[10]; b11 = data[11]; b13 = data[13]; b14 = data[14]; if (!(b9 & BIT(6)) && steam->did_mode_switch) { steam->did_mode_switch = false; cancel_delayed_work_sync(&steam->mode_switch); } else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) { steam->did_mode_switch = true; schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100); } if (!steam->gamepad_mode) return; lpad_touched = b10 & BIT(3); rpad_touched = b10 & BIT(4); if (lpad_touched) { input_report_abs(input, ABS_HAT0X, steam_le16(data + 16)); input_report_abs(input, ABS_HAT0Y, steam_le16(data + 18)); } else { input_report_abs(input, ABS_HAT0X, 0); input_report_abs(input, ABS_HAT0Y, 0); } if (rpad_touched) { input_report_abs(input, ABS_HAT1X, steam_le16(data + 20)); input_report_abs(input, ABS_HAT1Y, steam_le16(data + 22)); } else { input_report_abs(input, ABS_HAT1X, 0); input_report_abs(input, ABS_HAT1Y, 0); } input_report_abs(input, ABS_X, steam_le16(data + 48)); input_report_abs(input, ABS_Y, -steam_le16(data + 50)); input_report_abs(input, ABS_RX, steam_le16(data + 52)); input_report_abs(input, ABS_RY, -steam_le16(data + 54)); input_report_abs(input, ABS_HAT2Y, steam_le16(data + 44)); input_report_abs(input, ABS_HAT2X, steam_le16(data + 46)); input_event(input, EV_KEY, BTN_TR2, !!(b8 & BIT(0))); input_event(input, EV_KEY, BTN_TL2, !!(b8 & BIT(1))); input_event(input, EV_KEY, BTN_TR, !!(b8 & BIT(2))); input_event(input, EV_KEY, BTN_TL, !!(b8 & BIT(3))); input_event(input, EV_KEY, BTN_Y, !!(b8 & BIT(4))); input_event(input, EV_KEY, BTN_B, !!(b8 & BIT(5))); input_event(input, EV_KEY, BTN_X, !!(b8 & BIT(6))); input_event(input, EV_KEY, BTN_A, !!(b8 & BIT(7))); input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4))); input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5))); input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6))); input_event(input, EV_KEY, BTN_TRIGGER_HAPPY3, !!(b9 & BIT(7))); input_event(input, EV_KEY, BTN_TRIGGER_HAPPY4, !!(b10 & BIT(0))); input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6))); input_event(input, EV_KEY, BTN_THUMBR, !!(b11 & BIT(2))); input_event(input, 
EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0))); input_event(input, EV_KEY, BTN_DPAD_RIGHT, !!(b9 & BIT(1))); input_event(input, EV_KEY, BTN_DPAD_LEFT, !!(b9 & BIT(2))); input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3))); input_event(input, EV_KEY, BTN_THUMB, !!(b10 & BIT(1))); input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(2))); input_event(input, EV_KEY, BTN_TRIGGER_HAPPY1, !!(b13 & BIT(1))); input_event(input, EV_KEY, BTN_TRIGGER_HAPPY2, !!(b13 & BIT(2))); input_event(input, EV_KEY, BTN_BASE, !!(b14 & BIT(2))); input_sync(input); } static void steam_do_deck_sensors_event(struct steam_device *steam, struct input_dev *sensors, u8 *data) { /* * The deck input report is received every 4 ms on average, * with a jitter of +/- 4 ms even though the USB descriptor claims * that it uses 1 kHz. * Since the HID report does not include a sensor timestamp, * use a fixed increment here. */ steam->sensor_timestamp_us += 4000; if (!steam->gamepad_mode) return; input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us); input_report_abs(sensors, ABS_X, steam_le16(data + 24)); input_report_abs(sensors, ABS_Z, -steam_le16(data + 26)); input_report_abs(sensors, ABS_Y, steam_le16(data + 28)); input_report_abs(sensors, ABS_RX, steam_le16(data + 30)); input_report_abs(sensors, ABS_RZ, -steam_le16(data + 32)); input_report_abs(sensors, ABS_RY, steam_le16(data + 34)); input_sync(sensors); } /* * The size for this message payload is 11. * The known values are: * Offset| Type | Meaning * -------+-------+--------------------------- * 4-7 | u32 | sequence number * 8-11 | -- | always 0 * 12-13 | u16 | voltage (mV) * 14 | u8 | battery percent */ static void steam_do_battery_event(struct steam_device *steam, struct power_supply *battery, u8 *data) { unsigned long flags; s16 volts = steam_le16(data + 12); u8 batt = data[14]; /* Creating the battery may have failed */ rcu_read_lock(); battery = rcu_dereference(steam->battery); if (likely(battery)) { spin_lock_irqsave(&steam->lock, flags); steam->voltage = volts; steam->battery_charge = batt; spin_unlock_irqrestore(&steam->lock, flags); power_supply_changed(battery); } rcu_read_unlock(); } static int steam_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct steam_device *steam = hid_get_drvdata(hdev); struct input_dev *input; struct input_dev *sensors; struct power_supply *battery; if (!steam) return 0; if (steam->client_opened) hid_input_report(steam->client_hdev, HID_FEATURE_REPORT, data, size, 0); /* * All messages are size=64, all values little-endian. * The format is: * Offset| Meaning * -------+-------------------------------------------- * 0-1 | always 0x01, 0x00, maybe protocol version? 
* 2 | type of message * 3 | length of the real payload (not checked) * 4-n | payload data, depends on the type * * There are these known types of message: * 0x01: input data (60 bytes) * 0x03: wireless connect/disconnect (1 byte) * 0x04: battery status (11 bytes) * 0x09: Steam Deck input data (56 bytes) */ if (size != 64 || data[0] != 1 || data[1] != 0) return 0; switch (data[2]) { case ID_CONTROLLER_STATE: if (steam->client_opened) return 0; rcu_read_lock(); input = rcu_dereference(steam->input); if (likely(input)) steam_do_input_event(steam, input, data); rcu_read_unlock(); break; case ID_CONTROLLER_DECK_STATE: if (steam->client_opened) return 0; rcu_read_lock(); input = rcu_dereference(steam->input); if (likely(input)) steam_do_deck_input_event(steam, input, data); sensors = rcu_dereference(steam->sensors); if (likely(sensors)) steam_do_deck_sensors_event(steam, sensors, data); rcu_read_unlock(); break; case ID_CONTROLLER_WIRELESS: /* * The payload of this event is a single byte: * 0x01: disconnected. * 0x02: connected. */ switch (data[4]) { case 0x01: steam_do_connect_event(steam, false); break; case 0x02: steam_do_connect_event(steam, true); break; } break; case ID_CONTROLLER_STATUS: if (steam->quirks & STEAM_QUIRK_WIRELESS) { rcu_read_lock(); battery = rcu_dereference(steam->battery); if (likely(battery)) { steam_do_battery_event(steam, battery, data); } else { dbg_hid( "%s: battery data without connect event\n", __func__); steam_do_connect_event(steam, true); } rcu_read_unlock(); } break; } return 0; } static int steam_param_set_lizard_mode(const char *val, const struct kernel_param *kp) { struct steam_device *steam; int ret; ret = param_set_bool(val, kp); if (ret) return ret; mutex_lock(&steam_devices_lock); list_for_each_entry(steam, &steam_devices, list) { if (!steam->client_opened) steam_set_lizard_mode(steam, lizard_mode); } mutex_unlock(&steam_devices_lock); return 0; } static const struct kernel_param_ops steam_lizard_mode_ops = { .set = steam_param_set_lizard_mode, .get = param_get_bool, }; module_param_cb(lizard_mode, &steam_lizard_mode_ops, &lizard_mode, 0644); MODULE_PARM_DESC(lizard_mode, "Enable mouse and keyboard emulation (lizard mode) when the gamepad is not in use"); static const struct hid_device_id steam_controllers[] = { { /* Wired Steam Controller */ HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER) }, { /* Wireless Steam Controller */ HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS), .driver_data = STEAM_QUIRK_WIRELESS }, { /* Steam Deck */ HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_DECK), .driver_data = STEAM_QUIRK_DECK }, {} }; MODULE_DEVICE_TABLE(hid, steam_controllers); static struct hid_driver steam_controller_driver = { .name = "hid-steam", .id_table = steam_controllers, .probe = steam_probe, .remove = steam_remove, .raw_event = steam_raw_event, }; module_hid_driver(steam_controller_driver);
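To make the report framing handled by steam_raw_event() above concrete, here is a minimal standalone sketch, not part of the driver: it applies the same validity checks (64-byte report, 0x01 0x00 header, type byte at offset 2) and the same little-endian sign clamping as steam_le16(). The names demo_le16() and demo_decode() are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian s16, clamping -32768 to -32767 the way steam_le16()
 * does, so the value can be negated without overflowing 16 bits. */
static int16_t demo_le16(const uint8_t *d)
{
	int16_t x = (int16_t)(d[0] | (d[1] << 8));

	return x == -32768 ? -32767 : x;
}

static void demo_decode(const uint8_t *data, size_t size)
{
	/* Same framing check as steam_raw_event(): 64 bytes, 0x01 0x00 header. */
	if (size != 64 || data[0] != 1 || data[1] != 0)
		return;
	switch (data[2]) {
	case 0x01:	/* controller input: X at offset 16, Y (negated) at 18 */
		printf("X=%d Y=%d\n",
		       demo_le16(data + 16), -demo_le16(data + 18));
		break;
	case 0x03:	/* wireless connect/disconnect, payload byte at offset 4 */
		printf("%sconnected\n", data[4] == 0x02 ? "" : "dis");
		break;
	}
}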
// SPDX-License-Identifier: GPL-2.0 #include <linux/buffer_head.h> #include <linux/slab.h> #include "minix.h" enum {DEPTH = 3, DIRECT = 7}; /* Only double indirect */ typedef u16 block_t; /* 16 bit, host order */ static inline unsigned long block_to_cpu(block_t n) { return n; } static inline block_t cpu_to_block(unsigned long n) { return n; } static inline block_t *i_data(struct inode *inode) { return (block_t *)minix_i(inode)->u.i1_data; } static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) { int n = 0; if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); return 0; } if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) return 0; if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; offsets[n++] = block; } else { block -= 512; offsets[n++] = 8; offsets[n++] = block>>9; offsets[n++] = block & 511; } return n; } #include "itree_common.c" int V1_minix_get_block(struct inode * inode, long block, struct buffer_head *bh_result, int create) { return get_block(inode, block, bh_result, create); } void V1_minix_truncate(struct inode * inode) { truncate(inode); } unsigned V1_minix_blocks(loff_t size, struct super_block *sb) { return nblocks(size, sb); }
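The mapping computed by block_to_path() above is easiest to see with a worked example: the 7 direct slots cover blocks 0-6, the 512-entry indirect block covers blocks 7-518, and everything past that goes through the double-indirect tree. The standalone sketch below, using the hypothetical name demo_block_to_path(), mirrors that arithmetic for logical block 1000 (1000 - 7 - 512 = 481, so the path is [8, 0, 481]).

#include <stdio.h>

static int demo_block_to_path(long block, int offsets[3])
{
	int n = 0;

	if (block < 0)
		return 0;
	if (block < 7) {			/* 7 direct pointers */
		offsets[n++] = block;
	} else if ((block -= 7) < 512) {	/* one 512-entry indirect block */
		offsets[n++] = 7;
		offsets[n++] = block;
	} else {				/* 512x512 double indirect */
		block -= 512;
		offsets[n++] = 8;
		offsets[n++] = block >> 9;
		offsets[n++] = block & 511;
	}
	return n;
}

int main(void)
{
	int o[3] = { 0, 0, 0 };
	int n = demo_block_to_path(1000, o);

	/* block 1000 -> depth 3, path [8, 0, 481] */
	printf("depth=%d path=[%d,%d,%d]\n", n, o[0], o[1], o[2]);
	return 0;
}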
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 
*/ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/fs.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/gfs2_ondisk.h> #include <linux/backing-dev.h> #include <linux/uio.h> #include <trace/events/writeback.h> #include <linux/sched/signal.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "inode.h" #include "log.h" #include "meta_io.h" #include "quota.h" #include "trans.h" #include "rgrp.h" #include "super.h" #include "util.h" #include "glops.h" #include "aops.h" void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, size_t from, size_t len) { struct buffer_head *head = folio_buffers(folio); unsigned int bsize = head->b_size; struct buffer_head *bh; size_t to = from + len; size_t start, end; for (bh = head, start = 0; bh != head || !start; bh = bh->b_this_page, start = end) { end = start + bsize; if (end <= from) continue; if (start >= to) break; set_buffer_uptodate(bh); gfs2_trans_add_data(ip->i_gl, bh); } } /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode * @lblock: The block number to look up * @bh_result: The buffer head to return the result in * @create: Non-zero if we may add block to the file * * Returns: errno */ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, struct buffer_head *bh_result, int create) { int error; error = gfs2_block_map(inode, lblock, bh_result, 0); if (error) return error; if (!buffer_mapped(bh_result)) return -ENODATA; return 0; } /** * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio * @folio: The folio to write * @wbc: The writeback control * * This is the same as calling block_write_full_folio, but it also * writes pages outside of i_size */ static int gfs2_write_jdata_folio(struct folio *folio, struct writeback_control *wbc) { struct inode * const inode = folio->mapping->host; loff_t i_size = i_size_read(inode); /* * The folio straddles i_size. It must be zeroed out on each and every * writepage invocation because it may be mmapped. "A file is mapped * in multiples of the page size. For a file that is not a multiple of * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ if (folio_pos(folio) < i_size && i_size < folio_pos(folio) + folio_size(folio)) folio_zero_segment(folio, offset_in_folio(folio, i_size), folio_size(folio)); return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc, wbc); } /** * __gfs2_jdata_write_folio - The core of jdata writepage * @folio: The folio to write * @wbc: The writeback control * * Implements the core of write back. If a transaction is required then * the checked flag will have been set and the transaction will have * already been started before this is called. 
*/ static int __gfs2_jdata_write_folio(struct folio *folio, struct writeback_control *wbc) { struct inode *inode = folio->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); if (folio_test_checked(folio)) { folio_clear_checked(folio); if (!folio_buffers(folio)) { create_empty_buffers(folio, inode->i_sb->s_blocksize, BIT(BH_Dirty)|BIT(BH_Uptodate)); } gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio)); } return gfs2_write_jdata_folio(folio, wbc); } /** * gfs2_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: Write-back control * * Used for both ordered and writeback modes. */ static int gfs2_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct iomap_writepage_ctx wpc = { }; int ret; /* * Even if we didn't write enough pages here, we might still be holding * dirty pages in the ail. We forcibly flush the ail because we don't * want balance_dirty_pages() to loop indefinitely trying to write out * pages held in the ail that it can't find. */ ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); if (ret == 0 && wbc->nr_to_write > 0) set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); return ret; } /** * gfs2_write_jdata_batch - Write back a folio batch's worth of folios * @mapping: The mapping * @wbc: The writeback control * @fbatch: The batch of folios * @done_index: Page index * * Returns: non-zero if loop should terminate, zero otherwise */ static int gfs2_write_jdata_batch(struct address_space *mapping, struct writeback_control *wbc, struct folio_batch *fbatch, pgoff_t *done_index) { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned nrblocks; int i; int ret; size_t size = 0; int nr_folios = folio_batch_count(fbatch); for (i = 0; i < nr_folios; i++) size += folio_size(fbatch->folios[i]); nrblocks = size >> inode->i_blkbits; ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); if (ret < 0) return ret; for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch->folios[i]; *done_index = folio->index; folio_lock(folio); if (unlikely(folio->mapping != mapping)) { continue_unlock: folio_unlock(folio); continue; } if (!folio_test_dirty(folio)) { /* someone wrote it for us */ goto continue_unlock; } if (folio_test_writeback(folio)) { if (wbc->sync_mode != WB_SYNC_NONE) folio_wait_writeback(folio); else goto continue_unlock; } BUG_ON(folio_test_writeback(folio)); if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; trace_wbc_writepage(wbc, inode_to_bdi(inode)); ret = __gfs2_jdata_write_folio(folio, wbc); if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { folio_unlock(folio); ret = 0; } else { /* * done_index is set past this page, * so media errors will not choke * background writeout for the entire * file. This has consequences for * range_cyclic semantics (ie. it may * not be suitable for data integrity * writeout). */ *done_index = folio_next_index(folio); ret = 1; break; } } /* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. 
*/ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { ret = 1; break; } } gfs2_trans_end(sdp); return ret; } /** * gfs2_write_cache_jdata - Like write_cache_pages but different * @mapping: The mapping to write * @wbc: The writeback control * * The reason that we use our own function here is that we need to * start transactions before we grab page locks. This allows us * to get the ordering right. */ static int gfs2_write_cache_jdata(struct address_space *mapping, struct writeback_control *wbc) { int ret = 0; int done = 0; struct folio_batch fbatch; int nr_folios; pgoff_t writeback_index; pgoff_t index; pgoff_t end; pgoff_t done_index; int cycled; int range_whole = 0; xa_mark_t tag; folio_batch_init(&fbatch); if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; if (index == 0) cycled = 1; else cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && (index <= end)) { nr_folios = filemap_get_folios_tag(mapping, &index, end, tag, &fbatch); if (nr_folios == 0) break; ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, &done_index); if (ret) done = 1; if (ret > 0) ret = 0; folio_batch_release(&fbatch); cond_resched(); } if (!cycled && !done) { /* * range_cyclic: * We hit the last page and there is more work to be done: wrap * back to the start of the file */ cycled = 1; index = 0; end = writeback_index - 1; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; return ret; } /** * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: The writeback control * */ static int gfs2_jdata_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); int ret; ret = gfs2_write_cache_jdata(mapping, wbc); if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) { gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_JDATA_WPAGES); ret = gfs2_write_cache_jdata(mapping, wbc); } return ret; } /** * stuffed_read_folio - Fill in a Linux folio with stuffed file data * @ip: the inode * @folio: the folio * * Returns: errno */ static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio) { struct buffer_head *dibh = NULL; size_t dsize = i_size_read(&ip->i_inode); void *from = NULL; int error = 0; /* * Due to the order of unstuffing files and ->fault(), we can be * asked for a zero folio in the case of a stuffed file being extended, * so we need to supply one here. It doesn't happen often. 
*/ if (unlikely(folio->index)) { dsize = 0; } else { error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out; from = dibh->b_data + sizeof(struct gfs2_dinode); } folio_fill_tail(folio, 0, from, dsize); brelse(dibh); out: folio_end_read(folio, error == 0); return error; } /** * gfs2_read_folio - read a folio from a file * @file: The file to read * @folio: The folio in the file */ static int gfs2_read_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); int error; if (!gfs2_is_jdata(ip) || (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) { error = iomap_read_folio(folio, &gfs2_iomap_ops); } else if (gfs2_is_stuffed(ip)) { error = stuffed_read_folio(ip, folio); } else { error = mpage_read_folio(folio, gfs2_block_map); } if (gfs2_withdrawing_or_withdrawn(sdp)) return -EIO; return error; } /** * gfs2_internal_read - read an internal file * @ip: The gfs2 inode * @buf: The buffer to fill * @pos: The file position * @size: The amount to read * */ ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, size_t size) { struct address_space *mapping = ip->i_inode.i_mapping; unsigned long index = *pos >> PAGE_SHIFT; size_t copied = 0; do { size_t offset, chunk; struct folio *folio; folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL); if (IS_ERR(folio)) { if (PTR_ERR(folio) == -EINTR) continue; return PTR_ERR(folio); } offset = *pos + copied - folio_pos(folio); chunk = min(size - copied, folio_size(folio) - offset); memcpy_from_folio(buf + copied, folio, offset, chunk); index = folio_next_index(folio); folio_put(folio); copied += chunk; } while(copied < size); (*pos) += size; return size; } /** * gfs2_readahead - Read a bunch of pages at once * @rac: Read-ahead control structure * * Some notes: * 1. This is only for readahead, so we can simply ignore anything * which is slightly inconvenient (such as locking conflicts between * the page lock and the glock) and return having done no I/O. It's * obviously not something we'd want to do on too regular a basis. * Any I/O we ignore at this time will be done via readpage later. * 2. We don't handle stuffed files here; we let readpage do the honours. * 3. mpage_readahead() does most of the heavy lifting in the common case. * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places. */ static void gfs2_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); if (gfs2_is_stuffed(ip)) ; else if (gfs2_is_jdata(ip)) mpage_readahead(rac, gfs2_block_map); else iomap_readahead(rac, &gfs2_iomap_ops); } /** * adjust_fs_space - Adjusts the free space available due to gfs2_grow * @inode: the rindex inode */ void adjust_fs_space(struct inode *inode) { struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh; u64 fs_total, new_free; if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0) return; /* Total up the file system space, according to the latest rindex. 
*/ fs_total = gfs2_ri_total(sdp); if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) goto out; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); if (fs_total > (m_sc->sc_total + l_sc->sc_total)) new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); else new_free = 0; spin_unlock(&sdp->sd_statfs_spin); fs_warn(sdp, "File system extended by %llu blocks.\n", (unsigned long long)new_free); gfs2_statfs_change(sdp, new_free, new_free, 0); update_statfs(sdp, m_bh); brelse(m_bh); out: sdp->sd_rindex_uptodate = 0; gfs2_trans_end(sdp); } static bool jdata_dirty_folio(struct address_space *mapping, struct folio *folio) { if (current->journal_info) folio_set_checked(folio); return block_dirty_folio(mapping, folio); } /** * gfs2_bmap - Block map function * @mapping: Address space info * @lblock: The block to map * * Returns: The disk address for the block or 0 on hole or error */ static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_holder i_gh; sector_t dblock = 0; int error; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) return 0; if (!gfs2_is_stuffed(ip)) dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops); gfs2_glock_dq_uninit(&i_gh); return dblock; } static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; lock_buffer(bh); gfs2_log_lock(sdp); clear_buffer_dirty(bh); bd = bh->b_private; if (bd) { if (!list_empty(&bd->bd_list) && !buffer_pinned(bh)) list_del_init(&bd->bd_list); else { spin_lock(&sdp->sd_ail_lock); gfs2_remove_from_journal(bh, REMOVE_JDATA); spin_unlock(&sdp->sd_ail_lock); } } bh->b_bdev = NULL; clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); gfs2_log_unlock(sdp); unlock_buffer(bh); } static void gfs2_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host); size_t stop = offset + length; int partial_page = (offset || length < folio_size(folio)); struct buffer_head *bh, *head; unsigned long pos = 0; BUG_ON(!folio_test_locked(folio)); if (!partial_page) folio_clear_checked(folio); head = folio_buffers(folio); if (!head) goto out; bh = head; do { if (pos + bh->b_size > stop) return; if (offset <= pos) gfs2_discard(sdp, bh); pos += bh->b_size; bh = bh->b_this_page; } while (bh != head); out: if (!partial_page) filemap_release_folio(folio, 0); } /** * gfs2_release_folio - free the metadata associated with a folio * @folio: the folio that's being released * @gfp_mask: passed from Linux VFS, ignored by us * * Calls try_to_free_buffers() to free the buffers and put the folio if the * buffers can be released. * * Returns: true if the folio was put or else false */ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask) { struct address_space *mapping = folio->mapping; struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct buffer_head *bh, *head; struct gfs2_bufdata *bd; head = folio_buffers(folio); if (!head) return false; /* * mm accommodates an old ext3 case where clean folios might * not have had the dirty bit cleared. Thus, it can send actual * dirty folios to ->release_folio() via shrink_active_list(). * * As a workaround, we skip folios that contain dirty buffers * below. Once ->release_folio isn't called on dirty folios * anymore, we can warn on dirty buffers like we used to here * again. 
*/ gfs2_log_lock(sdp); bh = head; do { if (atomic_read(&bh->b_count)) goto cannot_release; bd = bh->b_private; if (bd && bd->bd_tr) goto cannot_release; if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh))) goto cannot_release; bh = bh->b_this_page; } while (bh != head); bh = head; do { bd = bh->b_private; if (bd) { gfs2_assert_warn(sdp, bd->bd_bh == bh); bd->bd_bh = NULL; bh->b_private = NULL; /* * The bd may still be queued as a revoke, in which * case we must not dequeue nor free it. */ if (!bd->bd_blkno && !list_empty(&bd->bd_list)) list_del_init(&bd->bd_list); if (list_empty(&bd->bd_list)) kmem_cache_free(gfs2_bufdata_cachep, bd); } bh = bh->b_this_page; } while (bh != head); gfs2_log_unlock(sdp); return try_to_free_buffers(folio); cannot_release: gfs2_log_unlock(sdp); return false; } static const struct address_space_operations gfs2_aops = { .writepages = gfs2_writepages, .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = iomap_dirty_folio, .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, .bmap = gfs2_bmap, .migrate_folio = filemap_migrate_folio, .is_partially_uptodate = iomap_is_partially_uptodate, .error_remove_folio = generic_error_remove_folio, }; static const struct address_space_operations gfs2_jdata_aops = { .writepages = gfs2_jdata_writepages, .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = jdata_dirty_folio, .bmap = gfs2_bmap, .migrate_folio = buffer_migrate_folio, .invalidate_folio = gfs2_invalidate_folio, .release_folio = gfs2_release_folio, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_folio = generic_error_remove_folio, }; void gfs2_set_aops(struct inode *inode) { if (gfs2_is_jdata(GFS2_I(inode))) inode->i_mapping->a_ops = &gfs2_jdata_aops; else inode->i_mapping->a_ops = &gfs2_aops; }
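As a rough illustration of the transaction sizing used by gfs2_write_jdata_batch() above: the reservation passed to gfs2_trans_begin() is the total byte size of the folio batch converted to filesystem blocks via the inode's block-size shift. The standalone sketch below redoes that arithmetic; demo_nrblocks() is a hypothetical name.

#include <stddef.h>
#include <stdio.h>

/* Sum the folio sizes in a batch and convert bytes to filesystem
 * blocks with the inode's block-size shift (i_blkbits). */
static unsigned demo_nrblocks(const size_t *folio_sizes, int nr,
			      unsigned blkbits)
{
	size_t size = 0;
	int i;

	for (i = 0; i < nr; i++)
		size += folio_sizes[i];
	return size >> blkbits;
}

int main(void)
{
	/* three 4 KiB folios on a 4 KiB-block filesystem -> 3 blocks */
	size_t sizes[] = { 4096, 4096, 4096 };

	printf("nrblocks=%u\n", demo_nrblocks(sizes, 3, 12));
	return 0;
}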
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich */ #include "originator.h" #include "main.h" #include <linux/atomic.h> #include <linux/container_of.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/if_vlan.h> #include <linux/jiffies.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/workqueue.h> #include <uapi/linux/batadv_packet.h> #include "bat_algo.h" #include "distributed-arp-table.h" #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" #include "hash.h" #include "log.h" #include "multicast.h" #include "netlink.h" #include "network-coding.h" #include "routing.h" #include "translation-table.h" /* hash class keys */ static struct lock_class_key batadv_orig_hash_lock_class_key; /** * batadv_orig_hash_find() - Find and return originator from orig_hash * @bat_priv: the bat priv with all the soft interface information * @data: mac address of the originator * * Return: orig_node (with increased refcnt), NULL on errors */ struct batadv_orig_node * batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; int index; if (!hash) return NULL; index = batadv_choose_orig(data, hash->size); head = &hash->table[index]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { if (!batadv_compare_eth(orig_node, data)) continue; if (!kref_get_unless_zero(&orig_node->refcount)) continue; orig_node_tmp = orig_node; break; } rcu_read_unlock(); return orig_node_tmp; } static void batadv_purge_orig(struct work_struct *work); /** * batadv_compare_orig() - comparing function used in the originator hash table * @node: node in the local table * @data2: second object to compare the node to * * Return: true if they are the same originator */ bool batadv_compare_orig(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_orig_node, hash_entry); return batadv_compare_eth(data1, data2); } /** * batadv_orig_node_vlan_get() - get an orig_node_vlan object * @orig_node: the originator serving the VLAN * @vid: the VLAN identifier * * Return: the vlan object identified by vid and belonging to orig_node or NULL * if it does not exist. 
*/ struct batadv_orig_node_vlan * batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node, unsigned short vid) { struct batadv_orig_node_vlan *vlan = NULL, *tmp; rcu_read_lock(); hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) { if (tmp->vid != vid) continue; if (!kref_get_unless_zero(&tmp->refcount)) continue; vlan = tmp; break; } rcu_read_unlock(); return vlan; } /** * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding * @vid: the VLAN identifier * * Return: true when either no vlan is set or if VLAN is in correct range, * false otherwise */ static bool batadv_vlan_id_valid(unsigned short vid) { unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK); if (vid == 0) return true; if (!(vid & BATADV_VLAN_HAS_TAG)) return false; if (non_vlan) return false; return true; } /** * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan * object * @orig_node: the originator serving the VLAN * @vid: the VLAN identifier * * Return: NULL in case of failure or the vlan object identified by vid and * belonging to orig_node otherwise. The object is created and added to the list * if it does not exist. * * The object is returned with refcounter increased by 1. */ struct batadv_orig_node_vlan * batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, unsigned short vid) { struct batadv_orig_node_vlan *vlan; if (!batadv_vlan_id_valid(vid)) return NULL; spin_lock_bh(&orig_node->vlan_list_lock); /* first look if an object for this vid already exists */ vlan = batadv_orig_node_vlan_get(orig_node, vid); if (vlan) goto out; vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); if (!vlan) goto out; kref_init(&vlan->refcount); vlan->vid = vid; kref_get(&vlan->refcount); hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list); out: spin_unlock_bh(&orig_node->vlan_list_lock); return vlan; } /** * batadv_orig_node_vlan_release() - release originator-vlan object from lists * and queue for free after rcu grace period * @ref: kref pointer of the originator-vlan object */ void batadv_orig_node_vlan_release(struct kref *ref) { struct batadv_orig_node_vlan *orig_vlan; orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount); kfree_rcu(orig_vlan, rcu); } /** * batadv_originator_init() - Initialize all originator structures * @bat_priv: the bat priv with all the soft interface information * * Return: 0 on success or negative error number in case of failure */ int batadv_originator_init(struct batadv_priv *bat_priv) { if (bat_priv->orig_hash) return 0; bat_priv->orig_hash = batadv_hash_new(1024); if (!bat_priv->orig_hash) goto err; batadv_hash_set_lock_class(bat_priv->orig_hash, &batadv_orig_hash_lock_class_key); INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig); queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work, msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD)); return 0; err: return -ENOMEM; } /** * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for * free after rcu grace period * @ref: kref pointer of the neigh_ifinfo */ void batadv_neigh_ifinfo_release(struct kref *ref) { struct batadv_neigh_ifinfo *neigh_ifinfo; neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount); if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT) batadv_hardif_put(neigh_ifinfo->if_outgoing); kfree_rcu(neigh_ifinfo, rcu); } /** * batadv_hardif_neigh_release() - release hardif neigh node from lists and * queue for free after rcu grace period * @ref: kref pointer of the neigh_node */ void 
batadv_hardif_neigh_release(struct kref *ref) { struct batadv_hardif_neigh_node *hardif_neigh; hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node, refcount); spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock); hlist_del_init_rcu(&hardif_neigh->list); spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock); batadv_hardif_put(hardif_neigh->if_incoming); kfree_rcu(hardif_neigh, rcu); } /** * batadv_neigh_node_release() - release neigh_node from lists and queue for * free after rcu grace period * @ref: kref pointer of the neigh_node */ void batadv_neigh_node_release(struct kref *ref) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; neigh_node = container_of(ref, struct batadv_neigh_node, refcount); hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, &neigh_node->ifinfo_list, list) { batadv_neigh_ifinfo_put(neigh_ifinfo); } batadv_hardif_neigh_put(neigh_node->hardif_neigh); batadv_hardif_put(neigh_node->if_incoming); kfree_rcu(neigh_node, rcu); } /** * batadv_orig_router_get() - router to the originator depending on iface * @orig_node: the orig node for the router * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to * * Return: the neighbor which should be the router for this orig_node/iface. * * The object is returned with refcounter increased by 1. */ struct batadv_neigh_node * batadv_orig_router_get(struct batadv_orig_node *orig_node, const struct batadv_hard_iface *if_outgoing) { struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_node *router = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) { if (orig_ifinfo->if_outgoing != if_outgoing) continue; router = rcu_dereference(orig_ifinfo->router); break; } if (router && !kref_get_unless_zero(&router->refcount)) router = NULL; rcu_read_unlock(); return router; } /** * batadv_orig_to_router() - get next hop neighbor to an orig address * @bat_priv: the bat priv with all the soft interface information * @orig_addr: the originator MAC address to search the best next hop router for * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to * * Return: A neighbor node which is the best router towards the given originator * address. */ struct batadv_neigh_node * batadv_orig_to_router(struct batadv_priv *bat_priv, u8 *orig_addr, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_node *neigh_node; struct batadv_orig_node *orig_node; orig_node = batadv_orig_hash_find(bat_priv, orig_addr); if (!orig_node) return NULL; neigh_node = batadv_find_router(bat_priv, orig_node, if_outgoing); batadv_orig_node_put(orig_node); return neigh_node; } /** * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node * @orig_node: the orig node to be queried * @if_outgoing: the interface for which the ifinfo should be acquired * * Return: the requested orig_ifinfo or NULL if not found. * * The object is returned with refcounter increased by 1. 
*/ struct batadv_orig_ifinfo * batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing) { struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list, list) { if (tmp->if_outgoing != if_outgoing) continue; if (!kref_get_unless_zero(&tmp->refcount)) continue; orig_ifinfo = tmp; break; } rcu_read_unlock(); return orig_ifinfo; } /** * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object * @orig_node: the orig node to be queried * @if_outgoing: the interface for which the ifinfo should be acquired * * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing * interface otherwise. The object is created and added to the list * if it does not exist. * * The object is returned with refcounter increased by 1. */ struct batadv_orig_ifinfo * batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing) { struct batadv_orig_ifinfo *orig_ifinfo; unsigned long reset_time; spin_lock_bh(&orig_node->neigh_list_lock); orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing); if (orig_ifinfo) goto out; orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC); if (!orig_ifinfo) goto out; if (if_outgoing != BATADV_IF_DEFAULT) kref_get(&if_outgoing->refcount); reset_time = jiffies - 1; reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); orig_ifinfo->batman_seqno_reset = reset_time; orig_ifinfo->if_outgoing = if_outgoing; INIT_HLIST_NODE(&orig_ifinfo->list); kref_init(&orig_ifinfo->refcount); kref_get(&orig_ifinfo->refcount); hlist_add_head_rcu(&orig_ifinfo->list, &orig_node->ifinfo_list); out: spin_unlock_bh(&orig_node->neigh_list_lock); return orig_ifinfo; } /** * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node * @neigh: the neigh node to be queried * @if_outgoing: the interface for which the ifinfo should be acquired * * The object is returned with refcounter increased by 1. * * Return: the requested neigh_ifinfo or NULL if not found */ struct batadv_neigh_ifinfo * batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_ifinfo *neigh_ifinfo = NULL, *tmp_neigh_ifinfo; rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list, list) { if (tmp_neigh_ifinfo->if_outgoing != if_outgoing) continue; if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount)) continue; neigh_ifinfo = tmp_neigh_ifinfo; break; } rcu_read_unlock(); return neigh_ifinfo; } /** * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object * @neigh: the neigh node to be queried * @if_outgoing: the interface for which the ifinfo should be acquired * * Return: NULL in case of failure or the neigh_ifinfo object for the * if_outgoing interface otherwise. The object is created and added to the list * if it does not exist. * * The object is returned with refcounter increased by 1.
*/ struct batadv_neigh_ifinfo * batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_ifinfo *neigh_ifinfo; spin_lock_bh(&neigh->ifinfo_lock); neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing); if (neigh_ifinfo) goto out; neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC); if (!neigh_ifinfo) goto out; if (if_outgoing) kref_get(&if_outgoing->refcount); INIT_HLIST_NODE(&neigh_ifinfo->list); kref_init(&neigh_ifinfo->refcount); neigh_ifinfo->if_outgoing = if_outgoing; kref_get(&neigh_ifinfo->refcount); hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list); out: spin_unlock_bh(&neigh->ifinfo_lock); return neigh_ifinfo; } /** * batadv_neigh_node_get() - retrieve a neighbour from the list * @orig_node: originator which the neighbour belongs to * @hard_iface: the interface where this neighbour is connected to * @addr: the address of the neighbour * * Looks for and possibly returns a neighbour belonging to this originator list * which is connected through the provided hard interface. * * Return: neighbor when found. Otherwise NULL */ static struct batadv_neigh_node * batadv_neigh_node_get(const struct batadv_orig_node *orig_node, const struct batadv_hard_iface *hard_iface, const u8 *addr) { struct batadv_neigh_node *tmp_neigh_node, *res = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { if (!batadv_compare_eth(tmp_neigh_node->addr, addr)) continue; if (tmp_neigh_node->if_incoming != hard_iface) continue; if (!kref_get_unless_zero(&tmp_neigh_node->refcount)) continue; res = tmp_neigh_node; break; } rcu_read_unlock(); return res; } /** * batadv_hardif_neigh_create() - create a hardif neighbour node * @hard_iface: the interface this neighbour is connected to * @neigh_addr: the interface address of the neighbour to retrieve * @orig_node: originator object representing the neighbour * * Return: the hardif neighbour node if found or created or NULL otherwise. */ static struct batadv_hardif_neigh_node * batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface, const u8 *neigh_addr, struct batadv_orig_node *orig_node) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct batadv_hardif_neigh_node *hardif_neigh; spin_lock_bh(&hard_iface->neigh_list_lock); /* check if neighbor hasn't been added in the meantime */ hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr); if (hardif_neigh) goto out; hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC); if (!hardif_neigh) goto out; kref_get(&hard_iface->refcount); INIT_HLIST_NODE(&hardif_neigh->list); ether_addr_copy(hardif_neigh->addr, neigh_addr); ether_addr_copy(hardif_neigh->orig, orig_node->orig); hardif_neigh->if_incoming = hard_iface; hardif_neigh->last_seen = jiffies; kref_init(&hardif_neigh->refcount); if (bat_priv->algo_ops->neigh.hardif_init) bat_priv->algo_ops->neigh.hardif_init(hardif_neigh); hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list); out: spin_unlock_bh(&hard_iface->neigh_list_lock); return hardif_neigh; } /** * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour * node * @hard_iface: the interface this neighbour is connected to * @neigh_addr: the interface address of the neighbour to retrieve * @orig_node: originator object representing the neighbour * * Return: the hardif neighbour node if found or created or NULL otherwise. 
*/ static struct batadv_hardif_neigh_node * batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface, const u8 *neigh_addr, struct batadv_orig_node *orig_node) { struct batadv_hardif_neigh_node *hardif_neigh; /* first check without locking to avoid the overhead */ hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr); if (hardif_neigh) return hardif_neigh; return batadv_hardif_neigh_create(hard_iface, neigh_addr, orig_node); } /** * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list * @hard_iface: the interface where this neighbour is connected to * @neigh_addr: the address of the neighbour * * Looks for and possibly returns a neighbour belonging to this hard interface. * * Return: neighbor when found. Otherwise NULL */ struct batadv_hardif_neigh_node * batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface, const u8 *neigh_addr) { struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(tmp_hardif_neigh, &hard_iface->neigh_list, list) { if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr)) continue; if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount)) continue; hardif_neigh = tmp_hardif_neigh; break; } rcu_read_unlock(); return hardif_neigh; } /** * batadv_neigh_node_create() - create a neigh node object * @orig_node: originator object representing the neighbour * @hard_iface: the interface where the neighbour is connected to * @neigh_addr: the mac address of the neighbour interface * * Allocates a new neigh_node object and initialises all the generic fields. * * Return: the neighbour node if found or created or NULL otherwise. */ static struct batadv_neigh_node * batadv_neigh_node_create(struct batadv_orig_node *orig_node, struct batadv_hard_iface *hard_iface, const u8 *neigh_addr) { struct batadv_neigh_node *neigh_node; struct batadv_hardif_neigh_node *hardif_neigh = NULL; spin_lock_bh(&orig_node->neigh_list_lock); neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr); if (neigh_node) goto out; hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface, neigh_addr, orig_node); if (!hardif_neigh) goto out; neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); if (!neigh_node) goto out; INIT_HLIST_NODE(&neigh_node->list); INIT_HLIST_HEAD(&neigh_node->ifinfo_list); spin_lock_init(&neigh_node->ifinfo_lock); kref_get(&hard_iface->refcount); ether_addr_copy(neigh_node->addr, neigh_addr); neigh_node->if_incoming = hard_iface; neigh_node->orig_node = orig_node; neigh_node->last_seen = jiffies; /* increment unique neighbor refcount */ kref_get(&hardif_neigh->refcount); neigh_node->hardif_neigh = hardif_neigh; /* extra reference for return */ kref_init(&neigh_node->refcount); kref_get(&neigh_node->refcount); hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv, "Creating new neighbor %pM for orig_node %pM on interface %s\n", neigh_addr, orig_node->orig, hard_iface->net_dev->name); out: spin_unlock_bh(&orig_node->neigh_list_lock); batadv_hardif_neigh_put(hardif_neigh); return neigh_node; } /** * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object * @orig_node: originator object representing the neighbour * @hard_iface: the interface where the neighbour is connected to * @neigh_addr: the mac address of the neighbour interface * * Return: the neighbour node if found or created or NULL otherwise. 
*/ struct batadv_neigh_node * batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node, struct batadv_hard_iface *hard_iface, const u8 *neigh_addr) { struct batadv_neigh_node *neigh_node; /* first check without locking to avoid the overhead */ neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr); if (neigh_node) return neigh_node; return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr); } /** * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a * specific outgoing interface * @msg: message to dump into * @cb: parameters for the dump * * Return: 0 or error value */ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if, *hard_iface; struct net_device *soft_iface; struct batadv_priv *bat_priv; int ret; soft_iface = batadv_netlink_get_softif(cb); if (IS_ERR(soft_iface)) return PTR_ERR(soft_iface); bat_priv = netdev_priv(soft_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; goto out_put_soft_iface; } hard_iface = batadv_netlink_get_hardif(bat_priv, cb); if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) { ret = PTR_ERR(hard_iface); goto out_put_primary_if; } else if (IS_ERR(hard_iface)) { /* => PTR_ERR(hard_iface) == -ENONET * => no hard-iface given, ok */ hard_iface = BATADV_IF_DEFAULT; } if (!bat_priv->algo_ops->neigh.dump) { ret = -EOPNOTSUPP; goto out_put_hard_iface; } bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hard_iface); ret = msg->len; out_put_hard_iface: batadv_hardif_put(hard_iface); out_put_primary_if: batadv_hardif_put(primary_if); out_put_soft_iface: dev_put(soft_iface); return ret; } /** * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for * free after rcu grace period * @ref: kref pointer of the orig_ifinfo */ void batadv_orig_ifinfo_release(struct kref *ref) { struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_node *router; orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount); if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT) batadv_hardif_put(orig_ifinfo->if_outgoing); /* this is the last reference to this object */ router = rcu_dereference_protected(orig_ifinfo->router, true); batadv_neigh_node_put(router); kfree_rcu(orig_ifinfo, rcu); } /** * batadv_orig_node_free_rcu() - free the orig_node * @rcu: rcu pointer of the orig_node */ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) { struct batadv_orig_node *orig_node; orig_node = container_of(rcu, struct batadv_orig_node, rcu); batadv_mcast_purge_orig(orig_node); batadv_frag_purge_orig(orig_node, NULL); kfree(orig_node->tt_buff); kfree(orig_node); } /** * batadv_orig_node_release() - release orig_node from lists and queue for * free after rcu grace period * @ref: kref pointer of the orig_node */ void batadv_orig_node_release(struct kref *ref) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_orig_node_vlan *vlan; struct batadv_orig_ifinfo *last_candidate; orig_node = container_of(ref, struct batadv_orig_node, refcount); spin_lock_bh(&orig_node->neigh_list_lock); /* for all neighbors towards this originator ... 
*/ hlist_for_each_entry_safe(neigh_node, node_tmp, &orig_node->neigh_list, list) { hlist_del_rcu(&neigh_node->list); batadv_neigh_node_put(neigh_node); } hlist_for_each_entry_safe(orig_ifinfo, node_tmp, &orig_node->ifinfo_list, list) { hlist_del_rcu(&orig_ifinfo->list); batadv_orig_ifinfo_put(orig_ifinfo); } last_candidate = orig_node->last_bonding_candidate; orig_node->last_bonding_candidate = NULL; spin_unlock_bh(&orig_node->neigh_list_lock); batadv_orig_ifinfo_put(last_candidate); spin_lock_bh(&orig_node->vlan_list_lock); hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) { hlist_del_rcu(&vlan->list); batadv_orig_node_vlan_put(vlan); } spin_unlock_bh(&orig_node->vlan_list_lock); /* Free nc_nodes */ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL); call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu); } /** * batadv_originator_free() - Free all originator structures * @bat_priv: the bat priv with all the soft interface information */ void batadv_originator_free(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_node *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct batadv_orig_node *orig_node; u32 i; if (!hash) return; cancel_delayed_work_sync(&bat_priv->orig_work); bat_priv->orig_hash = NULL; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node_tmp, head, hash_entry) { hlist_del_rcu(&orig_node->hash_entry); batadv_orig_node_put(orig_node); } spin_unlock_bh(list_lock); } batadv_hash_destroy(hash); } /** * batadv_orig_node_new() - creates a new orig_node * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the originator * * Creates a new originator object and initialises all the generic fields. * The new object is not added to the originator list. * * Return: the newly created object or NULL on failure. 
*/ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, const u8 *addr) { struct batadv_orig_node *orig_node; struct batadv_orig_node_vlan *vlan; unsigned long reset_time; int i; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Creating new originator: %pM\n", addr); orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC); if (!orig_node) return NULL; INIT_HLIST_HEAD(&orig_node->neigh_list); INIT_HLIST_HEAD(&orig_node->vlan_list); INIT_HLIST_HEAD(&orig_node->ifinfo_list); spin_lock_init(&orig_node->bcast_seqno_lock); spin_lock_init(&orig_node->neigh_list_lock); spin_lock_init(&orig_node->tt_buff_lock); spin_lock_init(&orig_node->tt_lock); spin_lock_init(&orig_node->vlan_list_lock); batadv_nc_init_orig(orig_node); /* extra reference for return */ kref_init(&orig_node->refcount); orig_node->bat_priv = bat_priv; ether_addr_copy(orig_node->orig, addr); batadv_dat_init_orig_node_addr(orig_node); atomic_set(&orig_node->last_ttvn, 0); orig_node->tt_buff = NULL; orig_node->tt_buff_len = 0; orig_node->last_seen = jiffies; reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); orig_node->bcast_seqno_reset = reset_time; #ifdef CONFIG_BATMAN_ADV_MCAST orig_node->mcast_flags = BATADV_MCAST_WANT_NO_RTR4; orig_node->mcast_flags |= BATADV_MCAST_WANT_NO_RTR6; orig_node->mcast_flags |= BATADV_MCAST_HAVE_MC_PTYPE_CAPA; INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node); INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node); INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node); spin_lock_init(&orig_node->mcast_handler_lock); #endif /* create a vlan object for the "untagged" LAN */ vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS); if (!vlan) goto free_orig_node; /* batadv_orig_node_vlan_new() increases the refcounter. 
* Immediately release vlan since it is not needed anymore in this * context */ batadv_orig_node_vlan_put(vlan); for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) { INIT_HLIST_HEAD(&orig_node->fragments[i].fragment_list); spin_lock_init(&orig_node->fragments[i].lock); orig_node->fragments[i].size = 0; } return orig_node; free_orig_node: kfree(orig_node); return NULL; } /** * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor * @bat_priv: the bat priv with all the soft interface information * @neigh: neigh node which is to be checked */ static void batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv, struct batadv_neigh_node *neigh) { struct batadv_neigh_ifinfo *neigh_ifinfo; struct batadv_hard_iface *if_outgoing; struct hlist_node *node_tmp; spin_lock_bh(&neigh->ifinfo_lock); /* for all ifinfo objects for this neighbor */ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, &neigh->ifinfo_list, list) { if_outgoing = neigh_ifinfo->if_outgoing; /* always keep the default interface */ if (if_outgoing == BATADV_IF_DEFAULT) continue; /* don't purge if the interface is not (going) down */ if (if_outgoing->if_status != BATADV_IF_INACTIVE && if_outgoing->if_status != BATADV_IF_NOT_IN_USE && if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED) continue; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "neighbor/ifinfo purge: neighbor %pM, iface: %s\n", neigh->addr, if_outgoing->net_dev->name); hlist_del_rcu(&neigh_ifinfo->list); batadv_neigh_ifinfo_put(neigh_ifinfo); } spin_unlock_bh(&neigh->ifinfo_lock); } /** * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node which is to be checked * * Return: true if any ifinfo entry was purged, false otherwise.
*/ static bool batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_hard_iface *if_outgoing; struct hlist_node *node_tmp; bool ifinfo_purged = false; spin_lock_bh(&orig_node->neigh_list_lock); /* for all ifinfo objects for this originator */ hlist_for_each_entry_safe(orig_ifinfo, node_tmp, &orig_node->ifinfo_list, list) { if_outgoing = orig_ifinfo->if_outgoing; /* always keep the default interface */ if (if_outgoing == BATADV_IF_DEFAULT) continue; /* don't purge if the interface is not (going) down */ if (if_outgoing->if_status != BATADV_IF_INACTIVE && if_outgoing->if_status != BATADV_IF_NOT_IN_USE && if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED) continue; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "router/ifinfo purge: originator %pM, iface: %s\n", orig_node->orig, if_outgoing->net_dev->name); ifinfo_purged = true; hlist_del_rcu(&orig_ifinfo->list); batadv_orig_ifinfo_put(orig_ifinfo); if (orig_node->last_bonding_candidate == orig_ifinfo) { orig_node->last_bonding_candidate = NULL; batadv_orig_ifinfo_put(orig_ifinfo); } } spin_unlock_bh(&orig_node->neigh_list_lock); return ifinfo_purged; } /** * batadv_purge_orig_neighbors() - purges neighbors from originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node which is to be checked * * Return: true if any neighbor was purged, false otherwise */ static bool batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; bool neigh_purged = false; unsigned long last_seen; struct batadv_hard_iface *if_incoming; spin_lock_bh(&orig_node->neigh_list_lock); /* for all neighbors towards this originator ... */ hlist_for_each_entry_safe(neigh_node, node_tmp, &orig_node->neigh_list, list) { last_seen = neigh_node->last_seen; if_incoming = neigh_node->if_incoming; if (batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT) || if_incoming->if_status == BATADV_IF_INACTIVE || if_incoming->if_status == BATADV_IF_NOT_IN_USE || if_incoming->if_status == BATADV_IF_TO_BE_REMOVED) { if (if_incoming->if_status == BATADV_IF_INACTIVE || if_incoming->if_status == BATADV_IF_NOT_IN_USE || if_incoming->if_status == BATADV_IF_TO_BE_REMOVED) batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n", orig_node->orig, neigh_node->addr, if_incoming->net_dev->name); else batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", orig_node->orig, neigh_node->addr, jiffies_to_msecs(last_seen)); neigh_purged = true; hlist_del_rcu(&neigh_node->list); batadv_neigh_node_put(neigh_node); } else { /* only necessary if not the whole neighbor is to be * deleted, but some interface has been removed. */ batadv_purge_neigh_ifinfo(bat_priv, neigh_node); } } spin_unlock_bh(&orig_node->neigh_list_lock); return neigh_purged; } /** * batadv_find_best_neighbor() - finds the best neighbor after purging * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node which is to be checked * @if_outgoing: the interface for which the metric should be compared * * Return: the current best neighbor, with refcount increased. 
*/ static struct batadv_neigh_node * batadv_find_best_neighbor(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_node *best = NULL, *neigh; struct batadv_algo_ops *bao = bat_priv->algo_ops; rcu_read_lock(); hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) { if (best && (bao->neigh.cmp(neigh, if_outgoing, best, if_outgoing) <= 0)) continue; if (!kref_get_unless_zero(&neigh->refcount)) continue; batadv_neigh_node_put(best); best = neigh; } rcu_read_unlock(); return best; } /** * batadv_purge_orig_node() - purges obsolete information from an orig_node * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node which is to be checked * * This function checks if the orig_node or substructures of it have become * obsolete, and purges this information if that's the case. * * Return: true if the orig_node is to be removed, false otherwise. */ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_neigh_node *best_neigh_node; struct batadv_hard_iface *hard_iface; bool changed_ifinfo, changed_neigh; if (batadv_has_timed_out(orig_node->last_seen, 2 * BATADV_PURGE_TIMEOUT)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Originator timeout: originator %pM, last_seen %u\n", orig_node->orig, jiffies_to_msecs(orig_node->last_seen)); return true; } changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node); changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node); if (!changed_ifinfo && !changed_neigh) return false; /* first for NULL ... */ best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node, BATADV_IF_DEFAULT); batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT, best_neigh_node); batadv_neigh_node_put(best_neigh_node); /* ... then for all other interfaces. */ rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; if (hard_iface->soft_iface != bat_priv->soft_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) continue; best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node, hard_iface); batadv_update_route(bat_priv, orig_node, hard_iface, best_neigh_node); batadv_neigh_node_put(best_neigh_node); batadv_hardif_put(hard_iface); } rcu_read_unlock(); return false; } /** * batadv_purge_orig_ref() - Purge all outdated originators * @bat_priv: the bat priv with all the soft interface information */ void batadv_purge_orig_ref(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_node *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct batadv_orig_node *orig_node; u32 i; if (!hash) return; /* for all origins... 
*/ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; if (hlist_empty(head)) continue; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node_tmp, head, hash_entry) { if (batadv_purge_orig_node(bat_priv, orig_node)) { batadv_gw_node_delete(bat_priv, orig_node); hlist_del_rcu(&orig_node->hash_entry); batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1, "originator timed out"); batadv_orig_node_put(orig_node); continue; } batadv_frag_purge_orig(orig_node, batadv_frag_check_entry); } spin_unlock_bh(list_lock); } batadv_gw_election(bat_priv); } static void batadv_purge_orig(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_priv *bat_priv; delayed_work = to_delayed_work(work); bat_priv = container_of(delayed_work, struct batadv_priv, orig_work); batadv_purge_orig_ref(bat_priv); queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work, msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD)); } /** * batadv_orig_dump() - Dump to netlink the originator infos for a specific * outgoing interface * @msg: message to dump into * @cb: parameters for the dump * * Return: 0 or error value */ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if, *hard_iface; struct net_device *soft_iface; struct batadv_priv *bat_priv; int ret; soft_iface = batadv_netlink_get_softif(cb); if (IS_ERR(soft_iface)) return PTR_ERR(soft_iface); bat_priv = netdev_priv(soft_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; goto out_put_soft_iface; } hard_iface = batadv_netlink_get_hardif(bat_priv, cb); if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) { ret = PTR_ERR(hard_iface); goto out_put_primary_if; } else if (IS_ERR(hard_iface)) { /* => PTR_ERR(hard_iface) == -ENONET * => no hard-iface given, ok */ hard_iface = BATADV_IF_DEFAULT; } if (!bat_priv->algo_ops->orig.dump) { ret = -EOPNOTSUPP; goto out_put_hard_iface; } bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hard_iface); ret = msg->len; out_put_hard_iface: batadv_hardif_put(hard_iface); out_put_primary_if: batadv_hardif_put(primary_if); out_put_soft_iface: dev_put(soft_iface); return ret; }
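/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * Every successful lookup above (batadv_orig_hash_find(),
 * batadv_orig_node_vlan_get(), batadv_orig_ifinfo_get(), and friends)
 * returns its object with the refcount already incremented, so each hit
 * must be paired with the matching *_put(). The example_* function below
 * is a hypothetical caller illustrating that discipline.
 */
#if 0 /* illustrative only; assumes a valid bat_priv and MAC address */
static void example_orig_lookup(struct batadv_priv *bat_priv, const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (!orig_node)
		return;	/* unknown originator, or refcount already zero */

	/* ... use orig_node; the held reference keeps it alive ... */

	batadv_orig_node_put(orig_node);	/* drop the lookup reference */
}
#endif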
#ifndef __NET_NSH_H #define __NET_NSH_H 1 #include <linux/skbuff.h> /* * Network Service Header: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |Ver|O|U| TTL | Length |U|U|U|U|MD Type| Next Protocol | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Service Path Identifier (SPI) | Service Index | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * ~ Mandatory/Optional Context Headers ~ * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Version: The version field is used to ensure backward compatibility * going forward with future NSH specification updates. It MUST be set * to 0x0 by the sender, in this first revision of NSH. Given the * widespread implementation of existing hardware that uses the first * nibble after an MPLS label stack for ECMP decision processing, this * document reserves version 01b and this value MUST NOT be used in * future versions of the protocol. Please see [RFC7325] for further * discussion of MPLS-related forwarding requirements. * * O bit: Setting this bit indicates an Operations, Administration, and * Maintenance (OAM) packet. The actual format and processing of SFC * OAM packets is outside the scope of this specification (see for * example [I-D.ietf-sfc-oam-framework] for one approach). * * The O bit MUST be set for OAM packets and MUST NOT be set for non-OAM * packets. The O bit MUST NOT be modified along the SFP. * * SF/SFF/SFC Proxy/Classifier implementations that do not support SFC * OAM procedures SHOULD discard packets with O bit set, but MAY support * a configurable parameter to enable forwarding received SFC OAM * packets unmodified to the next element in the chain. Forwarding OAM * packets unmodified by SFC elements that do not support SFC OAM * procedures may be acceptable for a subset of OAM functions, but can * result in unexpected outcomes for others, thus it is recommended to * analyze the impact of forwarding an OAM packet for all OAM functions * prior to enabling this behavior. The configurable parameter MUST be * disabled by default. * * TTL: Indicates the maximum SFF hops for an SFP. This field is used * for service plane loop detection.
The initial TTL value SHOULD be * configurable via the control plane; the configured initial value can * be specific to one or more SFPs. If no initial value is explicitly * provided, the default initial TTL value of 63 MUST be used. Each SFF * involved in forwarding an NSH packet MUST decrement the TTL value by * 1 prior to NSH forwarding lookup. Decrementing by 1 from an incoming * value of 0 shall result in a TTL value of 63. The packet MUST NOT be * forwarded if TTL is, after decrement, 0. * * All other flag fields, marked U, are unassigned and available for * future use, see Section 11.2.1. Unassigned bits MUST be set to zero * upon origination, and MUST be ignored and preserved unmodified by * other NSH supporting elements. Elements which do not understand the * meaning of any of these bits MUST NOT modify their actions based on * those unknown bits. * * Length: The total length, in 4-byte words, of NSH including the Base * Header, the Service Path Header, the Fixed Length Context Header or * Variable Length Context Header(s). The length MUST be 0x6 for MD * Type equal to 0x1, and MUST be 0x2 or greater for MD Type equal to * 0x2. The length of the NSH header MUST be an integer multiple of 4 * bytes, thus variable length metadata is always padded out to a * multiple of 4 bytes. * * MD Type: Indicates the format of NSH beyond the mandatory Base Header * and the Service Path Header. MD Type defines the format of the * metadata being carried. * * 0x0 - This is a reserved value. Implementations SHOULD silently * discard packets with MD Type 0x0. * * 0x1 - This indicates that the format of the header includes a fixed * length Context Header (see Figure 4 below). * * 0x2 - This does not mandate any headers beyond the Base Header and * Service Path Header, but may contain optional variable length Context * Header(s). The semantics of the variable length Context Header(s) * are not defined in this document. The format of the optional * variable length Context Headers is provided in Section 2.5.1. * * 0xF - This value is reserved for experimentation and testing, as per * [RFC3692]. Implementations not explicitly configured to be part of * an experiment SHOULD silently discard packets with MD Type 0xF. * * Next Protocol: indicates the protocol type of the encapsulated data. * NSH does not alter the inner payload, and the semantics on the inner * protocol remain unchanged due to NSH service function chaining. * Please see the IANA Considerations section below, Section 11.2.5. * * This document defines the following Next Protocol values: * * 0x1: IPv4 * 0x2: IPv6 * 0x3: Ethernet * 0x4: NSH * 0x5: MPLS * 0xFE: Experiment 1 * 0xFF: Experiment 2 * * Packets with Next Protocol values not supported SHOULD be silently * dropped by default, although an implementation MAY provide a * configuration parameter to forward them. Additionally, an * implementation not explicitly configured for a specific experiment * [RFC3692] SHOULD silently drop packets with Next Protocol values 0xFE * and 0xFF. * * Service Path Identifier (SPI): Identifies a service path. * Participating nodes MUST use this identifier for Service Function * Path selection. The initial classifier MUST set the appropriate SPI * for a given classification result. * * Service Index (SI): Provides location within the SFP. The initial * classifier for a given SFP SHOULD set the SI to 255, however the * control plane MAY configure the initial value of SI as appropriate * (i.e., taking into account the length of the service function path). 
* The Service Index MUST be decremented by a value of 1 by Service * Functions or by SFC Proxy nodes after performing required services * and the new decremented SI value MUST be used in the egress packet's * NSH. The initial Classifier MUST send the packet to the first SFF in * the identified SFP for forwarding along an SFP. If re-classification * occurs, and that re-classification results in a new SPI, the * (re)classifier is, in effect, the initial classifier for the * resultant SPI. * * The SI is used in conjunction with the Service Path Identifier for * Service Function Path Selection and for determining the next SFF/SF * in the path. The SI is also valuable when troubleshooting or * reporting service paths. Additionally, while the TTL field is the * main mechanism for service plane loop detection, the SI can also be * used for detecting service plane loops. * * When the Base Header specifies MD Type = 0x1, a Fixed Length Context * Header (16-bytes) MUST be present immediately following the Service * Path Header. The value of a Fixed Length Context * Header that carries no metadata MUST be set to zero. * * When the base header specifies MD Type = 0x2, zero or more Variable * Length Context Headers MAY be added, immediately following the * Service Path Header (see Figure 5). Therefore, Length = 0x2, * indicates that only the Base Header followed by the Service Path * Header are present. The optional Variable Length Context Headers * MUST be of an integer number of 4-bytes. The base header Length * field MUST be used to determine the offset to locate the original * packet or frame for SFC nodes that require access to that * information. * * The format of the optional variable length Context Headers * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Metadata Class | Type |U| Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Variable Metadata | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Metadata Class (MD Class): Defines the scope of the 'Type' field to * provide a hierarchical namespace. The IANA Considerations * Section 11.2.4 defines how the MD Class values can be allocated to * standards bodies, vendors, and others. * * Type: Indicates the explicit type of metadata being carried. The * definition of the Type is the responsibility of the MD Class owner. * * Unassigned bit: One unassigned bit is available for future use. This * bit MUST NOT be set, and MUST be ignored on receipt. * * Length: Indicates the length of the variable metadata, in bytes. In * case the metadata length is not an integer number of 4-byte words, * the sender MUST add pad bytes immediately following the last metadata * byte to extend the metadata to an integer number of 4-byte words. * The receiver MUST round up the length field to the nearest 4-byte * word boundary, to locate and process the next field in the packet. * The receiver MUST access only those bytes in the metadata indicated * by the length field (i.e., actual number of bytes) and MUST ignore * the remaining bytes up to the nearest 4-byte word boundary. The * Length may be 0 or greater. * * A value of 0 denotes a Context Header without a Variable Metadata * field. * * [0] https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/ */ /** * struct nsh_md1_ctx - Keeps track of NSH context data * @context: NSH Contexts.
*/ struct nsh_md1_ctx { __be32 context[4]; }; struct nsh_md2_tlv { __be16 md_class; u8 type; u8 length; u8 md_value[]; }; struct nshhdr { __be16 ver_flags_ttl_len; u8 mdtype; u8 np; __be32 path_hdr; union { struct nsh_md1_ctx md1; struct nsh_md2_tlv md2; }; }; /* Masking NSH header fields. */ #define NSH_VER_MASK 0xc000 #define NSH_VER_SHIFT 14 #define NSH_FLAGS_MASK 0x3000 #define NSH_FLAGS_SHIFT 12 #define NSH_TTL_MASK 0x0fc0 #define NSH_TTL_SHIFT 6 #define NSH_LEN_MASK 0x003f #define NSH_LEN_SHIFT 0 #define NSH_MDTYPE_MASK 0x0f #define NSH_MDTYPE_SHIFT 0 #define NSH_SPI_MASK 0xffffff00 #define NSH_SPI_SHIFT 8 #define NSH_SI_MASK 0x000000ff #define NSH_SI_SHIFT 0 /* MD Type Registry. */ #define NSH_M_TYPE1 0x01 #define NSH_M_TYPE2 0x02 #define NSH_M_EXP1 0xFE #define NSH_M_EXP2 0xFF /* NSH Base Header Length */ #define NSH_BASE_HDR_LEN 8 /* NSH MD Type 1 header Length. */ #define NSH_M_TYPE1_LEN 24 /* NSH header maximum Length. */ #define NSH_HDR_MAX_LEN 256 /* NSH context headers maximum Length. */ #define NSH_CTX_HDRS_MAX_LEN 248 static inline struct nshhdr *nsh_hdr(struct sk_buff *skb) { return (struct nshhdr *)skb_network_header(skb); } static inline u16 nsh_hdr_len(const struct nshhdr *nsh) { return ((ntohs(nsh->ver_flags_ttl_len) & NSH_LEN_MASK) >> NSH_LEN_SHIFT) << 2; } static inline u8 nsh_get_ver(const struct nshhdr *nsh) { return (ntohs(nsh->ver_flags_ttl_len) & NSH_VER_MASK) >> NSH_VER_SHIFT; } static inline u8 nsh_get_flags(const struct nshhdr *nsh) { return (ntohs(nsh->ver_flags_ttl_len) & NSH_FLAGS_MASK) >> NSH_FLAGS_SHIFT; } static inline u8 nsh_get_ttl(const struct nshhdr *nsh) { return (ntohs(nsh->ver_flags_ttl_len) & NSH_TTL_MASK) >> NSH_TTL_SHIFT; } static inline void __nsh_set_xflag(struct nshhdr *nsh, u16 xflag, u16 xmask) { nsh->ver_flags_ttl_len = (nsh->ver_flags_ttl_len & ~htons(xmask)) | htons(xflag); } static inline void nsh_set_flags_and_ttl(struct nshhdr *nsh, u8 flags, u8 ttl) { __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) | ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK), NSH_FLAGS_MASK | NSH_TTL_MASK); } static inline void nsh_set_flags_ttl_len(struct nshhdr *nsh, u8 flags, u8 ttl, u8 len) { len = len >> 2; __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) | ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK) | ((len << NSH_LEN_SHIFT) & NSH_LEN_MASK), NSH_FLAGS_MASK | NSH_TTL_MASK | NSH_LEN_MASK); } int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh); int nsh_pop(struct sk_buff *skb); #endif /* __NET_NSH_H */
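/*
 * Editor's note: a minimal sketch, not part of the original header, showing
 * how the mask/shift pairs above decode the Service Path Header. path_hdr
 * packs the 24-bit SPI into the upper bits and the 8-bit SI into the lowest
 * byte, so extraction is a byte swap plus mask-and-shift. The example_*
 * helper names are hypothetical.
 */
#if 0 /* illustrative only */
static inline u32 example_nsh_spi(const struct nshhdr *nsh)
{
	/* upper 24 bits of the Service Path Header */
	return (ntohl(nsh->path_hdr) & NSH_SPI_MASK) >> NSH_SPI_SHIFT;
}

static inline u8 example_nsh_si(const struct nshhdr *nsh)
{
	/* lowest byte of the Service Path Header */
	return (ntohl(nsh->path_hdr) & NSH_SI_MASK) >> NSH_SI_SHIFT;
}
#endif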
// SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/hash.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Portions of this code from linux/fs/ext3/hash.c * * Copyright (C) 2002 by Theodore Ts'o */ #include <linux/types.h> #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/pagemap.h> #include <linux/unicode.h> #include "f2fs.h" /* * Hashing code copied from ext3 */ #define DELTA 0x9E3779B9 static void TEA_transform(unsigned int buf[4], unsigned int const in[]) { __u32 sum = 0; __u32 b0 = buf[0], b1 = buf[1]; __u32 a = in[0], b = in[1], c = in[2], d = in[3]; int n = 16; do { sum += DELTA; b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); } while (--n); buf[0] += b0; buf[1] += b1; } static void str2hashbuf(const unsigned char *msg, size_t len, unsigned int *buf, int num) { unsigned pad, val; int i; pad = (__u32)len | ((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num * 4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = msg[i] + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } static u32 TEA_hash_name(const u8 *p, size_t len) { __u32 in[8], buf[4]; /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; buf[1] = 0xefcdab89; buf[2] = 0x98badcfe; buf[3] = 0x10325476; while (1) { str2hashbuf(p, len, in, 4); TEA_transform(buf, in); p += 16; if (len <= 16) break; len -= 16; } return buf[0] & ~F2FS_HASH_COL_BIT; } /* * Compute @fname->hash. For all directories, @fname->disk_name must be set. * For casefolded directories, @fname->usr_fname must be set, and also * @fname->cf_name if the filename is valid Unicode and is not "." or "..". */ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname) { const u8 *name = fname->disk_name.name; size_t len = fname->disk_name.len; WARN_ON_ONCE(!name); if (is_dot_dotdot(name, len)) { fname->hash = 0; return; } #if IS_ENABLED(CONFIG_UNICODE) if (IS_CASEFOLDED(dir)) { /* * If the casefolded name is provided, hash it instead of the * on-disk name. If the casefolded name is *not* provided, that * should only be because the name wasn't valid Unicode or was * "." or "..", so fall back to treating the name as an opaque * byte sequence. Note that to handle encrypted directories, * the fallback must use usr_fname (plaintext) rather than * disk_name (ciphertext). */ WARN_ON_ONCE(!fname->usr_fname->name); if (fname->cf_name.name) { name = fname->cf_name.name; len = fname->cf_name.len; } else { name = fname->usr_fname->name; len = fname->usr_fname->len; } if (IS_ENCRYPTED(dir)) { struct qstr tmp = QSTR_INIT(name, len); fname->hash = cpu_to_le32(fscrypt_fname_siphash(dir, &tmp)); return; } } #endif fname->hash = cpu_to_le32(TEA_hash_name(name, len)); }
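/*
 * Editor's note: a minimal sketch, not part of the original file.
 * TEA_hash_name() above consumes the name in 16-byte blocks, and
 * str2hashbuf() pads the final partial block with a length-derived
 * pattern, so the hash is deterministic for any byte string and the top
 * bit of the result is always cleared (reserved as F2FS_HASH_COL_BIT).
 * The example_* function is a hypothetical self-test of those invariants.
 */
#if 0 /* illustrative only; would live in this file since TEA_hash_name() is static */
static void example_tea_hash_selftest(void)
{
	static const u8 name[] = "example_file_name";
	u32 h1 = TEA_hash_name(name, sizeof(name) - 1);
	u32 h2 = TEA_hash_name(name, sizeof(name) - 1);

	WARN_ON(h1 != h2);			/* deterministic */
	WARN_ON(h1 & F2FS_HASH_COL_BIT);	/* top bit always cleared */
}
#endif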
/* * drm_irq.c IRQ and vblank support * * \author Rickard E.
(Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/export.h> #include <linux/kthread.h> #include <linux/moduleparam.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_framebuffer.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "drm_internal.h" #include "drm_trace.h" /** * DOC: vblank handling * * From the computer's perspective, every time the monitor displays * a new frame the scanout engine has "scanned out" the display image * from top to bottom, one row of pixels at a time. The current row * of pixels is referred to as the current scanline. * * In addition to the display's visible area, there's usually a couple of * extra scanlines which aren't actually displayed on the screen. * These extra scanlines don't contain image data and are occasionally used * for features like audio and infoframes. The region made up of these * scanlines is referred to as the vertical blanking region, or vblank for * short. * * For historical reference, the vertical blanking period was designed to * give the electron gun (on CRTs) enough time to move back to the top of * the screen to start scanning out the next frame. Similar for horizontal * blanking periods. They were designed to give the electron gun enough * time to move back to the other side of the screen to start scanning the * next scanline. * * :: * * * physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ * top of | | * display | | * | New frame | * | | * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline, * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the * | | frame as it * | | travels down * | | ("scan out") * | Old frame | * | | * | | * | | * | | physical * | | bottom of * vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display * blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ * region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ * ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ * start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ * new frame * * "Physical top of display" is the reference point for the high-precision/ * corrected timestamp. * * On a lot of display hardware, programming needs to take effect during the * vertical blanking period so that settings like gamma, the image buffer * buffer to be scanned out, etc. 
 * can safely be changed without showing
 * any visual artifacts on the screen. On some unforgiving hardware, some of
 * this programming has to both start and end in the same vblank. To help
 * with the timing of the hardware programming, an interrupt is usually
 * available to notify the driver when it can start the updating of registers.
 * The interrupt is in this context named the vblank interrupt.
 *
 * The vblank interrupt may be fired at different points depending on the
 * hardware. Some hardware implementations will fire the interrupt when the
 * new frame starts, other implementations will fire the interrupt at different
 * points in time.
 *
 * Vertical blanking plays a major role in graphics rendering. To achieve
 * tear-free display, users must synchronize page flips and/or rendering to
 * vertical blanking. The DRM API offers ioctls to perform page flips
 * synchronized to vertical blanking and wait for vertical blanking.
 *
 * The DRM core handles most of the vertical blanking management logic, which
 * involves filtering out spurious interrupts, keeping race-free blanking
 * counters, coping with counter wrap-around and resets and keeping use counts.
 * It relies on the driver to generate vertical blanking interrupts and
 * optionally provide a hardware vertical blanking counter.
 *
 * Drivers must initialize the vertical blanking handling core with a call to
 * drm_vblank_init(). Minimally, a driver needs to implement
 * &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call
 * drm_crtc_handle_vblank() in its vblank interrupt handler for working vblank
 * support (a minimal sketch of this wiring appears at the end of this file).
 *
 * Vertical blanking interrupts can be enabled by the DRM core or by drivers
 * themselves (for instance to handle page flipping operations). The DRM core
 * maintains a vertical blanking use count to ensure that the interrupts are not
 * disabled while a user still needs them. To increment the use count, drivers
 * call drm_crtc_vblank_get() and release the vblank reference again with
 * drm_crtc_vblank_put(). In between these two calls vblank interrupts are
 * guaranteed to be enabled.
 *
 * On a lot of hardware, disabling the vblank interrupt cannot be done in a
 * race-free manner, see &drm_vblank_crtc_config.disable_immediate and
 * &drm_driver.max_vblank_count. In that case the vblank core only disables the
 * vblanks after a timer has expired, which can be configured through the
 * ``vblankoffdelay`` module parameter.
 *
 * Drivers for hardware without support for vertical-blanking interrupts
 * must not call drm_vblank_init(). For such drivers, atomic helpers will
 * automatically generate fake vblank events as part of the display update.
 * This functionality can also be controlled by the driver by enabling and
 * disabling &drm_crtc_state.no_vblank.
 */

/* Retry timestamp calculation up to 3 times to satisfy
 * drm_timestamp_precision before giving up.
 */
#define DRM_TIMESTAMP_MAXRETRIES 3

/* Threshold in nanoseconds for detection of redundant
 * vblank irq in drm_handle_vblank(). 1 msec should be ok.
 */
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000

static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
			  ktime_t *tvblank, bool in_vblank_irq);

static unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */

static int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs.
*/ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); static struct drm_vblank_crtc * drm_vblank_crtc(struct drm_device *dev, unsigned int pipe) { return &dev->vblank[pipe]; } struct drm_vblank_crtc * drm_crtc_vblank_crtc(struct drm_crtc *crtc) { return drm_vblank_crtc(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_crtc); static void store_vblank(struct drm_device *dev, unsigned int pipe, u32 vblank_count_inc, ktime_t t_vblank, u32 last) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); assert_spin_locked(&dev->vblank_time_lock); vblank->last = last; write_seqlock(&vblank->seqlock); vblank->time = t_vblank; atomic64_add(vblank_count_inc, &vblank->count); write_sequnlock(&vblank->seqlock); } static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); return vblank->max_vblank_count ?: dev->max_vblank_count; } /* * "No hw counter" fallback implementation of .get_vblank_counter() hook, * if there is no usable hardware frame counter available. */ static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe) { drm_WARN_ON_ONCE(dev, drm_max_vblank_count(dev, pipe) != 0); return 0; } static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) { if (drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); if (drm_WARN_ON(dev, !crtc)) return 0; if (crtc->funcs->get_vblank_counter) return crtc->funcs->get_vblank_counter(crtc); } return drm_vblank_no_hw_counter(dev, pipe); } /* * Reset the stored timestamp for the current vblank count to correspond * to the last vblank occurred. * * Only to be called from drm_crtc_vblank_on(). * * Note: caller must hold &drm_device.vbl_lock since this reads & writes * device vblank fields. */ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe) { u32 cur_vblank; bool rc; ktime_t t_vblank; int count = DRM_TIMESTAMP_MAXRETRIES; spin_lock(&dev->vblank_time_lock); /* * sample the current counter to avoid random jumps * when drm_vblank_enable() applies the diff */ do { cur_vblank = __get_vblank_counter(dev, pipe); rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false); } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); /* * Only reinitialize corresponding vblank timestamp if high-precision query * available and didn't fail. Otherwise reinitialize delayed at next vblank * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid. */ if (!rc) t_vblank = 0; /* * +1 to make sure user will never see the same * vblank counter value before and after a modeset */ store_vblank(dev, pipe, 1, t_vblank, cur_vblank); spin_unlock(&dev->vblank_time_lock); } /* * Call back into the driver to update the appropriate vblank counter * (specified by @pipe). Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next * call if necessary. * * Only necessary when going from off->on, to account for frames we * didn't get an interrupt for. * * Note: caller must hold &drm_device.vbl_lock since this reads & writes * device vblank fields. 
*/ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u32 cur_vblank, diff; bool rc; ktime_t t_vblank; int count = DRM_TIMESTAMP_MAXRETRIES; int framedur_ns = vblank->framedur_ns; u32 max_vblank_count = drm_max_vblank_count(dev, pipe); /* * Interrupts were disabled prior to this call, so deal with counter * wrap if needed. * NOTE! It's possible we lost a full dev->max_vblank_count + 1 events * here if the register is small or we had vblank interrupts off for * a long time. * * We repeat the hardware vblank counter & timestamp query until * we get consistent results. This to prevent races between gpu * updating its hardware counter while we are retrieving the * corresponding vblank timestamp. */ do { cur_vblank = __get_vblank_counter(dev, pipe); rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq); } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); if (max_vblank_count) { /* trust the hw counter when it's around */ diff = (cur_vblank - vblank->last) & max_vblank_count; } else if (rc && framedur_ns) { u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time)); /* * Figure out how many vblanks we've missed based * on the difference in the timestamps and the * frame/field duration. */ drm_dbg_vbl(dev, "crtc %u: Calculating number of vblanks." " diff_ns = %lld, framedur_ns = %d)\n", pipe, (long long)diff_ns, framedur_ns); diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns); if (diff == 0 && in_vblank_irq) drm_dbg_vbl(dev, "crtc %u: Redundant vblirq ignored\n", pipe); } else { /* some kind of default for drivers w/o accurate vbl timestamping */ diff = in_vblank_irq ? 1 : 0; } /* * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset * interval? If so then vblank irqs keep running and it will likely * happen that the hardware vblank counter is not trustworthy as it * might reset at some point in that interval and vblank timestamps * are not trustworthy either in that interval. Iow. this can result * in a bogus diff >> 1 which must be avoided as it would cause * random large forward jumps of the software vblank counter. */ if (diff > 1 && (vblank->inmodeset & 0x2)) { drm_dbg_vbl(dev, "clamping vblank bump to 1 on crtc %u: diffr=%u" " due to pre-modeset.\n", pipe, diff); diff = 1; } drm_dbg_vbl(dev, "updating vblank count on crtc %u:" " current=%llu, diff=%u, hw=%u hw_last=%u\n", pipe, (unsigned long long)atomic64_read(&vblank->count), diff, cur_vblank, vblank->last); if (diff == 0) { drm_WARN_ON_ONCE(dev, cur_vblank != vblank->last); return; } /* * Only reinitialize corresponding vblank timestamp if high-precision query * available and didn't fail, or we were called from the vblank interrupt. * Otherwise reinitialize delayed at next vblank interrupt and assign 0 * for now, to mark the vblanktimestamp as invalid. */ if (!rc && !in_vblank_irq) t_vblank = 0; store_vblank(dev, pipe, diff, t_vblank, cur_vblank); } u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u64 count; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return 0; count = atomic64_read(&vblank->count); /* * This read barrier corresponds to the implicit write barrier of the * write seqlock in store_vblank(). 
	 * Note that this is the only place
	 * where we need an explicit barrier, since all other access goes
	 * through drm_vblank_count_and_time(), which already has the required
	 * read barrier courtesy of the read seqlock.
	 */
	smp_rmb();

	return count;
}

/**
 * drm_crtc_accurate_vblank_count - retrieve the master vblank counter
 * @crtc: which counter to retrieve
 *
 * This function is similar to drm_crtc_vblank_count() but this function
 * interpolates to handle a race with vblank interrupts using the high precision
 * timestamping support.
 *
 * This is mostly useful for hardware that can obtain the scanout position, but
 * doesn't have a hardware frame counter.
 */
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	u64 vblank;
	unsigned long flags;

	drm_WARN_ONCE(dev, drm_debug_enabled(DRM_UT_VBL) &&
		      !crtc->funcs->get_vblank_timestamp,
		      "This function requires support for accurate vblank timestamps.");

	spin_lock_irqsave(&dev->vblank_time_lock, flags);

	drm_update_vblank_count(dev, pipe, false);
	vblank = drm_vblank_count(dev, pipe);

	spin_unlock_irqrestore(&dev->vblank_time_lock, flags);

	return vblank;
}
EXPORT_SYMBOL(drm_crtc_accurate_vblank_count);

static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);

		if (drm_WARN_ON(dev, !crtc))
			return;

		if (crtc->funcs->disable_vblank)
			crtc->funcs->disable_vblank(crtc);
	}
}

/*
 * Disable vblank irqs on crtc, make sure that last vblank count
 * of hardware and corresponding consistent software vblank counter
 * are preserved, even if there are any spurious vblank irqs after
 * disable.
 */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
	struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
	unsigned long irqflags;

	assert_spin_locked(&dev->vbl_lock);

	/* Prevent vblank irq processing while disabling vblank irqs,
	 * so no updates of timestamps or count can happen after we've
	 * disabled. Needed to prevent races in case of delayed irqs.
	 */
	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);

	/*
	 * Update vblank count and disable vblank interrupts only if the
	 * interrupts were enabled. This avoids calling the ->disable_vblank()
	 * operation in atomic context with the hardware potentially runtime
	 * suspended.
	 */
	if (!vblank->enabled)
		goto out;

	/*
	 * Update the count and timestamp to maintain the
	 * appearance that the counter has been ticking all along until
	 * this time. This makes the count account for the entire time
	 * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/ drm_update_vblank_count(dev, pipe, false); __disable_vblank(dev, pipe); vblank->enabled = false; out: spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); } static void vblank_disable_fn(struct timer_list *t) { struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer); struct drm_device *dev = vblank->dev; unsigned int pipe = vblank->pipe; unsigned long irqflags; spin_lock_irqsave(&dev->vbl_lock, irqflags); if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) { drm_dbg_core(dev, "disabling vblank on crtc %u\n", pipe); drm_vblank_disable_and_save(dev, pipe); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } static void drm_vblank_init_release(struct drm_device *dev, void *ptr) { struct drm_vblank_crtc *vblank = ptr; drm_WARN_ON(dev, READ_ONCE(vblank->enabled) && drm_core_check_feature(dev, DRIVER_MODESET)); drm_vblank_destroy_worker(vblank); del_timer_sync(&vblank->disable_timer); } /** * drm_vblank_init - initialize vblank support * @dev: DRM device * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. * Cleanup is handled automatically through a cleanup function added with * drmm_add_action_or_reset(). * * Returns: * Zero on success or a negative error code on failure. */ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) { int ret; unsigned int i; spin_lock_init(&dev->vbl_lock); spin_lock_init(&dev->vblank_time_lock); dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); if (!dev->vblank) return -ENOMEM; dev->num_crtcs = num_crtcs; for (i = 0; i < num_crtcs; i++) { struct drm_vblank_crtc *vblank = &dev->vblank[i]; vblank->dev = dev; vblank->pipe = i; init_waitqueue_head(&vblank->queue); timer_setup(&vblank->disable_timer, vblank_disable_fn, 0); seqlock_init(&vblank->seqlock); ret = drmm_add_action_or_reset(dev, drm_vblank_init_release, vblank); if (ret) return ret; ret = drm_vblank_worker_init(vblank); if (ret) return ret; } return 0; } EXPORT_SYMBOL(drm_vblank_init); /** * drm_dev_has_vblank - test if vblanking has been initialized for * a device * @dev: the device * * Drivers may call this function to test if vblank support is * initialized for a device. For most hardware this means that vblanking * can also be enabled. * * Atomic helpers use this function to initialize * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset(). * * Returns: * True if vblanking has been initialized for the given device, false * otherwise. */ bool drm_dev_has_vblank(const struct drm_device *dev) { return dev->num_crtcs != 0; } EXPORT_SYMBOL(drm_dev_has_vblank); /** * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC * @crtc: which CRTC's vblank waitqueue to retrieve * * This function returns a pointer to the vblank waitqueue for the CRTC. * Drivers can use this to implement vblank waits using wait_event() and related * functions. */ wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) { return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; } EXPORT_SYMBOL(drm_crtc_vblank_waitqueue); /** * drm_calc_timestamping_constants - calculate vblank timestamp constants * @crtc: drm_crtc whose timestamp constants should be updated. * @mode: display mode containing the scanout timings * * Calculate and store various constants which are later needed by vblank and * swap-completion timestamping, e.g, by * drm_crtc_vblank_helper_get_vblank_timestamp(). 
They are derived from * CRTC's true scanout timing, so they take things like panel scaling or * other adjustments into account. */ void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); int linedur_ns = 0, framedur_ns = 0; int dotclock = mode->crtc_clock; if (!drm_dev_has_vblank(dev)) return; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; /* Valid dotclock? */ if (dotclock > 0) { int frame_size = mode->crtc_htotal * mode->crtc_vtotal; /* * Convert scanline length in pixels and video * dot clock to line duration and frame duration * in nanoseconds: */ linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock); framedur_ns = div_u64((u64) frame_size * 1000000, dotclock); /* * Fields of interlaced scanout modes are only half a frame duration. */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) framedur_ns /= 2; } else { drm_err(dev, "crtc %u: Can't calculate constants, dotclock = 0!\n", crtc->base.id); } vblank->linedur_ns = linedur_ns; vblank->framedur_ns = framedur_ns; drm_mode_copy(&vblank->hwmode, mode); drm_dbg_core(dev, "crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", crtc->base.id, mode->crtc_htotal, mode->crtc_vtotal, mode->crtc_vdisplay); drm_dbg_core(dev, "crtc %u: clock %d kHz framedur %d linedur %d\n", crtc->base.id, dotclock, framedur_ns, linedur_ns); } EXPORT_SYMBOL(drm_calc_timestamping_constants); /** * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank * timestamp helper * @crtc: CRTC whose vblank timestamp to retrieve * @max_error: Desired maximum allowable error in timestamps (nanosecs) * On return contains true maximum error of timestamp * @vblank_time: Pointer to time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * @get_scanout_position: * Callback function to retrieve the scanout position. See * @struct drm_crtc_helper_funcs.get_scanout_position. * * Implements calculation of exact vblank timestamps from given drm_display_mode * timings and current video scanout position of a CRTC. * * The current implementation only handles standard video modes. For double scan * and interlaced modes the driver is supposed to adjust the hardware mode * (taken from &drm_crtc_state.adjusted mode for atomic modeset drivers) to * match the scanout position reported. * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * Returns true on success, and false on failure, i.e. when no accurate * timestamp could be acquired. */ bool drm_crtc_vblank_helper_get_vblank_timestamp_internal( struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq, drm_vblank_get_scanout_position_func get_scanout_position) { struct drm_device *dev = crtc->dev; unsigned int pipe = crtc->index; struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct timespec64 ts_etime, ts_vblank_time; ktime_t stime, etime; bool vbl_status; const struct drm_display_mode *mode; int vpos, hpos, i; int delta_ns, duration_ns; if (pipe >= dev->num_crtcs) { drm_err(dev, "Invalid crtc %u\n", pipe); return false; } /* Scanout position query not supported? Should not happen. 
*/ if (!get_scanout_position) { drm_err(dev, "Called from CRTC w/o get_scanout_position()!?\n"); return false; } if (drm_drv_uses_atomic_modeset(dev)) mode = &vblank->hwmode; else mode = &crtc->hwmode; /* If mode timing undefined, just return as no-op: * Happens during initial modesetting of a crtc. */ if (mode->crtc_clock == 0) { drm_dbg_core(dev, "crtc %u: Noop due to uninitialized mode.\n", pipe); drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev)); return false; } /* Get current scanout position with system timestamp. * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times * if single query takes longer than max_error nanoseconds. * * This guarantees a tight bound on maximum error if * code gets preempted or delayed for some reason. */ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { /* * Get vertical and horizontal scanout position vpos, hpos, * and bounding timestamps stime, etime, pre/post query. */ vbl_status = get_scanout_position(crtc, in_vblank_irq, &vpos, &hpos, &stime, &etime, mode); /* Return as no-op if scanout query unsupported or failed. */ if (!vbl_status) { drm_dbg_core(dev, "crtc %u : scanoutpos query failed.\n", pipe); return false; } /* Compute uncertainty in timestamp of scanout position query. */ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); /* Accept result with < max_error nsecs timing uncertainty. */ if (duration_ns <= *max_error) break; } /* Noisy system timing? */ if (i == DRM_TIMESTAMP_MAXRETRIES) { drm_dbg_core(dev, "crtc %u: Noisy timestamp %d us > %d us [%d reps].\n", pipe, duration_ns / 1000, *max_error / 1000, i); } /* Return upper bound of timestamp precision error. */ *max_error = duration_ns; /* Convert scanout position into elapsed time at raw_time query * since start of scanout at first display scanline. delta_ns * can be negative if start of scanout hasn't happened yet. */ delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos), mode->crtc_clock); /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. */ *vblank_time = ktime_sub_ns(etime, delta_ns); if (!drm_debug_enabled(DRM_UT_VBL)) return true; ts_etime = ktime_to_timespec64(etime); ts_vblank_time = ktime_to_timespec64(*vblank_time); drm_dbg_vbl(dev, "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n", pipe, hpos, vpos, (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000, (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000, duration_ns / 1000, i); return true; } EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal); /** * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp * helper * @crtc: CRTC whose vblank timestamp to retrieve * @max_error: Desired maximum allowable error in timestamps (nanosecs) * On return contains true maximum error of timestamp * @vblank_time: Pointer to time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * * Implements calculation of exact vblank timestamps from given drm_display_mode * timings and current video scanout position of a CRTC. This can be directly * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented. * * The current implementation only handles standard video modes. 
For double scan * and interlaced modes the driver is supposed to adjust the hardware mode * (taken from &drm_crtc_state.adjusted mode for atomic modeset drivers) to * match the scanout position reported. * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * Returns true on success, and false on failure, i.e. when no accurate * timestamp could be acquired. */ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq) { return drm_crtc_vblank_helper_get_vblank_timestamp_internal( crtc, max_error, vblank_time, in_vblank_irq, crtc->helper_private->get_scanout_position); } EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp); /** * drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most * recent vblank interval * @crtc: CRTC whose vblank timestamp to retrieve * @tvblank: Pointer to target time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * * Fetches the system timestamp corresponding to the time of the most recent * vblank interval on specified CRTC. May call into kms-driver to * compute the timestamp with a high-precision GPU specific method. * * Returns zero if timestamp originates from uncorrected do_gettimeofday() * call, i.e., it isn't very precisely locked to the true vblank. * * Returns: * True if timestamp is considered to be very precise, false otherwise. */ static bool drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank, bool in_vblank_irq) { bool ret = false; /* Define requested maximum error on timestamps (nanoseconds). */ int max_error = (int) drm_timestamp_precision * 1000; /* Query driver if possible and precision timestamping enabled. */ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) { ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error, tvblank, in_vblank_irq); } /* GPU high precision timestamp query unsupported or failed. * Return current monotonic/gettimeofday timestamp as best estimate. */ if (!ret) *tvblank = ktime_get(); return ret; } static bool drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, ktime_t *tvblank, bool in_vblank_irq) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq); } /** * drm_crtc_vblank_count - retrieve "cooked" vblank counter value * @crtc: which counter to retrieve * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Note that this timer isn't correct against a racing * vblank interrupt (since it only reports the software vblank counter), see * drm_crtc_accurate_vblank_count() for such use-cases. * * Note that for a given vblank counter value drm_crtc_handle_vblank() * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() * provide a barrier: Any writes done before calling * drm_crtc_handle_vblank() will be visible to callers of the later * functions, if the vblank count is the same or a later one. * * See also &drm_vblank_crtc.count. * * Returns: * The software vblank counter. 
*/ u64 drm_crtc_vblank_count(struct drm_crtc *crtc) { return drm_vblank_count(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_count); /** * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the * system timestamp corresponding to that vblank counter value. * @dev: DRM device * @pipe: index of CRTC whose counter to retrieve * @vblanktime: Pointer to ktime_t to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. * * This is the legacy version of drm_crtc_vblank_count_and_time(). */ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, ktime_t *vblanktime) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u64 vblank_count; unsigned int seq; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) { *vblanktime = 0; return 0; } do { seq = read_seqbegin(&vblank->seqlock); vblank_count = atomic64_read(&vblank->count); *vblanktime = vblank->time; } while (read_seqretry(&vblank->seqlock, seq)); return vblank_count; } /** * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value * and the system timestamp corresponding to that vblank counter value * @crtc: which counter to retrieve * @vblanktime: Pointer to time to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. * * Note that for a given vblank counter value drm_crtc_handle_vblank() * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() * provide a barrier: Any writes done before calling * drm_crtc_handle_vblank() will be visible to callers of the later * functions, if the vblank count is the same or a later one. * * See also &drm_vblank_crtc.count. */ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime) { return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc), vblanktime); } EXPORT_SYMBOL(drm_crtc_vblank_count_and_time); /** * drm_crtc_next_vblank_start - calculate the time of the next vblank * @crtc: the crtc for which to calculate next vblank time * @vblanktime: pointer to time to receive the next vblank timestamp. 
 *
 * Calculate the expected time of the start of the next vblank period,
 * based on the time of the previous vblank and the frame duration.
 */
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
{
	struct drm_vblank_crtc *vblank;
	struct drm_display_mode *mode;
	u64 vblank_start;

	if (!drm_dev_has_vblank(crtc->dev))
		return -EINVAL;

	vblank = drm_crtc_vblank_crtc(crtc);
	mode = &vblank->hwmode;

	if (!vblank->framedur_ns || !vblank->linedur_ns)
		return -EINVAL;

	if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false))
		return -EINVAL;

	vblank_start = DIV_ROUND_DOWN_ULL(
			(u64)vblank->framedur_ns * mode->crtc_vblank_start,
			mode->crtc_vtotal);
	*vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start));

	return 0;
}
EXPORT_SYMBOL(drm_crtc_next_vblank_start);

static void send_vblank_event(struct drm_device *dev,
			      struct drm_pending_vblank_event *e,
			      u64 seq, ktime_t now)
{
	struct timespec64 tv;

	switch (e->event.base.type) {
	case DRM_EVENT_VBLANK:
	case DRM_EVENT_FLIP_COMPLETE:
		tv = ktime_to_timespec64(now);
		e->event.vbl.sequence = seq;
		/*
		 * e->event is a user space structure, with hardcoded unsigned
		 * 32-bit seconds/microseconds. This is safe as we always use
		 * monotonic timestamps since linux-4.15.
		 */
		e->event.vbl.tv_sec = tv.tv_sec;
		e->event.vbl.tv_usec = tv.tv_nsec / 1000;
		break;
	case DRM_EVENT_CRTC_SEQUENCE:
		if (seq)
			e->event.seq.sequence = seq;
		e->event.seq.time_ns = ktime_to_ns(now);
		break;
	}
	trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
	/*
	 * Use the same timestamp for any associated fence signal to avoid
	 * mismatch in timestamps for vsync & fence events triggered by the
	 * same HW event. Frameworks like SurfaceFlinger in Android expect the
	 * retire-fence timestamp to match exactly with HW vsync as they use it
	 * for their software vsync modeling.
	 */
	drm_send_event_timestamp_locked(dev, &e->base, now);
}

/**
 * drm_crtc_arm_vblank_event - arm vblank event after pageflip
 * @crtc: the source CRTC of the vblank event
 * @e: the event to send
 *
 * A lot of drivers need to generate vblank events for the very next vblank
 * interrupt. For example, when the page flip interrupt fires when the page
 * flip gets armed, but not when it actually executes within the next vblank
 * period. This helper function implements exactly the required vblank arming
 * behaviour (a sketch of the typical calling pattern appears at the end of
 * this file).
 *
 * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
 * atomic commit must ensure that the next vblank happens at exactly the same
 * time as the atomic commit is committed to the hardware. This function itself
 * does **not** protect against the next vblank interrupt racing with either this
 * function call or the atomic commit operation. A possible sequence could be:
 *
 * 1. Driver commits new hardware state into vblank-synchronized registers.
 * 2. A vblank happens, committing the hardware state. Also the corresponding
 *    vblank interrupt is fired off and fully processed by the interrupt
 *    handler.
 * 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
 * 4. The event is only sent out for the next vblank, which is wrong.
 *
 * An equivalent race can happen when the driver calls
 * drm_crtc_arm_vblank_event() before writing out the new hardware state.
 *
 * The only way to make this work safely is to prevent the vblank from firing
 * (and the hardware from committing anything else) until the entire atomic
 * commit sequence has run to completion. If the hardware does not have such a
 * feature (e.g. using a "go" bit), then it is unsafe to use this function.
* Instead drivers need to manually send out the event from their interrupt * handler by calling drm_crtc_send_vblank_event() and make sure that there's no * possible race with the hardware committing the atomic update. * * Caller must hold a vblank reference for the event @e acquired by a * drm_crtc_vblank_get(), which will be dropped when the next vblank arrives. */ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); assert_spin_locked(&dev->event_lock); e->pipe = pipe; e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1; list_add_tail(&e->base.link, &dev->vblank_event_list); } EXPORT_SYMBOL(drm_crtc_arm_vblank_event); /** * drm_crtc_send_vblank_event - helper to send vblank event after pageflip * @crtc: the source CRTC of the vblank event * @e: the event to send * * Updates sequence # and timestamp on event for the most recently processed * vblank, and sends it to userspace. Caller must hold event lock. * * See drm_crtc_arm_vblank_event() for a helper which can be used in certain * situation, especially to send out events for atomic commit operations. */ void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) { struct drm_device *dev = crtc->dev; u64 seq; unsigned int pipe = drm_crtc_index(crtc); ktime_t now; if (drm_dev_has_vblank(dev)) { seq = drm_vblank_count_and_time(dev, pipe, &now); } else { seq = 0; now = ktime_get(); } e->pipe = pipe; send_vblank_event(dev, e, seq, now); } EXPORT_SYMBOL(drm_crtc_send_vblank_event); static int __enable_vblank(struct drm_device *dev, unsigned int pipe) { if (drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); if (drm_WARN_ON(dev, !crtc)) return 0; if (crtc->funcs->enable_vblank) return crtc->funcs->enable_vblank(crtc); } return -EINVAL; } static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int ret = 0; assert_spin_locked(&dev->vbl_lock); spin_lock(&dev->vblank_time_lock); if (!vblank->enabled) { /* * Enable vblank irqs under vblank_time_lock protection. * All vblank count & timestamp updates are held off * until we are done reinitializing master counter and * timestamps. Filtercode in drm_handle_vblank() will * prevent double-accounting of same vblank interval. */ ret = __enable_vblank(dev, pipe); drm_dbg_core(dev, "enabling vblank on crtc %u, ret: %d\n", pipe, ret); if (ret) { atomic_dec(&vblank->refcount); } else { drm_update_vblank_count(dev, pipe, 0); /* drm_update_vblank_count() includes a wmb so we just * need to ensure that the compiler emits the write * to mark the vblank as enabled after the call * to drm_update_vblank_count(). 
*/ WRITE_ONCE(vblank->enabled, true); } } spin_unlock(&dev->vblank_time_lock); return ret; } int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; int ret = 0; if (!drm_dev_has_vblank(dev)) return -EINVAL; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return -EINVAL; spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &vblank->refcount) == 1) { ret = drm_vblank_enable(dev, pipe); } else { if (!vblank->enabled) { atomic_dec(&vblank->refcount); ret = -EINVAL; } } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); return ret; } /** * drm_crtc_vblank_get - get a reference count on vblank events * @crtc: which CRTC to own * * Acquire a reference count on vblank events to avoid having them disabled * while in use. * * Returns: * Zero on success or a negative error code on failure. */ int drm_crtc_vblank_get(struct drm_crtc *crtc) { return drm_vblank_get(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_get); void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int vblank_offdelay = vblank->config.offdelay_ms; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; if (drm_WARN_ON(dev, atomic_read(&vblank->refcount) == 0)) return; /* Last user schedules interrupt disable */ if (atomic_dec_and_test(&vblank->refcount)) { if (!vblank_offdelay) return; else if (vblank_offdelay < 0) vblank_disable_fn(&vblank->disable_timer); else if (!vblank->config.disable_immediate) mod_timer(&vblank->disable_timer, jiffies + ((vblank_offdelay * HZ) / 1000)); } } /** * drm_crtc_vblank_put - give up ownership of vblank events * @crtc: which counter to give up * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after &drm_vblank_crtc_config.offdelay_ms * milliseconds. */ void drm_crtc_vblank_put(struct drm_crtc *crtc) { drm_vblank_put(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_put); /** * drm_wait_one_vblank - wait for one vblank * @dev: DRM device * @pipe: CRTC index * * This waits for one vblank to pass on @pipe, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @pipe is disabled, e.g. * due to lack of driver support or because the crtc is off. * * This is the legacy version of drm_crtc_wait_one_vblank(). */ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int ret; u64 last; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; ret = drm_vblank_get(dev, pipe); if (drm_WARN(dev, ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret)) return; last = drm_vblank_count(dev, pipe); ret = wait_event_timeout(vblank->queue, last != drm_vblank_count(dev, pipe), msecs_to_jiffies(100)); drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe); drm_vblank_put(dev, pipe); } EXPORT_SYMBOL(drm_wait_one_vblank); /** * drm_crtc_wait_one_vblank - wait for one vblank * @crtc: DRM crtc * * This waits for one vblank to pass on @crtc, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. * due to lack of driver support or because the crtc is off. 
*/ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc) { drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_wait_one_vblank); /** * drm_crtc_vblank_off - disable vblank events on a CRTC * @crtc: CRTC in question * * Drivers can use this function to shut down the vblank interrupt handling when * disabling a crtc. This function ensures that the latest vblank frame count is * stored so that drm_vblank_on can restore it again. * * Drivers must use this function when the hardware vblank counter can get * reset, e.g. when suspending or disabling the @crtc in general. */ void drm_crtc_vblank_off(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct drm_pending_vblank_event *e, *t; ktime_t now; u64 seq; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; /* * Grab event_lock early to prevent vblank work from being scheduled * while we're in the middle of shutting down vblank interrupts */ spin_lock_irq(&dev->event_lock); spin_lock(&dev->vbl_lock); drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n", pipe, vblank->enabled, vblank->inmodeset); /* Avoid redundant vblank disables without previous * drm_crtc_vblank_on(). */ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset) drm_vblank_disable_and_save(dev, pipe); wake_up(&vblank->queue); /* * Prevent subsequent drm_vblank_get() from re-enabling * the vblank interrupt by bumping the refcount. */ if (!vblank->inmodeset) { atomic_inc(&vblank->refcount); vblank->inmodeset = 1; } spin_unlock(&dev->vbl_lock); /* Send any queued vblank events, lest the natives grow disquiet */ seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != pipe) continue; drm_dbg_core(dev, "Sending premature vblank event on disable: " "wanted %llu, current %llu\n", e->sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); } /* Cancel any leftover pending vblank work */ drm_vblank_cancel_pending_works(vblank); spin_unlock_irq(&dev->event_lock); /* Will be reset by the modeset helpers when re-enabling the crtc by * calling drm_calc_timestamping_constants(). */ vblank->hwmode.crtc_clock = 0; /* Wait for any vblank work that's still executing to finish */ drm_vblank_flush_worker(vblank); } EXPORT_SYMBOL(drm_crtc_vblank_off); /** * drm_crtc_vblank_reset - reset vblank state to off on a CRTC * @crtc: CRTC in question * * Drivers can use this function to reset the vblank state to off at load time. * Drivers should use this together with the drm_crtc_vblank_off() and * drm_crtc_vblank_on() functions. The difference compared to * drm_crtc_vblank_off() is that this function doesn't save the vblank counter * and hence doesn't need to call any driver hooks. * * This is useful for recovering driver state e.g. on driver load, or on resume. */ void drm_crtc_vblank_reset(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); spin_lock_irq(&dev->vbl_lock); /* * Prevent subsequent drm_vblank_get() from enabling the vblank * interrupt by bumping the refcount. 
	 */
	if (!vblank->inmodeset) {
		atomic_inc(&vblank->refcount);
		vblank->inmodeset = 1;
	}
	spin_unlock_irq(&dev->vbl_lock);

	drm_WARN_ON(dev, !list_empty(&dev->vblank_event_list));
	drm_WARN_ON(dev, !list_empty(&vblank->pending_work));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);

/**
 * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
 * @crtc: CRTC in question
 * @max_vblank_count: max hardware vblank counter value
 *
 * Update the maximum hardware vblank counter value for @crtc
 * at runtime. Useful for hardware where the operation of the
 * hardware vblank counter depends on the currently active
 * display configuration.
 *
 * For example, if the hardware vblank counter does not work
 * when a specific connector is active, the maximum can be set
 * to zero. And when that specific connector isn't active, the
 * maximum can again be set to the appropriate non-zero value.
 *
 * If used, must be called before drm_crtc_vblank_on().
 */
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
				   u32 max_vblank_count)
{
	struct drm_device *dev = crtc->dev;
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	drm_WARN_ON(dev, dev->max_vblank_count);
	drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset));

	vblank->max_vblank_count = max_vblank_count;
}
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);

/**
 * drm_crtc_vblank_on_config - enable vblank events on a CRTC with custom
 *	configuration options
 * @crtc: CRTC in question
 * @config: Vblank configuration value
 *
 * See drm_crtc_vblank_on(). In addition, this function allows you to provide a
 * custom vblank configuration for a given CRTC.
 *
 * Note that @config is copied, the pointer does not need to stay valid beyond
 * this function call. For details of the parameters see
 * struct drm_vblank_crtc_config.
 */
void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
			       const struct drm_vblank_crtc_config *config)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
		return;

	spin_lock_irq(&dev->vbl_lock);
	drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
		    pipe, vblank->enabled, vblank->inmodeset);

	vblank->config = *config;

	/* Drop our private "prevent drm_vblank_get" refcount */
	if (vblank->inmodeset) {
		atomic_dec(&vblank->refcount);
		vblank->inmodeset = 0;
	}

	drm_reset_vblank_timestamp(dev, pipe);

	/*
	 * re-enable interrupts if there are users left, or the
	 * user wishes vblank interrupts to be enabled all the time.
	 */
	if (atomic_read(&vblank->refcount) != 0 || !vblank->config.offdelay_ms)
		drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));
	spin_unlock_irq(&dev->vbl_lock);
}
EXPORT_SYMBOL(drm_crtc_vblank_on_config);

/**
 * drm_crtc_vblank_on - enable vblank events on a CRTC
 * @crtc: CRTC in question
 *
 * This function restores the vblank interrupt state captured with
 * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
 * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
 * unbalanced and so can also be unconditionally called in driver load code to
 * reflect the current hardware state of the crtc.
 *
 * Note that unlike in drm_crtc_vblank_on_config(), default values are used.
 */
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
	const struct drm_vblank_crtc_config config = {
		.offdelay_ms = drm_vblank_offdelay,
		.disable_immediate = crtc->dev->vblank_disable_immediate
	};

	drm_crtc_vblank_on_config(crtc, &config);
}
EXPORT_SYMBOL(drm_crtc_vblank_on);

static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
{
	ktime_t t_vblank;
	struct drm_vblank_crtc *vblank;
	int framedur_ns;
	u64 diff_ns;
	u32 cur_vblank, diff = 1;
	int count = DRM_TIMESTAMP_MAXRETRIES;
	u32 max_vblank_count = drm_max_vblank_count(dev, pipe);

	if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
		return;

	assert_spin_locked(&dev->vbl_lock);
	assert_spin_locked(&dev->vblank_time_lock);

	vblank = drm_vblank_crtc(dev, pipe);
	drm_WARN_ONCE(dev,
		      drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
		      "Cannot compute missed vblanks without frame duration\n");
	framedur_ns = vblank->framedur_ns;

	do {
		cur_vblank = __get_vblank_counter(dev, pipe);
		drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
	} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);

	diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
	if (framedur_ns)
		diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);

	drm_dbg_vbl(dev,
		    "missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
		    diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
	vblank->last = (cur_vblank - diff) & max_vblank_count;
}

/**
 * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
 * @crtc: CRTC in question
 *
 * Power management features can cause frame counter resets between vblank
 * disable and enable. Drivers can use this function in their
 * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
 * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
 * vblank counter.
 *
 * Note that drivers must have race-free high-precision timestamping support,
 * i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
 * &drm_vblank_crtc_config.disable_immediate must be set to indicate the
 * time-stamping functions are race-free against vblank hardware counter
 * increments.
 */
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);

	drm_WARN_ON_ONCE(dev, !crtc->funcs->get_vblank_timestamp);
	drm_WARN_ON_ONCE(dev, vblank->inmodeset);
	drm_WARN_ON_ONCE(dev, !vblank->config.disable_immediate);

	drm_vblank_restore(dev, pipe);
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);

static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
				  u64 req_seq,
				  union drm_wait_vblank *vblwait,
				  struct drm_file *file_priv)
{
	struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
	struct drm_pending_vblank_event *e;
	ktime_t now;
	u64 seq;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		ret = -ENOMEM;
		goto err_put;
	}

	e->pipe = pipe;
	e->event.base.type = DRM_EVENT_VBLANK;
	e->event.base.length = sizeof(e->event.vbl);
	e->event.vbl.user_data = vblwait->request.signal;
	e->event.vbl.crtc_id = 0;
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);

		if (crtc)
			e->event.vbl.crtc_id = crtc->base.id;
	}

	spin_lock_irq(&dev->event_lock);

	/*
	 * drm_crtc_vblank_off() might have been called after we called
	 * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
	 * vblank disable, so no need for further locking. The reference from
	 * drm_vblank_get() protects against vblank disable from another source.
*/ if (!READ_ONCE(vblank->enabled)) { ret = -EINVAL; goto err_unlock; } ret = drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base); if (ret) goto err_unlock; seq = drm_vblank_count_and_time(dev, pipe, &now); drm_dbg_core(dev, "event on vblank count %llu, current %llu, crtc %u\n", req_seq, seq, pipe); trace_drm_vblank_event_queued(file_priv, pipe, req_seq); e->sequence = req_seq; if (drm_vblank_passed(seq, req_seq)) { drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); vblwait->reply.sequence = seq; } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); vblwait->reply.sequence = req_seq; } spin_unlock_irq(&dev->event_lock); return 0; err_unlock: spin_unlock_irq(&dev->event_lock); kfree(e); err_put: drm_vblank_put(dev, pipe); return ret; } static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait) { if (vblwait->request.sequence) return false; return _DRM_VBLANK_RELATIVE == (vblwait->request.type & (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_EVENT | _DRM_VBLANK_NEXTONMISS)); } /* * Widen a 32-bit param to 64-bits. * * \param narrow 32-bit value (missing upper 32 bits) * \param near 64-bit value that should be 'close' to near * * This function returns a 64-bit value using the lower 32-bits from * 'narrow' and constructing the upper 32-bits so that the result is * as close as possible to 'near'. */ static u64 widen_32_to_64(u32 narrow, u64 near) { return near + (s32) (narrow - near); } static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, struct drm_wait_vblank_reply *reply) { ktime_t now; struct timespec64 ts; /* * drm_wait_vblank_reply is a UAPI structure that uses 'long' * to store the seconds. This is safe as we always use monotonic * timestamps since linux-4.15. */ reply->sequence = drm_vblank_count_and_time(dev, pipe, &now); ts = ktime_to_timespec64(now); reply->tval_sec = (u32)ts.tv_sec; reply->tval_usec = ts.tv_nsec / 1000; } static bool drm_wait_vblank_supported(struct drm_device *dev) { return drm_dev_has_vblank(dev); } int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_vblank_crtc *vblank; union drm_wait_vblank *vblwait = data; int ret; u64 req_seq, seq; unsigned int pipe_index; unsigned int flags, pipe, high_pipe; if (!drm_wait_vblank_supported(dev)) return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return -EINVAL; if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | _DRM_VBLANK_HIGH_CRTC_MASK)) { drm_dbg_core(dev, "Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | _DRM_VBLANK_HIGH_CRTC_MASK)); return -EINVAL; } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); if (high_pipe) pipe_index = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT; else pipe_index = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; /* Convert lease-relative crtc index into global crtc index */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { pipe = 0; drm_for_each_crtc(crtc, dev) { if (drm_lease_held(file_priv, crtc->base.id)) { if (pipe_index == 0) break; pipe_index--; } pipe++; } } else { pipe = pipe_index; } if (pipe >= dev->num_crtcs) return -EINVAL; vblank = &dev->vblank[pipe]; /* If the counter is currently enabled and accurate, short-circuit * queries to return the cached timestamp of the last vblank. 
*/ if (vblank->config.disable_immediate && drm_wait_vblank_is_query(vblwait) && READ_ONCE(vblank->enabled)) { drm_wait_vblank_reply(dev, pipe, &vblwait->reply); return 0; } ret = drm_vblank_get(dev, pipe); if (ret) { drm_dbg_core(dev, "crtc %d failed to acquire vblank counter, %d\n", pipe, ret); return ret; } seq = drm_vblank_count(dev, pipe); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: req_seq = seq + vblwait->request.sequence; vblwait->request.sequence = req_seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; break; case _DRM_VBLANK_ABSOLUTE: req_seq = widen_32_to_64(vblwait->request.sequence, seq); break; default: ret = -EINVAL; goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && drm_vblank_passed(seq, req_seq)) { req_seq = seq + 1; vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS; vblwait->request.sequence = req_seq; } if (flags & _DRM_VBLANK_EVENT) { /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously */ return drm_queue_vblank_event(dev, pipe, req_seq, vblwait, file_priv); } if (req_seq != seq) { int wait; drm_dbg_core(dev, "waiting on vblank count %llu, crtc %u\n", req_seq, pipe); wait = wait_event_interruptible_timeout(vblank->queue, drm_vblank_passed(drm_vblank_count(dev, pipe), req_seq) || !READ_ONCE(vblank->enabled), msecs_to_jiffies(3000)); switch (wait) { case 0: /* timeout */ ret = -EBUSY; break; case -ERESTARTSYS: /* interrupted by signal */ ret = -EINTR; break; default: ret = 0; break; } } if (ret != -EINTR) { drm_wait_vblank_reply(dev, pipe, &vblwait->reply); drm_dbg_core(dev, "crtc %d returning %u to client\n", pipe, vblwait->reply.sequence); } else { drm_dbg_core(dev, "crtc %d vblank wait interrupted by signal\n", pipe); } done: drm_vblank_put(dev, pipe); return ret; } static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); bool high_prec = false; struct drm_pending_vblank_event *e, *t; ktime_t now; u64 seq; assert_spin_locked(&dev->event_lock); seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != pipe) continue; if (!drm_vblank_passed(seq, e->sequence)) continue; drm_dbg_core(dev, "vblank event on %llu, current %llu\n", e->sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); } if (crtc && crtc->funcs->get_vblank_timestamp) high_prec = true; trace_drm_vblank_event(pipe, seq, now, high_prec); } /** * drm_handle_vblank - handle a vblank event * @dev: DRM device * @pipe: index of CRTC where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. * * This is the legacy version of drm_crtc_handle_vblank(). */ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; bool disable_irq; if (drm_WARN_ON_ONCE(dev, !drm_dev_has_vblank(dev))) return false; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return false; spin_lock_irqsave(&dev->event_lock, irqflags); /* Need timestamp lock to prevent concurrent execution with * vblank enable/disable, as this would cause inconsistent * or corrupted timestamps and vblank counts. */ spin_lock(&dev->vblank_time_lock); /* Vblank irq handling disabled. Nothing to do. 
*/ if (!vblank->enabled) { spin_unlock(&dev->vblank_time_lock); spin_unlock_irqrestore(&dev->event_lock, irqflags); return false; } drm_update_vblank_count(dev, pipe, true); spin_unlock(&dev->vblank_time_lock); wake_up(&vblank->queue); /* With instant-off, we defer disabling the interrupt until after * we finish processing the following vblank after all events have * been signaled. The disable has to be last (after * drm_handle_vblank_events) so that the timestamp is always accurate. */ disable_irq = (vblank->config.disable_immediate && vblank->config.offdelay_ms > 0 && !atomic_read(&vblank->refcount)); drm_handle_vblank_events(dev, pipe); drm_handle_vblank_works(vblank); spin_unlock_irqrestore(&dev->event_lock, irqflags); if (disable_irq) vblank_disable_fn(&vblank->disable_timer); return true; } EXPORT_SYMBOL(drm_handle_vblank); /** * drm_crtc_handle_vblank - handle a vblank event * @crtc: where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. * * This is the native KMS version of drm_handle_vblank(). * * Note that for a given vblank counter value drm_crtc_handle_vblank() * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() * provide a barrier: Any writes done before calling * drm_crtc_handle_vblank() will be visible to callers of the later * functions, if the vblank count is the same or a later one. * * See also &drm_vblank_crtc.count. * * Returns: * True if the event was successfully handled, false on failure. */ bool drm_crtc_handle_vblank(struct drm_crtc *crtc) { return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_handle_vblank); /* * Get crtc VBLANK count. * * \param dev DRM device * \param data user argument, pointing to a drm_crtc_get_sequence structure. * \param file_priv drm file private for the user's open file descriptor */ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_vblank_crtc *vblank; int pipe; struct drm_crtc_get_sequence *get_seq = data; ktime_t now; bool vblank_enabled; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; if (!drm_dev_has_vblank(dev)) return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); if (!crtc) return -ENOENT; pipe = drm_crtc_index(crtc); vblank = drm_crtc_vblank_crtc(crtc); vblank_enabled = READ_ONCE(vblank->config.disable_immediate) && READ_ONCE(vblank->enabled); if (!vblank_enabled) { ret = drm_crtc_vblank_get(crtc); if (ret) { drm_dbg_core(dev, "crtc %d failed to acquire vblank counter, %d\n", pipe, ret); return ret; } } drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state) get_seq->active = crtc->state->enable; else get_seq->active = crtc->enabled; drm_modeset_unlock(&crtc->mutex); get_seq->sequence = drm_vblank_count_and_time(dev, pipe, &now); get_seq->sequence_ns = ktime_to_ns(now); if (!vblank_enabled) drm_crtc_vblank_put(crtc); return 0; } /* * Queue a event for VBLANK sequence * * \param dev DRM device * \param data user argument, pointing to a drm_crtc_queue_sequence structure. 
* \param file_priv drm file private for the user's open file descriptor */ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_vblank_crtc *vblank; int pipe; struct drm_crtc_queue_sequence *queue_seq = data; ktime_t now; struct drm_pending_vblank_event *e; u32 flags; u64 seq; u64 req_seq; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; if (!drm_dev_has_vblank(dev)) return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); if (!crtc) return -ENOENT; flags = queue_seq->flags; /* Check valid flag bits */ if (flags & ~(DRM_CRTC_SEQUENCE_RELATIVE| DRM_CRTC_SEQUENCE_NEXT_ON_MISS)) return -EINVAL; pipe = drm_crtc_index(crtc); vblank = drm_crtc_vblank_crtc(crtc); e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) return -ENOMEM; ret = drm_crtc_vblank_get(crtc); if (ret) { drm_dbg_core(dev, "crtc %d failed to acquire vblank counter, %d\n", pipe, ret); goto err_free; } seq = drm_vblank_count_and_time(dev, pipe, &now); req_seq = queue_seq->sequence; if (flags & DRM_CRTC_SEQUENCE_RELATIVE) req_seq += seq; if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && drm_vblank_passed(seq, req_seq)) req_seq = seq + 1; e->pipe = pipe; e->event.base.type = DRM_EVENT_CRTC_SEQUENCE; e->event.base.length = sizeof(e->event.seq); e->event.seq.user_data = queue_seq->user_data; spin_lock_irq(&dev->event_lock); /* * drm_crtc_vblank_off() might have been called after we called * drm_crtc_vblank_get(). drm_crtc_vblank_off() holds event_lock around the * vblank disable, so no need for further locking. The reference from * drm_crtc_vblank_get() protects against vblank disable from another source. */ if (!READ_ONCE(vblank->enabled)) { ret = -EINVAL; goto err_unlock; } ret = drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base); if (ret) goto err_unlock; e->sequence = req_seq; if (drm_vblank_passed(seq, req_seq)) { drm_crtc_vblank_put(crtc); send_vblank_event(dev, e, seq, now); queue_seq->sequence = seq; } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); queue_seq->sequence = req_seq; } spin_unlock_irq(&dev->event_lock); return 0; err_unlock: spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); err_free: kfree(e); return ret; }
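For context, here is a userspace-side sketch of how the queue-sequence ioctl above is typically exercised. This is illustrative only: the crtc_id value and the single blocking read() are assumptions (a robust client would poll() the fd, read into a buffer, and walk the drm_event records checking base.type against DRM_EVENT_CRTC_SEQUENCE before trusting the payload). The ioctl number, flag bits, and event layout are the uapi definitions from <drm/drm.h> that drm_crtc_queue_sequence_ioctl() services.

/* Illustrative client for DRM_IOCTL_CRTC_QUEUE_SEQUENCE (sketch only;
 * assumes the drm uapi headers and an fd with a valid KMS crtc_id). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int wait_next_vblank(int fd, uint32_t crtc_id)
{
	struct drm_crtc_queue_sequence qs;
	struct drm_event_crtc_sequence ev;

	memset(&qs, 0, sizeof(qs));
	qs.crtc_id = crtc_id;
	/* Ask for "current + 1", bumped forward on a miss, mirroring the
	 * DRM_CRTC_SEQUENCE_NEXT_ON_MISS handling in the ioctl above. */
	qs.flags = DRM_CRTC_SEQUENCE_RELATIVE | DRM_CRTC_SEQUENCE_NEXT_ON_MISS;
	qs.sequence = 1;
	qs.user_data = 0;

	if (ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &qs) < 0)
		return -1;

	/* The queued event arrives on the DRM fd once the sequence passes. */
	if (read(fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev))
		return -1;

	printf("vblank sequence %llu at %lld ns\n",
	       (unsigned long long)ev.sequence, (long long)ev.time_ns);
	return 0;
}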
// SPDX-License-Identifier: GPL-2.0-only /* * MCP2221A - Microchip USB to I2C Host Protocol Bridge * * Copyright (c) 2020, Rishi Gupta <gupt21@gmail.com> * * Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf */ #include <linux/module.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/bitfield.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/hid.h> #include <linux/hidraw.h> #include <linux/i2c.h> #include <linux/gpio/driver.h> #include <linux/iio/iio.h> #include "hid-ids.h" /* Command codes in a raw output report */ enum { MCP2221_I2C_WR_DATA = 0x90, MCP2221_I2C_WR_NO_STOP = 0x94, MCP2221_I2C_RD_DATA = 0x91, MCP2221_I2C_RD_RPT_START = 0x93, MCP2221_I2C_GET_DATA = 0x40, MCP2221_I2C_PARAM_OR_STATUS = 0x10, MCP2221_I2C_SET_SPEED = 0x20, MCP2221_I2C_CANCEL = 0x10, MCP2221_GPIO_SET = 0x50, MCP2221_GPIO_GET = 0x51, MCP2221_SET_SRAM_SETTINGS = 0x60, MCP2221_GET_SRAM_SETTINGS = 0x61, MCP2221_READ_FLASH_DATA = 0xb0, }; /* Response codes in a raw input report */ enum { MCP2221_SUCCESS = 0x00, MCP2221_I2C_ENG_BUSY = 0x01, MCP2221_I2C_START_TOUT = 0x12, MCP2221_I2C_STOP_TOUT = 0x62, MCP2221_I2C_WRADDRL_TOUT = 0x23, MCP2221_I2C_WRDATA_TOUT = 0x44, MCP2221_I2C_WRADDRL_NACK = 0x25, MCP2221_I2C_MASK_ADDR_NACK = 0x40, MCP2221_I2C_WRADDRL_SEND = 0x21, MCP2221_I2C_ADDR_NACK = 0x25, MCP2221_I2C_READ_PARTIAL = 0x54, MCP2221_I2C_READ_COMPL = 0x55, MCP2221_ALT_F_NOT_GPIOV = 0xEE, MCP2221_ALT_F_NOT_GPIOD = 0xEF, }; /* MCP GPIO direction encoding */ enum { MCP2221_DIR_OUT = 0x00, MCP2221_DIR_IN = 0x01, }; #define MCP_NGPIO 4 /* MCP GPIO set command layout */ struct mcp_set_gpio { u8 cmd; u8 dummy; struct { u8 change_value; u8 value; u8 change_direction; u8 direction; } gpio[MCP_NGPIO]; } __packed; /* MCP GPIO get command layout */ struct mcp_get_gpio { u8 cmd; u8 dummy; struct { u8 value; u8 direction; } gpio[MCP_NGPIO]; } __packed; /* * There is no way to
distinguish responses. Therefore next command * is sent only after response to previous has been received. Mutex * lock is used for this purpose mainly. */ struct mcp2221 { struct hid_device *hdev; struct i2c_adapter adapter; struct mutex lock; struct completion wait_in_report; struct delayed_work init_work; u8 *rxbuf; u8 txbuf[64]; int rxbuf_idx; int status; u8 cur_i2c_clk_div; struct gpio_chip *gc; u8 gp_idx; u8 gpio_dir; u8 mode[4]; #if IS_REACHABLE(CONFIG_IIO) struct iio_chan_spec iio_channels[3]; u16 adc_values[3]; u8 adc_scale; u8 dac_value; u16 dac_scale; #endif }; struct mcp2221_iio { struct mcp2221 *mcp; }; /* * Default i2c bus clock frequency 400 kHz. Modify this if you * want to set some other frequency (min 50 kHz - max 400 kHz). */ static uint i2c_clk_freq = 400; /* Synchronously send output report to the device */ static int mcp_send_report(struct mcp2221 *mcp, u8 *out_report, size_t len) { u8 *buf; int ret; buf = kmemdup(out_report, len, GFP_KERNEL); if (!buf) return -ENOMEM; /* mcp2221 uses interrupt endpoint for out reports */ ret = hid_hw_output_report(mcp->hdev, buf, len); kfree(buf); if (ret < 0) return ret; return 0; } /* * Send o/p report to the device and wait for i/p report to be * received from the device. If the device does not respond, * we timeout. */ static int mcp_send_data_req_status(struct mcp2221 *mcp, u8 *out_report, int len) { int ret; unsigned long t; reinit_completion(&mcp->wait_in_report); ret = mcp_send_report(mcp, out_report, len); if (ret) return ret; t = wait_for_completion_timeout(&mcp->wait_in_report, msecs_to_jiffies(4000)); if (!t) return -ETIMEDOUT; return mcp->status; } /* Check pass/fail for actual communication with i2c slave */ static int mcp_chk_last_cmd_status(struct mcp2221 *mcp) { memset(mcp->txbuf, 0, 8); mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS; return mcp_send_data_req_status(mcp, mcp->txbuf, 8); } /* Cancels last command releasing i2c bus just in case occupied */ static int mcp_cancel_last_cmd(struct mcp2221 *mcp) { memset(mcp->txbuf, 0, 8); mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS; mcp->txbuf[2] = MCP2221_I2C_CANCEL; return mcp_send_data_req_status(mcp, mcp->txbuf, 8); } /* Check if the last command succeeded or failed and return the result. * If the command did fail, cancel that command which will free the i2c bus. */ static int mcp_chk_last_cmd_status_free_bus(struct mcp2221 *mcp) { int ret; ret = mcp_chk_last_cmd_status(mcp); if (ret) { /* The last command was a failure. * Send a cancel which will also free the bus. */ usleep_range(980, 1000); mcp_cancel_last_cmd(mcp); } return ret; } static int mcp_set_i2c_speed(struct mcp2221 *mcp) { int ret; memset(mcp->txbuf, 0, 8); mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS; mcp->txbuf[3] = MCP2221_I2C_SET_SPEED; mcp->txbuf[4] = mcp->cur_i2c_clk_div; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 8); if (ret) { /* Small delay is needed here */ usleep_range(980, 1000); mcp_cancel_last_cmd(mcp); } return 0; } /* * An output report can contain minimum 1 and maximum 60 user data * bytes. If the number of data bytes is more then 60, we send it * in chunks of 60 bytes. Last chunk may contain exactly 60 or less * bytes. Total number of bytes is informed in very first report to * mcp2221, from that point onwards it first collect all the data * from host and then send to i2c slave device. 
*/ static int mcp_i2c_write(struct mcp2221 *mcp, struct i2c_msg *msg, int type, u8 last_status) { int ret, len, idx, sent; idx = 0; sent = 0; if (msg->len < 60) len = msg->len; else len = 60; do { mcp->txbuf[0] = type; mcp->txbuf[1] = msg->len & 0xff; mcp->txbuf[2] = msg->len >> 8; mcp->txbuf[3] = (u8)(msg->addr << 1); memcpy(&mcp->txbuf[4], &msg->buf[idx], len); ret = mcp_send_data_req_status(mcp, mcp->txbuf, len + 4); if (ret) return ret; usleep_range(980, 1000); if (last_status) { ret = mcp_chk_last_cmd_status_free_bus(mcp); if (ret) return ret; } sent = sent + len; if (sent >= msg->len) break; idx = idx + len; if ((msg->len - sent) < 60) len = msg->len - sent; else len = 60; /* * Testing shows delay is needed between successive writes * otherwise next write fails on first-try from i2c core. * This value is obtained through automated stress testing. */ usleep_range(980, 1000); } while (len > 0); return ret; } /* * Device reads all data (0 - 65535 bytes) from i2c slave device and * stores it in device itself. This data is read back from device to * host in multiples of 60 bytes using input reports. */ static int mcp_i2c_smbus_read(struct mcp2221 *mcp, struct i2c_msg *msg, int type, u16 smbus_addr, u8 smbus_len, u8 *smbus_buf) { int ret; u16 total_len; int retries = 0; mcp->txbuf[0] = type; if (msg) { mcp->txbuf[1] = msg->len & 0xff; mcp->txbuf[2] = msg->len >> 8; mcp->txbuf[3] = (u8)(msg->addr << 1); total_len = msg->len; mcp->rxbuf = msg->buf; } else { mcp->txbuf[1] = smbus_len; mcp->txbuf[2] = 0; mcp->txbuf[3] = (u8)(smbus_addr << 1); total_len = smbus_len; mcp->rxbuf = smbus_buf; } ret = mcp_send_data_req_status(mcp, mcp->txbuf, 4); if (ret) return ret; mcp->rxbuf_idx = 0; do { /* Wait for the data to be read by the device */ usleep_range(980, 1000); memset(mcp->txbuf, 0, 4); mcp->txbuf[0] = MCP2221_I2C_GET_DATA; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); if (ret) { if (retries < 5) { /* The data wasn't ready to read. * Wait a bit longer and try again. 
*/ usleep_range(90, 100); retries++; } else { return ret; } } else { retries = 0; } } while (mcp->rxbuf_idx < total_len); usleep_range(980, 1000); ret = mcp_chk_last_cmd_status_free_bus(mcp); return ret; } static int mcp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) { int ret; struct mcp2221 *mcp = i2c_get_adapdata(adapter); hid_hw_power(mcp->hdev, PM_HINT_FULLON); mutex_lock(&mcp->lock); if (num == 1) { if (msgs->flags & I2C_M_RD) { ret = mcp_i2c_smbus_read(mcp, msgs, MCP2221_I2C_RD_DATA, 0, 0, NULL); } else { ret = mcp_i2c_write(mcp, msgs, MCP2221_I2C_WR_DATA, 1); } if (ret) goto exit; ret = num; } else if (num == 2) { /* Ex transaction; send reg address and read its contents */ if (msgs[0].addr == msgs[1].addr && !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) { ret = mcp_i2c_write(mcp, &msgs[0], MCP2221_I2C_WR_NO_STOP, 0); if (ret) goto exit; ret = mcp_i2c_smbus_read(mcp, &msgs[1], MCP2221_I2C_RD_RPT_START, 0, 0, NULL); if (ret) goto exit; ret = num; } else { dev_err(&adapter->dev, "unsupported multi-msg i2c transaction\n"); ret = -EOPNOTSUPP; } } else { dev_err(&adapter->dev, "unsupported multi-msg i2c transaction\n"); ret = -EOPNOTSUPP; } exit: hid_hw_power(mcp->hdev, PM_HINT_NORMAL); mutex_unlock(&mcp->lock); return ret; } static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr, u8 command, u8 *buf, u8 len, int type, u8 last_status) { int data_len, ret; mcp->txbuf[0] = type; mcp->txbuf[1] = len + 1; /* 1 is due to command byte itself */ mcp->txbuf[2] = 0; mcp->txbuf[3] = (u8)(addr << 1); mcp->txbuf[4] = command; switch (len) { case 0: data_len = 5; break; case 1: mcp->txbuf[5] = buf[0]; data_len = 6; break; case 2: mcp->txbuf[5] = buf[0]; mcp->txbuf[6] = buf[1]; data_len = 7; break; default: if (len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; memcpy(&mcp->txbuf[5], buf, len); data_len = len + 5; } ret = mcp_send_data_req_status(mcp, mcp->txbuf, data_len); if (ret) return ret; if (last_status) { usleep_range(980, 1000); ret = mcp_chk_last_cmd_status_free_bus(mcp); } return ret; } static int mcp_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int ret; struct mcp2221 *mcp = i2c_get_adapdata(adapter); hid_hw_power(mcp->hdev, PM_HINT_FULLON); mutex_lock(&mcp->lock); switch (size) { case I2C_SMBUS_QUICK: if (read_write == I2C_SMBUS_READ) ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_DATA, addr, 0, &data->byte); else ret = mcp_smbus_write(mcp, addr, command, NULL, 0, MCP2221_I2C_WR_DATA, 1); break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_READ) ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_DATA, addr, 1, &data->byte); else ret = mcp_smbus_write(mcp, addr, command, NULL, 0, MCP2221_I2C_WR_DATA, 1); break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_READ) { ret = mcp_smbus_write(mcp, addr, command, NULL, 0, MCP2221_I2C_WR_NO_STOP, 0); if (ret) goto exit; ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_RPT_START, addr, 1, &data->byte); } else { ret = mcp_smbus_write(mcp, addr, command, &data->byte, 1, MCP2221_I2C_WR_DATA, 1); } break; case I2C_SMBUS_WORD_DATA: if (read_write == I2C_SMBUS_READ) { ret = mcp_smbus_write(mcp, addr, command, NULL, 0, MCP2221_I2C_WR_NO_STOP, 0); if (ret) goto exit; ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_RPT_START, addr, 2, (u8 *)&data->word); } else { ret = mcp_smbus_write(mcp, addr, command, (u8 *)&data->word, 2, MCP2221_I2C_WR_DATA, 1); } break; case I2C_SMBUS_BLOCK_DATA: if (read_write == 
I2C_SMBUS_READ) { ret = mcp_smbus_write(mcp, addr, command, NULL, 0, MCP2221_I2C_WR_NO_STOP, 1); if (ret) goto exit; mcp->rxbuf_idx = 0; mcp->rxbuf = data->block; mcp->txbuf[0] = MCP2221_I2C_GET_DATA; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); if (ret) goto exit; } else { if (!data->block[0]) { ret = -EINVAL; goto exit; } ret = mcp_smbus_write(mcp, addr, command, data->block, data->block[0] + 1, MCP2221_I2C_WR_DATA, 1); } break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { ret = mcp_smbus_write(mcp, addr, command, NULL, 0, MCP2221_I2C_WR_NO_STOP, 1); if (ret) goto exit; mcp->rxbuf_idx = 0; mcp->rxbuf = data->block; mcp->txbuf[0] = MCP2221_I2C_GET_DATA; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); if (ret) goto exit; } else { if (!data->block[0]) { ret = -EINVAL; goto exit; } ret = mcp_smbus_write(mcp, addr, command, &data->block[1], data->block[0], MCP2221_I2C_WR_DATA, 1); } break; case I2C_SMBUS_PROC_CALL: ret = mcp_smbus_write(mcp, addr, command, (u8 *)&data->word, 2, MCP2221_I2C_WR_NO_STOP, 0); if (ret) goto exit; ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_RPT_START, addr, 2, (u8 *)&data->word); break; case I2C_SMBUS_BLOCK_PROC_CALL: ret = mcp_smbus_write(mcp, addr, command, data->block, data->block[0] + 1, MCP2221_I2C_WR_NO_STOP, 0); if (ret) goto exit; ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_RPT_START, addr, I2C_SMBUS_BLOCK_MAX, data->block); break; default: dev_err(&mcp->adapter.dev, "unsupported smbus transaction size:%d\n", size); ret = -EOPNOTSUPP; } exit: hid_hw_power(mcp->hdev, PM_HINT_NORMAL); mutex_unlock(&mcp->lock); return ret; } static u32 mcp_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_PEC); } static const struct i2c_algorithm mcp_i2c_algo = { .master_xfer = mcp_i2c_xfer, .smbus_xfer = mcp_smbus_xfer, .functionality = mcp_i2c_func, }; #if IS_REACHABLE(CONFIG_GPIOLIB) static int mcp_gpio_get(struct gpio_chip *gc, unsigned int offset) { int ret; struct mcp2221 *mcp = gpiochip_get_data(gc); mcp->txbuf[0] = MCP2221_GPIO_GET; mcp->gp_idx = offsetof(struct mcp_get_gpio, gpio[offset]); mutex_lock(&mcp->lock); ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); mutex_unlock(&mcp->lock); return ret; } static void mcp_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { struct mcp2221 *mcp = gpiochip_get_data(gc); memset(mcp->txbuf, 0, 18); mcp->txbuf[0] = MCP2221_GPIO_SET; mcp->gp_idx = offsetof(struct mcp_set_gpio, gpio[offset].value); mcp->txbuf[mcp->gp_idx - 1] = 1; mcp->txbuf[mcp->gp_idx] = !!value; mutex_lock(&mcp->lock); mcp_send_data_req_status(mcp, mcp->txbuf, 18); mutex_unlock(&mcp->lock); } static int mcp_gpio_dir_set(struct mcp2221 *mcp, unsigned int offset, u8 val) { memset(mcp->txbuf, 0, 18); mcp->txbuf[0] = MCP2221_GPIO_SET; mcp->gp_idx = offsetof(struct mcp_set_gpio, gpio[offset].direction); mcp->txbuf[mcp->gp_idx - 1] = 1; mcp->txbuf[mcp->gp_idx] = val; return mcp_send_data_req_status(mcp, mcp->txbuf, 18); } static int mcp_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { int ret; struct mcp2221 *mcp = gpiochip_get_data(gc); mutex_lock(&mcp->lock); ret = mcp_gpio_dir_set(mcp, offset, MCP2221_DIR_IN); mutex_unlock(&mcp->lock); return ret; } static int mcp_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) { int ret; struct mcp2221 *mcp = gpiochip_get_data(gc); mutex_lock(&mcp->lock); ret = mcp_gpio_dir_set(mcp, offset, MCP2221_DIR_OUT); 
mutex_unlock(&mcp->lock); /* Can't configure as output, bailout early */ if (ret) return ret; mcp_gpio_set(gc, offset, value); return 0; } static int mcp_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) { int ret; struct mcp2221 *mcp = gpiochip_get_data(gc); mcp->txbuf[0] = MCP2221_GPIO_GET; mcp->gp_idx = offsetof(struct mcp_get_gpio, gpio[offset]); mutex_lock(&mcp->lock); ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); mutex_unlock(&mcp->lock); if (ret) return ret; if (mcp->gpio_dir == MCP2221_DIR_IN) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; } #endif /* Gives current state of i2c engine inside mcp2221 */ static int mcp_get_i2c_eng_state(struct mcp2221 *mcp, u8 *data, u8 idx) { int ret; switch (data[idx]) { case MCP2221_I2C_WRADDRL_NACK: case MCP2221_I2C_WRADDRL_SEND: ret = -ENXIO; break; case MCP2221_I2C_START_TOUT: case MCP2221_I2C_STOP_TOUT: case MCP2221_I2C_WRADDRL_TOUT: case MCP2221_I2C_WRDATA_TOUT: ret = -ETIMEDOUT; break; case MCP2221_I2C_ENG_BUSY: ret = -EAGAIN; break; case MCP2221_SUCCESS: ret = 0x00; break; default: ret = -EIO; } return ret; } /* * MCP2221 uses interrupt endpoint for input reports. This function * is called by HID layer when it receives i/p report from mcp2221, * which is actually a response to the previously sent command. * * MCP2221A firmware specific return codes are parsed and 0 or * appropriate negative error code is returned. Delayed response * results in timeout error and stray reponses results in -EIO. */ static int mcp2221_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { u8 *buf; struct mcp2221 *mcp = hid_get_drvdata(hdev); switch (data[0]) { case MCP2221_I2C_WR_DATA: case MCP2221_I2C_WR_NO_STOP: case MCP2221_I2C_RD_DATA: case MCP2221_I2C_RD_RPT_START: switch (data[1]) { case MCP2221_SUCCESS: mcp->status = 0; break; default: mcp->status = mcp_get_i2c_eng_state(mcp, data, 2); } complete(&mcp->wait_in_report); break; case MCP2221_I2C_PARAM_OR_STATUS: switch (data[1]) { case MCP2221_SUCCESS: if ((mcp->txbuf[3] == MCP2221_I2C_SET_SPEED) && (data[3] != MCP2221_I2C_SET_SPEED)) { mcp->status = -EAGAIN; break; } if (data[20] & MCP2221_I2C_MASK_ADDR_NACK) { mcp->status = -ENXIO; break; } mcp->status = mcp_get_i2c_eng_state(mcp, data, 8); #if IS_REACHABLE(CONFIG_IIO) memcpy(&mcp->adc_values, &data[50], sizeof(mcp->adc_values)); #endif break; default: mcp->status = -EIO; } complete(&mcp->wait_in_report); break; case MCP2221_I2C_GET_DATA: switch (data[1]) { case MCP2221_SUCCESS: if (data[2] == MCP2221_I2C_ADDR_NACK) { mcp->status = -ENXIO; break; } if (!mcp_get_i2c_eng_state(mcp, data, 2) && (data[3] == 0)) { mcp->status = 0; break; } if (data[3] == 127) { mcp->status = -EIO; break; } if (data[2] == MCP2221_I2C_READ_COMPL || data[2] == MCP2221_I2C_READ_PARTIAL) { buf = mcp->rxbuf; memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]); mcp->rxbuf_idx = mcp->rxbuf_idx + data[3]; mcp->status = 0; break; } mcp->status = -EIO; break; default: mcp->status = -EIO; } complete(&mcp->wait_in_report); break; case MCP2221_GPIO_GET: switch (data[1]) { case MCP2221_SUCCESS: if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) || (data[mcp->gp_idx + 1] == MCP2221_ALT_F_NOT_GPIOD)) { mcp->status = -ENOENT; } else { mcp->status = !!data[mcp->gp_idx]; mcp->gpio_dir = data[mcp->gp_idx + 1]; } break; default: mcp->status = -EAGAIN; } complete(&mcp->wait_in_report); break; case MCP2221_GPIO_SET: switch (data[1]) { case MCP2221_SUCCESS: if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) || (data[mcp->gp_idx - 1] == 
MCP2221_ALT_F_NOT_GPIOV)) { mcp->status = -ENOENT; } else { mcp->status = 0; } break; default: mcp->status = -EAGAIN; } complete(&mcp->wait_in_report); break; case MCP2221_SET_SRAM_SETTINGS: switch (data[1]) { case MCP2221_SUCCESS: mcp->status = 0; break; default: mcp->status = -EAGAIN; } complete(&mcp->wait_in_report); break; case MCP2221_GET_SRAM_SETTINGS: switch (data[1]) { case MCP2221_SUCCESS: memcpy(&mcp->mode, &data[22], 4); #if IS_REACHABLE(CONFIG_IIO) mcp->dac_value = data[6] & GENMASK(4, 0); #endif mcp->status = 0; break; default: mcp->status = -EAGAIN; } complete(&mcp->wait_in_report); break; case MCP2221_READ_FLASH_DATA: switch (data[1]) { case MCP2221_SUCCESS: mcp->status = 0; /* Only handles CHIP SETTINGS subpage currently */ if (mcp->txbuf[1] != 0) { mcp->status = -EIO; break; } #if IS_REACHABLE(CONFIG_IIO) { u8 tmp; /* DAC scale value */ tmp = FIELD_GET(GENMASK(7, 6), data[6]); if ((data[6] & BIT(5)) && tmp) mcp->dac_scale = tmp + 4; else mcp->dac_scale = 5; /* ADC scale value */ tmp = FIELD_GET(GENMASK(4, 3), data[7]); if ((data[7] & BIT(2)) && tmp) mcp->adc_scale = tmp - 1; else mcp->adc_scale = 0; } #endif break; default: mcp->status = -EAGAIN; } complete(&mcp->wait_in_report); break; default: mcp->status = -EIO; complete(&mcp->wait_in_report); } return 1; } /* Device resource managed function for HID unregistration */ static void mcp2221_hid_unregister(void *ptr) { struct hid_device *hdev = ptr; hid_hw_close(hdev); hid_hw_stop(hdev); } /* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */ static void mcp2221_remove(struct hid_device *hdev) { #if IS_REACHABLE(CONFIG_IIO) struct mcp2221 *mcp = hid_get_drvdata(hdev); cancel_delayed_work_sync(&mcp->init_work); #endif } #if IS_REACHABLE(CONFIG_IIO) static int mcp2221_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *channel, int *val, int *val2, long mask) { struct mcp2221_iio *priv = iio_priv(indio_dev); struct mcp2221 *mcp = priv->mcp; int ret; if (mask == IIO_CHAN_INFO_SCALE) { if (channel->output) *val = 1 << mcp->dac_scale; else *val = 1 << mcp->adc_scale; return IIO_VAL_INT; } mutex_lock(&mcp->lock); if (channel->output) { *val = mcp->dac_value; ret = IIO_VAL_INT; } else { /* Read ADC values */ ret = mcp_chk_last_cmd_status(mcp); if (!ret) { *val = le16_to_cpu((__force __le16) mcp->adc_values[channel->address]); if (*val >= BIT(10)) ret = -EINVAL; else ret = IIO_VAL_INT; } } mutex_unlock(&mcp->lock); return ret; } static int mcp2221_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct mcp2221_iio *priv = iio_priv(indio_dev); struct mcp2221 *mcp = priv->mcp; int ret; if (val < 0 || val >= BIT(5)) return -EINVAL; mutex_lock(&mcp->lock); memset(mcp->txbuf, 0, 12); mcp->txbuf[0] = MCP2221_SET_SRAM_SETTINGS; mcp->txbuf[4] = BIT(7) | val; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 12); if (!ret) mcp->dac_value = val; mutex_unlock(&mcp->lock); return ret; } static const struct iio_info mcp2221_info = { .read_raw = &mcp2221_read_raw, .write_raw = &mcp2221_write_raw, }; static int mcp_iio_channels(struct mcp2221 *mcp) { int idx, cnt = 0; bool dac_created = false; /* GP0 doesn't have ADC/DAC alternative function */ for (idx = 1; idx < MCP_NGPIO; idx++) { struct iio_chan_spec *chan = &mcp->iio_channels[cnt]; switch (mcp->mode[idx]) { case 2: chan->address = idx - 1; chan->channel = cnt++; break; case 3: /* GP1 doesn't have DAC alternative function */ if (idx == 1 || dac_created) continue; /* DAC1 and DAC2 outputs are connected 
to the same DAC */ dac_created = true; chan->output = 1; cnt++; break; default: continue; } chan->type = IIO_VOLTAGE; chan->indexed = 1; chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE); chan->scan_index = -1; } return cnt; } static void mcp_init_work(struct work_struct *work) { struct iio_dev *indio_dev; struct mcp2221 *mcp = container_of(work, struct mcp2221, init_work.work); struct mcp2221_iio *data; static int retries = 5; int ret, num_channels; hid_hw_power(mcp->hdev, PM_HINT_FULLON); mutex_lock(&mcp->lock); mcp->txbuf[0] = MCP2221_GET_SRAM_SETTINGS; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); if (ret == -EAGAIN) goto reschedule_task; num_channels = mcp_iio_channels(mcp); if (!num_channels) goto unlock; mcp->txbuf[0] = MCP2221_READ_FLASH_DATA; mcp->txbuf[1] = 0; ret = mcp_send_data_req_status(mcp, mcp->txbuf, 2); if (ret == -EAGAIN) goto reschedule_task; indio_dev = devm_iio_device_alloc(&mcp->hdev->dev, sizeof(*data)); if (!indio_dev) goto unlock; data = iio_priv(indio_dev); data->mcp = mcp; indio_dev->name = "mcp2221"; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &mcp2221_info; indio_dev->channels = mcp->iio_channels; indio_dev->num_channels = num_channels; devm_iio_device_register(&mcp->hdev->dev, indio_dev); unlock: mutex_unlock(&mcp->lock); hid_hw_power(mcp->hdev, PM_HINT_NORMAL); return; reschedule_task: mutex_unlock(&mcp->lock); hid_hw_power(mcp->hdev, PM_HINT_NORMAL); if (!retries--) return; /* Device is not ready to read SRAM or FLASH data, try again */ schedule_delayed_work(&mcp->init_work, msecs_to_jiffies(100)); } #endif static int mcp2221_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct mcp2221 *mcp; mcp = devm_kzalloc(&hdev->dev, sizeof(*mcp), GFP_KERNEL); if (!mcp) return -ENOMEM; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "can't parse reports\n"); return ret; } /* * This driver uses the .raw_event callback and therefore does not need any * HID_CONNECT_xxx flags. 
 */ ret = hid_hw_start(hdev, 0); if (ret) { hid_err(hdev, "can't start hardware\n"); return ret; } hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8, hdev->version & 0xff, hdev->name, hdev->phys); ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "can't open device\n"); hid_hw_stop(hdev); return ret; } mutex_init(&mcp->lock); init_completion(&mcp->wait_in_report); hid_set_drvdata(hdev, mcp); mcp->hdev = hdev; ret = devm_add_action_or_reset(&hdev->dev, mcp2221_hid_unregister, hdev); if (ret) return ret; hid_device_io_start(hdev); /* Set I2C bus clock divisor */ if (i2c_clk_freq > 400) i2c_clk_freq = 400; if (i2c_clk_freq < 50) i2c_clk_freq = 50; mcp->cur_i2c_clk_div = (12000000 / (i2c_clk_freq * 1000)) - 3; ret = mcp_set_i2c_speed(mcp); if (ret) { hid_err(hdev, "can't set i2c speed: %d\n", ret); return ret; } mcp->adapter.owner = THIS_MODULE; mcp->adapter.class = I2C_CLASS_HWMON; mcp->adapter.algo = &mcp_i2c_algo; mcp->adapter.retries = 1; mcp->adapter.dev.parent = &hdev->dev; ACPI_COMPANION_SET(&mcp->adapter.dev, ACPI_COMPANION(hdev->dev.parent)); snprintf(mcp->adapter.name, sizeof(mcp->adapter.name), "MCP2221 usb-i2c bridge"); i2c_set_adapdata(&mcp->adapter, mcp); ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter); if (ret) { hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret); return ret; } #if IS_REACHABLE(CONFIG_GPIOLIB) /* Setup GPIO chip */ mcp->gc = devm_kzalloc(&hdev->dev, sizeof(*mcp->gc), GFP_KERNEL); if (!mcp->gc) return -ENOMEM; mcp->gc->label = "mcp2221_gpio"; mcp->gc->direction_input = mcp_gpio_direction_input; mcp->gc->direction_output = mcp_gpio_direction_output; mcp->gc->get_direction = mcp_gpio_get_direction; mcp->gc->set = mcp_gpio_set; mcp->gc->get = mcp_gpio_get; mcp->gc->ngpio = MCP_NGPIO; mcp->gc->base = -1; mcp->gc->can_sleep = 1; mcp->gc->parent = &hdev->dev; ret = devm_gpiochip_add_data(&hdev->dev, mcp->gc, mcp); if (ret) return ret; #endif #if IS_REACHABLE(CONFIG_IIO) INIT_DELAYED_WORK(&mcp->init_work, mcp_init_work); schedule_delayed_work(&mcp->init_work, msecs_to_jiffies(100)); #endif return 0; } static const struct hid_device_id mcp2221_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_MCP2221) }, { } }; MODULE_DEVICE_TABLE(hid, mcp2221_devices); static struct hid_driver mcp2221_driver = { .name = "mcp2221", .id_table = mcp2221_devices, .probe = mcp2221_probe, .remove = mcp2221_remove, .raw_event = mcp2221_raw_event, }; /* Register with HID core */ module_hid_driver(mcp2221_driver); MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>"); MODULE_DESCRIPTION("MCP2221 Microchip HID USB to I2C master bridge"); MODULE_LICENSE("GPL v2");
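As a quick sanity check of the clock arithmetic in mcp2221_probe(), the standalone sketch below evaluates the same divider formula across the supported 50-400 kHz range; it is illustration only, with the 12 MHz constant taken directly from the expression in the probe path above.

/* Standalone illustration of the divider formula used in mcp2221_probe() */
#include <stdio.h>

int main(void)
{
	const unsigned int freqs_khz[] = { 50, 100, 400 };

	for (unsigned int i = 0; i < sizeof(freqs_khz) / sizeof(freqs_khz[0]); i++) {
		unsigned int khz = freqs_khz[i];
		/* cur_i2c_clk_div = (12000000 / freq_in_hz) - 3 */
		unsigned int div = (12000000 / (khz * 1000)) - 3;

		printf("%3u kHz -> divider %u\n", khz, div);
	}
	return 0;
}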
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BITMAP_H #define __LINUX_BITMAP_H #ifndef __ASSEMBLY__ #include <linux/align.h> #include <linux/bitops.h> #include <linux/cleanup.h> #include <linux/errno.h> #include <linux/find.h> #include <linux/limits.h> #include <linux/string.h> #include <linux/types.h>
#include <linux/bitmap-str.h> struct device; /* * bitmaps provide bit arrays that consume one or more unsigned * longs. The bitmap interface and available operations are listed * here, in bitmap.h * * Function implementations generic to all architectures are in * lib/bitmap.c. Functions implementations that are architecture * specific are in various arch/<arch>/include/asm/bitops.h headers * and other arch/<arch> specific files. * * See lib/bitmap.c for more details. */ /** * DOC: bitmap overview * * The available bitmap operations and their rough meaning in the * case that the bitmap is a single unsigned long are thus: * * The generated code is more efficient when nbits is known at * compile-time and at most BITS_PER_LONG. * * :: * * bitmap_zero(dst, nbits) *dst = 0UL * bitmap_fill(dst, nbits) *dst = ~0UL * bitmap_copy(dst, src, nbits) *dst = *src * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) * bitmap_complement(dst, src, nbits) *dst = ~(*src) * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? * bitmap_empty(src, nbits) Are all bits zero in *src? * bitmap_full(src, nbits) Are all bits set in *src? * bitmap_weight(src, nbits) Hamming Weight: number set bits * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap * bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap * bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest * bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask) * bitmap_scatter(dst, src, mask, nbits) *dst = map(dense, sparse)(src) * bitmap_gather(dst, src, mask, nbits) *dst = map(sparse, dense)(src) * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst * bitmap_get_value8(map, start) Get 8bit value from map at start * bitmap_set_value8(map, value, start) Set 8bit value to map at start * bitmap_read(map, start, 
nbits) Read an nbits-sized value from * map at start * bitmap_write(map, value, start, nbits) Write an nbits-sized value to * map at start * * Note, bitmap_zero() and bitmap_fill() operate over the region of * unsigned longs, that is, bits behind bitmap till the unsigned long * boundary will be zeroed or filled as well. Consider to use * bitmap_clear() or bitmap_set() to make explicit zeroing or filling * respectively. */ /** * DOC: bitmap bitops * * Also the following operations in asm/bitops.h apply to bitmaps.:: * * set_bit(bit, addr) *addr |= bit * clear_bit(bit, addr) *addr &= ~bit * change_bit(bit, addr) *addr ^= bit * test_bit(bit, addr) Is bit set in *addr? * test_and_set_bit(bit, addr) Set bit and return old value * test_and_clear_bit(bit, addr) Clear bit and return old value * test_and_change_bit(bit, addr) Change bit and return old value * find_first_zero_bit(addr, nbits) Position first zero bit in *addr * find_first_bit(addr, nbits) Position first set bit in *addr * find_next_zero_bit(addr, nbits, bit) * Position next zero bit in *addr >= bit * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit * find_next_and_bit(addr1, addr2, nbits, bit) * Same as find_next_bit, but in * (*addr1 & *addr2) * */ /** * DOC: declare bitmap * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used * to declare an array named 'name' of just enough unsigned longs to * contain all bit positions from 0 to 'bits' - 1. */ /* * Allocation and deallocation of bitmap. * Provided in lib/bitmap.c to avoid circular dependency. */ unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node); unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node); void bitmap_free(const unsigned long *bitmap); DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T)) /* Managed variants of the above. 
*/ unsigned long *devm_bitmap_alloc(struct device *dev, unsigned int nbits, gfp_t flags); unsigned long *devm_bitmap_zalloc(struct device *dev, unsigned int nbits, gfp_t flags); /* * lib/bitmap.c provides these functions: */ bool __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); bool __pure __bitmap_or_equal(const unsigned long *src1, const unsigned long *src2, const unsigned long *src3, unsigned int nbits); void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits); void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, unsigned int shift, unsigned int nbits); void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, unsigned int shift, unsigned int nbits); void bitmap_cut(unsigned long *dst, const unsigned long *src, unsigned int first, unsigned int cut, unsigned int nbits); bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_replace(unsigned long *dst, const unsigned long *old, const unsigned long *new, const unsigned long *mask, unsigned int nbits); bool __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); bool __bitmap_subset(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); unsigned int __bitmap_weight_and(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_set(unsigned long *map, unsigned int start, int len); void __bitmap_clear(unsigned long *map, unsigned int start, int len); unsigned long bitmap_find_next_zero_area_off(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, unsigned long align_mask, unsigned long align_offset); /** * bitmap_find_next_zero_area - find a contiguous aligned zero area * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @align_mask: Alignment mask for zero area * * The @align_mask should be one less than a power of 2; the effect is that * the bit offset of all zero areas this function finds is multiples of that * power of 2. A @align_mask of 0 means no alignment is required. 
*/ static __always_inline unsigned long bitmap_find_next_zero_area(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, unsigned long align_mask) { return bitmap_find_next_zero_area_off(map, size, start, nr, align_mask, 0); } void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) #define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE) static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = 0; else memset(dst, 0, len); } static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = ~0UL; else memset(dst, 0xff, len); } static __always_inline void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits) { unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = *src; else memcpy(dst, src, len); } /* * Copy bitmap and clear tail bits in last word. */ static __always_inline void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) { bitmap_copy(dst, src, nbits); if (nbits % BITS_PER_LONG) dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); } static inline void bitmap_copy_and_extend(unsigned long *to, const unsigned long *from, unsigned int count, unsigned int size) { unsigned int copy = BITS_TO_LONGS(count); memcpy(to, from, copy * sizeof(long)); if (count % BITS_PER_LONG) to[copy - 1] &= BITMAP_LAST_WORD_MASK(count); memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long)); } /* * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64 * machines the order of hi and lo parts of numbers match the bitmap structure. * In both cases conversion is not needed when copying data from/to arrays of * u32. But in LE64 case, typecast in bitmap_copy_clear_tail() may lead * to out-of-bound access. To avoid that, both LE and BE variants of 64-bit * architectures are not using bitmap_copy_clear_tail(). */ #if BITS_PER_LONG == 64 void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits); #else #define bitmap_from_arr32(bitmap, buf, nbits) \ bitmap_copy_clear_tail((unsigned long *) (bitmap), \ (const unsigned long *) (buf), (nbits)) #define bitmap_to_arr32(buf, bitmap, nbits) \ bitmap_copy_clear_tail((unsigned long *) (buf), \ (const unsigned long *) (bitmap), (nbits)) #endif /* * On 64-bit systems bitmaps are represented as u64 arrays internally. So, * the conversion is not needed when copying data from/to arrays of u64. 
*/ #if BITS_PER_LONG == 32 void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits); void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits); #else #define bitmap_from_arr64(bitmap, buf, nbits) \ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits)) #define bitmap_to_arr64(buf, bitmap, nbits) \ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits)) #endif static __always_inline bool bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; return __bitmap_and(dst, src1, src2, nbits); } static __always_inline void bitmap_or(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = *src1 | *src2; else __bitmap_or(dst, src1, src2, nbits); } static __always_inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = *src1 ^ *src2; else __bitmap_xor(dst, src1, src2, nbits); } static __always_inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; return __bitmap_andnot(dst, src1, src2, nbits); } static __always_inline void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = ~(*src); else __bitmap_complement(dst, src, nbits); } #ifdef __LITTLE_ENDIAN #define BITMAP_MEM_ALIGNMENT 8 #else #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long)) #endif #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) static __always_inline bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) && IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) return !memcmp(src1, src2, nbits / 8); return __bitmap_equal(src1, src2, nbits); } /** * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third * @src1: Pointer to bitmap 1 * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1 * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2 * @nbits: number of bits in each of these bitmaps * * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise */ static __always_inline bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2, const unsigned long *src3, unsigned int nbits) { if (!small_const_nbits(nbits)) return __bitmap_or_equal(src1, src2, src3, nbits); return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits)); } static __always_inline bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; else return __bitmap_intersects(src1, src2, nbits); } static __always_inline bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return ! 
((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_subset(src1, src2, nbits); } static __always_inline bool bitmap_empty(const unsigned long *src, unsigned nbits) { if (small_const_nbits(nbits)) return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); return find_first_bit(src, nbits) == nbits; } static __always_inline bool bitmap_full(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); return find_first_zero_bit(src, nbits) == nbits; } static __always_inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } static __always_inline unsigned long bitmap_weight_and(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight_and(src1, src2, nbits); } static __always_inline unsigned long bitmap_weight_andnot(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight_andnot(src1, src2, nbits); } static __always_inline void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits) { if (__builtin_constant_p(nbits) && nbits == 1) __set_bit(start, map); else if (small_const_nbits(start + nbits)) *map |= GENMASK(start + nbits - 1, start); else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && __builtin_constant_p(nbits & BITMAP_MEM_MASK) && IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) memset((char *)map + start / 8, 0xff, nbits / 8); else __bitmap_set(map, start, nbits); } static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits) { if (__builtin_constant_p(nbits) && nbits == 1) __clear_bit(start, map); else if (small_const_nbits(start + nbits)) *map &= ~GENMASK(start + nbits - 1, start); else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && __builtin_constant_p(nbits & BITMAP_MEM_MASK) && IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) memset((char *)map + start / 8, 0, nbits / 8); else __bitmap_clear(map, start, nbits); } static __always_inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, unsigned int shift, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; else __bitmap_shift_right(dst, src, shift, nbits); } static __always_inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src, unsigned int shift, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits); else __bitmap_shift_left(dst, src, shift, nbits); } static __always_inline void bitmap_replace(unsigned long *dst, const unsigned long *old, const unsigned long *new, const unsigned long *mask, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = (*old & ~(*mask)) | (*new & *mask); else __bitmap_replace(dst, old, new, mask, nbits); } /** * bitmap_scatter - Scatter a bitmap according to the given mask * @dst: scattered bitmap * @src: gathered bitmap * @mask: mask representing bits to assign to in the scattered bitmap * @nbits: number of bits in each of these bitmaps * * Scatters bitmap with sequential bits according to the given @mask. 
* * Example: * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302. * * Or in binary form * @src @mask @dst * 0000000001011010 0001001100010011 0000001100000010 * * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12) * * A more 'visual' description of the operation:: * * src: 0000000001011010 * |||||| * +------+||||| * | +----+|||| * | |+----+||| * | || +-+|| * | || | || * mask: ...v..vv...v..vv * ...0..11...0..10 * dst: 0000001100000010 * * A relationship exists between bitmap_scatter() and bitmap_gather(). * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation. * See bitmap_scatter() for details related to this relationship. */ static __always_inline void bitmap_scatter(unsigned long *dst, const unsigned long *src, const unsigned long *mask, unsigned int nbits) { unsigned int n = 0; unsigned int bit; bitmap_zero(dst, nbits); for_each_set_bit(bit, mask, nbits) __assign_bit(bit, dst, test_bit(n++, src)); } /** * bitmap_gather - Gather a bitmap according to given mask * @dst: gathered bitmap * @src: scattered bitmap * @mask: mask representing bits to extract from in the scattered bitmap * @nbits: number of bits in each of these bitmaps * * Gathers bitmap with sparse bits according to the given @mask. * * Example: * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a. * * Or in binary form * @src @mask @dst * 0000001100000010 0001001100010011 0000000000011010 * * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5) * * A more 'visual' description of the operation:: * * mask: ...v..vv...v..vv * src: 0000001100000010 * ^ ^^ ^ 0 * | || | 10 * | || > 010 * | |+--> 1010 * | +--> 11010 * +----> 011010 * dst: 0000000000011010 * * A relationship exists between bitmap_gather() and bitmap_scatter(). See * bitmap_scatter() for the bitmap scatter detailed operations. * Suppose scattered computed using bitmap_scatter(scattered, src, mask, n). * The operation bitmap_gather(result, scattered, mask, n) leads to a result * equal or equivalent to src. * * The result can be 'equivalent' because bitmap_scatter() and bitmap_gather() * are not bijective. * The result and src values are equivalent in that sense that a call to * bitmap_scatter(res, src, mask, n) and a call to * bitmap_scatter(res, result, mask, n) will lead to the same res value. */ static __always_inline void bitmap_gather(unsigned long *dst, const unsigned long *src, const unsigned long *mask, unsigned int nbits) { unsigned int n = 0; unsigned int bit; bitmap_zero(dst, nbits); for_each_set_bit(bit, mask, nbits) __assign_bit(n++, dst, test_bit(bit, src)); } static __always_inline void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs, unsigned int *re, unsigned int end) { *rs = find_next_bit(bitmap, end, *rs); *re = find_next_zero_bit(bitmap, end, *rs + 1); } /** * bitmap_release_region - release allocated bitmap region * @bitmap: array of unsigned longs corresponding to the bitmap * @pos: beginning of bit region to release * @order: region size (log base 2 of number of bits) to release * * This is the complement to __bitmap_find_free_region() and releases * the found region (by clearing it in the bitmap). 
static __always_inline
void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
			    unsigned int *re, unsigned int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}

/**
 * bitmap_release_region - release allocated bitmap region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @pos: beginning of bit region to release
 * @order: region size (log base 2 of number of bits) to release
 *
 * This is the complement to __bitmap_find_free_region() and releases
 * the found region (by clearing it in the bitmap).
 */
static __always_inline
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
	bitmap_clear(bitmap, pos, BIT(order));
}

/**
 * bitmap_allocate_region - allocate bitmap region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @pos: beginning of bit region to allocate
 * @order: region size (log base 2 of number of bits) to allocate
 *
 * Allocate (set bits in) a specified region of a bitmap.
 *
 * Returns: 0 on success, or %-EBUSY if specified region wasn't
 * free (not all bits were zero).
 */
static __always_inline
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
	unsigned int len = BIT(order);

	if (find_next_bit(bitmap, pos + len, pos) < pos + len)
		return -EBUSY;

	bitmap_set(bitmap, pos, len);
	return 0;
}

/**
 * bitmap_find_free_region - find a contiguous aligned mem region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @bits: number of bits in the bitmap
 * @order: region size (log base 2 of number of bits) to find
 *
 * Find a region of free (zero) bits in a @bitmap of @bits bits and
 * allocate them (set them to one).  Only consider regions of length
 * a power (@order) of two, aligned to that power of two, which
 * makes the search algorithm much faster.
 *
 * Returns: the bit offset in bitmap of the allocated region,
 * or -errno on failure.
 */
static __always_inline
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
	unsigned int pos, end;		/* scans bitmap by regions of size order */

	for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
		if (!bitmap_allocate_region(bitmap, pos, order))
			return pos;
	}
	return -ENOMEM;
}

/**
 * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
 * @n: u64 value
 *
 * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
 * integers in a 32-bit environment, and 64-bit integers in a 64-bit one.
 *
 * There are four combinations of endianness and length of the word in linux
 * ABIs: LE64, BE64, LE32 and BE32.
 *
 * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
 * bitmaps and therefore don't require any special handling.
 *
 * On 32-bit kernels the 32-bit LE ABI orders the lo word of a 64-bit number
 * in memory prior to the hi word, and 32-bit BE orders hi prior to lo.  The
 * bitmap on the other hand is represented as an array of 32-bit words and
 * the position of bit N may therefore be calculated as: word #(N/32) and
 * bit #(N%32) in that word.  For example, bit #42 is located at the 10th
 * position of the 2nd word.  This matches the 32-bit LE ABI, and we can
 * simply let the compiler store 64-bit values in memory as it usually does.
 * But for BE we need to swap hi and lo words manually.
 *
 * With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi
 * and lo parts of u64.  For LE32 it does nothing, and for a BE environment
 * it swaps hi and lo words, as is expected by bitmap.
 */
#if __BITS_PER_LONG == 64
#define BITMAP_FROM_U64(n) (n)
#else
#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
				((unsigned long) ((u64)(n) >> 32))
#endif
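/*
 * Editor's note: a hypothetical sketch of the power-of-two region API
 * above, e.g. for a small descriptor allocator.  Names and sizes are
 * illustrative only.
 */
static int bitmap_region_demo(void)
{
	DECLARE_BITMAP(pool, 64);
	int pos;

	bitmap_zero(pool, 64);

	/* grab an aligned run of 2^3 = 8 bits */
	pos = bitmap_find_free_region(pool, 64, 3);
	if (pos < 0)
		return pos;	/* -ENOMEM: no aligned free region left */

	/* grabbing the same region again must fail */
	WARN_ON(bitmap_allocate_region(pool, pos, 3) != -EBUSY);

	bitmap_release_region(pool, pos, 3);
	return 0;
}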
/**
 * bitmap_from_u64 - Check and swap words within u64.
 * @mask: source bitmap
 * @dst: destination bitmap
 *
 * On a 32-bit big-endian kernel, reading a u64 @mask through a u32 pointer
 * yields the wrong word first: ``((u32 *)&val)[0]`` returns the upper
 * 32 bits, while the bitmap layout expects the lower 32 bits first.
 */
static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
	bitmap_from_arr64(dst, &mask, 64);
}

/**
 * bitmap_read - read a value of n-bits from the memory region
 * @map: address to the bitmap memory region
 * @start: bit offset of the n-bit value
 * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
 *
 * Returns: value of @nbits bits located at the @start bit offset within the
 * @map memory region.  For @nbits = 0 and @nbits > BITS_PER_LONG the return
 * value is undefined.
 */
static __always_inline
unsigned long bitmap_read(const unsigned long *map, unsigned long start,
			  unsigned long nbits)
{
	size_t index = BIT_WORD(start);
	unsigned long offset = start % BITS_PER_LONG;
	unsigned long space = BITS_PER_LONG - offset;
	unsigned long value_low, value_high;

	if (unlikely(!nbits || nbits > BITS_PER_LONG))
		return 0;

	if (space >= nbits)
		return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);

	value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
	value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
	return (value_low >> offset) | (value_high << space);
}

/**
 * bitmap_write - write n-bit value within a memory region
 * @map: address to the bitmap memory region
 * @value: value to write, clamped to nbits
 * @start: bit offset of the n-bit value
 * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
 *
 * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(),
 * i.e. bits beyond @nbits are ignored:
 *
 *	for (bit = 0; bit < nbits; bit++)
 *		__assign_bit(start + bit, bitmap, val & BIT(bit));
 *
 * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
 */
static __always_inline
void bitmap_write(unsigned long *map, unsigned long value,
		  unsigned long start, unsigned long nbits)
{
	size_t index;
	unsigned long offset;
	unsigned long space;
	unsigned long mask;
	bool fit;

	if (unlikely(!nbits || nbits > BITS_PER_LONG))
		return;

	mask = BITMAP_LAST_WORD_MASK(nbits);
	value &= mask;
	offset = start % BITS_PER_LONG;
	space = BITS_PER_LONG - offset;
	fit = space >= nbits;
	index = BIT_WORD(start);

	map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
	map[index] |= value << offset;

	if (fit)
		return;

	map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
	map[index + 1] |= (value >> space);
}

#define bitmap_get_value8(map, start)			\
	bitmap_read(map, start, BITS_PER_BYTE)
#define bitmap_set_value8(map, value, start)		\
	bitmap_write(map, value, start, BITS_PER_BYTE)

#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
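/*
 * Editor's note: a hypothetical sketch (appended after the header for
 * illustration) of bitmap_read()/bitmap_write() on a packed field that
 * straddles a word boundary.  The function name is illustrative only.
 */
static void bitmap_rw_demo(void)
{
	DECLARE_BITMAP(map, 128);
	unsigned long v;

	bitmap_zero(map, 128);

	/* write an 11-bit value spanning bits 60..70, crossing a word */
	bitmap_write(map, 0x5a5, 60, 11);

	v = bitmap_read(map, 60, 11);
	WARN_ON(v != 0x5a5);	/* the value was clamped to 11 bits on write */
}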
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
 */

#include <linux/can/dev.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

#include "slcan.h"

static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0)
	"err-rst-on-open",
};

static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, slcan_priv_flags_strings,
		       sizeof(slcan_priv_flags_strings));
	}
}

static u32 slcan_get_priv_flags(struct net_device *ndev)
{
	u32 flags = 0;

	if (slcan_err_rst_on_open(ndev))
		flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN;

	return flags;
}

static int slcan_set_priv_flags(struct net_device *ndev, u32 flags)
{
	bool err_rst_op_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN);

	return slcan_enable_err_rst_on_open(ndev, err_rst_op_open);
}

static int slcan_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(slcan_priv_flags_strings);
	default:
		return -EOPNOTSUPP;
	}
}

const struct ethtool_ops slcan_ethtool_ops = {
	.get_strings = slcan_get_strings,
	.get_priv_flags = slcan_get_priv_flags,
	.set_priv_flags = slcan_set_priv_flags,
	.get_sset_count = slcan_get_sset_count,
	.get_ts_info = ethtool_op_get_ts_info,
};
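/*
 * Editor's note: these private flags surface through the standard ethtool
 * ioctls, so (assuming an slcan interface named can0 -- the name is
 * illustrative) the flag defined above can be queried and toggled from
 * userspace roughly like this:
 *
 *   ethtool --show-priv-flags can0
 *   ethtool --set-priv-flags can0 err-rst-on-open on
 *
 * "err-rst-on-open" matches slcan_priv_flags_strings[] above.
 */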
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating.  Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * Returns 0 on success, or a negative errno otherwise.
 */
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
			       struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees.  Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio.  Returns 0
 * on success, or a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
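/*
 * Editor's note: a hypothetical consumer-side sketch of the notifier API
 * declared above.  The callback, notifier block and comments are
 * illustrative only and are not part of this header.
 */
static int demo_clk_rate_cb(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* stop work that depends on ndata->old_rate */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* reprogram dividers/timeouts for ndata->new_rate */
		return NOTIFY_OK;
	case ABORT_RATE_CHANGE:
		/* the change failed; ndata->old_rate is still in effect */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block demo_clk_nb = {
	.notifier_call = demo_clk_rate_cb,
};

/* a driver would then call: clk_notifier_register(clk, &demo_clk_nb); */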
/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

/**
 * clk_is_match - check if two clks point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node.  Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise.  Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get
 * @dev: device the exclusivity is bound to
 * @clk: clock source
 *
 * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler
 * on @dev to call clk_rate_exclusive_put().
 *
 * Must not be called from within atomic context.
 */
int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * got from clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int devm_clk_notifier_register(struct device *dev,
					     struct clk *clk,
					     struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline int devm_clk_rate_exclusive_get(struct device *dev,
					      struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

#endif

#ifdef CONFIG_HAVE_CLK_PREPARE
/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
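/*
 * Editor's note: a hypothetical sketch of the rate-exclusivity pattern
 * documented above.  clk_set_rate() is declared elsewhere in this header;
 * the function name and the 100 MHz rate are illustrative only.
 */
static int demo_lock_rate(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* may sleep */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 100000000);	/* 100 MHz, illustrative */
	if (ret)
		clk_rate_exclusive_put(clk);	/* balance the get on error */

	/* on success the rate stays locked until clk_rate_exclusive_put() */
	return ret;
}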
/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to the power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}

static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return false;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation.  If any of the clk cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
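/*
 * Editor's note: a hypothetical sketch of the classic consumer lifecycle
 * built from the calls above (clk_enable(), clk_disable(), clk_put() and
 * friends are declared further down in this header).  The device, clock ID
 * and function name are illustrative only.
 */
static int demo_clk_consumer(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "bus");		/* "bus" is an illustrative ID */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);			/* may sleep */
	if (ret)
		goto put;

	ret = clk_enable(clk);			/* safe in atomic context */
	if (ret)
		goto unprepare;

	/* ... use the device; clk_get_rate(clk) is valid here ... */

	clk_disable(clk);
unprepare:
	clk_unprepare(clk);
put:
	clk_put(clk);
	return ret;
}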
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation.  If any of the clk cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer.  In this case, instead of returning -ENOENT, the function returns
 * NULL for given clk.  It is assumed all clocks in clk_bulk_data are optional.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully or for any clk there was no clk provider available, otherwise
 * returns valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);
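/*
 * Editor's note: a hypothetical probe-time sketch of the managed bulk API
 * above.  The clock IDs and the function name are illustrative only.
 */
static int demo_bulk_probe(struct device *dev)
{
	struct clk_bulk_data clks[] = {		/* illustrative consumer IDs */
		{ .id = "axi" },
		{ .id = "ahb" },
	};
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	ret = clk_bulk_enable(ARRAY_SIZE(clks), clks);
	if (ret)
		clk_bulk_unprepare(ARRAY_SIZE(clks), clks);

	/* devm unwinds the get; prepare/enable must still be balanced */
	return ret;
}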
/**
 * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get all clocks of the
 * consumer and enables them in one operation with management.
 * The clks will automatically be disabled and freed when the device
 * is unbound.
 */
int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
					       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared.  Drivers must however assume
 * that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the device
 * is unbound from the bus.
 */
struct clk *devm_clk_get_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_enabled(struct device *dev, const char *id);

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *			   clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get().
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
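/*
 * Editor's note: a hypothetical sketch contrasting the managed getters
 * above.  The consumer IDs and the function name are illustrative only.
 */
static int demo_devm_getters(struct device *dev)
{
	struct clk *core, *aux;

	/* mandatory clock: acquired, prepared and enabled in one call */
	core = devm_clk_get_enabled(dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* optional clock: NULL (a valid dummy clk) if not described */
	aux = devm_clk_get_optional(dev, "aux");
	if (IS_ERR(aux))
		return PTR_ERR(aux);

	/* no explicit put/disable needed: devm unwinds on unbind */
	return 0;
}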
/**
 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_prepared().
 *
 * The returned clk (if valid) is prepared.  Drivers must however
 * assume that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the
 * device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
 *				   clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_enabled().
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
 *					     clk_set_rate() +
 *					     clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 * @rate: new clock rate
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  If no such clk is found, it returns NULL
 * which serves as a dummy clk.  That's the only difference compared
 * to devm_clk_get_enabled().
 *
 * The returned clk (if valid) has had its rate set, and is prepared
 * and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
						    const char *id,
						    unsigned long rate);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock can not be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);
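/*
 * Editor's note: a hypothetical sketch of the atomic-context gating pair
 * above.  The clock must already be prepared; the spinlock and the
 * function name are illustrative only.
 */
static void demo_gate_in_atomic(struct clk *clk, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!clk_enable(clk)) {		/* clk_enable() is atomic-safe */
		/* ... briefly touch the gated hardware ... */
		clk_disable(clk);	/* must balance clk_enable() */
	}
	spin_unlock_irqrestore(lock, flags);
}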
/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk