Total coverage: 459919 of 1821107 lines (26%)
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM power

#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H

#include <linux/cpufreq.h>
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#define TPS(x)  tracepoint_string(x)

DECLARE_EVENT_CLASS(cpu,

	TP_PROTO(unsigned int state, unsigned int cpu_id),

	TP_ARGS(state, cpu_id),

	TP_STRUCT__entry(
		__field(u32, state)
		__field(u32, cpu_id)
	),

	TP_fast_assign(
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),

	TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state,
		  (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(cpu, cpu_idle,

	TP_PROTO(unsigned int state, unsigned int cpu_id),

	TP_ARGS(state, cpu_id)
);

TRACE_EVENT(cpu_idle_miss,

	TP_PROTO(unsigned int cpu_id, unsigned int state, bool below),

	TP_ARGS(cpu_id, state, below),

	TP_STRUCT__entry(
		__field(u32, cpu_id)
		__field(u32, state)
		__field(bool, below)
	),

	TP_fast_assign(
		__entry->cpu_id = cpu_id;
		__entry->state = state;
		__entry->below = below;
	),

	TP_printk("cpu_id=%lu state=%lu type=%s",
		  (unsigned long)__entry->cpu_id,
		  (unsigned long)__entry->state,
		  (__entry->below) ? "below" : "above")
);

DECLARE_EVENT_CLASS(psci_domain_idle,

	TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),

	TP_ARGS(cpu_id, state, s2idle),

	TP_STRUCT__entry(
		__field(u32, cpu_id)
		__field(u32, state)
		__field(bool, s2idle)
	),

	TP_fast_assign(
		__entry->cpu_id = cpu_id;
		__entry->state = state;
		__entry->s2idle = s2idle;
	),

	TP_printk("cpu_id=%lu state=0x%lx is_s2idle=%s",
		  (unsigned long)__entry->cpu_id,
		  (unsigned long)__entry->state,
		  (__entry->s2idle) ? "yes" : "no")
);

DEFINE_EVENT(psci_domain_idle, psci_domain_idle_enter,
	TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
	TP_ARGS(cpu_id, state, s2idle)
);

DEFINE_EVENT(psci_domain_idle, psci_domain_idle_exit,
	TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
	TP_ARGS(cpu_id, state, s2idle)
);

TRACE_EVENT(powernv_throttle,

	TP_PROTO(int chip_id, const char *reason, int pmax),

	TP_ARGS(chip_id, reason, pmax),

	TP_STRUCT__entry(
		__field(int, chip_id)
		__string(reason, reason)
		__field(int, pmax)
	),

	TP_fast_assign(
		__entry->chip_id = chip_id;
		__assign_str(reason);
		__entry->pmax = pmax;
	),

	TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
		  __entry->pmax, __get_str(reason))
);

TRACE_EVENT(pstate_sample,

	TP_PROTO(u32 core_busy,
		 u32 scaled_busy,
		 u32 from,
		 u32 to,
		 u64 mperf,
		 u64 aperf,
		 u64 tsc,
		 u32 freq,
		 u32 io_boost),

	TP_ARGS(core_busy,
		scaled_busy,
		from,
		to,
		mperf,
		aperf,
		tsc,
		freq,
		io_boost),

	TP_STRUCT__entry(
		__field(u32, core_busy)
		__field(u32, scaled_busy)
		__field(u32, from)
		__field(u32, to)
		__field(u64, mperf)
		__field(u64, aperf)
		__field(u64, tsc)
		__field(u32, freq)
		__field(u32, io_boost)
	),

	TP_fast_assign(
		__entry->core_busy = core_busy;
		__entry->scaled_busy = scaled_busy;
		__entry->from = from;
		__entry->to = to;
		__entry->mperf = mperf;
		__entry->aperf = aperf;
		__entry->tsc = tsc;
		__entry->freq = freq;
		__entry->io_boost = io_boost;
	),

	TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu io_boost=%lu",
		  (unsigned long)__entry->core_busy,
		  (unsigned long)__entry->scaled_busy,
		  (unsigned long)__entry->from,
		  (unsigned long)__entry->to,
		  (unsigned long long)__entry->mperf,
		  (unsigned long long)__entry->aperf,
		  (unsigned long long)__entry->tsc,
		  (unsigned long)__entry->freq,
		  (unsigned long)__entry->io_boost)
);

/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
#define _PWR_EVENT_AVOID_DOUBLE_DEFINING

#define PWR_EVENT_EXIT -1
#endif

#define pm_verb_symbolic(event) \
	__print_symbolic(event, \
		{ PM_EVENT_SUSPEND, "suspend" }, \
		{ PM_EVENT_RESUME, "resume" }, \
		{ PM_EVENT_FREEZE, "freeze" }, \
		{ PM_EVENT_QUIESCE, "quiesce" }, \
		{ PM_EVENT_HIBERNATE, "hibernate" }, \
		{ PM_EVENT_THAW, "thaw" }, \
		{ PM_EVENT_RESTORE, "restore" }, \
		{ PM_EVENT_RECOVER, "recover" })

DEFINE_EVENT(cpu, cpu_frequency,

	TP_PROTO(unsigned int frequency, unsigned int cpu_id),

	TP_ARGS(frequency, cpu_id)
);

TRACE_EVENT(cpu_frequency_limits,

	TP_PROTO(struct cpufreq_policy *policy),

	TP_ARGS(policy),

	TP_STRUCT__entry(
		__field(u32, min_freq)
		__field(u32, max_freq)
		__field(u32, cpu_id)
	),

	TP_fast_assign(
		__entry->min_freq = policy->min;
		__entry->max_freq = policy->max;
		__entry->cpu_id = policy->cpu;
	),

	TP_printk("min=%lu max=%lu cpu_id=%lu",
		  (unsigned long)__entry->min_freq,
		  (unsigned long)__entry->max_freq,
		  (unsigned long)__entry->cpu_id)
);

TRACE_EVENT(device_pm_callback_start,

	TP_PROTO(struct device *dev, const char *pm_ops, int event),

	TP_ARGS(dev, pm_ops, event),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__string(parent, dev->parent ? dev_name(dev->parent) : "none")
		__string(pm_ops, pm_ops ?
			 pm_ops : "none ")
		__field(int, event)
	),

	TP_fast_assign(
		__assign_str(device);
		__assign_str(driver);
		__assign_str(parent);
		__assign_str(pm_ops);
		__entry->event = event;
	),

	TP_printk("%s %s, parent: %s, %s[%s]", __get_str(driver),
		  __get_str(device), __get_str(parent), __get_str(pm_ops),
		  pm_verb_symbolic(__entry->event))
);

TRACE_EVENT(device_pm_callback_end,

	TP_PROTO(struct device *dev, int error),

	TP_ARGS(dev, error),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__field(int, error)
	),

	TP_fast_assign(
		__assign_str(device);
		__assign_str(driver);
		__entry->error = error;
	),

	TP_printk("%s %s, err=%d",
		  __get_str(driver), __get_str(device), __entry->error)
);

TRACE_EVENT(suspend_resume,

	TP_PROTO(const char *action, int val, bool start),

	TP_ARGS(action, val, start),

	TP_STRUCT__entry(
		__field(const char *, action)
		__field(int, val)
		__field(bool, start)
	),

	TP_fast_assign(
		__entry->action = action;
		__entry->val = val;
		__entry->start = start;
	),

	TP_printk("%s[%u] %s", __entry->action, (unsigned int)__entry->val,
		  (__entry->start) ? "begin" : "end")
);

DECLARE_EVENT_CLASS(wakeup_source,

	TP_PROTO(const char *name, unsigned int state),

	TP_ARGS(name, state),

	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->state = state;
	),

	TP_printk("%s state=0x%lx", __get_str(name),
		  (unsigned long)__entry->state)
);

DEFINE_EVENT(wakeup_source, wakeup_source_activate,

	TP_PROTO(const char *name, unsigned int state),

	TP_ARGS(name, state)
);

DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,

	TP_PROTO(const char *name, unsigned int state),

	TP_ARGS(name, state)
);

/*
 * The power domain events are used for power domains transitions
 */
DECLARE_EVENT_CLASS(power_domain,

	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),

	TP_ARGS(name, state, cpu_id),

	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
		__field(u64, cpu_id)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),

	TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
		  (unsigned long)__entry->state,
		  (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(power_domain, power_domain_target,

	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),

	TP_ARGS(name, state, cpu_id)
);

/*
 * CPU latency QoS events used for global CPU latency QoS list updates
 */
DECLARE_EVENT_CLASS(cpu_latency_qos_request,

	TP_PROTO(s32 value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(s32, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("CPU_DMA_LATENCY value=%d", __entry->value)
);

DEFINE_EVENT(cpu_latency_qos_request, pm_qos_add_request,

	TP_PROTO(s32 value),

	TP_ARGS(value)
);

DEFINE_EVENT(cpu_latency_qos_request, pm_qos_update_request,

	TP_PROTO(s32 value),

	TP_ARGS(value)
);

DEFINE_EVENT(cpu_latency_qos_request, pm_qos_remove_request,

	TP_PROTO(s32 value),

	TP_ARGS(value)
);

/*
 * General PM QoS events used for updates of PM QoS request lists
 */
DECLARE_EVENT_CLASS(pm_qos_update,

	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),

	TP_ARGS(action, prev_value, curr_value),

	TP_STRUCT__entry(
		__field(enum pm_qos_req_action, action)
		__field(int, prev_value)
		__field(int, curr_value)
	),

	TP_fast_assign(
		__entry->action = action;
		__entry->prev_value = prev_value;
		__entry->curr_value = curr_value;
	),

	TP_printk("action=%s prev_value=%d curr_value=%d",
		  __print_symbolic(__entry->action,
				   { PM_QOS_ADD_REQ,    "ADD_REQ" },
				   { PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
				   { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
		  __entry->prev_value, __entry->curr_value)
);

DEFINE_EVENT(pm_qos_update, pm_qos_update_target,

	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),

	TP_ARGS(action, prev_value, curr_value)
);

DEFINE_EVENT_PRINT(pm_qos_update, pm_qos_update_flags,

	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),

	TP_ARGS(action, prev_value, curr_value),

	TP_printk("action=%s prev_value=0x%x curr_value=0x%x",
		  __print_symbolic(__entry->action,
				   { PM_QOS_ADD_REQ,    "ADD_REQ" },
				   { PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
				   { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
		  __entry->prev_value, __entry->curr_value)
);

DECLARE_EVENT_CLASS(dev_pm_qos_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value),

	TP_STRUCT__entry(
		__string(name, name)
		__field(enum dev_pm_qos_req_type, type)
		__field(s32, new_value)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->type = type;
		__entry->new_value = new_value;
	),

	TP_printk("device=%s type=%s new_value=%d",
		  __get_str(name),
		  __print_symbolic(__entry->type,
			{ DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" },
			{ DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
		  __entry->new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value)
);

TRACE_EVENT(guest_halt_poll_ns,

	TP_PROTO(bool grow, unsigned int new, unsigned int old),

	TP_ARGS(grow, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow = grow;
		__entry->new = new;
		__entry->old = old;
	),

	TP_printk("halt_poll_ns %u (%s %u)",
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

#define trace_guest_halt_poll_ns_grow(new, old) \
	trace_guest_halt_poll_ns(true, new, old)
#define trace_guest_halt_poll_ns_shrink(new, old) \
	trace_guest_halt_poll_ns(false, new, old)

#endif /* _TRACE_POWER_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
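The events in this header are fired through the trace_<event>() helpers that the TRACE_EVENT machinery generates. As a minimal illustrative sketch (not part of the original header; the real call sites live in the cpuidle and cpufreq cores), a kernel compilation unit would instantiate the tracepoints with CREATE_TRACE_POINTS and then call the generated helpers. The example_enter_idle() function below is hypothetical:

/* Illustrative sketch only: shows how a kernel .c file would emit the
 * cpu_idle event declared above. The actual emitters are elsewhere in
 * the kernel; this hypothetical file exists only for demonstration.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/power.h>

static void example_enter_idle(unsigned int state, unsigned int cpu)
{
	trace_cpu_idle(state, cpu);		/* logs "state=... cpu_id=..." */
	/* ... enter the hardware idle state ... */
	trace_cpu_idle(PWR_EVENT_EXIT, cpu);	/* conventional exit marker */
}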
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* I2C message transfer tracepoints
 *
 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i2c

#if !defined(_TRACE_I2C_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_I2C_H

#include <linux/i2c.h>
#include <linux/tracepoint.h>

/*
 * drivers/i2c/i2c-core-base.c
 */
extern int i2c_transfer_trace_reg(void);
extern void i2c_transfer_trace_unreg(void);

/*
 * __i2c_transfer() write request
 */
TRACE_EVENT_FN(i2c_write,
	TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
		 int num),
	TP_ARGS(adap, msg, num),
	TP_STRUCT__entry(
		__field(int,   adapter_nr)
		__field(__u16, msg_nr)
		__field(__u16, addr)
		__field(__u16, flags)
		__field(__u16, len)
		__dynamic_array(__u8, buf, msg->len)),
	TP_fast_assign(
		__entry->adapter_nr = adap->nr;
		__entry->msg_nr = num;
		__entry->addr = msg->addr;
		__entry->flags = msg->flags;
		__entry->len = msg->len;
		memcpy(__get_dynamic_array(buf), msg->buf, msg->len);
	),
	TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]",
		  __entry->adapter_nr, __entry->msg_nr, __entry->addr,
		  __entry->flags, __entry->len,
		  __entry->len, __get_dynamic_array(buf)),
	i2c_transfer_trace_reg, i2c_transfer_trace_unreg);

/*
 * __i2c_transfer() read request
 */
TRACE_EVENT_FN(i2c_read,
	TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
		 int num),
	TP_ARGS(adap, msg, num),
	TP_STRUCT__entry(
		__field(int,   adapter_nr)
		__field(__u16, msg_nr)
		__field(__u16, addr)
		__field(__u16, flags)
		__field(__u16, len)),
	TP_fast_assign(
		__entry->adapter_nr = adap->nr;
		__entry->msg_nr = num;
		__entry->addr = msg->addr;
		__entry->flags = msg->flags;
		__entry->len = msg->len;
	),
	TP_printk("i2c-%d #%u a=%03x f=%04x l=%u",
		  __entry->adapter_nr, __entry->msg_nr, __entry->addr,
		  __entry->flags, __entry->len),
	i2c_transfer_trace_reg, i2c_transfer_trace_unreg);

/*
 * __i2c_transfer() read reply
 */
TRACE_EVENT_FN(i2c_reply,
	TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
		 int num),
	TP_ARGS(adap, msg, num),
	TP_STRUCT__entry(
		__field(int,   adapter_nr)
		__field(__u16, msg_nr)
		__field(__u16, addr)
		__field(__u16, flags)
		__field(__u16, len)
		__dynamic_array(__u8, buf, msg->len)),
	TP_fast_assign(
		__entry->adapter_nr = adap->nr;
		__entry->msg_nr = num;
		__entry->addr = msg->addr;
		__entry->flags = msg->flags;
		__entry->len = msg->len;
		memcpy(__get_dynamic_array(buf), msg->buf, msg->len);
	),
	TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]",
		  __entry->adapter_nr, __entry->msg_nr, __entry->addr,
		  __entry->flags, __entry->len,
		  __entry->len, __get_dynamic_array(buf)),
	i2c_transfer_trace_reg, i2c_transfer_trace_unreg);

/*
 * __i2c_transfer() result
 */
TRACE_EVENT_FN(i2c_result,
	TP_PROTO(const struct i2c_adapter *adap, int num, int ret),
	TP_ARGS(adap, num, ret),
	TP_STRUCT__entry(
		__field(int,   adapter_nr)
		__field(__u16, nr_msgs)
		__field(__s16, ret)),
	TP_fast_assign(
		__entry->adapter_nr = adap->nr;
		__entry->nr_msgs = num;
		__entry->ret = ret;
	),
	TP_printk("i2c-%d n=%u ret=%d",
		  __entry->adapter_nr,
		  __entry->nr_msgs, __entry->ret),
	i2c_transfer_trace_reg, i2c_transfer_trace_unreg);

#endif /* _TRACE_I2C_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
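For context, these four events bracket message transfers in drivers/i2c/i2c-core-base.c: one i2c_write or i2c_read per message before the transfer, one i2c_reply per completed read, and a final i2c_result. The sketch below is a hedged, simplified rendering of that call pattern, not the verbatim kernel implementation (the real __i2c_transfer() also handles locking, retries, adapter quirks and a static key guarding the tracepoints; example_i2c_transfer() is hypothetical):

/* Simplified, illustrative sketch of the tracing pattern around an
 * I2C transfer.
 */
static int example_i2c_transfer(struct i2c_adapter *adap,
				struct i2c_msg *msgs, int num)
{
	int i, ret;

	/* one request event per message, by direction */
	for (i = 0; i < num; i++) {
		if (msgs[i].flags & I2C_M_RD)
			trace_i2c_read(adap, &msgs[i], i);
		else
			trace_i2c_write(adap, &msgs[i], i);
	}

	ret = adap->algo->master_xfer(adap, msgs, num);

	/* one reply event per successfully transferred read message */
	for (i = 0; i < ret; i++)
		if (msgs[i].flags & I2C_M_RD)
			trace_i2c_reply(adap, &msgs[i], i);

	trace_i2c_result(adap, num, ret);	/* overall outcome */
	return ret;
}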
// SPDX-License-Identifier: GPL-2.0-or-later

#define pr_fmt(fmt) "ref_tracker: " fmt

#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024

struct ref_tracker {
	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
	bool			dead;
	depot_stack_handle_t	alloc_stack_handle;
	depot_stack_handle_t	free_stack_handle;
};

struct ref_tracker_dir_stats {
	int total;
	int count;
	struct {
		depot_stack_handle_t stack_handle;
		unsigned int count;
	} stacks[];
};

static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
	struct ref_tracker_dir_stats *stats;
	struct ref_tracker *tracker;

	stats = kmalloc(struct_size(stats, stacks, limit),
			GFP_NOWAIT | __GFP_NOWARN);
	if (!stats)
		return ERR_PTR(-ENOMEM);
	stats->total = 0;
	stats->count = 0;

	list_for_each_entry(tracker, &dir->list, head) {
		depot_stack_handle_t stack = tracker->alloc_stack_handle;
		int i;

		++stats->total;
		for (i = 0; i < stats->count; ++i)
			if (stats->stacks[i].stack_handle == stack)
				break;
		if (i >= limit)
			continue;
		if (i >= stats->count) {
			stats->stacks[i].stack_handle = stack;
			stats->stacks[i].count = 0;
			++stats->count;
		}
		++stats->stacks[i].count;
	}

	return stats;
}

struct ostream {
	char *buf;
	int size, used;
};

#define pr_ostream(stream, fmt, args...) \
({ \
	struct ostream *_s = (stream); \
 \
	if (!_s->buf) { \
		pr_err(fmt, ##args); \
	} else { \
		int ret, len = _s->size - _s->used; \
		ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
		_s->used += min(ret, len); \
	} \
})

static void
__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
			     unsigned int display_limit, struct ostream *s)
{
	struct ref_tracker_dir_stats *stats;
	unsigned int i = 0, skipped;
	depot_stack_handle_t stack;
	char *sbuf;

	lockdep_assert_held(&dir->lock);

	if (list_empty(&dir->list))
		return;

	stats = ref_tracker_get_stats(dir, display_limit);
	if (IS_ERR(stats)) {
		pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
			   dir->name, dir, stats);
		return;
	}

	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);

	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
		stack = stats->stacks[i].stack_handle;
		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
			sbuf[0] = 0;
		pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
			   stats->stacks[i].count, stats->total, sbuf);
		skipped -= stats->stacks[i].count;
	}

	if (skipped)
		pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
			   dir->name, dir, skipped, stats->total);

	kfree(sbuf);

	kfree(stats);
}

void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
				  unsigned int display_limit)
{
	struct ostream os = {};

	__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
}
EXPORT_SYMBOL(ref_tracker_dir_print_locked);

void ref_tracker_dir_print(struct ref_tracker_dir *dir,
			   unsigned int display_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	ref_tracker_dir_print_locked(dir, display_limit);
	spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);

int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
{
	struct ostream os = { .buf = buf, .size = size };
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	__ref_tracker_dir_pr_ostream(dir, 16, &os);
	spin_unlock_irqrestore(&dir->lock, flags);

	return os.used;
}
EXPORT_SYMBOL(ref_tracker_dir_snprint);

void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	if (!list_empty(&dir->list)) {
		ref_tracker_dir_print_locked(dir, 16);
		leak = true;
		list_for_each_entry_safe(tracker, n, &dir->list, head) {
			list_del(&tracker->head);
			kfree(tracker);
		}
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);

int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp | __GFP_NOWARN;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);

int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries,
					GFP_NOWAIT | __GFP_NOWARN);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine,
					   struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
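A hedged usage sketch of the API above: ref_tracker_dir_init() and the quarantine depth come from <linux/ref_tracker.h> (its exact signature may differ across kernel versions), and the "foo" object is hypothetical. Each successful get pairs a ref_tracker_alloc() with a later ref_tracker_free(); ref_tracker_dir_exit() warns and prints the saved allocation stacks if any reference leaked.

/* Illustrative consumer of the ref_tracker API; "foo" is made up. */
struct foo {
	struct ref_tracker_dir refs;
};

static void foo_init(struct foo *f)
{
	/* 16 is an arbitrary quarantine depth for catching double-frees */
	ref_tracker_dir_init(&f->refs, 16, "foo");
}

static int foo_get(struct foo *f, struct ref_tracker **trackerp)
{
	/* records the caller's stack against this reference */
	return ref_tracker_alloc(&f->refs, trackerp, GFP_KERNEL);
}

static void foo_put(struct foo *f, struct ref_tracker **trackerp)
{
	/* complains loudly if this reference was already released */
	ref_tracker_free(&f->refs, trackerp);
}

static void foo_exit(struct foo *f)
{
	/* reports any still-held references, with allocation stacks */
	ref_tracker_dir_exit(&f->refs);
}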
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ:
    Efficient Packet Scheduling with Tight Bandwidth Distribution
    Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ divides also aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS decimal bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
	one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
                                 ^.__grp->index = 0
                                 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax=1<<MTU_SHIFT, w_min = 1 .
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and  V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
  so we can do F = S + len * inv_w * wsum.
  We use W_TOT in the formulas so we can easily move between
  static and adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */

/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
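 *
 * (Mnemonics, following the QFQ paper [2] and not spelled out in the
 * original comment: E/I stand for eligible/ineligible and R/B for
 * ready/blocked, i.e. ER = eligible-ready, IR = ineligible-ready,
 * EB = eligible-blocked and IB = ineligible-blocked.)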
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};

struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32 class_weight; /* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int lmax;

	u32 inv_w;	/* ONE_FP/(sum of weights of classes in aggr.). */
	u32 budgetmax;	/* Max budget for this aggregate. */
	u32 initial_budget, budget;	/* Initial and current budget. */

	int num_classes;		/* Number of classes in this aggr. */
	struct list_head active;	/* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	struct Qdisc_class_hash clhash;

	u64 oldV, V;				/* Precise virtual times. */
	struct qfq_aggregate *in_serv_agg;	/* Aggregate being served. */
	u32 wsum;				/* weight sum */
	u32 iwsum;				/* inverse weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs;	/* Aggs with room for more classes. */
};

/*
 * Possible reasons why the timestamps of an aggregate are updated
 * enqueue: the aggregate switches from idle to active and must be scheduled
 *	    for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static bool cl_is_active(struct qfq_class *cl)
{
	return !list_empty(&cl->alist);
}

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static const struct netlink_range_validation lmax_range = {
	.min = QFQ_MIN_LMAX,
	.max = QFQ_MAX_LMAX,
};

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
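 *
 * Illustrative worked example (numbers not from the original comment,
 * assuming min_slot_shift = FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX
 * = 30 + 16 - 24 = 22, its value at qdisc init): for weight = 4
 * (inv_w = 2^28) and maxlen = 2048 = 2^11, slot_size = 2^39 and
 * size_map = 2^39 >> 22 = 2^17, so __fls() returns 17 and index
 * starts at 18; slot_size is an exact power of two, so the correction
 * below subtracts 1 and the final index is 17.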
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}

static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;

	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}


/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	/* The next assignment may let
	 * agg->initial_budget > agg->budgetmax
	 * hold, we will take it into account in charge_actual_service().
	 */
	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);
	q->iwsum = ONE_FP / q->wsum;

	agg->num_classes = new_num_classes;
}

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	hlist_del_init(&agg->nonfull_next);
	q->wsum -= agg->class_weight;
	if (q->wsum != 0)
		q->iwsum = ONE_FP / q->wsum;

	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate.
 */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			  u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg;

	/* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
	if (lmax > QFQ_MAX_LMAX)
		return -EINVAL;

	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
		NL_SET_ERR_MSG_MOD(extack, "missing options");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
					  qfq_policy, extack);
	if (err < 0)
		return err;

	weight = nla_get_u32_default(tb[TCA_QFQ_WEIGHT], 1);

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
	} else {
		/* MTU size is user controlled */
		lmax = psched_mtu(qdisc_dev(sch));
		if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU size out of bounds for qfq");
			return -EINVAL;
		}
	}

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ?
			    cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "total weight out of range (%d + %u)",
				       delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	cl->common.classid = classid;
	cl->deficit = lmax;
	INIT_LIST_HEAD(&cl->alist);

	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL,
					&cl->rate_est,
					NULL,
					true,
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	if (cl->qdisc != &noop_qdisc)
		qdisc_hash_add(cl->qdisc, true);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	else
		qdisc_class_hash_insert(&q->clhash, &cl->common);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);
	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_put(cl->qdisc);
	kfree(cl);
	return err;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (qdisc_class_in_use(&cl->common)) {
		NL_SET_ERR_MSG_MOD(extack, "QFQ class in use");
		return -EBUSY;
	}

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	qfq_destroy_class(sch, cl);
	return 0;
}

static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)qfq_find_class(sch, classid);
}

static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl)
		qdisc_class_get(&cl->common);

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	qdisc_class_put(&cl->common);
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb,
			  struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    qdisc_qstats_copy(d, cl->qdisc) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}

/* Calculate a mask to mimic what would be ffs_from().
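 * For example (illustrative, not from the original comment):
 * mask_from(0b101101, 3) clears the three low bits and returns
 * 0b101000, so a subsequent __ffs() only sees bit positions >= 3.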
 */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}


/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask;
		int last_flip_pos = fls(vslot ^ old_vslot);

		if (last_flip_pos > 31) /* higher than the number of groups */
			mask = ~0UL; /* make all groups eligible */
		else
			mask = (1UL << last_flip_pos) - 1;

		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}

/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated.
 * In fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would have
 * however waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * used to handle also the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}

/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time ?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
				   struct qfq_class *cl, unsigned int len)
{
	struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);

	if (!skb)
		return NULL;

	cl->deficit -= (int) len;

	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
		list_del_init(&cl->alist);
	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
		cl->deficit += agg->lmax;
		list_move_tail(&cl->alist, &agg->active);
	}

	return skb;
}

static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
					   struct qfq_class **cl,
					   unsigned int *len)
{
	struct sk_buff *skb;

	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
	if (skb == NULL)
		qdisc_warn_nonwc("qfq_dequeue", (*cl)->qdisc);
	else
		*len = qdisc_pkt_len(skb);

	return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
	/* Compute the service received by the aggregate, taking into
	 * account that, after decreasing the number of classes in
	 * agg, it may happen that
	 * agg->initial_budget - agg->budget > agg->budgetmax
	 */
	u32 service_received = min(agg->budgetmax,
				   agg->initial_budget - agg->budget);

	agg->F = agg->S + (u64)service_received * agg->inv_w;
}

/* Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}

/* Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
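 *
 * Illustrative numbers (not from the original comment): an aggregate
 * of two classes, each with weight 3 and lmax 1514, has
 * budgetmax = 2 * 1514 = 3028 and inv_w = ONE_FP/6, so F is placed
 * budgetmax * inv_w = 3028 * ONE_FP / 6 (roughly 505 in units of
 * ONE_FP) after S.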
*/ static inline void qfq_update_agg_ts(struct qfq_sched *q, struct qfq_aggregate *agg, enum update_reason reason) { if (reason != requeue) qfq_update_start(q, agg); else /* just charge agg for the service received */ agg->S = agg->F; agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w; } static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); static struct sk_buff *qfq_dequeue(struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); struct qfq_aggregate *in_serv_agg = q->in_serv_agg; struct qfq_class *cl; struct sk_buff *skb = NULL; /* next-packet len, 0 means no more active classes in in-service agg */ unsigned int len = 0; if (in_serv_agg == NULL) return NULL; if (!list_empty(&in_serv_agg->active)) skb = qfq_peek_skb(in_serv_agg, &cl, &len); /* * If there are no active classes in the in-service aggregate, * or if the aggregate has not enough budget to serve its next * class, then choose the next aggregate to serve. */ if (len == 0 || in_serv_agg->budget < len) { charge_actual_service(in_serv_agg); /* recharge the budget of the aggregate */ in_serv_agg->initial_budget = in_serv_agg->budget = in_serv_agg->budgetmax; if (!list_empty(&in_serv_agg->active)) { /* * Still active: reschedule for * service. Possible optimization: if no other * aggregate is active, then there is no point * in rescheduling this aggregate, and we can * just keep it as the in-service one. This * should be however a corner case, and to * handle it, we would need to maintain an * extra num_active_aggs field. */ qfq_update_agg_ts(q, in_serv_agg, requeue); qfq_schedule_agg(q, in_serv_agg); } else if (sch->q.qlen == 0) { /* no aggregate to serve */ q->in_serv_agg = NULL; return NULL; } /* * If we get here, there are other aggregates queued: * choose the new aggregate to serve. */ in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q); skb = qfq_peek_skb(in_serv_agg, &cl, &len); } if (!skb) return NULL; sch->q.qlen--; skb = agg_dequeue(in_serv_agg, cl, len); if (!skb) { sch->q.qlen++; return NULL; } qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); /* If lmax is lowered, through qfq_change_class, for a class * owning pending packets with larger size than the new value * of lmax, then the following condition may hold. 
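 * For example (hypothetical numbers): if lmax drops from 1500 to 500,
 * the recharged budget can be at most 500 while a still-queued 1500-byte
 * packet is dequeued; budget - len would then go negative (underflowing
 * the counter), which is why the budget is clamped to zero below.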
*/ if (unlikely(in_serv_agg->budget < len)) in_serv_agg->budget = 0; else in_serv_agg->budget -= len; q->V += (u64)len * q->iwsum; pr_debug("qfq dequeue: len %u F %lld now %lld\n", len, (unsigned long long) in_serv_agg->F, (unsigned long long) q->V); return skb; } static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) { struct qfq_group *grp; struct qfq_aggregate *agg, *new_front_agg; u64 old_F; qfq_update_eligible(q); q->oldV = q->V; if (!q->bitmaps[ER]) return NULL; grp = qfq_ffs(q, q->bitmaps[ER]); old_F = grp->F; agg = qfq_slot_head(grp); /* agg starts to be served, remove it from schedule */ qfq_front_slot_remove(grp); new_front_agg = qfq_slot_scan(grp); if (new_front_agg == NULL) /* group is now inactive, remove from ER */ __clear_bit(grp->index, &q->bitmaps[ER]); else { u64 roundedS = qfq_round_down(new_front_agg->S, grp->slot_shift); unsigned int s; if (grp->S == roundedS) return agg; grp->S = roundedS; grp->F = roundedS + (2ULL << grp->slot_shift); __clear_bit(grp->index, &q->bitmaps[ER]); s = qfq_calc_state(q, grp); __set_bit(grp->index, &q->bitmaps[s]); } qfq_unblock_groups(q, grp->index, old_F); return agg; } static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb), gso_segs; struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; struct qfq_aggregate *agg; int err = 0; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { if (err & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); __qdisc_drop(skb, to_free); return err; } pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); if (unlikely(cl->agg->lmax < len)) { pr_debug("qfq: increasing maxpkt from %u to %u for class %u", cl->agg->lmax, len, cl->common.classid); err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); if (err) { cl->qstats.drops++; return qdisc_drop(skb, sch, to_free); } } gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { cl->qstats.drops++; qdisc_qstats_drop(sch); } return err; } _bstats_update(&cl->bstats, len, gso_segs); sch->qstats.backlog += len; ++sch->q.qlen; agg = cl->agg; /* if the class is active, then done here */ if (cl_is_active(cl)) { if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && list_first_entry(&agg->active, struct qfq_class, alist) == cl && cl->deficit < len) list_move_tail(&cl->alist, &agg->active); return err; } /* schedule class for service within the aggregate */ cl->deficit = agg->lmax; list_add_tail(&cl->alist, &agg->active); if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || q->in_serv_agg == agg) return err; /* non-empty or in service, nothing else to do */ qfq_activate_agg(q, agg, enqueue); return err; } /* * Schedule aggregate according to its timestamps. */ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) { struct qfq_group *grp = agg->grp; u64 roundedS; int s; roundedS = qfq_round_down(agg->S, grp->slot_shift); /* * Insert agg in the correct bucket. * If agg->S >= grp->S we don't need to adjust the * bucket list and simply go to the insertion phase. * Otherwise grp->S is decreasing, we must make room * in the bucket list, and also recompute the group state. * Finally, if there were no flows in this group and nobody * was in ER make sure to adjust V. 
*/ if (grp->full_slots) { if (!qfq_gt(grp->S, agg->S)) goto skip_update; /* create a slot for this agg->S */ qfq_slot_rotate(grp, roundedS); /* group was surely ineligible, remove */ __clear_bit(grp->index, &q->bitmaps[IR]); __clear_bit(grp->index, &q->bitmaps[IB]); } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && q->in_serv_agg == NULL) q->V = roundedS; grp->S = roundedS; grp->F = roundedS + (2ULL << grp->slot_shift); s = qfq_calc_state(q, grp); __set_bit(grp->index, &q->bitmaps[s]); pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n", s, q->bitmaps[s], (unsigned long long) agg->S, (unsigned long long) agg->F, (unsigned long long) q->V); skip_update: qfq_slot_insert(grp, agg, roundedS); } /* Update agg ts and schedule agg for service */ static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, enum update_reason reason) { agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ qfq_update_agg_ts(q, agg, reason); if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ q->in_serv_agg = agg; /* start serving this aggregate */ /* update V: to be in service, agg must be eligible */ q->oldV = q->V = agg->S; } else if (agg != q->in_serv_agg) qfq_schedule_agg(q, agg); } static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, struct qfq_aggregate *agg) { unsigned int i, offset; u64 roundedS; roundedS = qfq_round_down(agg->S, grp->slot_shift); offset = (roundedS - grp->S) >> grp->slot_shift; i = (grp->front + offset) % QFQ_MAX_SLOTS; hlist_del(&agg->next); if (hlist_empty(&grp->slots[i])) __clear_bit(offset, &grp->full_slots); } /* * Called to forcibly deschedule an aggregate. If the aggregate is * not in the front bucket, or if the latter has other aggregates in * the front bucket, we can simply remove the aggregate with no other * side effects. * Otherwise we must propagate the event up. 
*/ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) { struct qfq_group *grp = agg->grp; unsigned long mask; u64 roundedS; int s; if (agg == q->in_serv_agg) { charge_actual_service(agg); q->in_serv_agg = qfq_choose_next_agg(q); return; } agg->F = agg->S; qfq_slot_remove(q, grp, agg); if (!grp->full_slots) { __clear_bit(grp->index, &q->bitmaps[IR]); __clear_bit(grp->index, &q->bitmaps[EB]); __clear_bit(grp->index, &q->bitmaps[IB]); if (test_bit(grp->index, &q->bitmaps[ER]) && !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) { mask = q->bitmaps[ER] & ((1UL << grp->index) - 1); if (mask) mask = ~((1UL << __fls(mask)) - 1); else mask = ~0UL; qfq_move_groups(q, mask, EB, ER); qfq_move_groups(q, mask, IB, IR); } __clear_bit(grp->index, &q->bitmaps[ER]); } else if (hlist_empty(&grp->slots[grp->front])) { agg = qfq_slot_scan(grp); roundedS = qfq_round_down(agg->S, grp->slot_shift); if (grp->S != roundedS) { __clear_bit(grp->index, &q->bitmaps[ER]); __clear_bit(grp->index, &q->bitmaps[IR]); __clear_bit(grp->index, &q->bitmaps[EB]); __clear_bit(grp->index, &q->bitmaps[IB]); grp->S = roundedS; grp->F = roundedS + (2ULL << grp->slot_shift); s = qfq_calc_state(q, grp); __set_bit(grp->index, &q->bitmaps[s]); } } } static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl = (struct qfq_class *)arg; if (list_empty(&cl->alist)) return; qfq_deactivate_class(q, cl); } static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct qfq_sched *q = qdisc_priv(sch); struct qfq_group *grp; int i, j, err; u32 max_cl_shift, maxbudg_shift, max_classes; err = tcf_block_get(&q->block, &q->filter_list, sch, extack); if (err) return err; err = qdisc_class_hash_init(&q->clhash); if (err < 0) return err; max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1, QFQ_MAX_AGG_CLASSES); /* max_cl_shift = floor(log_2(max_classes)) */ max_cl_shift = __fls(max_classes); q->max_agg_classes = 1<<max_cl_shift; /* maxbudg_shift = log2(max_len * max_classes_per_agg) */ maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift; q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX; for (i = 0; i <= QFQ_MAX_INDEX; i++) { grp = &q->groups[i]; grp->index = i; grp->slot_shift = q->min_slot_shift + i; for (j = 0; j < QFQ_MAX_SLOTS; j++) INIT_HLIST_HEAD(&grp->slots[j]); } INIT_HLIST_HEAD(&q->nonfull_aggs); return 0; } static void qfq_reset_qdisc(struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; unsigned int i; for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { if (cl->qdisc->q.qlen > 0) qfq_deactivate_class(q, cl); qdisc_reset(cl->qdisc); } } } static void qfq_destroy_qdisc(struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; struct hlist_node *next; unsigned int i; tcf_block_put(q->block); for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], common.hnode) { qfq_destroy_class(sch, cl); } } qdisc_class_hash_destroy(&q->clhash); } static const struct Qdisc_class_ops qfq_class_ops = { .change = qfq_change_class, .delete = qfq_delete_class, .find = qfq_search_class, .tcf_block = qfq_tcf_block, .bind_tcf = qfq_bind_tcf, .unbind_tcf = qfq_unbind_tcf, .graft = qfq_graft_class, .leaf = qfq_class_leaf, .qlen_notify = qfq_qlen_notify, .dump = qfq_dump_class, .dump_stats = qfq_dump_class_stats, .walk = qfq_walk, }; static struct Qdisc_ops qfq_qdisc_ops 
__read_mostly = { .cl_ops = &qfq_class_ops, .id = "qfq", .priv_size = sizeof(struct qfq_sched), .enqueue = qfq_enqueue, .dequeue = qfq_dequeue, .peek = qdisc_peek_dequeued, .init = qfq_init_qdisc, .reset = qfq_reset_qdisc, .destroy = qfq_destroy_qdisc, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("qfq"); static int __init qfq_init(void) { return register_qdisc(&qfq_qdisc_ops); } static void __exit qfq_exit(void) { unregister_qdisc(&qfq_qdisc_ops); } module_init(qfq_init); module_exit(qfq_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Quick Fair Queueing Plus qdisc");
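/*
 * Editor's aside: a user-space sketch of the slot-index capping arithmetic
 * used by qfq_slot_insert() above. QFQ_MAX_SLOTS and qfq_round_down() mirror
 * the kernel definitions; the sample numbers are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define QFQ_MAX_SLOTS 32

/* round a timestamp down to a multiple of the slot size (1 << shift) */
static uint64_t qfq_round_down(uint64_t ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

int main(void)
{
	unsigned int slot_shift = 10;            /* slot size = 1024 units */
	uint64_t grp_S = 0;                      /* group start time */
	uint64_t agg_S = 40ULL << slot_shift;    /* aggregate 40 slots ahead */
	uint64_t roundedS = qfq_round_down(agg_S, slot_shift);
	uint64_t slot = (roundedS - grp_S) >> slot_shift;

	if (slot > QFQ_MAX_SLOTS - 2) {
		/* cap the index and shift the timestamps back accordingly */
		uint64_t deltaS = roundedS - grp_S -
			((uint64_t)(QFQ_MAX_SLOTS - 2) << slot_shift);
		agg_S -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}
	/* prints slot=30 S=30720, i.e. the capped QFQ_MAX_SLOTS - 2 */
	printf("slot=%llu S=%llu\n",
	       (unsigned long long)slot, (unsigned long long)agg_S);
	return 0;
}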
// SPDX-License-Identifier: GPL-2.0-only /* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file implements UBIFS shrinker which evicts clean znodes from the TNC * tree when Linux VM needs more RAM. * * We do not implement any LRU lists to find oldest znodes to free because it * would add additional overhead to the file system fast paths. So the shrinker * just walks the TNC tree when searching for znodes to free. * * If the root of a TNC sub-tree is clean and old enough, then the children are * also clean and old enough. So the shrinker walks the TNC in level order and * dumps entire sub-trees. * * The age of znodes is just the time-stamp when they were last looked at. * The current shrinker first tries to evict old znodes, then young ones. * * Since the shrinker is global, it has to protect against races with FS * un-mounts, which is done by the 'ubifs_infos_lock' and 'c->umount_mutex'. */ #include "ubifs.h" /* List of all UBIFS file-system instances */ LIST_HEAD(ubifs_infos); /* * We number each shrinker run and record the number on the ubifs_info structure * so that we can easily work out which ubifs_info structures have already been * done by the current run. */ static unsigned int shrinker_run_no; /* Protects 'ubifs_infos' list */ DEFINE_SPINLOCK(ubifs_infos_lock); /* Global clean znode counter (for all mounted UBIFS instances) */ atomic_long_t ubifs_clean_zn_cnt; /** * shrink_tnc - shrink TNC tree. * @c: UBIFS file-system description object * @nr: number of znodes to free * @age: the age of znodes to free * @contention: if any contention, this is set to %1 * * This function traverses the TNC tree and frees clean znodes. It does not * free clean znodes which are younger than @age. Returns number of freed * znodes. */ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention) { int total_freed = 0; struct ubifs_znode *znode, *zprev; time64_t time = ktime_get_seconds(); ubifs_assert(c, mutex_is_locked(&c->umount_mutex)); ubifs_assert(c, mutex_is_locked(&c->tnc_mutex)); if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0) return 0; /* * Traverse the TNC tree in level-order manner, so that it is possible * to destroy large sub-trees. Indeed, if a znode is old, then all its * children are older or of the same age.
* * Note, we are holding 'c->tnc_mutex', so we do not have to lock the * 'c->space_lock' when _reading_ 'c->clean_zn_cnt', because it is * changed only when the 'c->tnc_mutex' is held. */ zprev = NULL; znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); while (znode && total_freed < nr && atomic_long_read(&c->clean_zn_cnt) > 0) { int freed; /* * If the znode is clean, but it is in the 'c->cnext' list, this * means that this znode has just been written to flash as a * part of commit and was marked clean. They will be removed * from the list at end commit. We cannot change the list, * because it is not protected by any mutex (design decision to * make commit really independent and parallel to main I/O). So * we just skip these znodes. * * Note, the 'clean_zn_cnt' counters are not updated until * after the commit, so the UBIFS shrinker does not report * the znodes which are in the 'c->cnext' list as freeable. * * Also note, if the root of a sub-tree is not in 'c->cnext', * then the whole sub-tree is not in 'c->cnext' as well, so it * is safe to dump whole sub-tree. */ if (znode->cnext) { /* * Very soon these znodes will be removed from the list * and become freeable. */ *contention = 1; } else if (!ubifs_zn_dirty(znode) && abs(time - znode->time) >= age) { if (znode->parent) znode->parent->zbranch[znode->iip].znode = NULL; else c->zroot.znode = NULL; freed = ubifs_destroy_tnc_subtree(c, znode); atomic_long_sub(freed, &ubifs_clean_zn_cnt); atomic_long_sub(freed, &c->clean_zn_cnt); total_freed += freed; znode = zprev; } if (unlikely(!c->zroot.znode)) break; zprev = znode; znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); cond_resched(); } return total_freed; } /** * shrink_tnc_trees - shrink UBIFS TNC trees. * @nr: number of znodes to free * @age: the age of znodes to free * @contention: if any contention, this is set to %1 * * This function walks the list of mounted UBIFS file-systems and frees clean * znodes which are older than @age, until at least @nr znodes are freed. * Returns the number of freed znodes. */ static int shrink_tnc_trees(int nr, int age, int *contention) { struct ubifs_info *c; struct list_head *p; unsigned int run_no; int freed = 0; spin_lock(&ubifs_infos_lock); do { run_no = ++shrinker_run_no; } while (run_no == 0); /* Iterate over all mounted UBIFS file-systems and try to shrink them */ p = ubifs_infos.next; while (p != &ubifs_infos) { c = list_entry(p, struct ubifs_info, infos_list); /* * We move the ones we do to the end of the list, so we stop * when we see one we have already done. */ if (c->shrinker_run_no == run_no) break; if (!mutex_trylock(&c->umount_mutex)) { /* Some un-mount is in progress, try next FS */ *contention = 1; p = p->next; continue; } /* * We're holding 'c->umount_mutex', so the file-system won't go * away. */ if (!mutex_trylock(&c->tnc_mutex)) { mutex_unlock(&c->umount_mutex); *contention = 1; p = p->next; continue; } spin_unlock(&ubifs_infos_lock); /* * OK, now we have TNC locked, the file-system cannot go away - * it is safe to reap the cache. */ c->shrinker_run_no = run_no; freed += shrink_tnc(c, nr, age, contention); mutex_unlock(&c->tnc_mutex); spin_lock(&ubifs_infos_lock); /* Get the next list element before we move this one */ p = p->next; /* * Move this one to the end of the list to provide some * fairness. */ list_move_tail(&c->infos_list, &ubifs_infos); mutex_unlock(&c->umount_mutex); if (freed >= nr) break; } spin_unlock(&ubifs_infos_lock); return freed; } /** * kick_a_thread - kick a background thread to start commit. 
* * This function kicks a background thread to start background commit. Returns * %-1 if a thread was kicked or there is another reason to assume the memory * will soon be freed or become freeable. If there are no dirty znodes, returns * %0. */ static int kick_a_thread(void) { int i; struct ubifs_info *c; /* * Iterate over all mounted UBIFS file-systems and find out if there is * already an ongoing commit operation there. If not, then iterate a * second time and initiate background commit. */ spin_lock(&ubifs_infos_lock); for (i = 0; i < 2; i++) { list_for_each_entry(c, &ubifs_infos, infos_list) { long dirty_zn_cnt; if (!mutex_trylock(&c->umount_mutex)) { /* * Some un-mount is in progress, it will * certainly free memory, so just return. */ spin_unlock(&ubifs_infos_lock); return -1; } dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt); if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN || c->ro_mount || c->ro_error) { mutex_unlock(&c->umount_mutex); continue; } if (c->cmt_state != COMMIT_RESTING) { spin_unlock(&ubifs_infos_lock); mutex_unlock(&c->umount_mutex); return -1; } if (i == 1) { list_move_tail(&c->infos_list, &ubifs_infos); spin_unlock(&ubifs_infos_lock); ubifs_request_bg_commit(c); mutex_unlock(&c->umount_mutex); return -1; } mutex_unlock(&c->umount_mutex); } } spin_unlock(&ubifs_infos_lock); return 0; } unsigned long ubifs_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); /* * Due to the way UBIFS updates the clean znode counter it may * temporarily be negative. */ return clean_zn_cnt >= 0 ? clean_zn_cnt : 1; } unsigned long ubifs_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { unsigned long nr = sc->nr_to_scan; int contention = 0; unsigned long freed; long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); if (!clean_zn_cnt) { /* * No clean znodes, nothing to reap. All we can do in this case * is to kick background threads to start commit, which will * probably make clean znodes which, in turn, will be freeable. * We then return -1, which will make the VM call us again * later. */ dbg_tnc("no clean znodes, kick a thread"); return kick_a_thread(); } freed = shrink_tnc_trees(nr, OLD_ZNODE_AGE, &contention); if (freed >= nr) goto out; dbg_tnc("not enough old znodes, try to free young ones"); freed += shrink_tnc_trees(nr - freed, YOUNG_ZNODE_AGE, &contention); if (freed >= nr) goto out; dbg_tnc("not enough young znodes, free all"); freed += shrink_tnc_trees(nr - freed, 0, &contention); if (!freed && contention) { dbg_tnc("freed nothing, but contention"); return SHRINK_STOP; } out: dbg_tnc("%lu znodes were freed, requested %lu", freed, nr); return freed; }
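/*
 * Editor's aside: ubifs_shrink_count() and ubifs_shrink_scan() above form the
 * two halves of the VM shrinker contract. A plausible hookup, using the
 * classic struct shrinker fields (the registration call itself has changed
 * across kernel versions, so treat this as an illustrative sketch rather than
 * the actual code in fs/ubifs/super.c):
 */
static struct shrinker ubifs_shrinker_info = {
	.count_objects = ubifs_shrink_count,	/* "how much could you free?" */
	.scan_objects = ubifs_shrink_scan,	/* "free up to sc->nr_to_scan" */
	.seeks = DEFAULT_SEEKS,			/* default recreation cost */
};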
// SPDX-License-Identifier: GPL-2.0-only /* * Input layer to RF Kill interface connector * * Copyright (c) 2007 Dmitry Torokhov * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * * If you ever run into a situation in which you have a SW_ type rfkill * input device, then you can revive code that was removed in the patch * "rfkill-input: remove unused code". */ #include <linux/input.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <linux/workqueue.h> #include <linux/init.h> #include <linux/rfkill.h> #include <linux/sched.h> #include "rfkill.h" enum rfkill_input_master_mode { RFKILL_INPUT_MASTER_UNLOCK = 0, RFKILL_INPUT_MASTER_RESTORE = 1, RFKILL_INPUT_MASTER_UNBLOCKALL = 2, NUM_RFKILL_INPUT_MASTER_MODES }; /* Delay (in ms) between consecutive switch ops */ #define RFKILL_OPS_DELAY 200 static enum rfkill_input_master_mode rfkill_master_switch_mode = RFKILL_INPUT_MASTER_UNBLOCKALL; module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); MODULE_PARM_DESC(master_switch_mode, "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); static DEFINE_SPINLOCK(rfkill_op_lock); static bool rfkill_op_pending; static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; enum rfkill_sched_op { RFKILL_GLOBAL_OP_EPO = 0, RFKILL_GLOBAL_OP_RESTORE, RFKILL_GLOBAL_OP_UNLOCK, RFKILL_GLOBAL_OP_UNBLOCK, }; static enum rfkill_sched_op rfkill_master_switch_op; static enum rfkill_sched_op rfkill_op; static void __rfkill_handle_global_op(enum rfkill_sched_op op) { unsigned int i; switch (op) { case RFKILL_GLOBAL_OP_EPO: rfkill_epo(); break; case RFKILL_GLOBAL_OP_RESTORE: rfkill_restore_states(); break; case RFKILL_GLOBAL_OP_UNLOCK: rfkill_remove_epo_lock(); break; case RFKILL_GLOBAL_OP_UNBLOCK: rfkill_remove_epo_lock(); for (i = 0; i < NUM_RFKILL_TYPES; i++) rfkill_switch_all(i, false); break; default: /* memory corruption or bug, fail safely */ rfkill_epo(); WARN(1, "Unknown requested operation %d!
" "rfkill Emergency Power Off activated\n", op); } } static void __rfkill_handle_normal_op(const enum rfkill_type type, const bool complement) { bool blocked; blocked = rfkill_get_global_sw_state(type); if (complement) blocked = !blocked; rfkill_switch_all(type, blocked); } static void rfkill_op_handler(struct work_struct *work) { unsigned int i; bool c; spin_lock_irq(&rfkill_op_lock); do { if (rfkill_op_pending) { enum rfkill_sched_op op = rfkill_op; rfkill_op_pending = false; memset(rfkill_sw_pending, 0, sizeof(rfkill_sw_pending)); spin_unlock_irq(&rfkill_op_lock); __rfkill_handle_global_op(op); spin_lock_irq(&rfkill_op_lock); /* * handle global ops first -- during unlocked period * we might have gotten a new global op. */ if (rfkill_op_pending) continue; } if (rfkill_is_epo_lock_active()) continue; for (i = 0; i < NUM_RFKILL_TYPES; i++) { if (__test_and_clear_bit(i, rfkill_sw_pending)) { c = __test_and_clear_bit(i, rfkill_sw_state); spin_unlock_irq(&rfkill_op_lock); __rfkill_handle_normal_op(i, c); spin_lock_irq(&rfkill_op_lock); } } } while (rfkill_op_pending); spin_unlock_irq(&rfkill_op_lock); } static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); static unsigned long rfkill_last_scheduled; static unsigned long rfkill_ratelimit(const unsigned long last) { const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); return time_after(jiffies, last + delay) ? 0 : delay; } static void rfkill_schedule_ratelimited(void) { if (schedule_delayed_work(&rfkill_op_work, rfkill_ratelimit(rfkill_last_scheduled))) rfkill_last_scheduled = jiffies; } static void rfkill_schedule_global_op(enum rfkill_sched_op op) { unsigned long flags; spin_lock_irqsave(&rfkill_op_lock, flags); rfkill_op = op; rfkill_op_pending = true; if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { /* bypass the limiter for EPO */ mod_delayed_work(system_wq, &rfkill_op_work, 0); rfkill_last_scheduled = jiffies; } else rfkill_schedule_ratelimited(); spin_unlock_irqrestore(&rfkill_op_lock, flags); } static void rfkill_schedule_toggle(enum rfkill_type type) { unsigned long flags; if (rfkill_is_epo_lock_active()) return; spin_lock_irqsave(&rfkill_op_lock, flags); if (!rfkill_op_pending) { __set_bit(type, rfkill_sw_pending); __change_bit(type, rfkill_sw_state); rfkill_schedule_ratelimited(); } spin_unlock_irqrestore(&rfkill_op_lock, flags); } static void rfkill_schedule_evsw_rfkillall(int state) { if (state) rfkill_schedule_global_op(rfkill_master_switch_op); else rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); } static void rfkill_event(struct input_handle *handle, unsigned int type, unsigned int code, int data) { if (type == EV_KEY && data == 1) { switch (code) { case KEY_WLAN: rfkill_schedule_toggle(RFKILL_TYPE_WLAN); break; case KEY_BLUETOOTH: rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); break; case KEY_UWB: rfkill_schedule_toggle(RFKILL_TYPE_UWB); break; case KEY_WIMAX: rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); break; case KEY_RFKILL: rfkill_schedule_toggle(RFKILL_TYPE_ALL); break; } } else if (type == EV_SW && code == SW_RFKILL_ALL) rfkill_schedule_evsw_rfkillall(data); } static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct input_handle *handle; int error; handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); if (!handle) return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "rfkill"; /* causes rfkill_start() to be called */ error = input_register_handle(handle); if (error) goto err_free_handle; error 
= input_open_device(handle); if (error) goto err_unregister_handle; return 0; err_unregister_handle: input_unregister_handle(handle); err_free_handle: kfree(handle); return error; } static void rfkill_start(struct input_handle *handle) { /* * Take event_lock to guard against configuration changes, we * should be able to deal with concurrency with rfkill_event() * just fine (which event_lock will also avoid). */ spin_lock_irq(&handle->dev->event_lock); if (test_bit(EV_SW, handle->dev->evbit) && test_bit(SW_RFKILL_ALL, handle->dev->swbit)) rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, handle->dev->sw)); spin_unlock_irq(&handle->dev->event_lock); } static void rfkill_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); kfree(handle); } static const struct input_device_id rfkill_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, .evbit = { BIT(EV_SW) }, .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, }, { } }; static struct input_handler rfkill_handler = { .name = "rfkill", .event = rfkill_event, .connect = rfkill_connect, .start = rfkill_start, .disconnect = rfkill_disconnect, .id_table = rfkill_ids, }; int __init rfkill_handler_init(void) { switch (rfkill_master_switch_mode) { case RFKILL_INPUT_MASTER_UNBLOCKALL: rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; break; case RFKILL_INPUT_MASTER_RESTORE: rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; break; case RFKILL_INPUT_MASTER_UNLOCK: rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; break; default: return -EINVAL; } /* Avoid delay at first schedule */ rfkill_last_scheduled = jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; return input_register_handler(&rfkill_handler); } void __exit rfkill_handler_exit(void) { input_unregister_handler(&rfkill_handler); cancel_delayed_work_sync(&rfkill_op_work); }
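/*
 * Editor's aside: a small user-space model of the rate limiting done by
 * rfkill_ratelimit() above; "jiffies" are replaced by plain millisecond
 * counters, purely for illustration.
 */
#include <stdio.h>

#define RFKILL_OPS_DELAY 200	/* ms, as in the module above */

/* 0 means "schedule now", otherwise defer by the returned delay */
static unsigned long ratelimit(unsigned long now_ms, unsigned long last_ms)
{
	return now_ms > last_ms + RFKILL_OPS_DELAY ? 0 : RFKILL_OPS_DELAY;
}

int main(void)
{
	printf("%lu\n", ratelimit(1000, 900));	/* 200: too soon, defer  */
	printf("%lu\n", ratelimit(1300, 900));	/* 0: delay elapsed, run */
	return 0;
}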
// SPDX-License-Identifier: GPL-2.0 /* * message.c - synchronous message handling * * Released under the GPLv2 only. */ #include <linux/acpi.h> #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/nls.h> #include <linux/device.h> #include <linux/scatterlist.h> #include <linux/usb/cdc.h> #include <linux/usb/quirks.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/usb/of.h> #include <asm/byteorder.h> #include "usb.h" static void cancel_async_set_config(struct usb_device *udev); struct api_context { struct completion done; int status; }; static void usb_api_blocking_completion(struct urb *urb) { struct api_context *ctx = urb->context; ctx->status = urb->status; complete(&ctx->done); } /* * Starts urb and waits for completion or timeout. Note that this call * is NOT interruptible. Many device driver i/o requests should be * interruptible and therefore these drivers should implement their * own interruptible routines. */ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) { struct api_context ctx; unsigned long expire; int retval; init_completion(&ctx.done); urb->context = &ctx; urb->actual_length = 0; retval = usb_submit_urb(urb, GFP_NOIO); if (unlikely(retval)) goto out; expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; if (!wait_for_completion_timeout(&ctx.done, expire)) { usb_kill_urb(urb); retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); dev_dbg(&urb->dev->dev, "%s timed out on ep%d%s len=%u/%u\n", current->comm, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ?
"in" : "out", urb->actual_length, urb->transfer_buffer_length); } else retval = ctx.status; out: if (actual_length) *actual_length = urb->actual_length; usb_free_urb(urb); return retval; } /*-------------------------------------------------------------------*/ /* returns status (negative) or length (positive) */ static int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout) { struct urb *urb; int retv; int length; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) return -ENOMEM; usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, len, usb_api_blocking_completion, NULL); retv = usb_start_wait_urb(urb, timeout, &length); if (retv < 0) return retv; else return length; } /** * usb_control_msg - Builds a control urb, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple control message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: If successful, the number of bytes transferred. Otherwise, a negative * error number. */ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int ret; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) return -ENOMEM; dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); kfree(dr); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg); /** * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is not * expected to fill in a response (i.e. a "send message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. 
If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer may point to a buffer on the stack, or anywhere else, * as it will not be modified at all. This does not have the restriction that * usb_control_msg() has where the data pointer must point to dynamically allocated * memory (i.e. memory that can be successfully DMAed to a device). * * Return: If successful, 0 is returned. Otherwise, a negative error number. */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, const void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_sndctrlpipe(dev, endpoint); int ret; u8 *data = NULL; if (size) { data = kmemdup(driver_data, size, memflags); if (!data) return -ENOMEM; } ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); kfree(data); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL_GPL(usb_control_msg_send); /** * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to be filled in by the message * @size: length in bytes of the data to be received * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is * expected to fill in a response (i.e. a "receive message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer may point to a buffer on the stack, or anywhere else * that can be successfully written to. This function does not have the * restriction that usb_control_msg() has where the data pointer must point to * dynamically allocated memory (i.e. memory that can be successfully DMAed to a * device). * * The "whole" message must be properly received from the device in order for * this function to be successful. If a device returns less than the expected * amount of data, then the function will fail. Do not use this for messages * where a variable amount of data might be returned. * * Return: If successful, 0 is returned. Otherwise, a negative error number.
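 * A hedged usage sketch (the request, buffer and timeout below are
 * hypothetical, not taken from this file):
 *
 *	u8 buf[2];
 *	int ret = usb_control_msg_recv(udev, 0, 0x01,
 *				       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 *				       0, 0, buf, sizeof(buf), 1000, GFP_KERNEL);
 *	if (ret)
 *		return ret;	/* short reads surface as -EREMOTEIO */
 *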
*/ int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_rcvctrlpipe(dev, endpoint); int ret; u8 *data; if (!size || !driver_data) return -EINVAL; data = kmalloc(size, memflags); if (!data) return -ENOMEM; ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); if (ret < 0) goto exit; if (ret == size) { memcpy(driver_data, data, size); ret = 0; } else { ret = -EREMOTEIO; } exit: kfree(data); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg_recv); /** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple interrupt message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. */ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout); } EXPORT_SYMBOL_GPL(usb_interrupt_msg); /** * usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple bulk message to a specified endpoint * and waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Because there is no usb_interrupt_msg() and no USBDEVFS_INTERRUPT ioctl, * users are forced to abuse this routine by using it to submit URBs for * interrupt endpoints. We will take the liberty of creating an interrupt URB * (with the default interval) if the target is an interrupt endpoint. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter.
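 * A hedged usage sketch (endpoint number, buffer and timeout are
 * hypothetical):
 *
 *	int actual = 0;
 *	int ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
 *			       buf, len, &actual, 5000);
 *	if (ret)
 *		dev_err(&udev->dev, "bulk out failed: %d\n", ret);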
* */ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { struct urb *urb; struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(usb_dev, pipe); if (!ep || len < 0) return -EINVAL; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL, ep->desc.bInterval); } else usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL); return usb_start_wait_urb(urb, timeout, actual_length); } EXPORT_SYMBOL_GPL(usb_bulk_msg); /*-------------------------------------------------------------------*/ static void sg_clean(struct usb_sg_request *io) { if (io->urbs) { while (io->entries--) usb_free_urb(io->urbs[io->entries]); kfree(io->urbs); io->urbs = NULL; } io->dev = NULL; } static void sg_complete(struct urb *urb) { unsigned long flags; struct usb_sg_request *io = urb->context; int status = urb->status; spin_lock_irqsave(&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets * device driver code (like this routine) unlink queued urbs first, * if it needs to, since the HC won't work on them at all. So it's * not possible for page N+1 to overwrite page N, and so on. * * That's only for "hard" faults; "soft" faults (unlinks) sometimes * complete before the HCD can get requests away from hardware, * though never during cleanup after a hard fault. */ if (io->status && (io->status != -ECONNRESET || status != -ECONNRESET) && urb->actual_length) { dev_err(io->dev->bus->controller, "dev %s ep%d%s scatterlist error %d/%d\n", io->dev->devpath, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? "in" : "out", status, io->status); /* BUG (); */ } if (io->status == 0 && status && status != -ECONNRESET) { int i, found, retval; io->status = status; /* the previous urbs, and this one, completed already. * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ spin_unlock_irqrestore(&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs[i]) continue; if (found) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } else if (urb == io->urbs[i]) found = 1; } spin_lock_irqsave(&io->lock, flags); } /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } /** * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request * @io: request block being initialized. until usb_sg_wait() returns, * treat this as a pointer to an opaque block of memory, * @dev: the usb device that will send or receive the data * @pipe: endpoint "pipe" used to transfer the data * @period: polling rate for interrupt endpoints, in frames or * (for high speed endpoints) microframes; ignored for bulk * @sg: scatterlist entries * @nents: how many entries in the scatterlist * @length: how many bytes to send from the scatterlist, or zero to * send every byte identified in the list. 
* @mem_flags: SLAB_* flags affecting memory allocations in this call * * This initializes a scatter/gather request, allocating resources such as * I/O mappings and urb memory (except maybe memory used by USB controller * drivers). * * The request must be issued using usb_sg_wait(), which waits for the I/O to * complete (or to be canceled) and then cleans up all resources allocated by * usb_sg_init(). * * The request may be canceled with usb_sg_cancel(), either before or after * usb_sg_wait() is called. * * Return: Zero for success, else a negative errno value. */ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, unsigned pipe, unsigned period, struct scatterlist *sg, int nents, size_t length, gfp_t mem_flags) { int i; int urb_flags; int use_sg; if (!io || !dev || !sg || usb_pipecontrol(pipe) || usb_pipeisoc(pipe) || nents <= 0) return -EINVAL; spin_lock_init(&io->lock); io->dev = dev; io->pipe = pipe; if (dev->bus->sg_tablesize > 0) { use_sg = true; io->entries = 1; } else { use_sg = false; io->entries = nents; } /* initialize all the urbs we'll use */ io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags); if (!io->urbs) goto nomem; urb_flags = URB_NO_INTERRUPT; if (usb_pipein(pipe)) urb_flags |= URB_SHORT_NOT_OK; for_each_sg(sg, sg, io->entries, i) { struct urb *urb; unsigned len; urb = usb_alloc_urb(0, mem_flags); if (!urb) { io->entries = i; goto nomem; } io->urbs[i] = urb; urb->dev = NULL; urb->pipe = pipe; urb->interval = period; urb->transfer_flags = urb_flags; urb->complete = sg_complete; urb->context = io; urb->sg = sg; if (use_sg) { /* There is no single transfer buffer */ urb->transfer_buffer = NULL; urb->num_sgs = nents; /* A length of zero means transfer the whole sg list */ len = length; if (len == 0) { struct scatterlist *sg2; int j; for_each_sg(sg, sg2, nents, j) len += sg2->length; } } else { /* * Some systems can't use DMA; they use PIO instead. * For their sakes, transfer_buffer is set whenever * possible. */ if (!PageHighMem(sg_page(sg))) urb->transfer_buffer = sg_virt(sg); else urb->transfer_buffer = NULL; len = sg->length; if (length) { len = min_t(size_t, len, length); length -= len; if (length == 0) io->entries = i + 1; } } urb->transfer_buffer_length = len; } io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; /* transaction state */ io->count = io->entries; io->status = 0; io->bytes = 0; init_completion(&io->complete); return 0; nomem: sg_clean(io); return -ENOMEM; } EXPORT_SYMBOL_GPL(usb_sg_init); /** * usb_sg_wait - synchronously execute scatter/gather request * @io: request block handle, as initialized with usb_sg_init(). * some fields become accessible when this call returns. * * Context: task context, might sleep. * * This function blocks until the specified I/O operation completes. It * leverages the grouping of the related I/O requests to get good transfer * rates, by queueing the requests. At higher speeds, such queuing can * significantly improve USB throughput. * * There are three kinds of completion for this function. * * (1) success, where io->status is zero. The number of io->bytes * transferred is as requested. * (2) error, where io->status is a negative errno value. The number * of io->bytes transferred before the error is usually less * than requested, and can be nonzero. * (3) cancellation, a type of error with status -ECONNRESET that * is initiated by usb_sg_cancel(). * * When this function returns, all memory allocated through usb_sg_init() or * this call will have been freed. 
 * The request block parameter may still be passed to usb_sg_cancel(), or it
 * may be freed. It could also be reinitialized and then reused.
 *
 * Data Transfer Rates:
 *
 * Bulk transfers are valid for full or high speed endpoints.
 * The best full speed data rate is 19 packets of 64 bytes each
 * per frame, or 1216 bytes per millisecond.
 * The best high speed data rate is 13 packets of 512 bytes each
 * per microframe, or 52 KBytes per millisecond.
 *
 * The reason to use interrupt transfers through this API would most likely
 * be to reserve high speed bandwidth, where up to 24 KBytes per millisecond
 * could be transferred. That capability is less useful for low or full
 * speed interrupt endpoints, which allow at most one packet per millisecond,
 * of at most 8 or 64 bytes (respectively).
 *
 * It is not necessary to call this function to reserve bandwidth for devices
 * under an xHCI host controller, as the bandwidth is reserved when the
 * configuration or interface alt setting is selected.
 */
void usb_sg_wait(struct usb_sg_request *io)
{
	int i;
	int entries = io->entries;

	/* queue the urbs. */
	spin_lock_irq(&io->lock);
	i = 0;
	while (i < entries && !io->status) {
		int retval;

		io->urbs[i]->dev = io->dev;
		spin_unlock_irq(&io->lock);

		retval = usb_submit_urb(io->urbs[i], GFP_NOIO);

		switch (retval) {
		/* maybe retrying will recover */
		case -ENXIO:	/* hc didn't queue this one */
		case -EAGAIN:
		case -ENOMEM:
			retval = 0;
			yield();
			break;

		/* no error? continue immediately.
		 *
		 * NOTE: to work better with UHCI (4K I/O buffer may
		 * need 3K of TDs) it may be good to limit how many
		 * URBs are queued at once; N milliseconds?
		 */
		case 0:
			++i;
			cpu_relax();
			break;

		/* fail any uncompleted urbs */
		default:
			io->urbs[i]->status = retval;
			dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
				__func__, retval);
			usb_sg_cancel(io);
		}
		spin_lock_irq(&io->lock);
		if (retval && (io->status == 0 || io->status == -ECONNRESET))
			io->status = retval;
	}
	io->count -= entries - i;
	if (io->count == 0)
		complete(&io->complete);
	spin_unlock_irq(&io->lock);

	/* OK, yes, this could be packaged as non-blocking.
	 * So could the submit loop above ... but it's easier to
	 * solve neither problem than to solve both!
	 */
	wait_for_completion(&io->complete);

	sg_clean(io);
}
EXPORT_SYMBOL_GPL(usb_sg_wait);

/**
 * usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait()
 * @io: request block, initialized with usb_sg_init()
 *
 * This stops a request after it has been started by usb_sg_wait().
 * It can also prevent one initialized by usb_sg_init() from starting,
 * so that call just frees resources allocated to the request.
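 *
 * A typical synchronous call sequence for this scatter/gather API, as a
 * minimal sketch (the buffers "buf0"/"buf1" with lengths "len0"/"len1",
 * endpoint 0x81, and "udev" are hypothetical; error handling is trimmed):
 *
 *	struct usb_sg_request io;
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	if (usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 0x81), 0,
 *			sg, 2, 0, GFP_KERNEL) == 0) {
 *		usb_sg_wait(&io);
 *		if (io.status)
 *			dev_err(&udev->dev, "sg i/o failed: %d\n", io.status);
 *	}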
*/ void usb_sg_cancel(struct usb_sg_request *io) { unsigned long flags; int i, retval; spin_lock_irqsave(&io->lock, flags); if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } spin_lock_irqsave(&io->lock, flags); io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); /*-------------------------------------------------------------------*/ /** * usb_get_descriptor - issues a generic GET_DESCRIPTOR request * @dev: the device whose descriptor is being retrieved * @type: the descriptor type (USB_DT_*) * @index: the number of the descriptor * @buf: where to put the descriptor * @size: how big is "buf"? * * Context: task context, might sleep. * * Gets a USB descriptor. Convenience functions exist to simplify * getting some types of descriptors. Use * usb_get_string() or usb_string() for USB_DT_STRING. * Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG) * are part of the device structure. * In addition to a number of USB-standard descriptors, some * devices also use class-specific or vendor-specific descriptors. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. */ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; memset(buf, 0, size); /* Make sure we parse really received data */ for (i = 0; i < 3; ++i) { /* retry on length 0 or error; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (type << 8) + index, 0, buf, size, USB_CTRL_GET_TIMEOUT); if (result <= 0 && result != -ETIMEDOUT) continue; if (result > 1 && ((u8 *)buf)[1] != type) { result = -ENODATA; continue; } break; } return result; } EXPORT_SYMBOL_GPL(usb_get_descriptor); /** * usb_get_string - gets a string descriptor * @dev: the device whose string descriptor is being retrieved * @langid: code for language chosen (from string descriptor zero) * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character, * in little-endian byte order). * The usb_string() function will often be a convenient way to turn * these strings into kernel-printable form. * * Strings may be referenced in device, configuration, interface, or other * descriptors, and could also be used in vendor-specific ways. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. 
*/ static int usb_get_string(struct usb_device *dev, unsigned short langid, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; for (i = 0; i < 3; ++i) { /* retry on length 0 or stall; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (USB_DT_STRING << 8) + index, langid, buf, size, USB_CTRL_GET_TIMEOUT); if (result == 0 || result == -EPIPE) continue; if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) { result = -ENODATA; continue; } break; } return result; } static void usb_try_string_workarounds(unsigned char *buf, int *length) { int newlength, oldlength = *length; for (newlength = 2; newlength + 1 < oldlength; newlength += 2) if (!isprint(buf[newlength]) || buf[newlength + 1]) break; if (newlength > 2) { buf[0] = newlength; *length = newlength; } } static int usb_string_sub(struct usb_device *dev, unsigned int langid, unsigned int index, unsigned char *buf) { int rc; /* Try to read the string descriptor by asking for the maximum * possible number of bytes */ if (dev->quirks & USB_QUIRK_STRING_FETCH_255) rc = -EIO; else rc = usb_get_string(dev, langid, index, buf, 255); /* If that failed try to read the descriptor length, then * ask for just that many bytes */ if (rc < 2) { rc = usb_get_string(dev, langid, index, buf, 2); if (rc == 2) rc = usb_get_string(dev, langid, index, buf, buf[0]); } if (rc >= 2) { if (!buf[0] && !buf[1]) usb_try_string_workarounds(buf, &rc); /* There might be extra junk at the end of the descriptor */ if (buf[0] < rc) rc = buf[0]; rc = rc - (rc & 1); /* force a multiple of two */ } if (rc < 2) rc = (rc < 0 ? rc : -EINVAL); return rc; } static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf) { int err; if (dev->have_langid) return 0; if (dev->string_langid < 0) return -EPIPE; err = usb_string_sub(dev, 0, 0, tbuf); /* If the string was reported but is malformed, default to english * (0x0409) */ if (err == -ENODATA || (err > 0 && err < 4)) { dev->string_langid = 0x0409; dev->have_langid = 1; dev_err(&dev->dev, "language id specifier not provided by device, defaulting to English\n"); return 0; } /* In case of all other errors, we assume the device is not able to * deal with strings at all. Set string_langid to -1 in order to * prevent any string to be retrieved from the device */ if (err < 0) { dev_info(&dev->dev, "string descriptor 0 read error: %d\n", err); dev->string_langid = -1; return -EPIPE; } /* always use the first langid listed */ dev->string_langid = tbuf[2] | (tbuf[3] << 8); dev->have_langid = 1; dev_dbg(&dev->dev, "default language 0x%04x\n", dev->string_langid); return 0; } /** * usb_string - returns UTF-8 version of a string descriptor * @dev: the device whose string descriptor is being retrieved * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * This converts the UTF-16LE encoded strings returned by devices, from * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones * that are more usable in most kernel contexts. Note that this function * chooses strings in the first language supported by the device. * * This call is synchronous, and may not be used in an interrupt context. * * Return: length of the string (>= 0) or usb_control_msg status (< 0). 
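 *
 * A minimal sketch ("udev" is hypothetical; iProduct is the product-string
 * index from the device descriptor):
 *
 *	char name[64];
 *	int rv;
 *
 *	rv = usb_string(udev, udev->descriptor.iProduct, name, sizeof(name));
 *	if (rv > 0)
 *		dev_info(&udev->dev, "product: %s\n", name);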
*/ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) { unsigned char *tbuf; int err; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (size <= 0 || !buf) return -EINVAL; buf[0] = 0; if (index <= 0 || index >= 256) return -EINVAL; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; err = usb_get_langid(dev, tbuf); if (err < 0) goto errout; err = usb_string_sub(dev, dev->string_langid, index, tbuf); if (err < 0) goto errout; size--; /* leave room for trailing NULL char in output buffer */ err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2, UTF16_LITTLE_ENDIAN, buf, size); buf[err] = 0; if (tbuf[1] != USB_DT_STRING) dev_dbg(&dev->dev, "wrong descriptor type %02x for string %d (\"%s\")\n", tbuf[1], index, buf); errout: kfree(tbuf); return err; } EXPORT_SYMBOL_GPL(usb_string); /* one UTF-8-encoded 16-bit character has at most three bytes */ #define MAX_USB_STRING_SIZE (127 * 3 + 1) /** * usb_cache_string - read a string descriptor and cache it for later use * @udev: the device whose string descriptor is being read * @index: the descriptor index * * Return: A pointer to a kmalloc'ed buffer containing the descriptor string, * or %NULL if the index is 0 or the string could not be read. */ char *usb_cache_string(struct usb_device *udev, int index) { char *buf; char *smallbuf = NULL; int len; if (index <= 0) return NULL; buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); if (buf) { len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); if (len > 0) { smallbuf = kmalloc(++len, GFP_NOIO); if (!smallbuf) return buf; memcpy(smallbuf, buf, len); } kfree(buf); } return smallbuf; } EXPORT_SYMBOL_GPL(usb_cache_string); /* * usb_get_device_descriptor - read the device descriptor * @udev: the device whose device descriptor should be read * * Context: task context, might sleep. * * Not exported, only for use by the core. If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * * Returns: a pointer to a dynamically allocated usb_device_descriptor * structure (which the caller must deallocate), or an ERR_PTR value. */ struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev) { struct usb_device_descriptor *desc; int ret; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) return ERR_PTR(-ENOMEM); ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc)); if (ret == sizeof(*desc)) return desc; if (ret >= 0) ret = -EMSGSIZE; kfree(desc); return ERR_PTR(ret); } /* * usb_set_isoch_delay - informs the device of the packet transmit delay * @dev: the device whose delay is to be informed * Context: task context, might sleep * * Since this is an optional request, we don't bother if it fails. */ int usb_set_isoch_delay(struct usb_device *dev) { /* skip hub devices */ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB) return 0; /* skip non-SS/non-SSP devices */ if (dev->speed < USB_SPEED_SUPER) return 0; return usb_control_msg_send(dev, 0, USB_REQ_SET_ISOCH_DELAY, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, dev->hub_delay, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); } /** * usb_get_status - issues a GET_STATUS call * @dev: the device whose status is being checked * @recip: USB_RECIP_*; for device, interface, or endpoint * @type: USB_STATUS_TYPE_*; for standard or PTM status types * @target: zero (for device), else interface or endpoint number * @data: pointer to two bytes of bitmap data * * Context: task context, might sleep. 
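 *
 * A minimal sketch of a common use, testing a bulk-OUT endpoint's halt bit
 * (see the description below; "udev" and endpoint address 0x02 are
 * hypothetical):
 *
 *	u16 status = 0;
 *
 *	if (usb_get_status(udev, USB_RECIP_ENDPOINT, USB_STATUS_TYPE_STANDARD,
 *			   0x02, &status) == 0 &&
 *	    (status & (1 << USB_ENDPOINT_HALT)))
 *		usb_clear_halt(udev, usb_sndbulkpipe(udev, 0x02));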
* * Returns device, interface, or endpoint status. Normally only of * interest to see if the device is self powered, or has enabled the * remote wakeup facility; or whether a bulk or interrupt endpoint * is halted ("stalled"). * * Bits in these status bitmaps are set using the SET_FEATURE request, * and cleared using the CLEAR_FEATURE request. The usb_clear_halt() * function should be used to clear halt ("stall") status. * * This call is synchronous, and may not be used in an interrupt context. * * Returns 0 and the status value in *@data (in host byte order) on success, * or else the status code from the underlying usb_control_msg() call. */ int usb_get_status(struct usb_device *dev, int recip, int type, int target, void *data) { int ret; void *status; int length; switch (type) { case USB_STATUS_TYPE_STANDARD: length = 2; break; case USB_STATUS_TYPE_PTM: if (recip != USB_RECIP_DEVICE) return -EINVAL; length = 4; break; default: return -EINVAL; } status = kmalloc(length, GFP_KERNEL); if (!status) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD, target, status, length, USB_CTRL_GET_TIMEOUT); switch (ret) { case 4: if (type != USB_STATUS_TYPE_PTM) { ret = -EIO; break; } *(u32 *) data = le32_to_cpu(*(__le32 *) status); ret = 0; break; case 2: if (type != USB_STATUS_TYPE_STANDARD) { ret = -EIO; break; } *(u16 *) data = le16_to_cpu(*(__le16 *) status); ret = 0; break; default: ret = -EIO; } kfree(status); return ret; } EXPORT_SYMBOL_GPL(usb_get_status); /** * usb_clear_halt - tells device to clear endpoint halt/stall condition * @dev: device whose endpoint is halted * @pipe: endpoint "pipe" being cleared * * Context: task context, might sleep. * * This is used to clear halt conditions for bulk and interrupt endpoints, * as reported by URB completion status. Endpoints that are halted are * sometimes referred to as being "stalled". Such endpoints are unable * to transmit or receive data until the halt status is cleared. Any URBs * queued for such an endpoint should normally be unlinked by the driver * before clearing the halt condition, as described in sections 5.7.5 * and 5.8.5 of the USB 2.0 spec. * * Note that control and isochronous endpoints don't halt, although control * endpoints report "protocol stall" (for unsupported requests) using the * same status code used to report a true stall. * * This call is synchronous, and may not be used in an interrupt context. * If a thread in your driver uses this call, make sure your disconnect() * method can wait for it to complete. * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_clear_halt(struct usb_device *dev, int pipe) { int result; int endp = usb_pipeendpoint(pipe); if (usb_pipein(pipe)) endp |= USB_DIR_IN; /* we don't care if it wasn't halted first. in fact some devices * (like some ibmcam model 1 units) seem to expect hosts to make * this request for iso endpoints, which can't halt! */ result = usb_control_msg_send(dev, 0, USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, USB_ENDPOINT_HALT, endp, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); /* don't un-halt or force to DATA0 except on success */ if (result) return result; /* NOTE: seems like Microsoft and Apple don't bother verifying * the clear "took", so some devices could lock up if you check... * such as the Hagiwara FlashGate DUAL. So we won't bother. 
* * NOTE: make sure the logic here doesn't diverge much from * the copy in usb-storage, for as long as we need two copies. */ usb_reset_endpoint(dev, endp); return 0; } EXPORT_SYMBOL_GPL(usb_clear_halt); static int create_intf_ep_devs(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt = intf->cur_altsetting; int i; if (intf->ep_devs_created || intf->unregistering) return 0; for (i = 0; i < alt->desc.bNumEndpoints; ++i) (void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev); intf->ep_devs_created = 1; return 0; } static void remove_intf_ep_devs(struct usb_interface *intf) { struct usb_host_interface *alt = intf->cur_altsetting; int i; if (!intf->ep_devs_created) return; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_remove_ep_devs(&alt->endpoint[i]); intf->ep_devs_created = 0; } /** * usb_disable_endpoint -- Disable an endpoint by address * @dev: the device whose endpoint is being disabled * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables the endpoint for URB submission and nukes all pending URBs. * If @reset_hardware is set then also deallocates hcd/hardware state * for the endpoint. */ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, bool reset_hardware) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (!dev) return; if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { ep->enabled = 0; usb_hcd_flush_endpoint(dev, ep); if (reset_hardware) usb_hcd_disable_endpoint(dev, ep); } } /** * usb_reset_endpoint - Reset an endpoint's state. * @dev: the device whose endpoint is to be reset * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * * Resets any host-side endpoint state such as the toggle bit, * sequence number or current window. */ void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (usb_endpoint_out(epaddr)) ep = dev->ep_out[epnum]; else ep = dev->ep_in[epnum]; if (ep) usb_hcd_reset_endpoint(dev, ep); } EXPORT_SYMBOL_GPL(usb_reset_endpoint); /** * usb_disable_interface -- Disable all endpoints for an interface * @dev: the device whose interface is being disabled * @intf: pointer to the interface descriptor * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables all the endpoints for the interface's current altsetting. */ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_hardware) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { usb_disable_endpoint(dev, alt->endpoint[i].desc.bEndpointAddress, reset_hardware); } } /* * usb_disable_device_endpoints -- Disable all endpoints for a device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. */ static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) { struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i; if (hcd->driver->check_bandwidth) { /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, false); usb_disable_endpoint(dev, i + USB_DIR_IN, false); } /* Remove endpoints from the host controller internal state */ mutex_lock(hcd->bandwidth_mutex); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); mutex_unlock(hcd->bandwidth_mutex); } /* Second pass: remove endpoint pointers */ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, true); usb_disable_endpoint(dev, i + USB_DIR_IN, true); } } /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. * * Disables all the device's endpoints, potentially including endpoint 0. * Deallocates hcd/hardware state for the endpoints (nuking all or most * pending urbs) and usbcore state for the interfaces, so that usbcore * must usb_set_configuration() before any interfaces could be used. */ void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) */ if (dev->actconfig) { /* * FIXME: In order to avoid self-deadlock involving the * bandwidth_mutex, we have to mark all the interfaces * before unregistering any of them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) dev->actconfig->interface[i]->unregistering = 1; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { struct usb_interface *interface; /* remove this interface if it has been registered */ interface = dev->actconfig->interface[i]; if (!device_is_registered(&interface->dev)) continue; dev_dbg(&dev->dev, "unregistering interface %s\n", dev_name(&interface->dev)); remove_intf_ep_devs(interface); device_del(&interface->dev); } /* Now that the interfaces are unbound, nobody should * try to access them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { put_device(&dev->actconfig->interface[i]->dev); dev->actconfig->interface[i] = NULL; } usb_disable_usb2_hardware_lpm(dev); usb_unlocked_disable_lpm(dev); usb_disable_ltm(dev); dev->actconfig = NULL; if (dev->state == USB_STATE_CONFIGURED) usb_set_device_state(dev, USB_STATE_ADDRESS); } dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); usb_disable_device_endpoints(dev, skip_ep0); } /** * usb_enable_endpoint - Enable an endpoint for USB communications * @dev: the device whose interface is being enabled * @ep: the endpoint * @reset_ep: flag to reset the endpoint state * * Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers. * For control endpoints, both the input and output sides are handled. */ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, bool reset_ep) { int epnum = usb_endpoint_num(&ep->desc); int is_out = usb_endpoint_dir_out(&ep->desc); int is_control = usb_endpoint_xfer_control(&ep->desc); if (reset_ep) usb_hcd_reset_endpoint(dev, ep); if (is_out || is_control) dev->ep_out[epnum] = ep; if (!is_out || is_control) dev->ep_in[epnum] = ep; ep->enabled = 1; } /** * usb_enable_interface - Enable all the endpoints for an interface * @dev: the device whose interface is being enabled * @intf: pointer to the interface descriptor * @reset_eps: flag to reset the endpoints' state * * Enables all the endpoints for the interface's current altsetting. 
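 *
 * For drivers, the way to switch altsettings is usb_set_interface(), defined
 * below; a minimal sketch (interface number 1 and altsetting 2 are
 * hypothetical, "udev" is the claimed device):
 *
 *	if (usb_set_interface(udev, 1, 2))
 *		dev_err(&udev->dev, "could not select altsetting\n");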
*/ void usb_enable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_eps) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps); } /** * usb_set_interface - Makes a particular alternate setting be current * @dev: the device whose interface is being updated * @interface: the interface being updated * @alternate: the setting being chosen. * * Context: task context, might sleep. * * This is used to enable data transfers on interfaces that may not * be enabled by default. Not all devices support such configurability. * Only the driver bound to an interface may change its setting. * * Within any given configuration, each interface may have several * alternative settings. These are often used to control levels of * bandwidth consumption. For example, the default setting for a high * speed interrupt endpoint may not send more than 64 bytes per microframe, * while interrupt transfers of up to 3KBytes per microframe are legal. * Also, isochronous endpoints may never be part of an * interface's default setting. To access such bandwidth, alternate * interface settings must be made current. * * Note that in the Linux USB subsystem, bandwidth associated with * an endpoint in a given alternate setting is not reserved until an URB * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. * * xHCI reserves bandwidth and configures the alternate setting in * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting * may be disabled. Drivers cannot rely on any particular alternate * setting being in effect after a failure. * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed * (perhaps forced by unlinking). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_set_interface(struct usb_device *dev, int interface, int alternate) { struct usb_interface *iface; struct usb_host_interface *alt; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i, ret, manual = 0; unsigned int epaddr; unsigned int pipe; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; iface = usb_ifnum_to_if(dev, interface); if (!iface) { dev_dbg(&dev->dev, "selecting invalid interface %d\n", interface); return -EINVAL; } if (iface->unregistering) return -ENODEV; alt = usb_altnum_to_altsetting(iface, alternate); if (!alt) { dev_warn(&dev->dev, "selecting invalid altsetting %d\n", alternate); return -EINVAL; } /* * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, * including freeing dropped endpoint ring buffers. * Make sure the interface endpoints are flushed before that */ usb_disable_interface(dev, iface, false); /* Make sure we have enough bandwidth for this alternate interface. * Remove the current alt setting and add the new alt setting. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new alt setting is installed, * so that the xHCI driver can recalculate the U1/U2 timeouts. 
*/ if (usb_disable_lpm(dev)) { dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* Changing alt-setting also frees any allocated streams */ for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++) iface->cur_altsetting->endpoint[i].streams = 0; ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt); if (ret < 0) { dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n", alternate); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } if (dev->quirks & USB_QUIRK_NO_SET_INTF) ret = -EPIPE; else ret = usb_control_msg_send(dev, 0, USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE, alternate, interface, NULL, 0, 5000, GFP_NOIO); /* 9.4.10 says devices don't need this and are free to STALL the * request if the interface only has one alternate setting. */ if (ret == -EPIPE && iface->num_altsetting == 1) { dev_dbg(&dev->dev, "manual set_interface for iface %d, alt %d\n", interface, alternate); manual = 1; } else if (ret) { /* Re-instate the old alt setting */ usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } mutex_unlock(hcd->bandwidth_mutex); /* FIXME drivers shouldn't need to replicate/bugfix the logic here * when they implement async or easily-killable versions of this or * other "should-be-internal" functions (like clear_halt). * should hcd+usbcore postprocess control requests? */ /* prevent submissions using previous endpoint settings */ if (iface->cur_altsetting != alt) { remove_intf_ep_devs(iface); usb_remove_sysfs_intf_files(iface); } usb_disable_interface(dev, iface, true); iface->cur_altsetting = alt; /* Now that the interface is installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* If the interface only has one altsetting and the device didn't * accept the request, we attempt to carry out the equivalent action * by manually clearing the HALT feature for each endpoint in the * new altsetting. */ if (manual) { for (i = 0; i < alt->desc.bNumEndpoints; i++) { epaddr = alt->endpoint[i].desc.bEndpointAddress; pipe = __create_pipe(dev, USB_ENDPOINT_NUMBER_MASK & epaddr) | (usb_endpoint_out(epaddr) ? USB_DIR_OUT : USB_DIR_IN); usb_clear_halt(dev, pipe); } } /* 9.1.1.5: reset toggles for all endpoints in the new altsetting * * Note: * Despite EP0 is always present in all interfaces/AS, the list of * endpoints from the descriptor does not contain EP0. Due to its * omnipresence one might expect EP0 being considered "affected" by * any SetInterface request and hence assume toggles need to be reset. * However, EP0 toggles are re-synced for every individual transfer * during the SETUP stage - hence EP0 toggles are "don't care" here. * (Likewise, EP0 never "halts" on well designed devices.) */ usb_enable_interface(dev, iface, true); if (device_is_registered(&iface->dev)) { usb_create_sysfs_intf_files(iface); create_intf_ep_devs(iface); } return 0; } EXPORT_SYMBOL_GPL(usb_set_interface); /** * usb_reset_configuration - lightweight device reset * @dev: the device whose configuration is being reset * * This issues a standard SET_CONFIGURATION request to the device using * the current configuration. The effect is to reset most USB-related * state in the device, including interface altsettings (reset to zero), * endpoint halts (cleared), and endpoint state (only for bulk and interrupt * endpoints). Other usbcore state is unchanged, including bindings of * usb device drivers to interfaces. 
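 *
 * A sketch of the call pattern ("udev" is hypothetical; the device-lock
 * requirement is stated below):
 *
 *	usb_lock_device(udev);
 *	retval = usb_reset_configuration(udev);
 *	usb_unlock_device(udev);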
* * Because this affects multiple interfaces, avoid using this with composite * (multi-interface) devices. Instead, the driver for each interface may * use usb_set_interface() on the interfaces it claims. Be careful though; * some devices don't support the SET_INTERFACE request, and others won't * reset all the interface state (notably endpoint state). Resetting the whole * configuration would affect other drivers' interfaces. * * The caller must own the device lock. * * Return: Zero on success, else a negative error code. * * If this routine fails the device will probably be in an unusable state * with endpoints disabled, and interfaces only partially enabled. */ int usb_reset_configuration(struct usb_device *dev) { int i, retval; struct usb_host_config *config; struct usb_hcd *hcd = bus_to_hcd(dev->bus); if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; /* caller must have locked the device and must own * the usb bus readlock (so driver bindings are stable); * calls during probe() are fine */ usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the configuration is reset, so * that the xHCI driver can recalculate the U1/U2 timeouts. */ if (usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (retval) { usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. */ if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) { remove_intf_ep_devs(intf); usb_remove_sysfs_intf_files(intf); } intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); if (device_is_registered(&intf->dev)) { usb_create_sysfs_intf_files(intf); create_intf_ep_devs(intf); } } /* Now that the interfaces are installed, re-enable LPM. 
*/ usb_unlocked_enable_lpm(dev); return 0; } EXPORT_SYMBOL_GPL(usb_reset_configuration); static void usb_release_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_interface_cache *intfc = altsetting_to_usb_interface_cache(intf->altsetting); kref_put(&intfc->ref, usb_release_interface_cache); usb_put_dev(interface_to_usbdev(intf)); of_node_put(dev->of_node); kfree(intf); } /* * usb_deauthorize_interface - deauthorize an USB interface * * @intf: USB interface structure */ void usb_deauthorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; device_lock(dev->parent); if (intf->authorized) { device_lock(dev); intf->authorized = 0; device_unlock(dev); usb_forced_unbind_intf(intf); } device_unlock(dev->parent); } /* * usb_authorize_interface - authorize an USB interface * * @intf: USB interface structure */ void usb_authorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; if (!intf->authorized) { device_lock(dev); intf->authorized = 1; /* authorize interface */ device_unlock(dev); } } static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct usb_device *usb_dev; const struct usb_interface *intf; const struct usb_host_interface *alt; intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); alt = intf->cur_altsetting; if (add_uevent_var(env, "INTERFACE=%d/%d/%d", alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=usb:" "v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X", le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct), le16_to_cpu(usb_dev->descriptor.bcdDevice), usb_dev->descriptor.bDeviceClass, usb_dev->descriptor.bDeviceSubClass, usb_dev->descriptor.bDeviceProtocol, alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol, alt->desc.bInterfaceNumber)) return -ENOMEM; return 0; } const struct device_type usb_if_device_type = { .name = "usb_interface", .release = usb_release_interface, .uevent = usb_if_uevent, }; static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev, struct usb_host_config *config, u8 inum) { struct usb_interface_assoc_descriptor *retval = NULL; struct usb_interface_assoc_descriptor *intf_assoc; int first_intf; int last_intf; int i; for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) { intf_assoc = config->intf_assoc[i]; if (intf_assoc->bInterfaceCount == 0) continue; first_intf = intf_assoc->bFirstInterface; last_intf = first_intf + (intf_assoc->bInterfaceCount - 1); if (inum >= first_intf && inum <= last_intf) { if (!retval) retval = intf_assoc; else dev_err(&dev->dev, "Interface #%d referenced" " by multiple IADs\n", inum); } } return retval; } /* * Internal function to queue a device reset * See usb_queue_reset_device() for more details */ static void __usb_queue_reset_device(struct work_struct *ws) { int rc; struct usb_interface *iface = container_of(ws, struct usb_interface, reset_ws); struct usb_device *udev = interface_to_usbdev(iface); rc = usb_lock_device_for_reset(udev, iface); if (rc >= 0) { usb_reset_device(udev); usb_unlock_device(udev); } usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */ } /* * Internal function to set the wireless_status sysfs attribute * See usb_set_wireless_status() for more details */ static void __usb_wireless_status_intf(struct work_struct *ws) { struct usb_interface *iface = container_of(ws, 
struct usb_interface, wireless_status_work); device_lock(iface->dev.parent); if (iface->sysfs_files_created) usb_update_wireless_status_attr(iface); device_unlock(iface->dev.parent); usb_put_intf(iface); /* Undo _get_ in usb_set_wireless_status() */ } /** * usb_set_wireless_status - sets the wireless_status struct member * @iface: the interface to modify * @status: the new wireless status * * Set the wireless_status struct member to the new value, and emit * sysfs changes as necessary. * * Returns: 0 on success, -EALREADY if already set. */ int usb_set_wireless_status(struct usb_interface *iface, enum usb_wireless_status status) { if (iface->wireless_status == status) return -EALREADY; usb_get_intf(iface); iface->wireless_status = status; schedule_work(&iface->wireless_status_work); return 0; } EXPORT_SYMBOL_GPL(usb_set_wireless_status); /* * usb_set_configuration - Makes a particular device setting be current * @dev: the device whose configuration is being updated * @configuration: the configuration being chosen. * * Context: task context, might sleep. Caller holds device lock. * * This is used to enable non-default device modes. Not all devices * use this kind of configurability; many devices only have one * configuration. * * @configuration is the value of the configuration to be installed. * According to the USB spec (e.g. section 9.1.1.5), configuration values * must be non-zero; a value of zero indicates that the device in * unconfigured. However some devices erroneously use 0 as one of their * configuration values. To help manage such devices, this routine will * accept @configuration = -1 as indicating the device should be put in * an unconfigured state. * * USB device configurations may affect Linux interoperability, * power consumption and the functionality available. For example, * the default configuration is limited to using 100mA of bus power, * so that when certain device functionality requires more power, * and the device is bus powered, that functionality should be in some * non-default device configuration. Other device modes may also be * reflected as configuration options, such as whether two ISDN * channels are available independently; and choosing between open * standard device protocols (like CDC) or proprietary ones. * * Note that a non-authorized device (dev->authorized == 0) will only * be put in unconfigured mode. * * Note that USB has an additional level of device configurability, * associated with interfaces. That configurability is accessed using * usb_set_interface(). * * This call is synchronous. The calling context must be able to sleep, * must own the device lock, and must not hold the driver model's USB * bus mutex; usb interface driver probe() methods cannot use this routine. * * Returns zero on success, or else the status code returned by the * underlying call that failed. On successful completion, each interface * in the original device configuration has been destroyed, and each one * in the new configuration has been probed by all relevant usb device * drivers currently known to the kernel. 
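 *
 * Interface drivers that need a configuration change must instead queue one
 * through usb_driver_set_configuration(), defined later in this file; a
 * minimal sketch (configuration value 2 is hypothetical):
 *
 *	if (usb_driver_set_configuration(udev, 2))
 *		dev_err(&udev->dev, "could not queue config change\n");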
*/ int usb_set_configuration(struct usb_device *dev, int configuration) { int i, ret; struct usb_host_config *cp = NULL; struct usb_interface **new_interfaces = NULL; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int n, nintf; if (dev->authorized == 0 || configuration == -1) configuration = 0; else { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { if (dev->config[i].desc.bConfigurationValue == configuration) { cp = &dev->config[i]; break; } } } if ((!cp && configuration != 0)) return -EINVAL; /* The USB spec says configuration 0 means unconfigured. * But if a device includes a configuration numbered 0, * we will accept it as a correctly configured state. * Use -1 if you really want to unconfigure the device. */ if (cp && configuration == 0) dev_warn(&dev->dev, "config 0 descriptor??\n"); /* Allocate memory for new interfaces before doing anything else, * so that if we run out then nothing will have changed. */ n = nintf = 0; if (cp) { nintf = cp->desc.bNumInterfaces; new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces), GFP_NOIO); if (!new_interfaces) return -ENOMEM; for (; n < nintf; ++n) { new_interfaces[n] = kzalloc( sizeof(struct usb_interface), GFP_NOIO); if (!new_interfaces[n]) { ret = -ENOMEM; free_interfaces: while (--n >= 0) kfree(new_interfaces[n]); kfree(new_interfaces); return ret; } } i = dev->bus_mA - usb_get_max_power(dev, cp); if (i < 0) dev_warn(&dev->dev, "new config #%d exceeds power " "limit by %dmA\n", configuration, -i); } /* Wake up the device so we can send it the Set-Config request */ ret = usb_autoresume_device(dev); if (ret) goto free_interfaces; /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ /* Get rid of pending async Set-Config requests for this device */ cancel_async_set_config(dev); /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new configuration is * installed, so that the xHCI driver can recalculate the U1/U2 * timeouts. */ if (dev->actconfig && usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); ret = -ENOMEM; goto free_interfaces; } ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { if (dev->actconfig) usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); usb_autosuspend_device(dev); goto free_interfaces; } /* * Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ for (i = 0; i < nintf; ++i) { struct usb_interface_cache *intfc; struct usb_interface *intf; struct usb_host_interface *alt; u8 ifnum; cp->interface[i] = intf = new_interfaces[i]; intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; intf->authorized = !!HCD_INTF_AUTHORIZED(hcd); kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. 
*/ if (!alt) alt = &intf->altsetting[0]; ifnum = alt->desc.bInterfaceNumber; intf->intf_assoc = find_iad(dev, cp, ifnum); intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); intf->dev.parent = &dev->dev; if (usb_of_has_combined_node(dev)) { device_set_of_node_from_dev(&intf->dev, &dev->dev); } else { intf->dev.of_node = usb_of_get_interface_node(dev, configuration, ifnum); } ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev)); intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; intf->dev.type = &usb_if_device_type; intf->dev.groups = usb_interface_groups; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); INIT_WORK(&intf->wireless_status_work, __usb_wireless_status_intf); intf->minor = -1; device_initialize(&intf->dev); pm_runtime_no_callbacks(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, configuration, ifnum); usb_get_dev(dev); } kfree(new_interfaces); ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (ret && cp) { /* * All the old state is gone, so what else can we do? * The device is probably useless now anyway. */ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); for (i = 0; i < nintf; ++i) { usb_disable_interface(dev, cp->interface[i], true); put_device(&cp->interface[i]->dev); cp->interface[i] = NULL; } cp = NULL; } dev->actconfig = cp; mutex_unlock(hcd->bandwidth_mutex); if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); /* Leave LPM disabled while the device is unconfigured. */ usb_autosuspend_device(dev); return ret; } usb_set_device_state(dev, USB_STATE_CONFIGURED); if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); /* Now that the interfaces are installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* Enable LTM if it was turned off by usb_disable_device. */ usb_enable_ltm(dev); /* Now that all the interfaces are set up, register them * to trigger binding of drivers to interfaces. probe() * routines may install different altsettings and may * claim() any interfaces not yet bound. Many class drivers * need that: CDC, audio, video, etc. */ for (i = 0; i < nintf; ++i) { struct usb_interface *intf = cp->interface[i]; if (intf->dev.of_node && !of_device_is_available(intf->dev.of_node)) { dev_info(&dev->dev, "skipping disabled interface %d\n", intf->cur_altsetting->desc.bInterfaceNumber); continue; } dev_dbg(&dev->dev, "adding %s (config #%d, interface %d)\n", dev_name(&intf->dev), configuration, intf->cur_altsetting->desc.bInterfaceNumber); device_enable_async_suspend(&intf->dev); ret = device_add(&intf->dev); if (ret != 0) { dev_err(&dev->dev, "device_add(%s) --> %d\n", dev_name(&intf->dev), ret); continue; } create_intf_ep_devs(intf); } usb_autosuspend_device(dev); return 0; } EXPORT_SYMBOL_GPL(usb_set_configuration); static LIST_HEAD(set_config_list); static DEFINE_SPINLOCK(set_config_lock); struct set_config_request { struct usb_device *udev; int config; struct work_struct work; struct list_head node; }; /* Worker routine for usb_driver_set_configuration() */ static void driver_set_config_work(struct work_struct *work) { struct set_config_request *req = container_of(work, struct set_config_request, work); struct usb_device *udev = req->udev; usb_lock_device(udev); spin_lock(&set_config_lock); list_del(&req->node); spin_unlock(&set_config_lock); if (req->config >= -1) /* Is req still valid? 
*/ usb_set_configuration(udev, req->config); usb_unlock_device(udev); usb_put_dev(udev); kfree(req); } /* Cancel pending Set-Config requests for a device whose configuration * was just changed */ static void cancel_async_set_config(struct usb_device *udev) { struct set_config_request *req; spin_lock(&set_config_lock); list_for_each_entry(req, &set_config_list, node) { if (req->udev == udev) req->config = -999; /* Mark as cancelled */ } spin_unlock(&set_config_lock); } /** * usb_driver_set_configuration - Provide a way for drivers to change device configurations * @udev: the device whose configuration is being updated * @config: the configuration being chosen. * Context: In process context, must be able to sleep * * Device interface drivers are not allowed to change device configurations. * This is because changing configurations will destroy the interface the * driver is bound to and create new ones; it would be like a floppy-disk * driver telling the computer to replace the floppy-disk drive with a * tape drive! * * Still, in certain specialized circumstances the need may arise. This * routine gets around the normal restrictions by using a work thread to * submit the change-config request. * * Return: 0 if the request was successfully queued, error code otherwise. * The caller has no way to know whether the queued request will eventually * succeed. */ int usb_driver_set_configuration(struct usb_device *udev, int config) { struct set_config_request *req; req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->udev = udev; req->config = config; INIT_WORK(&req->work, driver_set_config_work); spin_lock(&set_config_lock); list_add(&req->node, &set_config_list); spin_unlock(&set_config_lock); usb_get_dev(udev); schedule_work(&req->work); return 0; } EXPORT_SYMBOL_GPL(usb_driver_set_configuration); /** * cdc_parse_cdc_header - parse the extra headers present in CDC devices * @hdr: the place to put the results of the parsing * @intf: the interface for which parsing is requested * @buffer: pointer to the extra headers to be parsed * @buflen: length of the extra headers * * This evaluates the extra headers present in CDC devices which * bind the interfaces for data and control and provide details * about the capabilities of the device. 
* * Return: number of descriptors parsed or -EINVAL * if the header is contradictory beyond salvage */ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen) { /* duplicates are ignored */ struct usb_cdc_union_desc *union_header = NULL; /* duplicates are not tolerated */ struct usb_cdc_header_desc *header = NULL; struct usb_cdc_ether_desc *ether = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; struct usb_cdc_mdlm_desc *desc = NULL; unsigned int elength; int cnt = 0; memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); hdr->phonet_magic_present = false; while (buflen > 0) { elength = buffer[0]; if (!elength) { dev_err(&intf->dev, "skipping garbage byte\n"); elength = 1; goto next_desc; } if ((buflen < elength) || (elength < 3)) { dev_err(&intf->dev, "invalid descriptor buffer length\n"); break; } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ if (elength < sizeof(struct usb_cdc_union_desc)) goto next_desc; if (union_header) { dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); goto next_desc; } union_header = (struct usb_cdc_union_desc *)buffer; break; case USB_CDC_COUNTRY_TYPE: if (elength < sizeof(struct usb_cdc_country_functional_desc)) goto next_desc; hdr->usb_cdc_country_functional_desc = (struct usb_cdc_country_functional_desc *)buffer; break; case USB_CDC_HEADER_TYPE: if (elength != sizeof(struct usb_cdc_header_desc)) goto next_desc; if (header) return -EINVAL; header = (struct usb_cdc_header_desc *)buffer; break; case USB_CDC_ACM_TYPE: if (elength < sizeof(struct usb_cdc_acm_descriptor)) goto next_desc; hdr->usb_cdc_acm_descriptor = (struct usb_cdc_acm_descriptor *)buffer; break; case USB_CDC_ETHERNET_TYPE: if (elength != sizeof(struct usb_cdc_ether_desc)) goto next_desc; if (ether) return -EINVAL; ether = (struct usb_cdc_ether_desc *)buffer; break; case USB_CDC_CALL_MANAGEMENT_TYPE: if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) goto next_desc; hdr->usb_cdc_call_mgmt_descriptor = (struct usb_cdc_call_mgmt_descriptor *)buffer; break; case USB_CDC_DMM_TYPE: if (elength < sizeof(struct usb_cdc_dmm_desc)) goto next_desc; hdr->usb_cdc_dmm_desc = (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; detail = (struct usb_cdc_mdlm_detail_desc *)buffer; break; case USB_CDC_NCM_TYPE: if (elength < sizeof(struct usb_cdc_ncm_desc)) goto next_desc; hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; break; case USB_CDC_MBIM_TYPE: if (elength < sizeof(struct usb_cdc_mbim_desc)) goto next_desc; hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; break; case USB_CDC_MBIM_EXTENDED_TYPE: if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) break; hdr->usb_cdc_mbim_extended_desc = (struct usb_cdc_mbim_extended_desc *)buffer; break; case CDC_PHONET_MAGIC_NUMBER: hdr->phonet_magic_present = true; break; default: /* * there are LOTS more CDC descriptors that * could legitimately be found here. 
 */
			dev_dbg(&intf->dev,
				"Ignoring descriptor: type %02x, length %u\n",
				buffer[2], elength);
			goto next_desc;
		}
		cnt++;
next_desc:
		buflen -= elength;
		buffer += elength;
	}

	hdr->usb_cdc_union_desc = union_header;
	hdr->usb_cdc_header_desc = header;
	hdr->usb_cdc_mdlm_detail_desc = detail;
	hdr->usb_cdc_mdlm_desc = desc;
	hdr->usb_cdc_ether_desc = ether;

	return cnt;
}
EXPORT_SYMBOL(cdc_parse_cdc_header);
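
/*
 * A minimal caller sketch for cdc_parse_cdc_header() (illustrative only, not
 * part of this file; "intf" is a hypothetical CDC control interface whose
 * extra bytes carry the class-specific functional descriptors):
 */
static int example_parse_cdc(struct usb_interface *intf)
{
	struct usb_cdc_parsed_header hdr;
	int ret;

	ret = cdc_parse_cdc_header(&hdr, intf, intf->cur_altsetting->extra,
				   intf->cur_altsetting->extralen);
	if (ret < 0)
		return ret;	/* headers contradictory beyond salvage */

	if (hdr.usb_cdc_union_desc)
		dev_dbg(&intf->dev, "data interface: %u\n",
			hdr.usb_cdc_union_desc->bSlaveInterface0);
	return 0;
}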
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*/ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/fs.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/gfs2_ondisk.h> #include <linux/backing-dev.h> #include <linux/uio.h> #include <trace/events/writeback.h> #include <linux/sched/signal.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "inode.h" #include "log.h" #include "meta_io.h" #include "quota.h" #include "trans.h" #include "rgrp.h" #include "super.h" #include "util.h" #include "glops.h" #include "aops.h" /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode * @lblock: The block number to look up * @bh_result: The buffer head to return the result in * @create: Non-zero if we may add block to the file * * Returns: errno */ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, struct buffer_head *bh_result, int create) { int error; error = gfs2_block_map(inode, lblock, bh_result, 0); if (error) return error; if (!buffer_mapped(bh_result)) return -ENODATA; return 0; } /** * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio * @folio: The folio to write * @wbc: The writeback control * * This is the same as calling block_write_full_folio, but it also * writes pages outside of i_size */ static int gfs2_write_jdata_folio(struct folio *folio, struct writeback_control *wbc) { struct inode * const inode = folio->mapping->host; loff_t i_size = i_size_read(inode); /* * The folio straddles i_size. It must be zeroed out on each and every * writepage invocation because it may be mmapped. "A file is mapped * in multiples of the page size. For a file that is not a multiple of * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ if (folio_pos(folio) < i_size && i_size < folio_pos(folio) + folio_size(folio)) folio_zero_segment(folio, offset_in_folio(folio, i_size), folio_size(folio)); return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc, wbc); } /** * __gfs2_jdata_write_folio - The core of jdata writepage * @folio: The folio to write * @wbc: The writeback control * * Implements the core of write back. If a transaction is required then * the checked flag will have been set and the transaction will have * already been started before this is called. 
*/ static int __gfs2_jdata_write_folio(struct folio *folio, struct writeback_control *wbc) { struct inode *inode = folio->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); if (folio_test_checked(folio)) { folio_clear_checked(folio); if (!folio_buffers(folio)) { create_empty_buffers(folio, inode->i_sb->s_blocksize, BIT(BH_Dirty)|BIT(BH_Uptodate)); } gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio)); } return gfs2_write_jdata_folio(folio, wbc); } /** * gfs2_jdata_writeback - Write jdata folios to the log * @mapping: The mapping to write * @wbc: The writeback control * * Returns: errno */ int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); struct folio *folio = NULL; int error; BUG_ON(current->journal_info); if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE)) return 0; while ((folio = writeback_iter(mapping, wbc, folio, &error))) { if (folio_test_checked(folio)) { folio_redirty_for_writepage(wbc, folio); folio_unlock(folio); continue; } error = __gfs2_jdata_write_folio(folio, wbc); } return error; } /** * gfs2_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: Write-back control * * Used for both ordered and writeback modes. */ static int gfs2_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct iomap_writepage_ctx wpc = { }; int ret; /* * Even if we didn't write enough pages here, we might still be holding * dirty pages in the ail. We forcibly flush the ail because we don't * want balance_dirty_pages() to loop indefinitely trying to write out * pages held in the ail that it can't find. */ ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); if (ret == 0 && wbc->nr_to_write > 0) set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); return ret; } /** * gfs2_write_jdata_batch - Write back a folio batch's worth of folios * @mapping: The mapping * @wbc: The writeback control * @fbatch: The batch of folios * @done_index: Page index * * Returns: non-zero if loop should terminate, zero otherwise */ static int gfs2_write_jdata_batch(struct address_space *mapping, struct writeback_control *wbc, struct folio_batch *fbatch, pgoff_t *done_index) { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned nrblocks; int i; int ret; size_t size = 0; int nr_folios = folio_batch_count(fbatch); for (i = 0; i < nr_folios; i++) size += folio_size(fbatch->folios[i]); nrblocks = size >> inode->i_blkbits; ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); if (ret < 0) return ret; for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch->folios[i]; *done_index = folio->index; folio_lock(folio); if (unlikely(folio->mapping != mapping)) { continue_unlock: folio_unlock(folio); continue; } if (!folio_test_dirty(folio)) { /* someone wrote it for us */ goto continue_unlock; } if (folio_test_writeback(folio)) { if (wbc->sync_mode != WB_SYNC_NONE) folio_wait_writeback(folio); else goto continue_unlock; } BUG_ON(folio_test_writeback(folio)); if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; trace_wbc_writepage(wbc, inode_to_bdi(inode)); ret = __gfs2_jdata_write_folio(folio, wbc); if (unlikely(ret)) { /* * done_index is set past this page, so media errors * will not choke background writeout for the entire * file. This has consequences for range_cyclic * semantics (ie. 
it may not be suitable for data * integrity writeout). */ *done_index = folio_next_index(folio); ret = 1; break; } /* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. */ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { ret = 1; break; } } gfs2_trans_end(sdp); return ret; } /** * gfs2_write_cache_jdata - Like write_cache_pages but different * @mapping: The mapping to write * @wbc: The writeback control * * The reason that we use our own function here is that we need to * start transactions before we grab page locks. This allows us * to get the ordering right. */ static int gfs2_write_cache_jdata(struct address_space *mapping, struct writeback_control *wbc) { int ret = 0; int done = 0; struct folio_batch fbatch; int nr_folios; pgoff_t writeback_index; pgoff_t index; pgoff_t end; pgoff_t done_index; int cycled; int range_whole = 0; xa_mark_t tag; folio_batch_init(&fbatch); if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; if (index == 0) cycled = 1; else cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && (index <= end)) { nr_folios = filemap_get_folios_tag(mapping, &index, end, tag, &fbatch); if (nr_folios == 0) break; ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, &done_index); if (ret) done = 1; if (ret > 0) ret = 0; folio_batch_release(&fbatch); cond_resched(); } if (!cycled && !done) { /* * range_cyclic: * We hit the last page and there is more work to be done: wrap * back to the start of the file */ cycled = 1; index = 0; end = writeback_index - 1; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; return ret; } /** * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: The writeback control * */ static int gfs2_jdata_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); int ret; ret = gfs2_write_cache_jdata(mapping, wbc); if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) { gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_JDATA_WPAGES); ret = gfs2_write_cache_jdata(mapping, wbc); } return ret; } /** * stuffed_read_folio - Fill in a Linux folio with stuffed file data * @ip: the inode * @folio: the folio * * Returns: errno */ static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio) { struct buffer_head *dibh = NULL; size_t dsize = i_size_read(&ip->i_inode); void *from = NULL; int error = 0; /* * Due to the order of unstuffing files and ->fault(), we can be * asked for a zero folio in the case of a stuffed file being extended, * so we need to supply one here. It doesn't happen often. 
*/ if (unlikely(folio->index)) { dsize = 0; } else { error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out; from = dibh->b_data + sizeof(struct gfs2_dinode); } folio_fill_tail(folio, 0, from, dsize); brelse(dibh); out: folio_end_read(folio, error == 0); return error; } /** * gfs2_read_folio - read a folio from a file * @file: The file to read * @folio: The folio in the file */ static int gfs2_read_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); int error; if (!gfs2_is_jdata(ip) || (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) { error = iomap_read_folio(folio, &gfs2_iomap_ops); } else if (gfs2_is_stuffed(ip)) { error = stuffed_read_folio(ip, folio); } else { error = mpage_read_folio(folio, gfs2_block_map); } if (gfs2_withdrawing_or_withdrawn(sdp)) return -EIO; return error; } /** * gfs2_internal_read - read an internal file * @ip: The gfs2 inode * @buf: The buffer to fill * @pos: The file position * @size: The amount to read * */ ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, size_t size) { struct address_space *mapping = ip->i_inode.i_mapping; unsigned long index = *pos >> PAGE_SHIFT; size_t copied = 0; do { size_t offset, chunk; struct folio *folio; folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL); if (IS_ERR(folio)) { if (PTR_ERR(folio) == -EINTR) continue; return PTR_ERR(folio); } offset = *pos + copied - folio_pos(folio); chunk = min(size - copied, folio_size(folio) - offset); memcpy_from_folio(buf + copied, folio, offset, chunk); index = folio_next_index(folio); folio_put(folio); copied += chunk; } while (copied < size); (*pos) += size; return size; } /** * gfs2_readahead - Read a bunch of pages at once * @rac: Read-ahead control structure * * Some notes: * 1. This is only for readahead, so we can simply ignore any things * which are slightly inconvenient (such as locking conflicts between * the page lock and the glock) and return having done no I/O. It's * obviously not something we'd want to do on too regular a basis. * Any I/O we ignore at this time will be done via readpage later. * 2. We don't handle stuffed files here; we let readpage do the honours. * 3. mpage_readahead() does most of the heavy lifting in the common case. * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places. */ static void gfs2_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); if (gfs2_is_stuffed(ip)) ; else if (gfs2_is_jdata(ip)) mpage_readahead(rac, gfs2_block_map); else iomap_readahead(rac, &gfs2_iomap_ops); } /** * adjust_fs_space - Adjusts the free space available due to gfs2_grow * @inode: the rindex inode */ void adjust_fs_space(struct inode *inode) { struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh; u64 fs_total, new_free; if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0) return; /* Total up the file system space, according to the latest rindex.
*/ fs_total = gfs2_ri_total(sdp); if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) goto out; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); if (fs_total > (m_sc->sc_total + l_sc->sc_total)) new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); else new_free = 0; spin_unlock(&sdp->sd_statfs_spin); fs_warn(sdp, "File system extended by %llu blocks.\n", (unsigned long long)new_free); gfs2_statfs_change(sdp, new_free, new_free, 0); update_statfs(sdp, m_bh); brelse(m_bh); out: sdp->sd_rindex_uptodate = 0; gfs2_trans_end(sdp); } static bool gfs2_jdata_dirty_folio(struct address_space *mapping, struct folio *folio) { if (current->journal_info) folio_set_checked(folio); return block_dirty_folio(mapping, folio); } /** * gfs2_bmap - Block map function * @mapping: Address space info * @lblock: The block to map * * Returns: The disk address for the block or 0 on hole or error */ static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_holder i_gh; sector_t dblock = 0; int error; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) return 0; if (!gfs2_is_stuffed(ip)) dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops); gfs2_glock_dq_uninit(&i_gh); return dblock; } static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; lock_buffer(bh); gfs2_log_lock(sdp); clear_buffer_dirty(bh); bd = bh->b_private; if (bd) { if (!list_empty(&bd->bd_list) && !buffer_pinned(bh)) list_del_init(&bd->bd_list); else { spin_lock(&sdp->sd_ail_lock); gfs2_remove_from_journal(bh, REMOVE_JDATA); spin_unlock(&sdp->sd_ail_lock); } } bh->b_bdev = NULL; clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); gfs2_log_unlock(sdp); unlock_buffer(bh); } static void gfs2_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host); size_t stop = offset + length; int partial_page = (offset || length < folio_size(folio)); struct buffer_head *bh, *head; unsigned long pos = 0; BUG_ON(!folio_test_locked(folio)); if (!partial_page) folio_clear_checked(folio); head = folio_buffers(folio); if (!head) goto out; bh = head; do { if (pos + bh->b_size > stop) return; if (offset <= pos) gfs2_discard(sdp, bh); pos += bh->b_size; bh = bh->b_this_page; } while (bh != head); out: if (!partial_page) filemap_release_folio(folio, 0); } /** * gfs2_release_folio - free the metadata associated with a folio * @folio: the folio that's being released * @gfp_mask: passed from Linux VFS, ignored by us * * Calls try_to_free_buffers() to free the buffers and put the folio if the * buffers can be released. * * Returns: true if the folio was put or else false */ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask) { struct address_space *mapping = folio->mapping; struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct buffer_head *bh, *head; struct gfs2_bufdata *bd; head = folio_buffers(folio); if (!head) return false; /* * mm accommodates an old ext3 case where clean folios might * not have had the dirty bit cleared. Thus, it can send actual * dirty folios to ->release_folio() via shrink_active_list(). * * As a workaround, we skip folios that contain dirty buffers * below. Once ->release_folio isn't called on dirty folios * anymore, we can warn on dirty buffers like we used to here * again. 
*/ gfs2_log_lock(sdp); bh = head; do { if (atomic_read(&bh->b_count)) goto cannot_release; bd = bh->b_private; if (bd && bd->bd_tr) goto cannot_release; if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh))) goto cannot_release; bh = bh->b_this_page; } while (bh != head); bh = head; do { bd = bh->b_private; if (bd) { gfs2_assert_warn(sdp, bd->bd_bh == bh); bd->bd_bh = NULL; bh->b_private = NULL; /* * The bd may still be queued as a revoke, in which * case we must not dequeue nor free it. */ if (!bd->bd_blkno && !list_empty(&bd->bd_list)) list_del_init(&bd->bd_list); if (list_empty(&bd->bd_list)) kmem_cache_free(gfs2_bufdata_cachep, bd); } bh = bh->b_this_page; } while (bh != head); gfs2_log_unlock(sdp); return try_to_free_buffers(folio); cannot_release: gfs2_log_unlock(sdp); return false; } static const struct address_space_operations gfs2_aops = { .writepages = gfs2_writepages, .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = iomap_dirty_folio, .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, .bmap = gfs2_bmap, .migrate_folio = filemap_migrate_folio, .is_partially_uptodate = iomap_is_partially_uptodate, .error_remove_folio = generic_error_remove_folio, }; static const struct address_space_operations gfs2_jdata_aops = { .writepages = gfs2_jdata_writepages, .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = gfs2_jdata_dirty_folio, .bmap = gfs2_bmap, .migrate_folio = buffer_migrate_folio, .invalidate_folio = gfs2_invalidate_folio, .release_folio = gfs2_release_folio, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_folio = generic_error_remove_folio, }; void gfs2_set_aops(struct inode *inode) { if (gfs2_is_jdata(GFS2_I(inode))) inode->i_mapping->a_ops = &gfs2_jdata_aops; else inode->i_mapping->a_ops = &gfs2_aops; }
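/*
 * Editor's illustration (not part of aops.c): a standalone userspace
 * restatement of the tail-zeroing decision in gfs2_write_jdata_folio() above.
 * A folio that straddles i_size must have the bytes past EOF zeroed on every
 * writepage call because that tail can be exposed through mmap. The helper
 * name is hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

static bool demo_folio_straddles_eof(long long folio_pos, size_t folio_size,
				     long long i_size)
{
	/* Mirrors: folio_pos(folio) < i_size &&
	 *          i_size < folio_pos(folio) + folio_size(folio) */
	return folio_pos < i_size && i_size < folio_pos + (long long)folio_size;
}

/* Example: a 16 KiB folio at offset 0 with i_size == 10000 straddles EOF,
 * so bytes 10000..16383 are zeroed before the folio is written out. */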
// SPDX-License-Identifier: GPL-2.0-or-later /* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * cache.c */ /* * Blocks in Squashfs are compressed. To avoid repeatedly decompressing * recently accessed data, Squashfs uses two small metadata and fragment caches. * * This file implements a generic cache implementation used for both caches, * plus functions layered on top of the generic cache implementation to * access the metadata and fragment caches. * * To avoid out of memory and fragmentation issues with vmalloc, the cache * uses sequences of kmalloced PAGE_SIZE buffers. * * It should be noted that the cache is not used for file datablocks; these * are decompressed and cached in the page-cache in the normal way. The * cache is only used to temporarily cache fragment and metadata blocks * which have been read as a result of a metadata (i.e. inode or * directory) or fragment access. Because metadata and fragments are packed * together into blocks (to gain greater compression), the read of a particular * piece of metadata or fragment will retrieve other metadata/fragments which * have been packed with it; because of locality of reference, these may be read * in the near future. Temporarily caching them ensures they are available for * near-future access without requiring an additional read and decompress.
*/ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/pagemap.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs.h" #include "page_actor.h" /* * Look-up block in cache, and increment usage count. If not in cache, read * and decompress it from disk. */ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb, struct squashfs_cache *cache, u64 block, int length) { int i, n; struct squashfs_cache_entry *entry; spin_lock(&cache->lock); while (1) { for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { if (cache->entry[i].block == block) { cache->curr_blk = i; break; } i = (i + 1) % cache->entries; } if (n == cache->entries) { /* * Block not in cache, if all cache entries are used * go to sleep waiting for one to become available. */ if (cache->unused == 0) { cache->num_waiters++; spin_unlock(&cache->lock); wait_event(cache->wait_queue, cache->unused); spin_lock(&cache->lock); cache->num_waiters--; continue; } /* * At least one unused cache entry. A simple * round-robin strategy is used to choose the entry to * be evicted from the cache. */ i = cache->next_blk; for (n = 0; n < cache->entries; n++) { if (cache->entry[i].refcount == 0) break; i = (i + 1) % cache->entries; } cache->next_blk = (i + 1) % cache->entries; entry = &cache->entry[i]; /* * Initialise chosen cache entry, and fill it in from * disk. */ cache->unused--; entry->block = block; entry->refcount = 1; entry->pending = 1; entry->num_waiters = 0; entry->error = 0; spin_unlock(&cache->lock); entry->length = squashfs_read_data(sb, block, length, &entry->next_index, entry->actor); spin_lock(&cache->lock); if (entry->length < 0) entry->error = entry->length; entry->pending = 0; /* * While filling this entry one or more other processes * have looked it up in the cache, and have slept * waiting for it to become available. */ if (entry->num_waiters) { spin_unlock(&cache->lock); wake_up_all(&entry->wait_queue); } else spin_unlock(&cache->lock); goto out; } /* * Block already in cache. Increment refcount so it doesn't * get reused until we're finished with it, if it was * previously unused there's one less cache entry available * for reuse. */ entry = &cache->entry[i]; if (entry->refcount == 0) cache->unused--; entry->refcount++; /* * If the entry is currently being filled in by another process * go to sleep waiting for it to become available. */ if (entry->pending) { entry->num_waiters++; spin_unlock(&cache->lock); wait_event(entry->wait_queue, !entry->pending); } else spin_unlock(&cache->lock); goto out; } out: TRACE("Got %s %d, start block %lld, refcount %d, error %d\n", cache->name, i, entry->block, entry->refcount, entry->error); if (entry->error) ERROR("Unable to read %s cache entry [%llx]\n", cache->name, block); return entry; } /* * Release cache entry, once usage count is zero it can be reused. */ void squashfs_cache_put(struct squashfs_cache_entry *entry) { struct squashfs_cache *cache = entry->cache; spin_lock(&cache->lock); entry->refcount--; if (entry->refcount == 0) { cache->unused++; /* * If there's any processes waiting for a block to become * available, wake one up. */ if (cache->num_waiters) { spin_unlock(&cache->lock); wake_up(&cache->wait_queue); return; } } spin_unlock(&cache->lock); } /* * Delete cache reclaiming all kmalloced buffers. 
*/ void squashfs_cache_delete(struct squashfs_cache *cache) { int i, j; if (IS_ERR(cache) || cache == NULL) return; for (i = 0; i < cache->entries; i++) { if (cache->entry[i].data) { for (j = 0; j < cache->pages; j++) kfree(cache->entry[i].data[j]); kfree(cache->entry[i].data); } kfree(cache->entry[i].actor); } kfree(cache->entry); kfree(cache); } /* * Initialise cache allocating the specified number of entries, each of * size block_size. To avoid vmalloc fragmentation issues each entry * is allocated as a sequence of kmalloced PAGE_SIZE buffers. */ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) { int i, j; struct squashfs_cache *cache; if (entries == 0) return NULL; cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) { ERROR("Failed to allocate %s cache\n", name); return ERR_PTR(-ENOMEM); } cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); if (cache->entry == NULL) { ERROR("Failed to allocate %s cache\n", name); goto cleanup; } cache->curr_blk = 0; cache->next_blk = 0; cache->unused = entries; cache->entries = entries; cache->block_size = block_size; cache->pages = block_size >> PAGE_SHIFT; cache->pages = cache->pages ? cache->pages : 1; cache->name = name; cache->num_waiters = 0; spin_lock_init(&cache->lock); init_waitqueue_head(&cache->wait_queue); for (i = 0; i < entries; i++) { struct squashfs_cache_entry *entry = &cache->entry[i]; init_waitqueue_head(&cache->entry[i].wait_queue); entry->cache = cache; entry->block = SQUASHFS_INVALID_BLK; entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); if (entry->data == NULL) { ERROR("Failed to allocate %s cache entry\n", name); goto cleanup; } for (j = 0; j < cache->pages; j++) { entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL); if (entry->data[j] == NULL) { ERROR("Failed to allocate %s buffer\n", name); goto cleanup; } } entry->actor = squashfs_page_actor_init(entry->data, cache->pages, 0); if (entry->actor == NULL) { ERROR("Failed to allocate %s cache entry\n", name); goto cleanup; } } return cache; cleanup: squashfs_cache_delete(cache); return ERR_PTR(-ENOMEM); } /* * Copy up to length bytes from cache entry to buffer starting at offset bytes * into the cache entry. If there's not length bytes then copy the number of * bytes available. In all cases return the number of bytes copied. */ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, int offset, int length) { int remaining = length; if (length == 0) return 0; else if (buffer == NULL) return min(length, entry->length - offset); while (offset < entry->length) { void *buff = entry->data[offset / PAGE_SIZE] + (offset % PAGE_SIZE); int bytes = min_t(int, entry->length - offset, PAGE_SIZE - (offset % PAGE_SIZE)); if (bytes >= remaining) { memcpy(buffer, buff, remaining); remaining = 0; break; } memcpy(buffer, buff, bytes); buffer += bytes; remaining -= bytes; offset += bytes; } return length - remaining; } /* * Read length bytes from metadata position <block, offset> (block is the * start of the compressed block on disk, and offset is the offset into * the block once decompressed). Data is packed into consecutive blocks, * and length bytes may require reading more than one block. 
*/ int squashfs_read_metadata(struct super_block *sb, void *buffer, u64 *block, int *offset, int length) { struct squashfs_sb_info *msblk = sb->s_fs_info; int bytes, res = length; struct squashfs_cache_entry *entry; TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); if (unlikely(length < 0)) return -EIO; while (length) { entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); if (entry->error) { res = entry->error; goto error; } else if (*offset >= entry->length) { res = -EIO; goto error; } bytes = squashfs_copy_data(buffer, entry, *offset, length); if (buffer) buffer += bytes; length -= bytes; *offset += bytes; if (*offset == entry->length) { *block = entry->next_index; *offset = 0; } squashfs_cache_put(entry); } return res; error: squashfs_cache_put(entry); return res; } /* * Look-up in the fragment cache the fragment located at <start_block> in the * filesystem. If necessary, read and decompress it from disk. */ struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb, u64 start_block, int length) { struct squashfs_sb_info *msblk = sb->s_fs_info; return squashfs_cache_get(sb, msblk->fragment_cache, start_block, length); } /* * Read and decompress the datablock located at <start_block> in the * filesystem. The cache is used here to avoid duplicating locking and * read/decompress code. */ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, u64 start_block, int length) { struct squashfs_sb_info *msblk = sb->s_fs_info; return squashfs_cache_get(sb, msblk->read_page, start_block, length); } /* * Read a filesystem table (uncompressed sequence of bytes) from disk */ void *squashfs_read_table(struct super_block *sb, u64 block, int length) { int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; int i, res; void *table, *buffer, **data; struct squashfs_page_actor *actor; table = buffer = kmalloc(length, GFP_KERNEL); if (table == NULL) return ERR_PTR(-ENOMEM); data = kcalloc(pages, sizeof(void *), GFP_KERNEL); if (data == NULL) { res = -ENOMEM; goto failed; } actor = squashfs_page_actor_init(data, pages, length); if (actor == NULL) { res = -ENOMEM; goto failed2; } for (i = 0; i < pages; i++, buffer += PAGE_SIZE) data[i] = buffer; res = squashfs_read_data(sb, block, length | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor); kfree(data); kfree(actor); if (res < 0) goto failed; return table; failed2: kfree(data); failed: kfree(table); return ERR_PTR(res); }
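/*
 * Editor's illustration (not part of cache.c): a hedged sketch of the
 * metadata API above. demo_read_inode_blob() is hypothetical; it shows how a
 * caller supplies a <block, offset> cursor that squashfs_read_metadata()
 * advances as the read crosses packed metadata blocks.
 */
static int demo_read_inode_blob(struct super_block *sb, u64 start_block,
				int start_offset, void *buf, int len)
{
	u64 block = start_block;	/* out: block holding the byte after the read */
	int offset = start_offset;	/* out: offset of that byte within the block */

	/* Returns len on success or a negative errno from the cache layer */
	return squashfs_read_metadata(sb, buf, &block, &offset, len);
}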
// SPDX-License-Identifier: GPL-2.0-only /* * TCP HYBLA * * TCP-HYBLA Congestion control algorithm, based on: * C.Caini, R.Firrincieli, "TCP-Hybla: A TCP Enhancement * for Heterogeneous Networks", * International Journal on satellite Communications, * September 2004 * Daniele Lacamera * root at danielinux.net */ #include <linux/module.h> #include <net/tcp.h> /* TCP Hybla structure. */ struct hybla { bool hybla_en; u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */ u32 rho; /* Rho parameter, integer part */ u32 rho2; /* Rho * Rho, integer part */ u32 rho_3ls; /* Rho parameter, <<3 */ u32 rho2_7ls; /* Rho^2, <<7 */ u32 minrtt_us; /* Minimum smoothed round trip time value seen */ }; /* Hybla reference round trip time (default = 1/40 sec = 25 ms), in ms */ static int rtt0 = 25; module_param(rtt0, int, 0644); MODULE_PARM_DESC(rtt0, "reference round trip time (ms)"); /* This is called to refresh values for hybla parameters */ static inline void hybla_recalc_param (struct sock *sk) { struct hybla *ca = inet_csk_ca(sk); ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC), 8U); ca->rho = ca->rho_3ls >> 3; ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; ca->rho2 = ca->rho2_7ls >> 7; } static void hybla_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct hybla *ca = inet_csk_ca(sk); ca->rho = 0; ca->rho2 = 0; ca->rho_3ls = 0; ca->rho2_7ls = 0; ca->snd_cwnd_cents = 0; ca->hybla_en = true; tcp_snd_cwnd_set(tp, 2); tp->snd_cwnd_clamp = 65535; /* 1st Rho measurement based on initial srtt */ hybla_recalc_param(sk); /* set minimum rtt as this is the 1st ever seen */ ca->minrtt_us = tp->srtt_us; tcp_snd_cwnd_set(tp, ca->rho); } static void hybla_state(struct sock *sk, u8 ca_state) { struct hybla *ca = inet_csk_ca(sk); ca->hybla_en = (ca_state == TCP_CA_Open); } static inline u32 hybla_fraction(u32 odds) { static const u32 fractions[] = { 128, 139, 152, 165, 181, 197, 215, 234, }; return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128; } /* TCP Hybla main routine.
* This is the algorithm behavior: * o Recalc Hybla parameters if min_rtt has changed * o Give cwnd a new value based on the model proposed * o remember increments <1 */ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked) { struct tcp_sock *tp = tcp_sk(sk); struct hybla *ca = inet_csk_ca(sk); u32 increment, odd, rho_fractions; int is_slowstart = 0; /* Recalculate rho only if this srtt is the lowest */ if (tp->srtt_us < ca->minrtt_us) { hybla_recalc_param(sk); ca->minrtt_us = tp->srtt_us; } if (!tcp_is_cwnd_limited(sk)) return; if (!ca->hybla_en) { tcp_reno_cong_avoid(sk, ack, acked); return; } if (ca->rho == 0) hybla_recalc_param(sk); rho_fractions = ca->rho_3ls - (ca->rho << 3); if (tcp_in_slow_start(tp)) { /* * slow start * INC = 2^RHO - 1 * This is done by splitting the rho parameter * into 2 parts: an integer part and a fraction part. * Increment<<7 is estimated by doing: * [2^(int+fract)]<<7 * that is equal to: * (2^int) * [(2^fract) <<7] * 2^int is computed directly as 1<<int, * while we will use hybla_fraction() to * calculate 2^fract in a <<7 value. */ is_slowstart = 1; increment = ((1 << min(ca->rho, 16U)) * hybla_fraction(rho_fractions)) - 128; } else { /* * congestion avoidance * INC = RHO^2 / W * as long as increment is estimated as (rho<<7)/window * it already is <<7 and we can easily count its fractions. */ increment = ca->rho2_7ls / tcp_snd_cwnd(tp); if (increment < 128) tp->snd_cwnd_cnt++; } odd = increment % 128; tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7)); ca->snd_cwnd_cents += odd; /* check when fractions go >= 128 and increase cwnd by 1. */ while (ca->snd_cwnd_cents >= 128) { tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); ca->snd_cwnd_cents -= 128; tp->snd_cwnd_cnt = 0; } /* check when cwnd has not been incremented for a while */ if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) { tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); tp->snd_cwnd_cnt = 0; } /* clamp down slowstart cwnd to ssthresh value. */ if (is_slowstart) tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh)); tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp)); } static struct tcp_congestion_ops tcp_hybla __read_mostly = { .init = hybla_init, .ssthresh = tcp_reno_ssthresh, .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = hybla_cong_avoid, .set_state = hybla_state, .owner = THIS_MODULE, .name = "hybla" }; static int __init hybla_register(void) { BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE); return tcp_register_congestion_control(&tcp_hybla); } static void __exit hybla_unregister(void) { tcp_unregister_congestion_control(&tcp_hybla); } module_init(hybla_register); module_exit(hybla_unregister); MODULE_AUTHOR("Daniele Lacamera"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TCP Hybla");
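/*
 * Editor's illustration (not part of tcp_hybla.c): a userspace walk-through
 * of hybla_recalc_param()'s fixed-point arithmetic, with an assumed 200 ms
 * measured RTT against the default 25 ms reference. Build as ordinary C.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t srtt_us = 200000 << 3;	/* kernel stores srtt left-shifted by 3 */
	uint32_t rtt0_ms = 25;		/* Hybla reference RTT (module param) */

	uint32_t rho_3ls = srtt_us / (rtt0_ms * 1000);	/* rho << 3 */
	if (rho_3ls < 8)
		rho_3ls = 8;	/* clamp rho to at least 1, as max_t(..., 8U) does */
	uint32_t rho = rho_3ls >> 3;			/* integer part: 8 here */
	uint32_t rho2_7ls = (rho_3ls * rho_3ls) << 1;	/* rho^2, kept << 7 */

	/* Prints rho=8 rho2=64: slow start scales cwnd growth by ~2^rho, */
	/* congestion avoidance adds rho^2/W per ACK instead of 1/W. */
	printf("rho=%u rho2=%u\n", rho, rho2_7ls >> 7);
	return 0;
}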
// SPDX-License-Identifier: GPL-2.0-only /* * misc.c * * PURPOSE * Miscellaneous routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 04/19/99 blf partial support for reading/writing specific EA's */ #include "udfdecl.h" #include <linux/fs.h> #include <linux/string.h> #include <linux/crc-itu-t.h> #include "udf_i.h" #include "udf_sb.h" struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size, uint32_t type, uint8_t loc) { uint8_t *ea = NULL, *ad = NULL; int offset; uint16_t crclen; struct udf_inode_info *iinfo = UDF_I(inode); ea = iinfo->i_data; if (iinfo->i_lenEAttr) { ad = iinfo->i_data + iinfo->i_lenEAttr; } else { ad = ea; size += sizeof(struct extendedAttrHeaderDesc); } offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) - iinfo->i_lenAlloc; /* TODO - Check for FreeEASpace */ if (loc & 0x01 && offset >= size) { struct extendedAttrHeaderDesc *eahd; eahd = (struct extendedAttrHeaderDesc *)ea; if (iinfo->i_lenAlloc) memmove(&ad[size], ad, iinfo->i_lenAlloc); if (iinfo->i_lenEAttr) { /* check checksum/crc */ if (eahd->descTag.tagIdent != cpu_to_le16(TAG_IDENT_EAHD) || le32_to_cpu(eahd->descTag.tagLocation) != iinfo->i_location.logicalBlockNum) return NULL; } else { struct udf_sb_info *sbi = UDF_SB(inode->i_sb); size -= sizeof(struct extendedAttrHeaderDesc); iinfo->i_lenEAttr += sizeof(struct extendedAttrHeaderDesc); eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD); if (sbi->s_udfrev >= 0x0200) eahd->descTag.descVersion = cpu_to_le16(3); else eahd->descTag.descVersion = cpu_to_le16(2); eahd->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number); eahd->descTag.tagLocation = cpu_to_le32( iinfo->i_location.logicalBlockNum); eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF); eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF); } offset = iinfo->i_lenEAttr; if (type < 2048) { if (le32_to_cpu(eahd->appAttrLocation) < iinfo->i_lenEAttr) { uint32_t aal = le32_to_cpu(eahd->appAttrLocation); memmove(&ea[offset - aal + size], &ea[aal], offset - aal); offset -= aal; eahd->appAttrLocation = cpu_to_le32(aal + size); } if (le32_to_cpu(eahd->impAttrLocation) < iinfo->i_lenEAttr) { uint32_t ial = le32_to_cpu(eahd->impAttrLocation); memmove(&ea[offset - ial + size], &ea[ial], offset - ial); offset -= ial; eahd->impAttrLocation = cpu_to_le32(ial +
size); } } else if (type < 65536) { if (le32_to_cpu(eahd->appAttrLocation) < iinfo->i_lenEAttr) { uint32_t aal = le32_to_cpu(eahd->appAttrLocation); memmove(&ea[offset - aal + size], &ea[aal], offset - aal); offset -= aal; eahd->appAttrLocation = cpu_to_le32(aal + size); } } /* rewrite CRC + checksum of eahd */ crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(struct tag); eahd->descTag.descCRCLength = cpu_to_le16(crclen); eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd + sizeof(struct tag), crclen)); eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag); iinfo->i_lenEAttr += size; return (struct genericFormat *)&ea[offset]; } return NULL; } struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype) { struct genericFormat *gaf; uint8_t *ea = NULL; uint32_t offset; struct udf_inode_info *iinfo = UDF_I(inode); ea = iinfo->i_data; if (iinfo->i_lenEAttr) { struct extendedAttrHeaderDesc *eahd; eahd = (struct extendedAttrHeaderDesc *)ea; /* check checksum/crc */ if (eahd->descTag.tagIdent != cpu_to_le16(TAG_IDENT_EAHD) || le32_to_cpu(eahd->descTag.tagLocation) != iinfo->i_location.logicalBlockNum) return NULL; if (type < 2048) offset = sizeof(struct extendedAttrHeaderDesc); else if (type < 65536) offset = le32_to_cpu(eahd->impAttrLocation); else offset = le32_to_cpu(eahd->appAttrLocation); while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) { uint32_t attrLength; gaf = (struct genericFormat *)&ea[offset]; attrLength = le32_to_cpu(gaf->attrLength); /* Detect undersized elements and buffer overflows */ if ((attrLength < sizeof(*gaf)) || (attrLength > (iinfo->i_lenEAttr - offset))) break; if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype) return gaf; else offset += attrLength; } } return NULL; } /* * udf_read_tagged * * PURPOSE * Read the first block of a tagged descriptor. * * HISTORY * July 1, 1997 - Andrew E. Mileski * Written, tested, and released. 
*/ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint16_t *ident) { struct tag *tag_p; struct buffer_head *bh = NULL; u8 checksum; /* Read the block */ if (block == 0xFFFFFFFF) return NULL; bh = sb_bread(sb, block); if (!bh) { udf_err(sb, "read failed, block=%u, location=%u\n", block, location); return NULL; } tag_p = (struct tag *)(bh->b_data); *ident = le16_to_cpu(tag_p->tagIdent); if (location != le32_to_cpu(tag_p->tagLocation)) { udf_debug("location mismatch block %u, tag %u != %u\n", block, le32_to_cpu(tag_p->tagLocation), location); goto error_out; } /* Verify the tag checksum */ checksum = udf_tag_checksum(tag_p); if (checksum != tag_p->tagChecksum) { udf_err(sb, "tag checksum failed, block %u: 0x%02x != 0x%02x\n", block, checksum, tag_p->tagChecksum); goto error_out; } /* Verify the tag version */ if (tag_p->descVersion != cpu_to_le16(0x0002U) && tag_p->descVersion != cpu_to_le16(0x0003U)) { udf_err(sb, "tag version 0x%04x != 0x0002 || 0x0003, block %u\n", le16_to_cpu(tag_p->descVersion), block); goto error_out; } /* Verify the descriptor CRC */ if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize || le16_to_cpu(tag_p->descCRC) == crc_itu_t(0, bh->b_data + sizeof(struct tag), le16_to_cpu(tag_p->descCRCLength))) return bh; udf_debug("Crc failure block %u: crc = %u, crclen = %u\n", block, le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength)); error_out: brelse(bh); return NULL; } struct buffer_head *udf_read_ptagged(struct super_block *sb, struct kernel_lb_addr *loc, uint32_t offset, uint16_t *ident) { return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset), loc->logicalBlockNum + offset, ident); } void udf_update_tag(char *data, int length) { struct tag *tptr = (struct tag *)data; length -= sizeof(struct tag); tptr->descCRCLength = cpu_to_le16(length); tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(struct tag), length)); tptr->tagChecksum = udf_tag_checksum(tptr); } void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, uint32_t loc, int length) { struct tag *tptr = (struct tag *)data; tptr->tagIdent = cpu_to_le16(ident); tptr->descVersion = cpu_to_le16(version); tptr->tagSerialNum = cpu_to_le16(snum); tptr->tagLocation = cpu_to_le32(loc); udf_update_tag(data, length); } u8 udf_tag_checksum(const struct tag *t) { u8 *data = (u8 *)t; u8 checksum = 0; int i; for (i = 0; i < sizeof(struct tag); ++i) if (i != 4) /* position of checksum */ checksum += data[i]; return checksum; }
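/*
 * Editor's illustration (not part of misc.c): a standalone restatement of the
 * checksum rule implemented by udf_tag_checksum() above. A UDF descriptor tag
 * is 16 bytes; the checksum is the byte-wise sum of the tag with the checksum
 * byte itself (offset 4) skipped. The helper name is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

static uint8_t demo_tag_checksum(const uint8_t tag[16])
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < 16; i++)
		if (i != 4)	/* byte 4 holds the checksum itself */
			sum += tag[i];
	return sum;	/* wraps modulo 256, matching the u8 arithmetic above */
}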
// SPDX-License-Identifier: GPL-2.0-only /* * Berkeley Packet Filter based traffic classifier * * Might be used to classify traffic through flexible, user-defined and * possibly JIT-ed BPF filters for traffic control as an alternative to * ematches.
* * (C) 2013 Daniel Borkmann <dborkman@redhat.com> */ #include <linux/module.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/idr.h> #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> #include <net/tc_wrapper.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>"); MODULE_DESCRIPTION("TC BPF based classifier"); #define CLS_BPF_NAME_LEN 256 #define CLS_BPF_SUPPORTED_GEN_FLAGS \ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW) struct cls_bpf_head { struct list_head plist; struct idr handle_idr; struct rcu_head rcu; }; struct cls_bpf_prog { struct bpf_prog *filter; struct list_head link; struct tcf_result res; bool exts_integrated; u32 gen_flags; unsigned int in_hw_count; struct tcf_exts exts; u32 handle; u16 bpf_num_ops; struct sock_filter *bpf_ops; const char *bpf_name; struct tcf_proto *tp; struct rcu_work rwork; }; static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { [TCA_BPF_CLASSID] = { .type = NLA_U32 }, [TCA_BPF_FLAGS] = { .type = NLA_U32 }, [TCA_BPF_FLAGS_GEN] = { .type = NLA_U32 }, [TCA_BPF_FD] = { .type = NLA_U32 }, [TCA_BPF_NAME] = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN }, [TCA_BPF_OPS_LEN] = { .type = NLA_U16 }, [TCA_BPF_OPS] = { .type = NLA_BINARY, .len = sizeof(struct sock_filter) * BPF_MAXINSNS }, }; static int cls_bpf_exec_opcode(int code) { switch (code) { case TC_ACT_OK: case TC_ACT_SHOT: case TC_ACT_STOLEN: case TC_ACT_TRAP: case TC_ACT_REDIRECT: case TC_ACT_UNSPEC: return code; default: return TC_ACT_UNSPEC; } } TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct cls_bpf_head *head = rcu_dereference_bh(tp->root); bool at_ingress = skb_at_tc_ingress(skb); struct cls_bpf_prog *prog; int ret = -1; list_for_each_entry_rcu(prog, &head->plist, link) { int filter_res; qdisc_skb_cb(skb)->tc_classid = prog->res.classid; if (tc_skip_sw(prog->gen_flags)) { filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0; } else if (at_ingress) { /* It is safe to push/pull even if skb_shared() */ __skb_push(skb, skb->mac_len); bpf_compute_data_pointers(skb); filter_res = bpf_prog_run(prog->filter, skb); __skb_pull(skb, skb->mac_len); } else { bpf_compute_data_pointers(skb); filter_res = bpf_prog_run(prog->filter, skb); } if (unlikely(!skb->tstamp && skb->tstamp_type)) skb->tstamp_type = SKB_CLOCK_REALTIME; if (prog->exts_integrated) { res->class = 0; res->classid = TC_H_MAJ(prog->res.classid) | qdisc_skb_cb(skb)->tc_classid; ret = cls_bpf_exec_opcode(filter_res); if (ret == TC_ACT_UNSPEC) continue; break; } if (filter_res == 0) continue; if (filter_res != -1) { res->class = 0; res->classid = filter_res; } else { *res = prog->res; } ret = tcf_exts_exec(skb, &prog->exts, res); if (ret < 0) continue; break; } return ret; } static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog) { return !prog->bpf_ops; } static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct cls_bpf_prog *oldprog, struct netlink_ext_ack *extack) { struct tcf_block *block = tp->chain->block; struct tc_cls_bpf_offload cls_bpf = {}; struct cls_bpf_prog *obj; bool skip_sw; int err; skip_sw = prog && tc_skip_sw(prog->gen_flags); obj = prog ?: oldprog; tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack); cls_bpf.command = TC_CLSBPF_OFFLOAD; cls_bpf.exts = &obj->exts; cls_bpf.prog = prog ? prog->filter : NULL; cls_bpf.oldprog = oldprog ? 
oldprog->filter : NULL; cls_bpf.name = obj->bpf_name; cls_bpf.exts_integrated = obj->exts_integrated; if (oldprog && prog) err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf, skip_sw, &oldprog->gen_flags, &oldprog->in_hw_count, &prog->gen_flags, &prog->in_hw_count, true); else if (prog) err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf, skip_sw, &prog->gen_flags, &prog->in_hw_count, true); else err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf, skip_sw, &oldprog->gen_flags, &oldprog->in_hw_count, true); if (prog && err) { cls_bpf_offload_cmd(tp, oldprog, prog, extack); return err; } if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW)) return -EINVAL; return 0; } static u32 cls_bpf_flags(u32 flags) { return flags & CLS_BPF_SUPPORTED_GEN_FLAGS; } static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct cls_bpf_prog *oldprog, struct netlink_ext_ack *extack) { if (prog && oldprog && cls_bpf_flags(prog->gen_flags) != cls_bpf_flags(oldprog->gen_flags)) return -EINVAL; if (prog && tc_skip_hw(prog->gen_flags)) prog = NULL; if (oldprog && tc_skip_hw(oldprog->gen_flags)) oldprog = NULL; if (!prog && !oldprog) return 0; return cls_bpf_offload_cmd(tp, prog, oldprog, extack); } static void cls_bpf_stop_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct netlink_ext_ack *extack) { int err; err = cls_bpf_offload_cmd(tp, NULL, prog, extack); if (err) pr_err("Stopping hardware offload failed: %d\n", err); } static void cls_bpf_offload_update_stats(struct tcf_proto *tp, struct cls_bpf_prog *prog) { struct tcf_block *block = tp->chain->block; struct tc_cls_bpf_offload cls_bpf = {}; tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL); cls_bpf.command = TC_CLSBPF_STATS; cls_bpf.exts = &prog->exts; cls_bpf.prog = prog->filter; cls_bpf.name = prog->bpf_name; cls_bpf.exts_integrated = prog->exts_integrated; tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true); } static int cls_bpf_init(struct tcf_proto *tp) { struct cls_bpf_head *head; head = kzalloc(sizeof(*head), GFP_KERNEL); if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD_RCU(&head->plist); idr_init(&head->handle_idr); rcu_assign_pointer(tp->root, head); return 0; } static void cls_bpf_free_parms(struct cls_bpf_prog *prog) { if (cls_bpf_is_ebpf(prog)) bpf_prog_put(prog->filter); else bpf_prog_destroy(prog->filter); kfree(prog->bpf_name); kfree(prog->bpf_ops); } static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) { tcf_exts_destroy(&prog->exts); tcf_exts_put_net(&prog->exts); cls_bpf_free_parms(prog); kfree(prog); } static void cls_bpf_delete_prog_work(struct work_struct *work) { struct cls_bpf_prog *prog = container_of(to_rcu_work(work), struct cls_bpf_prog, rwork); rtnl_lock(); __cls_bpf_delete_prog(prog); rtnl_unlock(); } static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct netlink_ext_ack *extack) { struct cls_bpf_head *head = rtnl_dereference(tp->root); idr_remove(&head->handle_idr, prog->handle); cls_bpf_stop_offload(tp, prog, extack); list_del_rcu(&prog->link); tcf_unbind_filter(tp, &prog->res); if (tcf_exts_get_net(&prog->exts)) tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work); else __cls_bpf_delete_prog(prog); } static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, struct netlink_ext_ack *extack) { struct cls_bpf_head *head = rtnl_dereference(tp->root); __cls_bpf_delete(tp, arg, extack); *last = list_empty(&head->plist); return 0; } static void 
cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *tmp; list_for_each_entry_safe(prog, tmp, &head->plist, link) __cls_bpf_delete(tp, prog, extack); idr_destroy(&head->handle_idr); kfree_rcu(head, rcu); } static void *cls_bpf_get(struct tcf_proto *tp, u32 handle) { struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; list_for_each_entry(prog, &head->plist, link) { if (prog->handle == handle) return prog; } return NULL; } static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog) { struct sock_filter *bpf_ops; struct sock_fprog_kern fprog_tmp; struct bpf_prog *fp; u16 bpf_size, bpf_num_ops; int ret; bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]); if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) return -EINVAL; bpf_size = bpf_num_ops * sizeof(*bpf_ops); if (bpf_size != nla_len(tb[TCA_BPF_OPS])) return -EINVAL; bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL); if (bpf_ops == NULL) return -ENOMEM; fprog_tmp.len = bpf_num_ops; fprog_tmp.filter = bpf_ops; ret = bpf_prog_create(&fp, &fprog_tmp); if (ret < 0) { kfree(bpf_ops); return ret; } prog->bpf_ops = bpf_ops; prog->bpf_num_ops = bpf_num_ops; prog->bpf_name = NULL; prog->filter = fp; return 0; } static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog, u32 gen_flags, const struct tcf_proto *tp) { struct bpf_prog *fp; char *name = NULL; bool skip_sw; u32 bpf_fd; bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW; fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw); if (IS_ERR(fp)) return PTR_ERR(fp); if (tb[TCA_BPF_NAME]) { name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL); if (!name) { bpf_prog_put(fp); return -ENOMEM; } } prog->bpf_ops = NULL; prog->bpf_name = name; prog->filter = fp; if (fp->dst_needed) tcf_block_netif_keep_dst(tp->chain->block); return 0; } static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, void **arg, u32 flags, struct netlink_ext_ack *extack) { struct cls_bpf_head *head = rtnl_dereference(tp->root); bool is_bpf, is_ebpf, have_exts = false; struct cls_bpf_prog *oldprog = *arg; struct nlattr *tb[TCA_BPF_MAX + 1]; bool bound_to_filter = false; struct cls_bpf_prog *prog; u32 gen_flags = 0; int ret; if (tca[TCA_OPTIONS] == NULL) return -EINVAL; ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy, NULL); if (ret < 0) return ret; prog = kzalloc(sizeof(*prog), GFP_KERNEL); if (!prog) return -ENOBUFS; ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE); if (ret < 0) goto errout; if (oldprog) { if (handle && oldprog->handle != handle) { ret = -EINVAL; goto errout; } } if (handle == 0) { handle = 1; ret = idr_alloc_u32(&head->handle_idr, prog, &handle, INT_MAX, GFP_KERNEL); } else if (!oldprog) { ret = idr_alloc_u32(&head->handle_idr, prog, &handle, handle, GFP_KERNEL); } if (ret) goto errout; prog->handle = handle; is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS]; is_ebpf = tb[TCA_BPF_FD]; if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) { ret = -EINVAL; goto errout_idr; } ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts, flags, extack); if (ret < 0) goto errout_idr; if (tb[TCA_BPF_FLAGS]) { u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]); if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) { ret = -EINVAL; goto errout_idr; } have_exts = 
bpf_flags & TCA_BPF_FLAG_ACT_DIRECT; } if (tb[TCA_BPF_FLAGS_GEN]) { gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]); if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS || !tc_flags_valid(gen_flags)) { ret = -EINVAL; goto errout_idr; } } prog->exts_integrated = have_exts; prog->gen_flags = gen_flags; ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) : cls_bpf_prog_from_efd(tb, prog, gen_flags, tp); if (ret < 0) goto errout_idr; if (tb[TCA_BPF_CLASSID]) { prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]); tcf_bind_filter(tp, &prog->res, base); bound_to_filter = true; } ret = cls_bpf_offload(tp, prog, oldprog, extack); if (ret) goto errout_parms; if (!tc_in_hw(prog->gen_flags)) prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; tcf_proto_update_usesw(tp, prog->gen_flags); if (oldprog) { idr_replace(&head->handle_idr, prog, handle); list_replace_rcu(&oldprog->link, &prog->link); tcf_unbind_filter(tp, &oldprog->res); tcf_exts_get_net(&oldprog->exts); tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work); } else { list_add_rcu(&prog->link, &head->plist); } *arg = prog; return 0; errout_parms: if (bound_to_filter) tcf_unbind_filter(tp, &prog->res); cls_bpf_free_parms(prog); errout_idr: if (!oldprog) idr_remove(&head->handle_idr, prog->handle); errout: tcf_exts_destroy(&prog->exts); kfree(prog); return ret; } static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog, struct sk_buff *skb) { struct nlattr *nla; if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops)) return -EMSGSIZE; nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops * sizeof(struct sock_filter)); if (nla == NULL) return -EMSGSIZE; memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla)); return 0; } static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog, struct sk_buff *skb) { struct nlattr *nla; if (prog->bpf_name && nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name)) return -EMSGSIZE; if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id)) return -EMSGSIZE; nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag)); if (nla == NULL) return -EMSGSIZE; memcpy(nla_data(nla), prog->filter->tag, nla_len(nla)); return 0; } static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held) { struct cls_bpf_prog *prog = fh; struct nlattr *nest; u32 bpf_flags = 0; int ret; if (prog == NULL) return skb->len; tm->tcm_handle = prog->handle; cls_bpf_offload_update_stats(tp, prog); nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (prog->res.classid && nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid)) goto nla_put_failure; if (cls_bpf_is_ebpf(prog)) ret = cls_bpf_dump_ebpf_info(prog, skb); else ret = cls_bpf_dump_bpf_info(prog, skb); if (ret) goto nla_put_failure; if (tcf_exts_dump(skb, &prog->exts) < 0) goto nla_put_failure; if (prog->exts_integrated) bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT; if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags)) goto nla_put_failure; if (prog->gen_flags && nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags)) goto nla_put_failure; nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &prog->exts) < 0) goto nla_put_failure; return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl, void *q, unsigned long base) { struct cls_bpf_prog *prog = fh; tc_cls_bind_class(classid, cl, q, &prog->res, base); } static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held) { struct cls_bpf_head 
*head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; list_for_each_entry(prog, &head->plist, link) { if (!tc_cls_stats_dump(tp, arg, prog)) break; } } static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct cls_bpf_head *head = rtnl_dereference(tp->root); struct tcf_block *block = tp->chain->block; struct tc_cls_bpf_offload cls_bpf = {}; struct cls_bpf_prog *prog; int err; list_for_each_entry(prog, &head->plist, link) { if (tc_skip_hw(prog->gen_flags)) continue; tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, extack); cls_bpf.command = TC_CLSBPF_OFFLOAD; cls_bpf.exts = &prog->exts; cls_bpf.prog = add ? prog->filter : NULL; cls_bpf.oldprog = add ? NULL : prog->filter; cls_bpf.name = prog->bpf_name; cls_bpf.exts_integrated = prog->exts_integrated; err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF, &cls_bpf, cb_priv, &prog->gen_flags, &prog->in_hw_count); if (err) return err; } return 0; } static struct tcf_proto_ops cls_bpf_ops __read_mostly = { .kind = "bpf", .owner = THIS_MODULE, .classify = cls_bpf_classify, .init = cls_bpf_init, .destroy = cls_bpf_destroy, .get = cls_bpf_get, .change = cls_bpf_change, .delete = cls_bpf_delete, .walk = cls_bpf_walk, .reoffload = cls_bpf_reoffload, .dump = cls_bpf_dump, .bind_class = cls_bpf_bind_class, }; MODULE_ALIAS_NET_CLS("bpf"); static int __init cls_bpf_init_mod(void) { return register_tcf_proto_ops(&cls_bpf_ops); } static void __exit cls_bpf_exit_mod(void) { unregister_tcf_proto_ops(&cls_bpf_ops); } module_init(cls_bpf_init_mod); module_exit(cls_bpf_exit_mod);
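/*
 * Usage sketch: the tcf_proto_ops above register this classifier under the
 * kind "bpf", so it is normally driven from userspace via tc(8). A minimal,
 * hypothetical invocation (device name and BPF object file are assumptions):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf direct-action obj cls.o sec classifier
 *
 * "direct-action" sets TCA_BPF_FLAG_ACT_DIRECT (exts_integrated), and the
 * optional "skip_sw"/"skip_hw" keywords select the TCA_BPF_FLAGS_GEN offload
 * flags validated in cls_bpf_change() and acted on in cls_bpf_offload().
 */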
// SPDX-License-Identifier: GPL-2.0-only /* * hid-cp2112.c - Silicon Labs HID USB to SMBus master bridge * Copyright (c) 2013,2014 Uplogix, Inc. * David Barksdale <dbarksdale@uplogix.com> */ /* * The Silicon Labs CP2112 chip is a USB HID device which provides an * SMBus controller for talking to slave devices and 8 GPIO pins. The * host communicates with the CP2112 via raw HID reports.
* * Data Sheet: * https://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf * Programming Interface Specification: * https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf */ #include <linux/bitops.h> #include <linux/cleanup.h> #include <linux/gpio/driver.h> #include <linux/hid.h> #include <linux/hidraw.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nls.h> #include <linux/string_choices.h> #include <linux/usb/ch9.h> #include "hid-ids.h" #define CP2112_REPORT_MAX_LENGTH 64 #define CP2112_GPIO_CONFIG_LENGTH 5 #define CP2112_GPIO_GET_LENGTH 2 #define CP2112_GPIO_SET_LENGTH 3 #define CP2112_GPIO_MAX_GPIO 8 #define CP2112_GPIO_ALL_GPIO_MASK GENMASK(7, 0) enum { CP2112_GPIO_CONFIG = 0x02, CP2112_GPIO_GET = 0x03, CP2112_GPIO_SET = 0x04, CP2112_GET_VERSION_INFO = 0x05, CP2112_SMBUS_CONFIG = 0x06, CP2112_DATA_READ_REQUEST = 0x10, CP2112_DATA_WRITE_READ_REQUEST = 0x11, CP2112_DATA_READ_FORCE_SEND = 0x12, CP2112_DATA_READ_RESPONSE = 0x13, CP2112_DATA_WRITE_REQUEST = 0x14, CP2112_TRANSFER_STATUS_REQUEST = 0x15, CP2112_TRANSFER_STATUS_RESPONSE = 0x16, CP2112_CANCEL_TRANSFER = 0x17, CP2112_LOCK_BYTE = 0x20, CP2112_USB_CONFIG = 0x21, CP2112_MANUFACTURER_STRING = 0x22, CP2112_PRODUCT_STRING = 0x23, CP2112_SERIAL_STRING = 0x24, }; enum { STATUS0_IDLE = 0x00, STATUS0_BUSY = 0x01, STATUS0_COMPLETE = 0x02, STATUS0_ERROR = 0x03, }; enum { STATUS1_TIMEOUT_NACK = 0x00, STATUS1_TIMEOUT_BUS = 0x01, STATUS1_ARBITRATION_LOST = 0x02, STATUS1_READ_INCOMPLETE = 0x03, STATUS1_WRITE_INCOMPLETE = 0x04, STATUS1_SUCCESS = 0x05, }; struct cp2112_smbus_config_report { u8 report; /* CP2112_SMBUS_CONFIG */ __be32 clock_speed; /* Hz */ u8 device_address; /* Stored in the upper 7 bits */ u8 auto_send_read; /* 1 = enabled, 0 = disabled */ __be16 write_timeout; /* ms, 0 = no timeout */ __be16 read_timeout; /* ms, 0 = no timeout */ u8 scl_low_timeout; /* 1 = enabled, 0 = disabled */ __be16 retry_time; /* # of retries, 0 = no limit */ } __packed; struct cp2112_usb_config_report { u8 report; /* CP2112_USB_CONFIG */ __le16 vid; /* Vendor ID */ __le16 pid; /* Product ID */ u8 max_power; /* Power requested in 2mA units */ u8 power_mode; /* 0x00 = bus powered 0x01 = self powered & regulator off 0x02 = self powered & regulator on */ u8 release_major; u8 release_minor; u8 mask; /* What fields to program */ } __packed; struct cp2112_read_req_report { u8 report; /* CP2112_DATA_READ_REQUEST */ u8 slave_address; __be16 length; } __packed; struct cp2112_write_read_req_report { u8 report; /* CP2112_DATA_WRITE_READ_REQUEST */ u8 slave_address; __be16 length; u8 target_address_length; u8 target_address[16]; } __packed; struct cp2112_write_req_report { u8 report; /* CP2112_DATA_WRITE_REQUEST */ u8 slave_address; u8 length; u8 data[61]; } __packed; struct cp2112_force_read_report { u8 report; /* CP2112_DATA_READ_FORCE_SEND */ __be16 length; } __packed; struct cp2112_xfer_status_report { u8 report; /* CP2112_TRANSFER_STATUS_RESPONSE */ u8 status0; /* STATUS0_* */ u8 status1; /* STATUS1_* */ __be16 retries; __be16 length; } __packed; struct cp2112_string_report { u8 dummy; /* force .string to be aligned */ struct_group_attr(contents, __packed, u8 report; /* CP2112_*_STRING */ u8 length; /* length in bytes of everything after .report */ u8 type; /* USB_DT_STRING */ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */ ); } __packed; /* Number of times to request transfer status before giving up waiting for a transfer to complete. 
This may need to be changed if SMBUS clock, retries, or read/write/scl_low timeout settings are changed. */ static const int XFER_STATUS_RETRIES = 10; /* Time in ms to wait for a CP2112_DATA_READ_RESPONSE or CP2112_TRANSFER_STATUS_RESPONSE. */ static const int RESPONSE_TIMEOUT = 50; static const struct hid_device_id cp2112_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { } }; MODULE_DEVICE_TABLE(hid, cp2112_devices); struct cp2112_device { struct i2c_adapter adap; struct hid_device *hdev; wait_queue_head_t wait; u8 read_data[61]; u8 read_length; u8 hwversion; int xfer_status; atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; u8 *in_out_buffer; struct mutex lock; bool gpio_poll; struct delayed_work gpio_poll_worker; unsigned long irq_mask; u8 gpio_prev_state; }; static int gpio_push_pull = CP2112_GPIO_ALL_GPIO_MASK; module_param(gpio_push_pull, int, 0644); MODULE_PARM_DESC(gpio_push_pull, "GPIO push-pull configuration bitmask"); static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; guard(mutex)(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); if (ret >= 0) ret = -EIO; return ret; } buf[1] &= ~BIT(offset); buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error setting GPIO config: %d\n", ret); if (ret >= 0) ret = -EIO; return ret; } return 0; } static int cp2112_gpio_set_unlocked(struct cp2112_device *dev, unsigned int offset, int value) { struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; buf[0] = CP2112_GPIO_SET; buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0; buf[2] = BIT(offset); ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) hid_err(hdev, "error setting GPIO values: %d\n", ret); return ret; } static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); guard(mutex)(&dev->lock); return cp2112_gpio_set_unlocked(dev, offset, value); } static int cp2112_gpio_get_all(struct gpio_chip *chip) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; guard(mutex)(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); return ret < 0 ? 
ret : -EIO; } return buf[1]; } static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset) { int ret; ret = cp2112_gpio_get_all(chip); if (ret < 0) return ret; return (ret >> offset) & 1; } static int cp2112_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; guard(mutex)(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); return ret < 0 ? ret : -EIO; } buf[1] |= 1 << offset; buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); return ret; } /* * Set gpio value when output direction is already set, * as specified in AN495, Rev. 0.2, cpt. 4.4 */ cp2112_gpio_set_unlocked(dev, offset, value); return 0; } static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(hdev, report_number, buf, count, report_type, HID_REQ_GET_REPORT); memcpy(data, buf, count); kfree(buf); return ret; } static int cp2112_hid_output(struct hid_device *hdev, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmemdup(data, count, GFP_KERNEL); if (!buf) return -ENOMEM; if (report_type == HID_OUTPUT_REPORT) ret = hid_hw_output_report(hdev, buf, count); else ret = hid_hw_raw_request(hdev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT); kfree(buf); return ret; } static int cp2112_wait(struct cp2112_device *dev, atomic_t *avail) { int ret = 0; /* We have sent either a CP2112_TRANSFER_STATUS_REQUEST or a * CP2112_DATA_READ_FORCE_SEND and we are waiting for the response to * come in cp2112_raw_event or timeout. There will only be one of these * in flight at any one time. The timeout is extremely large and is a * last resort if the CP2112 has died. If we do time out, we don't expect * to receive the response, which would cause data races; it's not as if * we could do anything about it anyway.
*/ ret = wait_event_interruptible_timeout(dev->wait, atomic_read(avail), msecs_to_jiffies(RESPONSE_TIMEOUT)); if (-ERESTARTSYS == ret) return ret; if (!ret) return -ETIMEDOUT; atomic_set(avail, 0); return 0; } static int cp2112_xfer_status(struct cp2112_device *dev) { struct hid_device *hdev = dev->hdev; u8 buf[2]; int ret; buf[0] = CP2112_TRANSFER_STATUS_REQUEST; buf[1] = 0x01; atomic_set(&dev->xfer_avail, 0); ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting status: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->xfer_avail); if (ret) return ret; return dev->xfer_status; } static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) { struct hid_device *hdev = dev->hdev; struct cp2112_force_read_report report; int ret; if (size > sizeof(dev->read_data)) size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); atomic_set(&dev->read_avail, 0); ret = cp2112_hid_output(hdev, &report.report, sizeof(report), HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting data: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->read_avail); if (ret) return ret; hid_dbg(hdev, "read %d of %zd bytes requested\n", dev->read_length, size); if (size > dev->read_length) size = dev->read_length; memcpy(data, dev->read_data, size); return dev->read_length; } static int cp2112_read_req(void *buf, u8 slave_address, u16 length) { struct cp2112_read_req_report *report = buf; if (length < 1 || length > 512) return -EINVAL; report->report = CP2112_DATA_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); return sizeof(*report); } static int cp2112_write_read_req(void *buf, u8 slave_address, u16 length, u8 command, u8 *data, u8 data_length) { struct cp2112_write_read_req_report *report = buf; if (length < 1 || length > 512 || data_length > sizeof(report->target_address) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); report->target_address_length = data_length + 1; report->target_address[0] = command; memcpy(&report->target_address[1], data, data_length); return data_length + 6; } static int cp2112_write_req(void *buf, u8 slave_address, u8 command, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length + 1; report->data[0] = command; memcpy(&report->data[1], data, data_length); return data_length + 4; } static int cp2112_i2c_write_req(void *buf, u8 slave_address, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data)) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length; memcpy(report->data, data, data_length); return data_length + 3; } static int cp2112_i2c_write_read_req(void *buf, u8 slave_address, u8 *addr, int addr_length, int read_length) { struct cp2112_write_read_req_report *report = buf; if (read_length < 1 || read_length > 512 || addr_length > sizeof(report->target_address)) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(read_length); report->target_address_length = addr_length; 
memcpy(report->target_address, addr, addr_length); return addr_length + 5; } static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; ssize_t count; ssize_t read_length = 0; u8 *read_buf = NULL; unsigned int retries; int ret; hid_dbg(hdev, "I2C %d messages\n", num); if (num == 1) { hid_dbg(hdev, "I2C %s %#04x len %d\n", str_read_write(msgs->flags & I2C_M_RD), msgs->addr, msgs->len); if (msgs->flags & I2C_M_RD) { read_length = msgs->len; read_buf = msgs->buf; count = cp2112_read_req(buf, msgs->addr, msgs->len); } else { count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf, msgs->len); } if (count < 0) return count; } else if (dev->hwversion > 1 && /* no repeated start in rev 1 */ num == 2 && msgs[0].addr == msgs[1].addr && !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) { hid_dbg(hdev, "I2C write-read %#04x wlen %d rlen %d\n", msgs[0].addr, msgs[0].len, msgs[1].len); read_length = msgs[1].len; read_buf = msgs[1].buf; count = cp2112_i2c_write_read_req(buf, msgs[0].addr, msgs[0].buf, msgs[0].len, msgs[1].len); if (count < 0) return count; } else { hid_err(hdev, "Multi-message I2C transactions not supported\n"); return -EOPNOTSUPP; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } for (count = 0; count < read_length;) { ret = cp2112_read(dev, read_buf + count, read_length - count); if (ret < 0) goto power_normal; if (ret == 0) { hid_err(hdev, "read returned 0\n"); ret = -EIO; goto power_normal; } count += ret; if (count > read_length) { /* * The hardware returned too much data. * This is mostly harmless because cp2112_read() * has a limit check so didn't overrun our * buffer. Nevertheless, we return an error * because something is seriously wrong and * it shouldn't go unnoticed. 
*/ hid_err(hdev, "long read: %d > %zd\n", ret, read_length - count + ret); ret = -EIO; goto power_normal; } } /* return the number of transferred messages */ ret = num; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "I2C transfer finished: %d\n", ret); return ret; } static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; __le16 word; ssize_t count; size_t read_length = 0; unsigned int retries; int ret; hid_dbg(hdev, "%s addr 0x%x flags 0x%x cmd 0x%x size %d\n", str_write_read(read_write == I2C_SMBUS_WRITE), addr, flags, command, size); switch (size) { case I2C_SMBUS_BYTE: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_read_req(buf, addr, read_length); else count = cp2112_write_req(buf, addr, command, NULL, 0); break; case I2C_SMBUS_BYTE_DATA: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, &data->byte, 1); break; case I2C_SMBUS_WORD_DATA: read_length = 2; word = cpu_to_le16(data->word); if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, (u8 *)&word, 2); break; case I2C_SMBUS_PROC_CALL: size = I2C_SMBUS_WORD_DATA; read_write = I2C_SMBUS_READ; read_length = 2; word = cpu_to_le16(data->word); count = cp2112_write_read_req(buf, addr, read_length, command, (u8 *)&word, 2); break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { read_length = data->block[0]; count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); } else { count = cp2112_write_req(buf, addr, command, data->block + 1, data->block[0]); } break; case I2C_SMBUS_BLOCK_DATA: if (I2C_SMBUS_READ == read_write) { count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, NULL, 0); } else { count = cp2112_write_req(buf, addr, command, data->block, data->block[0] + 1); } break; case I2C_SMBUS_BLOCK_PROC_CALL: size = I2C_SMBUS_BLOCK_DATA; read_write = I2C_SMBUS_READ; count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, data->block, data->block[0] + 1); break; default: hid_warn(hdev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } if (count < 0) return count; ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } if (I2C_SMBUS_WRITE == read_write) { ret = 0; goto power_normal; } if (I2C_SMBUS_BLOCK_DATA == size) read_length = ret; ret = cp2112_read(dev, buf, read_length); if (ret < 0) goto power_normal; if (ret != read_length) { hid_warn(hdev, "short read: %d < %zd\n", ret, read_length); ret = -EIO; goto 
power_normal; } switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = buf[0]; break; case I2C_SMBUS_WORD_DATA: data->word = le16_to_cpup((__le16 *)buf); break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EINVAL; goto power_normal; } memcpy(data->block + 1, buf, read_length); break; case I2C_SMBUS_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto power_normal; } memcpy(data->block, buf, read_length); break; } ret = 0; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "transfer finished: %d\n", ret); return ret; } static u32 cp2112_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .master_xfer = cp2112_i2c_xfer, .smbus_xfer = cp2112_xfer, .functionality = cp2112_functionality, }; static int cp2112_get_usb_config(struct hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; ret = cp2112_hid_get(hdev, CP2112_USB_CONFIG, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error reading usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static int cp2112_set_usb_config(struct hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; if (WARN_ON(cfg->report != CP2112_USB_CONFIG)) return -EINVAL; ret = cp2112_hid_output(hdev, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error writing usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static void chmod_sysfs_attrs(struct hid_device *hdev); #define CP2112_CONFIG_ATTR(name, store, format, ...) 
\ static ssize_t name##_store(struct device *kdev, \ struct device_attribute *attr, const char *buf, \ size_t count) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ store; \ ret = cp2112_set_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ chmod_sysfs_attrs(hdev); \ return count; \ } \ static ssize_t name##_show(struct device *kdev, \ struct device_attribute *attr, char *buf) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ return sysfs_emit(buf, format, ##__VA_ARGS__); \ } \ static DEVICE_ATTR_RW(name); CP2112_CONFIG_ATTR(vendor_id, ({ u16 vid; if (sscanf(buf, "%hi", &vid) != 1) return -EINVAL; cfg.vid = cpu_to_le16(vid); cfg.mask = 0x01; }), "0x%04x\n", le16_to_cpu(cfg.vid)); CP2112_CONFIG_ATTR(product_id, ({ u16 pid; if (sscanf(buf, "%hi", &pid) != 1) return -EINVAL; cfg.pid = cpu_to_le16(pid); cfg.mask = 0x02; }), "0x%04x\n", le16_to_cpu(cfg.pid)); CP2112_CONFIG_ATTR(max_power, ({ int mA; if (sscanf(buf, "%i", &mA) != 1) return -EINVAL; cfg.max_power = (mA + 1) / 2; cfg.mask = 0x04; }), "%u mA\n", cfg.max_power * 2); CP2112_CONFIG_ATTR(power_mode, ({ if (sscanf(buf, "%hhi", &cfg.power_mode) != 1) return -EINVAL; cfg.mask = 0x08; }), "%u\n", cfg.power_mode); CP2112_CONFIG_ATTR(release_version, ({ if (sscanf(buf, "%hhi.%hhi", &cfg.release_major, &cfg.release_minor) != 2) return -EINVAL; cfg.mask = 0x10; }), "%u.%u\n", cfg.release_major, cfg.release_minor); #undef CP2112_CONFIG_ATTR static ssize_t pstr_store(struct device *kdev, struct device_attribute *kattr, const char *buf, size_t count, int number) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_string_report report; int ret; memset(&report, 0, sizeof(report)); ret = utf8s_to_utf16s(buf, count, UTF16_LITTLE_ENDIAN, report.string, ARRAY_SIZE(report.string)); report.report = number; report.length = ret * sizeof(report.string[0]) + 2; report.type = USB_DT_STRING; ret = cp2112_hid_output(hdev, &report.report, report.length + 1, HID_FEATURE_REPORT); if (ret != report.length + 1) { hid_err(hdev, "error writing %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } chmod_sysfs_attrs(hdev); return count; } static ssize_t pstr_show(struct device *kdev, struct device_attribute *kattr, char *buf, int number) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_string_report report; u8 length; int ret; ret = cp2112_hid_get(hdev, number, (u8 *)&report.contents, sizeof(report.contents), HID_FEATURE_REPORT); if (ret < 3) { hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } if (report.length < 2) { hid_err(hdev, "invalid %s string length: %d\n", kattr->attr.name, report.length); return -EIO; } length = report.length > ret - 1 ? 
ret - 1 : report.length; length = (length - 2) / sizeof(report.string[0]); ret = utf16s_to_utf8s(report.string, length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[ret++] = '\n'; return ret; } #define CP2112_PSTR_ATTR(name, _report) \ static ssize_t name##_store(struct device *kdev, struct device_attribute *kattr, \ const char *buf, size_t count) \ { \ return pstr_store(kdev, kattr, buf, count, _report); \ } \ static ssize_t name##_show(struct device *kdev, struct device_attribute *kattr, char *buf) \ { \ return pstr_show(kdev, kattr, buf, _report); \ } \ static DEVICE_ATTR_RW(name); CP2112_PSTR_ATTR(manufacturer, CP2112_MANUFACTURER_STRING); CP2112_PSTR_ATTR(product, CP2112_PRODUCT_STRING); CP2112_PSTR_ATTR(serial, CP2112_SERIAL_STRING); #undef CP2112_PSTR_ATTR static const struct attribute_group cp2112_attr_group = { .attrs = (struct attribute *[]){ &dev_attr_vendor_id.attr, &dev_attr_product_id.attr, &dev_attr_max_power.attr, &dev_attr_power_mode.attr, &dev_attr_release_version.attr, &dev_attr_manufacturer.attr, &dev_attr_product.attr, &dev_attr_serial.attr, NULL } }; /* Chmoding our sysfs attributes is simply a way to expose which fields in the * PROM have already been programmed. We do not depend on this preventing * writing to these attributes since the CP2112 will simply ignore writes to * already-programmed fields. This is why there is no sense in fixing this * racy behaviour. */ static void chmod_sysfs_attrs(struct hid_device *hdev) { struct attribute **attr; u8 buf[2]; int ret; ret = cp2112_hid_get(hdev, CP2112_LOCK_BYTE, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error reading lock byte: %d\n", ret); return; } for (attr = cp2112_attr_group.attrs; *attr; ++attr) { umode_t mode = (buf[1] & 1) ? 0644 : 0444; ret = sysfs_chmod_file(&hdev->dev.kobj, *attr, mode); if (ret < 0) hid_err(hdev, "error chmoding sysfs file %s\n", (*attr)->name); buf[1] >>= 1; } } static void cp2112_gpio_irq_ack(struct irq_data *d) { } static void cp2112_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); irq_hw_number_t hwirq = irqd_to_hwirq(d); __clear_bit(hwirq, &dev->irq_mask); gpiochip_disable_irq(gc, hwirq); } static void cp2112_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); irq_hw_number_t hwirq = irqd_to_hwirq(d); gpiochip_enable_irq(gc, hwirq); __set_bit(hwirq, &dev->irq_mask); } static void cp2112_gpio_poll_callback(struct work_struct *work) { struct cp2112_device *dev = container_of(work, struct cp2112_device, gpio_poll_worker.work); u8 gpio_mask; u32 irq_type; int irq, virq, ret; ret = cp2112_gpio_get_all(&dev->gc); if (ret == -ENODEV) /* the hardware has been disconnected */ return; if (ret < 0) goto exit; gpio_mask = ret; for_each_set_bit(virq, &dev->irq_mask, CP2112_GPIO_MAX_GPIO) { irq = irq_find_mapping(dev->gc.irq.domain, virq); if (!irq) continue; irq_type = irq_get_trigger_type(irq); if (!irq_type) continue; if (gpio_mask & BIT(virq)) { /* Level High */ if (irq_type & IRQ_TYPE_LEVEL_HIGH) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_RISING) && !(dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } else { /* Level Low */ if (irq_type & IRQ_TYPE_LEVEL_LOW) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_FALLING) && (dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } } dev->gpio_prev_state = gpio_mask; exit: if (dev->gpio_poll) 
schedule_delayed_work(&dev->gpio_poll_worker, 10); } static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); } cp2112_gpio_irq_unmask(d); return 0; } static void cp2112_gpio_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); cp2112_gpio_irq_mask(d); if (!dev->irq_mask) { dev->gpio_poll = false; cancel_delayed_work_sync(&dev->gpio_poll_worker); } } static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) { return 0; } static const struct irq_chip cp2112_gpio_irqchip = { .name = "cp2112-gpio", .irq_startup = cp2112_gpio_irq_startup, .irq_shutdown = cp2112_gpio_irq_shutdown, .irq_ack = cp2112_gpio_irq_ack, .irq_mask = cp2112_gpio_irq_mask, .irq_unmask = cp2112_gpio_irq_unmask, .irq_set_type = cp2112_gpio_irq_type, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct cp2112_device *dev; u8 buf[3]; struct cp2112_smbus_config_report config; struct gpio_irq_chip *girq; int ret; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, GFP_KERNEL); if (!dev->in_out_buffer) return -ENOMEM; ret = devm_mutex_init(&hdev->dev, &dev->lock); if (ret) { hid_err(hdev, "mutex init failed\n"); return ret; } ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "hw open failed\n"); goto err_hid_stop; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); goto err_hid_close; } ret = cp2112_hid_get(hdev, CP2112_GET_VERSION_INFO, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error requesting version\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_info(hdev, "Part Number: 0x%02X Device Version: 0x%02X\n", buf[1], buf[2]); ret = cp2112_hid_get(hdev, CP2112_SMBUS_CONFIG, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error requesting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } config.retry_time = cpu_to_be16(1); ret = cp2112_hid_output(hdev, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error setting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_set_drvdata(hdev, (void *)dev); dev->hdev = hdev; dev->adap.owner = THIS_MODULE; dev->adap.class = I2C_CLASS_HWMON; dev->adap.algo = &smbus_algorithm; dev->adap.algo_data = dev; dev->adap.dev.parent = &hdev->dev; snprintf(dev->adap.name, sizeof(dev->adap.name), "CP2112 SMBus Bridge on hidraw%d", ((struct hidraw *)hdev->hidraw)->minor); dev->hwversion = buf[2]; init_waitqueue_head(&dev->wait); hid_device_io_start(hdev); ret = i2c_add_adapter(&dev->adap); hid_device_io_stop(hdev); if (ret) { hid_err(hdev, "error registering i2c adapter\n"); goto err_power_normal; } hid_dbg(hdev, "adapter registered\n"); dev->gc.label = "cp2112_gpio"; dev->gc.direction_input = cp2112_gpio_direction_input; dev->gc.direction_output = 
cp2112_gpio_direction_output; dev->gc.set_rv = cp2112_gpio_set; dev->gc.get = cp2112_gpio_get; dev->gc.base = -1; dev->gc.ngpio = CP2112_GPIO_MAX_GPIO; dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; girq = &dev->gc.irq; gpio_irq_chip_set_chip(girq, &cp2112_gpio_irqchip); /* The event comes from the outside so no parent handler */ girq->parent_handler = NULL; girq->num_parents = 0; girq->parents = NULL; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_simple_irq; girq->threaded = true; INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); goto err_free_i2c; } ret = sysfs_create_group(&hdev->dev.kobj, &cp2112_attr_group); if (ret < 0) { hid_err(hdev, "error creating sysfs attrs\n"); goto err_gpiochip_remove; } chmod_sysfs_attrs(hdev); hid_hw_power(hdev, PM_HINT_NORMAL); return ret; err_gpiochip_remove: gpiochip_remove(&dev->gc); err_free_i2c: i2c_del_adapter(&dev->adap); err_power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); err_hid_close: hid_hw_close(hdev); err_hid_stop: hid_hw_stop(hdev); return ret; } static void cp2112_remove(struct hid_device *hdev) { struct cp2112_device *dev = hid_get_drvdata(hdev); sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); i2c_del_adapter(&dev->adap); if (dev->gpio_poll) { dev->gpio_poll = false; cancel_delayed_work_sync(&dev->gpio_poll_worker); } gpiochip_remove(&dev->gc); /* i2c_del_adapter has finished removing all i2c devices from our * adapter. Well behaved devices should no longer call our cp2112_xfer * and should have waited for any pending calls to finish. It has also * waited for device_unregister(&adap->dev) to complete. Therefore we * can safely free our struct cp2112_device. */ hid_hw_close(hdev); hid_hw_stop(hdev); } static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct cp2112_device *dev = hid_get_drvdata(hdev); struct cp2112_xfer_status_report *xfer = (void *)data; switch (data[0]) { case CP2112_TRANSFER_STATUS_RESPONSE: hid_dbg(hdev, "xfer status: %02x %02x %04x %04x\n", xfer->status0, xfer->status1, be16_to_cpu(xfer->retries), be16_to_cpu(xfer->length)); switch (xfer->status0) { case STATUS0_IDLE: dev->xfer_status = -EAGAIN; break; case STATUS0_BUSY: dev->xfer_status = -EBUSY; break; case STATUS0_COMPLETE: dev->xfer_status = be16_to_cpu(xfer->length); break; case STATUS0_ERROR: switch (xfer->status1) { case STATUS1_TIMEOUT_NACK: case STATUS1_TIMEOUT_BUS: dev->xfer_status = -ETIMEDOUT; break; default: dev->xfer_status = -EIO; break; } break; default: dev->xfer_status = -EINVAL; break; } atomic_set(&dev->xfer_avail, 1); break; case CP2112_DATA_READ_RESPONSE: hid_dbg(hdev, "read response: %02x %02x\n", data[1], data[2]); dev->read_length = data[2]; if (dev->read_length > sizeof(dev->read_data)) dev->read_length = sizeof(dev->read_data); memcpy(dev->read_data, &data[3], dev->read_length); atomic_set(&dev->read_avail, 1); break; default: hid_err(hdev, "unknown report\n"); return 0; } wake_up_interruptible(&dev->wait); return 1; } static struct hid_driver cp2112_driver = { .name = "cp2112", .id_table = cp2112_devices, .probe = cp2112_probe, .remove = cp2112_remove, .raw_event = cp2112_raw_event, }; module_hid_driver(cp2112_driver); MODULE_DESCRIPTION("Silicon Labs HID USB to SMBus master bridge"); MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>"); MODULE_LICENSE("GPL");
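/*
 * Usage sketch: once cp2112_probe() has registered the I2C adapter, it is
 * visible to userspace as /dev/i2c-<N> through the standard i2c-dev
 * interface; plain read()/write() calls end up in cp2112_i2c_xfer() above.
 * The adapter number and the 0x50 slave address below are assumptions for
 * illustration only.
 */
#include <fcntl.h>
#include <linux/i2c-dev.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int cp2112_example_read_byte(void)
{
	unsigned char reg = 0x00, val;
	int fd = open("/dev/i2c-3", O_RDWR);	/* assumed adapter number */

	if (fd < 0)
		return -1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)	/* hypothetical EEPROM at 0x50 */
		goto err;
	/* Two single-message transfers: a one-byte write to set the register
	 * pointer, then a one-byte read. On hwversion > 1 parts the pair
	 * could instead be issued as a single write-then-read I2C_RDWR
	 * request, taking the repeated-start path via
	 * cp2112_i2c_write_read_req(). */
	if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1)
		goto err;
	close(fd);
	return val;
err:
	close(fd);
	return -1;
}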
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * HID driver for NVIDIA SHIELD peripherals. */ #include <linux/hid.h> #include <linux/idr.h> #include <linux/input-event-codes.h> #include <linux/input.h> #include <linux/jiffies.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/workqueue.h> #include "hid-ids.h" #define NOT_INIT_STR "NOT INITIALIZED" #define android_map_key(c) hid_map_usage(hi, usage, bit, max, EV_KEY, (c)) enum { HID_USAGE_ANDROID_PLAYPAUSE_BTN = 0xcd, /* Double-tap volume slider */ HID_USAGE_ANDROID_VOLUMEUP_BTN = 0xe9, HID_USAGE_ANDROID_VOLUMEDOWN_BTN = 0xea, HID_USAGE_ANDROID_SEARCH_BTN = 0x221, /* NVIDIA btn on Thunderstrike */ HID_USAGE_ANDROID_HOME_BTN = 0x223, HID_USAGE_ANDROID_BACK_BTN = 0x224, }; enum { SHIELD_FW_VERSION_INITIALIZED = 0, SHIELD_BOARD_INFO_INITIALIZED, SHIELD_BATTERY_STATS_INITIALIZED, SHIELD_CHARGER_STATE_INITIALIZED, }; enum { THUNDERSTRIKE_FW_VERSION_UPDATE = 0, THUNDERSTRIKE_BOARD_INFO_UPDATE, THUNDERSTRIKE_HAPTICS_UPDATE, THUNDERSTRIKE_LED_UPDATE, THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, }; enum { THUNDERSTRIKE_HOSTCMD_REPORT_SIZE = 33, THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID = 0x4, THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID = 0x3, }; enum { THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION = 1, THUNDERSTRIKE_HOSTCMD_ID_LED = 6, THUNDERSTRIKE_HOSTCMD_ID_BATTERY, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO = 16, THUNDERSTRIKE_HOSTCMD_ID_USB_INIT = 53, THUNDERSTRIKE_HOSTCMD_ID_HAPTICS = 57, THUNDERSTRIKE_HOSTCMD_ID_CHARGER, }; struct power_supply_dev { struct power_supply *psy; struct power_supply_desc desc; }; struct thunderstrike_psy_prop_values { int voltage_min; int voltage_now; int voltage_avg; int voltage_boot; int capacity; int status; int charge_type; int temp; }; static const enum power_supply_property thunderstrike_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_BOOT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_MAX, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, };
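/*
 * Illustration: each property in thunderstrike_battery_props is exposed by
 * the power-supply core as a sysfs attribute once the supply is registered,
 * so the controller battery can be inspected from userspace, e.g. (the
 * supply name shown is an assumption):
 *
 *   cat /sys/class/power_supply/thunderstrike_0/capacity
 *   cat /sys/class/power_supply/thunderstrike_0/status
 *
 * Reads of these files land in thunderstrike_battery_get_property() below.
 */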
enum thunderstrike_led_state { THUNDERSTRIKE_LED_OFF = 1, THUNDERSTRIKE_LED_ON = 8, } __packed; static_assert(sizeof(enum thunderstrike_led_state) == 1); struct thunderstrike_hostcmd_battery { __le16 voltage_avg; u8 reserved_at_10; __le16 thermistor; __le16 voltage_min; __le16 voltage_boot; __le16 voltage_now; u8 capacity; } __packed; enum thunderstrike_charger_type { THUNDERSTRIKE_CHARGER_TYPE_NONE = 0, THUNDERSTRIKE_CHARGER_TYPE_TRICKLE, THUNDERSTRIKE_CHARGER_TYPE_NORMAL, } __packed; static_assert(sizeof(enum thunderstrike_charger_type) == 1); enum thunderstrike_charger_state { THUNDERSTRIKE_CHARGER_STATE_UNKNOWN = 0, THUNDERSTRIKE_CHARGER_STATE_DISABLED, THUNDERSTRIKE_CHARGER_STATE_CHARGING, THUNDERSTRIKE_CHARGER_STATE_FULL, THUNDERSTRIKE_CHARGER_STATE_FAILED = 8, } __packed; static_assert(sizeof(enum thunderstrike_charger_state) == 1); struct thunderstrike_hostcmd_charger { u8 connected; enum thunderstrike_charger_type type; enum thunderstrike_charger_state state; } __packed; struct thunderstrike_hostcmd_board_info { __le16 revision; __le16 serial[7]; } __packed; struct thunderstrike_hostcmd_haptics { u8 motor_left; u8 motor_right; } __packed; struct thunderstrike_hostcmd_resp_report { u8 report_id; /* THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID */ u8 cmd_id; u8 reserved_at_10; union { struct thunderstrike_hostcmd_board_info board_info; struct thunderstrike_hostcmd_haptics motors; __le16 fw_version; enum thunderstrike_led_state led_state; struct thunderstrike_hostcmd_battery battery; struct thunderstrike_hostcmd_charger charger; u8 payload[30]; } __packed; } __packed; static_assert(sizeof(struct thunderstrike_hostcmd_resp_report) == THUNDERSTRIKE_HOSTCMD_REPORT_SIZE); struct thunderstrike_hostcmd_req_report { u8 report_id; /* THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID */ u8 cmd_id; u8 reserved_at_10; union { struct __packed { u8 update; enum thunderstrike_led_state state; } led; struct __packed { u8 update; struct thunderstrike_hostcmd_haptics motors; } haptics; } __packed; u8 reserved_at_30[27]; } __packed; static_assert(sizeof(struct thunderstrike_hostcmd_req_report) == THUNDERSTRIKE_HOSTCMD_REPORT_SIZE); /* Common struct for shield accessories. */ struct shield_device { struct hid_device *hdev; struct power_supply_dev battery_dev; unsigned long initialized_flags; const char *codename; u16 fw_version; struct { u16 revision; char serial_number[15]; } board_info; }; /* * Non-trivial to uniquely identify Thunderstrike controllers at initialization * time. Use an ID allocator to help with this. 
*/ static DEFINE_IDA(thunderstrike_ida); struct thunderstrike { struct shield_device base; int id; /* Sub-devices */ struct input_dev *haptics_dev; struct led_classdev led_dev; /* Resources */ void *req_report_dmabuf; unsigned long update_flags; struct thunderstrike_hostcmd_haptics haptics_val; spinlock_t haptics_update_lock; u8 led_state : 1; enum thunderstrike_led_state led_value; struct thunderstrike_psy_prop_values psy_stats; spinlock_t psy_stats_lock; struct timer_list psy_stats_timer; struct work_struct hostcmd_req_work; }; static inline void thunderstrike_hostcmd_req_report_init( struct thunderstrike_hostcmd_req_report *report, u8 cmd_id) { memset(report, 0, sizeof(*report)); report->report_id = THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID; report->cmd_id = cmd_id; } static inline void shield_strrev(char *dest, size_t len, u16 rev) { dest[0] = ('A' - 1) + (rev >> 8); snprintf(&dest[1], len - 1, "%02X", 0xff & rev); } static struct input_dev *shield_allocate_input_dev(struct hid_device *hdev, const char *name_suffix) { struct input_dev *idev; idev = input_allocate_device(); if (!idev) goto err_device; idev->id.bustype = hdev->bus; idev->id.vendor = hdev->vendor; idev->id.product = hdev->product; idev->id.version = hdev->version; idev->uniq = hdev->uniq; idev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, name_suffix); if (!idev->name) goto err_name; input_set_drvdata(idev, hdev); return idev; err_name: input_free_device(idev); err_device: return ERR_PTR(-ENOMEM); } static struct input_dev *shield_haptics_create( struct shield_device *dev, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct input_dev *haptics; int ret; if (!IS_ENABLED(CONFIG_NVIDIA_SHIELD_FF)) return NULL; haptics = shield_allocate_input_dev(dev->hdev, "Haptics"); if (IS_ERR(haptics)) return haptics; input_set_capability(haptics, EV_FF, FF_RUMBLE); ret = input_ff_create_memless(haptics, NULL, play_effect); if (ret) goto err; ret = input_register_device(haptics); if (ret) goto err; return haptics; err: input_free_device(haptics); return ERR_PTR(ret); } static inline void thunderstrike_send_hostcmd_request(struct thunderstrike *ts) { struct thunderstrike_hostcmd_req_report *report = ts->req_report_dmabuf; struct shield_device *shield_dev = &ts->base; int ret; ret = hid_hw_raw_request(shield_dev->hdev, report->report_id, ts->req_report_dmabuf, THUNDERSTRIKE_HOSTCMD_REPORT_SIZE, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(shield_dev->hdev, "Failed to output Thunderstrike HOSTCMD request HID report due to %pe\n", ERR_PTR(ret)); } } static void thunderstrike_hostcmd_req_work_handler(struct work_struct *work) { struct thunderstrike *ts = container_of(work, struct thunderstrike, hostcmd_req_work); struct thunderstrike_hostcmd_req_report *report; unsigned long flags; report = ts->req_report_dmabuf; if (test_and_clear_bit(THUNDERSTRIKE_FW_VERSION_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_LED_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init(report, THUNDERSTRIKE_HOSTCMD_ID_LED); report->led.update = 1; report->led.state = ts->led_value; thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_BATTERY); thunderstrike_send_hostcmd_request(ts); 
thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_CHARGER); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_HAPTICS_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_HAPTICS); report->haptics.update = 1; spin_lock_irqsave(&ts->haptics_update_lock, flags); report->haptics.motors = ts->haptics_val; spin_unlock_irqrestore(&ts->haptics_update_lock, flags); thunderstrike_send_hostcmd_request(ts); } } static inline void thunderstrike_request_firmware_version(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_FW_VERSION_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static inline void thunderstrike_request_board_info(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static inline int thunderstrike_update_haptics(struct thunderstrike *ts, struct thunderstrike_hostcmd_haptics *motors) { unsigned long flags; spin_lock_irqsave(&ts->haptics_update_lock, flags); ts->haptics_val = *motors; spin_unlock_irqrestore(&ts->haptics_update_lock, flags); set_bit(THUNDERSTRIKE_HAPTICS_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); return 0; } static int thunderstrike_play_effect(struct input_dev *idev, void *data, struct ff_effect *effect) { struct hid_device *hdev = input_get_drvdata(idev); struct thunderstrike_hostcmd_haptics motors; struct shield_device *shield_dev; struct thunderstrike *ts; if (effect->type != FF_RUMBLE) return 0; shield_dev = hid_get_drvdata(hdev); ts = container_of(shield_dev, struct thunderstrike, base); /* Thunderstrike motor values range from 0 to 32 inclusively */ motors.motor_left = effect->u.rumble.strong_magnitude / 2047; motors.motor_right = effect->u.rumble.weak_magnitude / 2047; hid_dbg(hdev, "Thunderstrike FF_RUMBLE request, left: %u right: %u\n", motors.motor_left, motors.motor_right); return thunderstrike_update_haptics(ts, &motors); } static enum led_brightness thunderstrike_led_get_brightness(struct led_classdev *led) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct shield_device *shield_dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(shield_dev, struct thunderstrike, base); return ts->led_state; } static void thunderstrike_led_set_brightness(struct led_classdev *led, enum led_brightness value) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct shield_device *shield_dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(shield_dev, struct thunderstrike, base); switch (value) { case LED_OFF: ts->led_value = THUNDERSTRIKE_LED_OFF; break; default: ts->led_value = THUNDERSTRIKE_LED_ON; break; } set_bit(THUNDERSTRIKE_LED_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static int thunderstrike_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct shield_device *shield_dev = power_supply_get_drvdata(psy); struct thunderstrike_psy_prop_values prop_values; struct thunderstrike *ts; int ret = 0; ts = container_of(shield_dev, struct thunderstrike, base); spin_lock(&ts->psy_stats_lock); prop_values = ts->psy_stats; spin_unlock(&ts->psy_stats_lock); switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval 
= prop_values.status; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = prop_values.charge_type; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN: val->intval = prop_values.voltage_min; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = 2900000; /* 2.9 V */ break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = 2200000; /* 2.2 V */ break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = prop_values.voltage_now; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: val->intval = prop_values.voltage_avg; break; case POWER_SUPPLY_PROP_VOLTAGE_BOOT: val->intval = prop_values.voltage_boot; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = prop_values.capacity; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_TEMP: val->intval = prop_values.temp; break; case POWER_SUPPLY_PROP_TEMP_MIN: val->intval = 0; /* 0 C */ break; case POWER_SUPPLY_PROP_TEMP_MAX: val->intval = 400; /* 40 C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MIN: val->intval = 15; /* 1.5 C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MAX: val->intval = 380; /* 38 C */ break; default: ret = -EINVAL; break; } return ret; } static inline void thunderstrike_request_psy_stats(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static void thunderstrike_psy_stats_timer_handler(struct timer_list *timer) { struct thunderstrike *ts = container_of(timer, struct thunderstrike, psy_stats_timer); thunderstrike_request_psy_stats(ts); /* Query battery statistics from device every five minutes */ mod_timer(timer, jiffies + 300 * HZ); } static void thunderstrike_parse_fw_version_payload(struct shield_device *shield_dev, __le16 fw_version) { shield_dev->fw_version = le16_to_cpu(fw_version); set_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(shield_dev->hdev, "Thunderstrike firmware version 0x%04X\n", shield_dev->fw_version); } static void thunderstrike_parse_board_info_payload(struct shield_device *shield_dev, struct thunderstrike_hostcmd_board_info *board_info) { char board_revision_str[4]; int i; shield_dev->board_info.revision = le16_to_cpu(board_info->revision); for (i = 0; i < 7; ++i) { u16 val = le16_to_cpu(board_info->serial[i]); shield_dev->board_info.serial_number[2 * i] = val & 0xFF; shield_dev->board_info.serial_number[2 * i + 1] = val >> 8; } shield_dev->board_info.serial_number[14] = '\0'; set_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags); shield_strrev(board_revision_str, 4, shield_dev->board_info.revision); hid_dbg(shield_dev->hdev, "Thunderstrike BOARD_REVISION_%s (0x%04X) S/N: %s\n", board_revision_str, shield_dev->board_info.revision, shield_dev->board_info.serial_number); } static inline void thunderstrike_parse_haptics_payload(struct shield_device *shield_dev, struct thunderstrike_hostcmd_haptics *haptics) { hid_dbg(shield_dev->hdev, "Thunderstrike haptics HOSTCMD response, left: %u right: %u\n", haptics->motor_left, haptics->motor_right); } static void thunderstrike_parse_led_payload(struct shield_device *shield_dev, enum thunderstrike_led_state led_state) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); switch (led_state) { case THUNDERSTRIKE_LED_OFF: ts->led_state = 0; break; case THUNDERSTRIKE_LED_ON: ts->led_state = 1; break; } hid_dbg(shield_dev->hdev, "Thunderstrike led HOSTCMD response, 0x%02X\n", led_state); } static void 
thunderstrike_parse_battery_payload( struct shield_device *shield_dev, struct thunderstrike_hostcmd_battery *battery) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); u16 hostcmd_voltage_boot = le16_to_cpu(battery->voltage_boot); u16 hostcmd_voltage_avg = le16_to_cpu(battery->voltage_avg); u16 hostcmd_voltage_min = le16_to_cpu(battery->voltage_min); u16 hostcmd_voltage_now = le16_to_cpu(battery->voltage_now); u16 hostcmd_thermistor = le16_to_cpu(battery->thermistor); int voltage_boot, voltage_avg, voltage_min, voltage_now; struct hid_device *hdev = shield_dev->hdev; u8 capacity = battery->capacity; int temp; /* Convert thunderstrike device values to µV and tenths of degree Celsius */ voltage_boot = hostcmd_voltage_boot * 1000; voltage_avg = hostcmd_voltage_avg * 1000; voltage_min = hostcmd_voltage_min * 1000; voltage_now = hostcmd_voltage_now * 1000; temp = (1378 - (int)hostcmd_thermistor) * 10 / 19; /* Copy converted values */ spin_lock(&ts->psy_stats_lock); ts->psy_stats.voltage_boot = voltage_boot; ts->psy_stats.voltage_avg = voltage_avg; ts->psy_stats.voltage_min = voltage_min; ts->psy_stats.voltage_now = voltage_now; ts->psy_stats.capacity = capacity; ts->psy_stats.temp = temp; spin_unlock(&ts->psy_stats_lock); set_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, voltage_avg: %u voltage_now: %u\n", hostcmd_voltage_avg, hostcmd_voltage_now); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, voltage_boot: %u voltage_min: %u\n", hostcmd_voltage_boot, hostcmd_voltage_min); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, thermistor: %u\n", hostcmd_thermistor); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, capacity: %u%%\n", capacity); } static void thunderstrike_parse_charger_payload( struct shield_device *shield_dev, struct thunderstrike_hostcmd_charger *charger) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); int charge_type = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; struct hid_device *hdev = shield_dev->hdev; int status = POWER_SUPPLY_STATUS_UNKNOWN; switch (charger->type) { case THUNDERSTRIKE_CHARGER_TYPE_NONE: charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE; break; case THUNDERSTRIKE_CHARGER_TYPE_TRICKLE: charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; break; case THUNDERSTRIKE_CHARGER_TYPE_NORMAL: charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; break; default: hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD type, %u\n", charger->type); break; } switch (charger->state) { case THUNDERSTRIKE_CHARGER_STATE_UNKNOWN: status = POWER_SUPPLY_STATUS_UNKNOWN; break; case THUNDERSTRIKE_CHARGER_STATE_DISABLED: /* Indicates charger is disconnected */ break; case THUNDERSTRIKE_CHARGER_STATE_CHARGING: status = POWER_SUPPLY_STATUS_CHARGING; break; case THUNDERSTRIKE_CHARGER_STATE_FULL: status = POWER_SUPPLY_STATUS_FULL; break; case THUNDERSTRIKE_CHARGER_STATE_FAILED: status = POWER_SUPPLY_STATUS_NOT_CHARGING; hid_err(hdev, "Thunderstrike device failed to charge\n"); break; default: hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD state, %u\n", charger->state); break; } if (!charger->connected) status = POWER_SUPPLY_STATUS_DISCHARGING; spin_lock(&ts->psy_stats_lock); ts->psy_stats.charge_type = charge_type; ts->psy_stats.status = status; spin_unlock(&ts->psy_stats_lock); set_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(hdev, "Thunderstrike charger HOSTCMD response, connected: %u, type: %u, state: 
%u\n", charger->connected, charger->type, charger->state); } static inline void thunderstrike_device_init_info(struct shield_device *shield_dev) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); if (!test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_request_firmware_version(ts); if (!test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_request_board_info(ts); if (!test_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags) || !test_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_psy_stats_timer_handler(&ts->psy_stats_timer); } static int thunderstrike_parse_report(struct shield_device *shield_dev, struct hid_report *report, u8 *data, int size) { struct thunderstrike_hostcmd_resp_report *hostcmd_resp_report; struct hid_device *hdev = shield_dev->hdev; switch (report->id) { case THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID: if (size != THUNDERSTRIKE_HOSTCMD_REPORT_SIZE) { hid_err(hdev, "Encountered Thunderstrike HOSTCMD HID report with unexpected size %d\n", size); return -EINVAL; } hostcmd_resp_report = (struct thunderstrike_hostcmd_resp_report *)data; switch (hostcmd_resp_report->cmd_id) { case THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION: thunderstrike_parse_fw_version_payload( shield_dev, hostcmd_resp_report->fw_version); break; case THUNDERSTRIKE_HOSTCMD_ID_LED: thunderstrike_parse_led_payload(shield_dev, hostcmd_resp_report->led_state); break; case THUNDERSTRIKE_HOSTCMD_ID_BATTERY: thunderstrike_parse_battery_payload(shield_dev, &hostcmd_resp_report->battery); break; case THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO: thunderstrike_parse_board_info_payload( shield_dev, &hostcmd_resp_report->board_info); break; case THUNDERSTRIKE_HOSTCMD_ID_HAPTICS: thunderstrike_parse_haptics_payload( shield_dev, &hostcmd_resp_report->motors); break; case THUNDERSTRIKE_HOSTCMD_ID_USB_INIT: /* May block HOSTCMD requests till received initially */ thunderstrike_device_init_info(shield_dev); break; case THUNDERSTRIKE_HOSTCMD_ID_CHARGER: /* May block HOSTCMD requests till received initially */ thunderstrike_device_init_info(shield_dev); thunderstrike_parse_charger_payload( shield_dev, &hostcmd_resp_report->charger); break; default: hid_warn(hdev, "Unhandled Thunderstrike HOSTCMD id %d\n", hostcmd_resp_report->cmd_id); return -ENOENT; } break; default: return 0; } return 0; } static inline int thunderstrike_led_create(struct thunderstrike *ts) { struct led_classdev *led = &ts->led_dev; led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike%d:blue:led", ts->id); if (!led->name) return -ENOMEM; led->max_brightness = 1; led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN; led->brightness_get = &thunderstrike_led_get_brightness; led->brightness_set = &thunderstrike_led_set_brightness; return led_classdev_register(&ts->base.hdev->dev, led); } static inline int thunderstrike_psy_create(struct shield_device *shield_dev) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); struct power_supply_config psy_cfg = { .drv_data = shield_dev, }; struct hid_device *hdev = shield_dev->hdev; int ret; /* * Set an initial capacity and temperature value to avoid prematurely * triggering alerts. Will be replaced by values queried from initial * HOSTCMD requests. 
*/ ts->psy_stats.capacity = 100; ts->psy_stats.temp = 182; shield_dev->battery_dev.desc.properties = thunderstrike_battery_props; shield_dev->battery_dev.desc.num_properties = ARRAY_SIZE(thunderstrike_battery_props); shield_dev->battery_dev.desc.get_property = thunderstrike_battery_get_property; shield_dev->battery_dev.desc.type = POWER_SUPPLY_TYPE_BATTERY; shield_dev->battery_dev.desc.name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike_%d", ts->id); if (!shield_dev->battery_dev.desc.name) return -ENOMEM; shield_dev->battery_dev.psy = power_supply_register( &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg); if (IS_ERR(shield_dev->battery_dev.psy)) { hid_err(hdev, "Failed to register Thunderstrike battery device\n"); return PTR_ERR(shield_dev->battery_dev.psy); } ret = power_supply_powers(shield_dev->battery_dev.psy, &hdev->dev); if (ret) { hid_err(hdev, "Failed to associate battery device to Thunderstrike\n"); goto err; } return 0; err: power_supply_unregister(shield_dev->battery_dev.psy); return ret; } static struct shield_device *thunderstrike_create(struct hid_device *hdev) { struct shield_device *shield_dev; struct thunderstrike *ts; int ret; ts = devm_kzalloc(&hdev->dev, sizeof(*ts), GFP_KERNEL); if (!ts) return ERR_PTR(-ENOMEM); ts->req_report_dmabuf = devm_kzalloc( &hdev->dev, THUNDERSTRIKE_HOSTCMD_REPORT_SIZE, GFP_KERNEL); if (!ts->req_report_dmabuf) return ERR_PTR(-ENOMEM); shield_dev = &ts->base; shield_dev->hdev = hdev; shield_dev->codename = "Thunderstrike"; spin_lock_init(&ts->haptics_update_lock); spin_lock_init(&ts->psy_stats_lock); INIT_WORK(&ts->hostcmd_req_work, thunderstrike_hostcmd_req_work_handler); hid_set_drvdata(hdev, shield_dev); ts->id = ida_alloc(&thunderstrike_ida, GFP_KERNEL); if (ts->id < 0) return ERR_PTR(ts->id); ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect); if (IS_ERR(ts->haptics_dev)) { hid_err(hdev, "Failed to create Thunderstrike haptics instance\n"); ret = PTR_ERR(ts->haptics_dev); goto err_id; } ret = thunderstrike_psy_create(shield_dev); if (ret) { hid_err(hdev, "Failed to create Thunderstrike power supply instance\n"); goto err_haptics; } ret = thunderstrike_led_create(ts); if (ret) { hid_err(hdev, "Failed to create Thunderstrike LED instance\n"); goto err_psy; } timer_setup(&ts->psy_stats_timer, thunderstrike_psy_stats_timer_handler, 0); hid_info(hdev, "Registered Thunderstrike controller\n"); return shield_dev; err_psy: power_supply_unregister(shield_dev->battery_dev.psy); err_haptics: if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); err_id: ida_free(&thunderstrike_ida, ts->id); return ERR_PTR(ret); } static void thunderstrike_destroy(struct thunderstrike *ts) { led_classdev_unregister(&ts->led_dev); power_supply_unregister(ts->base.battery_dev.psy); if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); ida_free(&thunderstrike_ida, ts->id); } static int android_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case HID_USAGE_ANDROID_PLAYPAUSE_BTN: android_map_key(KEY_PLAYPAUSE); break; case HID_USAGE_ANDROID_VOLUMEUP_BTN: android_map_key(KEY_VOLUMEUP); break; case HID_USAGE_ANDROID_VOLUMEDOWN_BTN: android_map_key(KEY_VOLUMEDOWN); break; case HID_USAGE_ANDROID_SEARCH_BTN: android_map_key(BTN_Z); break; case HID_USAGE_ANDROID_HOME_BTN: android_map_key(BTN_MODE); break; case 
HID_USAGE_ANDROID_BACK_BTN: android_map_key(BTN_SELECT); break; default: return 0; } return 1; } static ssize_t firmware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags)) ret = sysfs_emit(buf, "0x%04X\n", shield_dev->fw_version); else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(firmware_version); static ssize_t hardware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; char board_revision_str[4]; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) { shield_strrev(board_revision_str, 4, shield_dev->board_info.revision); ret = sysfs_emit(buf, "%s BOARD_REVISION_%s (0x%04X)\n", shield_dev->codename, board_revision_str, shield_dev->board_info.revision); } else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(hardware_version); static ssize_t serial_number_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) ret = sysfs_emit(buf, "%s\n", shield_dev->board_info.serial_number); else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(serial_number); static struct attribute *shield_device_attrs[] = { &dev_attr_firmware_version.attr, &dev_attr_hardware_version.attr, &dev_attr_serial_number.attr, NULL, }; ATTRIBUTE_GROUPS(shield_device); static int shield_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct shield_device *dev = hid_get_drvdata(hdev); return thunderstrike_parse_report(dev, report, data, size); } static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct shield_device *shield_dev = NULL; struct thunderstrike *ts; int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "Parse failed\n"); return ret; } switch (id->product) { case USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER: shield_dev = thunderstrike_create(hdev); break; } if (unlikely(!shield_dev)) { hid_err(hdev, "Failed to identify SHIELD device\n"); return -ENODEV; } if (IS_ERR(shield_dev)) { hid_err(hdev, "Failed to create SHIELD device\n"); return PTR_ERR(shield_dev); } ts = container_of(shield_dev, struct thunderstrike, base); ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT); if (ret) { hid_err(hdev, "Failed to start HID device\n"); goto err_ts_create; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "Failed to open HID device\n"); goto err_stop; } thunderstrike_device_init_info(shield_dev); return ret; err_stop: hid_hw_stop(hdev); err_ts_create: thunderstrike_destroy(ts); return ret; } static void shield_remove(struct hid_device *hdev) { struct shield_device *dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(dev, struct thunderstrike, base); hid_hw_close(hdev); thunderstrike_destroy(ts); timer_delete_sync(&ts->psy_stats_timer); cancel_work_sync(&ts->hostcmd_req_work); hid_hw_stop(hdev); } static const struct hid_device_id shield_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER) }, 
{ HID_USB_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER) }, { } }; MODULE_DEVICE_TABLE(hid, shield_devices); static struct hid_driver shield_driver = { .name = "shield", .id_table = shield_devices, .input_mapping = android_input_mapping, .probe = shield_probe, .remove = shield_remove, .raw_event = shield_raw_event, .driver = { .dev_groups = shield_device_groups, }, }; module_hid_driver(shield_driver); MODULE_AUTHOR("Rahul Rameshbabu <rrameshbabu@nvidia.com>"); MODULE_DESCRIPTION("HID Driver for NVIDIA SHIELD peripherals."); MODULE_LICENSE("GPL");
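The rumble path above divides the 16-bit ff_effect magnitudes by 2047, so a full-scale 0xffff request lands on the device maximum of 32. What follows is a minimal userspace sketch of driving that path through the standard evdev force-feedback interface; the /dev/input/event0 node is a placeholder, and this illustrates the generic FF API rather than anything shipped with the driver.

/*
 * Illustrative userspace sketch (not part of the driver): uploads an
 * FF_RUMBLE effect and plays it once. The event node path is a
 * placeholder; the magnitude comments mirror the /2047 scaling in
 * thunderstrike_play_effect() above.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd;

	fd = open("/dev/input/event0", O_RDWR);	/* placeholder node */
	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;	/* let the kernel allocate an effect slot */
	effect.u.rumble.strong_magnitude = 0xffff;	/* -> motor value 32 */
	effect.u.rumble.weak_magnitude = 0x7fff;	/* -> motor value 16 */
	effect.replay.length = 500;	/* ms */

	if (ioctl(fd, EVIOCSFF, &effect) < 0)	/* upload, fills effect.id */
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;	/* start playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		return 1;

	usleep(600 * 1000);
	close(fd);
	return 0;
}

The same upload-then-write sequence works for any FF_RUMBLE-capable evdev device; only the scaling comments are Thunderstrike-specific.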
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/vmalloc.h>

#include "physaddr.h"

#ifdef CONFIG_X86_64

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
	}

	return x;
}
EXPORT_SYMBOL(__phys_addr);

unsigned long __phys_addr_symbol(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* only check upper bounds since lower bounds will trigger carry */
	VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);

	return y + phys_base;
}
EXPORT_SYMBOL(__phys_addr_symbol);
#endif

bool __virt_addr_valid(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	unsigned long phys_addr = x - PAGE_OFFSET;

	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	/* max_low_pfn is set early, but not _that_ early */
	if (max_low_pfn) {
		VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
		BUG_ON(slow_virt_to_phys((void *)x) != phys_addr);
	}
	return phys_addr;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif	/* CONFIG_X86_64 */
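The x86-64 branches above lean on unsigned wraparound: after y = x - __START_KERNEL_map, the test x > y is true exactly when the subtraction did not borrow, i.e. when x >= __START_KERNEL_map, so one subtraction doubles as the range check. A standalone sketch demonstrating the identity, with BASE as a stand-in constant rather than the real kernel map address:

/*
 * Standalone illustration of the wraparound test used above. BASE is a
 * stand-in value, not the real __START_KERNEL_map. For unsigned x,
 * y = x - BASE wraps around (borrows) when x < BASE, making y larger
 * than x, so (x > y) is equivalent to (x >= BASE) with no extra compare.
 */
#include <assert.h>
#include <stdint.h>

#define BASE 0xffffffff80000000ULL	/* stand-in for __START_KERNEL_map */

static int at_or_above_base(uint64_t x)
{
	uint64_t y = x - BASE;	/* borrows when x < BASE */

	return x > y;	/* mirrors the carry-flag check in __phys_addr() */
}

int main(void)
{
	assert(at_or_above_base(BASE));		/* no borrow, y == 0 */
	assert(at_or_above_base(BASE + 123));	/* no borrow, y small */
	assert(!at_or_above_base(BASE - 1));	/* borrow, y is huge */
	assert(!at_or_above_base(0x1000));	/* borrow, y is huge */
	return 0;
}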
// SPDX-License-Identifier: GPL-2.0-or-later /* * OSS compatible sequencer driver * * synth device handlers * * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de> */ #include "seq_oss_synth.h" #include "seq_oss_midi.h" #include "../seq_lock.h" #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/nospec.h> /* * constants */ #define SNDRV_SEQ_OSS_MAX_SYNTH_NAME 30 #define MAX_SYSEX_BUFLEN 128 /* * definition of synth info records */ /* synth info */ struct seq_oss_synth { int seq_device; /* for synth_info */ int synth_type; int synth_subtype; int nr_voices; char name[SNDRV_SEQ_OSS_MAX_SYNTH_NAME]; struct snd_seq_oss_callback oper; int opened; void *private_data; snd_use_lock_t use_lock; }; /* * device table */ static int max_synth_devs; static struct seq_oss_synth *synth_devs[SNDRV_SEQ_OSS_MAX_SYNTH_DEVS]; static struct seq_oss_synth midi_synth_dev = { .seq_device = -1, .synth_type = SYNTH_TYPE_MIDI, .synth_subtype = 0, .nr_voices = 16, .name = "MIDI", }; static DEFINE_SPINLOCK(register_lock); /* *
prototypes */ static struct seq_oss_synth *get_synthdev(struct seq_oss_devinfo *dp, int dev); static void reset_channels(struct seq_oss_synthinfo *info); /* * global initialization */ void __init snd_seq_oss_synth_init(void) { snd_use_lock_init(&midi_synth_dev.use_lock); } /* * registration of the synth device */ int snd_seq_oss_synth_probe(struct device *_dev) { struct snd_seq_device *dev = to_seq_dev(_dev); int i; struct seq_oss_synth *rec; struct snd_seq_oss_reg *reg = SNDRV_SEQ_DEVICE_ARGPTR(dev); unsigned long flags; rec = kzalloc(sizeof(*rec), GFP_KERNEL); if (!rec) return -ENOMEM; rec->seq_device = -1; rec->synth_type = reg->type; rec->synth_subtype = reg->subtype; rec->nr_voices = reg->nvoices; rec->oper = reg->oper; rec->private_data = reg->private_data; rec->opened = 0; snd_use_lock_init(&rec->use_lock); /* copy and truncate the name of synth device */ strscpy(rec->name, dev->name, sizeof(rec->name)); /* registration */ spin_lock_irqsave(&register_lock, flags); for (i = 0; i < max_synth_devs; i++) { if (synth_devs[i] == NULL) break; } if (i >= max_synth_devs) { if (max_synth_devs >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS) { spin_unlock_irqrestore(&register_lock, flags); pr_err("ALSA: seq_oss: no more synth slot\n"); kfree(rec); return -ENOMEM; } max_synth_devs++; } rec->seq_device = i; synth_devs[i] = rec; spin_unlock_irqrestore(&register_lock, flags); dev->driver_data = rec; #ifdef SNDRV_OSS_INFO_DEV_SYNTH if (i < SNDRV_CARDS) snd_oss_info_register(SNDRV_OSS_INFO_DEV_SYNTH, i, rec->name); #endif return 0; } int snd_seq_oss_synth_remove(struct device *_dev) { struct snd_seq_device *dev = to_seq_dev(_dev); int index; struct seq_oss_synth *rec = dev->driver_data; unsigned long flags; spin_lock_irqsave(&register_lock, flags); for (index = 0; index < max_synth_devs; index++) { if (synth_devs[index] == rec) break; } if (index >= max_synth_devs) { spin_unlock_irqrestore(&register_lock, flags); pr_err("ALSA: seq_oss: can't unregister synth\n"); return -EINVAL; } synth_devs[index] = NULL; if (index == max_synth_devs - 1) { for (index--; index >= 0; index--) { if (synth_devs[index]) break; } max_synth_devs = index + 1; } spin_unlock_irqrestore(&register_lock, flags); #ifdef SNDRV_OSS_INFO_DEV_SYNTH if (rec->seq_device < SNDRV_CARDS) snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_SYNTH, rec->seq_device); #endif snd_use_lock_sync(&rec->use_lock); kfree(rec); return 0; } /* */ static struct seq_oss_synth * get_sdev(int dev) { struct seq_oss_synth *rec; unsigned long flags; spin_lock_irqsave(&register_lock, flags); rec = synth_devs[dev]; if (rec) snd_use_lock_use(&rec->use_lock); spin_unlock_irqrestore(&register_lock, flags); return rec; } /* * set up synth tables */ void snd_seq_oss_synth_setup(struct seq_oss_devinfo *dp) { int i; struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; dp->max_synthdev = max_synth_devs; dp->synth_opened = 0; memset(dp->synths, 0, sizeof(dp->synths)); for (i = 0; i < dp->max_synthdev; i++) { rec = get_sdev(i); if (rec == NULL) continue; if (rec->oper.open == NULL || rec->oper.close == NULL) { snd_use_lock_free(&rec->use_lock); continue; } info = &dp->synths[i]; info->arg.app_index = dp->port; info->arg.file_mode = dp->file_mode; info->arg.seq_mode = dp->seq_mode; if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_SYNTH) info->arg.event_passing = SNDRV_SEQ_OSS_PROCESS_EVENTS; else info->arg.event_passing = SNDRV_SEQ_OSS_PASS_EVENTS; info->opened = 0; if (!try_module_get(rec->oper.owner)) { snd_use_lock_free(&rec->use_lock); continue; } if (rec->oper.open(&info->arg, 
rec->private_data) < 0) { module_put(rec->oper.owner); snd_use_lock_free(&rec->use_lock); continue; } info->nr_voices = rec->nr_voices; if (info->nr_voices > 0) { info->ch = kcalloc(info->nr_voices, sizeof(struct seq_oss_chinfo), GFP_KERNEL); if (!info->ch) { rec->oper.close(&info->arg); module_put(rec->oper.owner); snd_use_lock_free(&rec->use_lock); continue; } reset_channels(info); } info->opened++; rec->opened++; dp->synth_opened++; snd_use_lock_free(&rec->use_lock); } } /* * set up synth tables for MIDI emulation - /dev/music mode only */ void snd_seq_oss_synth_setup_midi(struct seq_oss_devinfo *dp) { int i; if (dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS) return; for (i = 0; i < dp->max_mididev; i++) { struct seq_oss_synthinfo *info; info = &dp->synths[dp->max_synthdev]; if (snd_seq_oss_midi_open(dp, i, dp->file_mode) < 0) continue; info->arg.app_index = dp->port; info->arg.file_mode = dp->file_mode; info->arg.seq_mode = dp->seq_mode; info->arg.private_data = info; info->is_midi = 1; info->midi_mapped = i; info->arg.event_passing = SNDRV_SEQ_OSS_PASS_EVENTS; snd_seq_oss_midi_get_addr(dp, i, &info->arg.addr); info->opened = 1; midi_synth_dev.opened++; dp->max_synthdev++; if (dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS) break; } } /* * clean up synth tables */ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp) { int i; struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS)) return; for (i = 0; i < dp->max_synthdev; i++) { info = &dp->synths[i]; if (! info->opened) continue; if (info->is_midi) { if (midi_synth_dev.opened > 0) { snd_seq_oss_midi_close(dp, info->midi_mapped); midi_synth_dev.opened--; } } else { rec = get_sdev(i); if (rec == NULL) continue; if (rec->opened > 0) { rec->oper.close(&info->arg); module_put(rec->oper.owner); rec->opened = 0; } snd_use_lock_free(&rec->use_lock); } kfree(info->ch); info->ch = NULL; } dp->synth_opened = 0; dp->max_synthdev = 0; } static struct seq_oss_synthinfo * get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev) { if (dev < 0 || dev >= dp->max_synthdev) return NULL; dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS); return &dp->synths[dev]; } /* * return synth device information pointer */ static struct seq_oss_synth * get_synthdev(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); if (!info) return NULL; if (!info->opened) return NULL; if (info->is_midi) { rec = &midi_synth_dev; snd_use_lock_use(&rec->use_lock); } else { rec = get_sdev(dev); if (!rec) return NULL; } if (! rec->opened) { snd_use_lock_free(&rec->use_lock); return NULL; } return rec; } /* * reset note and velocity on each channel. */ static void reset_channels(struct seq_oss_synthinfo *info) { int i; if (info->ch == NULL || ! info->nr_voices) return; for (i = 0; i < info->nr_voices; i++) { info->ch[i].note = -1; info->ch[i].vel = 0; } } /* * reset synth device: * call reset callback. if no callback is defined, send a heartbeat * event to the corresponding port. 
*/ void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; info = get_synthinfo_nospec(dp, dev); if (!info || !info->opened) return; reset_channels(info); if (info->is_midi) { if (midi_synth_dev.opened <= 0) return; snd_seq_oss_midi_reset(dp, info->midi_mapped); /* reopen the device */ snd_seq_oss_midi_close(dp, dev); if (snd_seq_oss_midi_open(dp, info->midi_mapped, dp->file_mode) < 0) { midi_synth_dev.opened--; info->opened = 0; kfree(info->ch); info->ch = NULL; } return; } rec = get_sdev(dev); if (rec == NULL) return; if (rec->oper.reset) { rec->oper.reset(&info->arg); } else { struct snd_seq_event ev; memset(&ev, 0, sizeof(ev)); snd_seq_oss_fill_addr(dp, &ev, info->arg.addr.client, info->arg.addr.port); ev.type = SNDRV_SEQ_EVENT_RESET; snd_seq_oss_dispatch(dp, &ev, 0, 0); } snd_use_lock_free(&rec->use_lock); } /* * load a patch record: * call load_patch callback function */ int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, const char __user *buf, int p, int c) { struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; int rc; info = get_synthinfo_nospec(dp, dev); if (!info) return -ENXIO; if (info->is_midi) return 0; rec = get_synthdev(dp, dev); if (!rec) return -ENXIO; if (rec->oper.load_patch == NULL) rc = -ENXIO; else rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c); snd_use_lock_free(&rec->use_lock); return rc; } /* * check if the device is valid synth device and return the synth info */ struct seq_oss_synthinfo * snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; rec = get_synthdev(dp, dev); if (rec) { snd_use_lock_free(&rec->use_lock); return get_synthinfo_nospec(dp, dev); } return NULL; } /* * receive OSS 6 byte sysex packet: * the event is filled and prepared for sending immediately * (i.e. 
sysex messages are fragmented) */ int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, struct snd_seq_event *ev) { unsigned char *p; int len = 6; p = memchr(buf, 0xff, 6); if (p) len = p - buf + 1; /* copy the data to event record and send it */ if (snd_seq_oss_synth_addr(dp, dev, ev)) return -EINVAL; ev->flags = SNDRV_SEQ_EVENT_LENGTH_VARIABLE; ev->data.ext.len = len; ev->data.ext.ptr = buf; return 0; } /* * fill the event source/destination addresses */ int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) { struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev); if (!info) return -EINVAL; snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client, info->arg.addr.port); return 0; } /* * OSS compatible ioctl */ int snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) { struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; int rc; info = get_synthinfo_nospec(dp, dev); if (!info || info->is_midi) return -ENXIO; rec = get_synthdev(dp, dev); if (!rec) return -ENXIO; if (rec->oper.ioctl == NULL) rc = -ENXIO; else rc = rec->oper.ioctl(&info->arg, cmd, addr); snd_use_lock_free(&rec->use_lock); return rc; } /* * send OSS raw events - SEQ_PRIVATE and SEQ_VOLUME */ int snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) { struct seq_oss_synthinfo *info; info = snd_seq_oss_synth_info(dp, dev); if (!info || info->is_midi) return -ENXIO; ev->type = SNDRV_SEQ_EVENT_OSS; memcpy(ev->data.raw8.d, data, 8); return snd_seq_oss_synth_addr(dp, dev, ev); } /* * create OSS compatible synth_info record */ int snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf) { struct seq_oss_synth *rec; struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); if (!info) return -ENXIO; if (info->is_midi) { struct midi_info minf; if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf)) return -ENXIO; inf->synth_type = SYNTH_TYPE_MIDI; inf->synth_subtype = 0; inf->nr_voices = 16; inf->device = dev; strscpy(inf->name, minf.name, sizeof(inf->name)); } else { rec = get_synthdev(dp, dev); if (!rec) return -ENXIO; inf->synth_type = rec->synth_type; inf->synth_subtype = rec->synth_subtype; inf->nr_voices = rec->nr_voices; inf->device = dev; strscpy(inf->name, rec->name, sizeof(inf->name)); snd_use_lock_free(&rec->use_lock); } return 0; } #ifdef CONFIG_SND_PROC_FS /* * proc interface */ void snd_seq_oss_synth_info_read(struct snd_info_buffer *buf) { int i; struct seq_oss_synth *rec; snd_iprintf(buf, "\nNumber of synth devices: %d\n", max_synth_devs); for (i = 0; i < max_synth_devs; i++) { snd_iprintf(buf, "\nsynth %d: ", i); rec = get_sdev(i); if (rec == NULL) { snd_iprintf(buf, "*empty*\n"); continue; } snd_iprintf(buf, "[%s]\n", rec->name); snd_iprintf(buf, " type 0x%x : subtype 0x%x : voices %d\n", rec->synth_type, rec->synth_subtype, rec->nr_voices); snd_iprintf(buf, " capabilities : ioctl %s / load_patch %s\n", str_enabled_disabled((long)rec->oper.ioctl), str_enabled_disabled((long)rec->oper.load_patch)); snd_use_lock_free(&rec->use_lock); } } #endif /* CONFIG_SND_PROC_FS */
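Much of the locking in this file follows one pattern: lookups under register_lock take a use count on the record (snd_use_lock_use() inside get_sdev()), every caller drops it with snd_use_lock_free(), and snd_seq_oss_synth_remove() calls snd_use_lock_sync() so the record is only kfree()d after all readers are gone. Below is a minimal kernel-style sketch of that pattern, with invented toy_* names and a plain atomic_t standing in for snd_use_lock_t; it is an illustration of the idea, not the ALSA implementation.

/*
 * Illustrative only: invented toy_* names; an atomic_t stands in for
 * snd_use_lock_t. Lookups take a use count under the spinlock, callers
 * drop it when done, and removal unpublishes the record first and then
 * waits for the count to drain before freeing.
 */
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct toy_dev {
	int id;
	atomic_t use_count;
};

static struct toy_dev *toy_devs[8];
static DEFINE_SPINLOCK(toy_lock);

static struct toy_dev *toy_get(int dev)
{
	struct toy_dev *rec;
	unsigned long flags;

	spin_lock_irqsave(&toy_lock, flags);
	rec = toy_devs[dev];
	if (rec)
		atomic_inc(&rec->use_count);	/* like snd_use_lock_use() */
	spin_unlock_irqrestore(&toy_lock, flags);
	return rec;
}

static void toy_put(struct toy_dev *rec)
{
	atomic_dec(&rec->use_count);	/* like snd_use_lock_free() */
}

static void toy_remove(int dev)
{
	struct toy_dev *rec;
	unsigned long flags;

	spin_lock_irqsave(&toy_lock, flags);
	rec = toy_devs[dev];
	toy_devs[dev] = NULL;	/* unpublish: no new lookups succeed */
	spin_unlock_irqrestore(&toy_lock, flags);

	if (!rec)
		return;
	/* like snd_use_lock_sync(): wait for in-flight users to drop out */
	while (atomic_read(&rec->use_count))
		msleep(1);
	kfree(rec);
}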
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/slab.h> #include <linux/sched/rt.h> #include <linux/sched/task.h> #include "futex.h" #include "../locking/rtmutex_common.h" /* * PI code: */ int refill_pi_state_cache(void) { struct futex_pi_state *pi_state; if (likely(current->pi_state_cache)) return 0; pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); if (!pi_state) return -ENOMEM; INIT_LIST_HEAD(&pi_state->list); /* pi_mutex gets initialized later */ pi_state->owner = NULL; refcount_set(&pi_state->refcount, 1); pi_state->key = FUTEX_KEY_INIT; current->pi_state_cache = pi_state; return 0; } static struct futex_pi_state *alloc_pi_state(void) { struct futex_pi_state *pi_state = current->pi_state_cache; WARN_ON(!pi_state); current->pi_state_cache = NULL; return pi_state; } static void pi_state_update_owner(struct futex_pi_state *pi_state, struct task_struct *new_owner) { struct task_struct *old_owner = pi_state->owner; lockdep_assert_held(&pi_state->pi_mutex.wait_lock); if (old_owner) { raw_spin_lock(&old_owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); raw_spin_unlock(&old_owner->pi_lock); } if (new_owner) { raw_spin_lock(&new_owner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; raw_spin_unlock(&new_owner->pi_lock); } } void get_pi_state(struct futex_pi_state *pi_state) { 
WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount)); } /* * Drops a reference to the pi_state object and frees or caches it * when the last reference is gone. */ void put_pi_state(struct futex_pi_state *pi_state) { if (!pi_state) return; if (!refcount_dec_and_test(&pi_state->refcount)) return; /* * If pi_state->owner is NULL, the owner is most probably dying * and has cleaned up the pi_state already */ if (pi_state->owner) { unsigned long flags; raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); pi_state_update_owner(pi_state, NULL); rt_mutex_proxy_unlock(&pi_state->pi_mutex); raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); } if (current->pi_state_cache) { kfree(pi_state); } else { /* * pi_state->list is already empty. * clear pi_state->owner. * refcount is at 0 - put it back to 1. */ pi_state->owner = NULL; refcount_set(&pi_state->refcount, 1); current->pi_state_cache = pi_state; } } /* * We need to check the following states: * * Waiter | pi_state | pi->owner | uTID | uODIED | ? * * [1] NULL | --- | --- | 0 | 0/1 | Valid * [2] NULL | --- | --- | >0 | 0/1 | Valid * * [3] Found | NULL | -- | Any | 0/1 | Invalid * * [4] Found | Found | NULL | 0 | 1 | Valid * [5] Found | Found | NULL | >0 | 1 | Invalid * * [6] Found | Found | task | 0 | 1 | Valid * * [7] Found | Found | NULL | Any | 0 | Invalid * * [8] Found | Found | task | ==taskTID | 0/1 | Valid * [9] Found | Found | task | 0 | 0 | Invalid * [10] Found | Found | task | !=taskTID | 0/1 | Invalid * * [1] Indicates that the kernel can acquire the futex atomically. We * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. * * [2] Valid, if TID does not belong to a kernel thread. If no matching * thread is found then it indicates that the owner TID has died. * * [3] Invalid. The waiter is queued on a non PI futex * * [4] Valid state after exit_robust_list(), which sets the user space * value to FUTEX_WAITERS | FUTEX_OWNER_DIED. * * [5] The user space value got manipulated between exit_robust_list() * and exit_pi_state_list() * * [6] Valid state after exit_pi_state_list() which sets the new owner in * the pi_state but cannot access the user space value. * * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set. * * [8] Owner and user space value match * * [9] There is no transient state which sets the user space TID to 0 * except exit_robust_list(), but this is indicated by the * FUTEX_OWNER_DIED bit. See [4] * * [10] There is no transient state which leaves owner and user space * TID out of sync. Except one error case where the kernel is denied * write access to the user address, see fixup_pi_state_owner(). * * * Serialization and lifetime rules: * * hb->lock: * * hb -> futex_q, relation * futex_q -> pi_state, relation * * (cannot be raw because hb can contain arbitrary amount * of futex_q's) * * pi_mutex->wait_lock: * * {uval, pi_state} * * (and pi_mutex 'obviously') * * p->pi_lock: * * p->pi_state_list -> pi_state->list, relation * pi_mutex->owner -> pi_state->owner, relation * * pi_state->refcount: * * pi_state lifetime * * * Lock order: * * hb->lock * pi_mutex->wait_lock * p->pi_lock * */ /* * Validate that the existing waiter has a pi_state and sanity check * the pi_state against the user space value. If correct, attach to * it. 
*/ static int attach_to_pi_state(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state, struct futex_pi_state **ps) { pid_t pid = uval & FUTEX_TID_MASK; u32 uval2; int ret; /* * Userspace might have messed up non-PI and PI futexes [3] */ if (unlikely(!pi_state)) return -EINVAL; /* * We get here with hb->lock held, and having found a * futex_top_waiter(). This means that futex_lock_pi() of said futex_q * has dropped the hb->lock in between futex_queue() and futex_unqueue_pi(), * which in turn means that futex_lock_pi() still has a reference on * our pi_state. * * The waiter holding a reference on @pi_state also protects against * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() * and futex_wait_requeue_pi() as it cannot go to 0 and consequently * free pi_state before we can take a reference ourselves. */ WARN_ON(!refcount_read(&pi_state->refcount)); /* * Now that we have a pi_state, we can acquire wait_lock * and do the state validation. */ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); /* * Since {uval, pi_state} is serialized by wait_lock, and our current * uval was read without holding it, it can have changed. Verify it * still is what we expect it to be, otherwise retry the entire * operation. */ if (futex_get_value_locked(&uval2, uaddr)) goto out_efault; if (uval != uval2) goto out_eagain; /* * Handle the owner died case: */ if (uval & FUTEX_OWNER_DIED) { /* * exit_pi_state_list sets owner to NULL and wakes the * topmost waiter. The task which acquires the * pi_state->rt_mutex will fixup owner. */ if (!pi_state->owner) { /* * No pi state owner, but the user space TID * is not 0. Inconsistent state. [5] */ if (pid) goto out_einval; /* * Take a ref on the state and return success. [4] */ goto out_attach; } /* * If TID is 0, then either the dying owner has not * yet executed exit_pi_state_list() or some waiter * acquired the rtmutex in the pi state, but did not * yet fixup the TID in user space. * * Take a ref on the state and return success. [6] */ if (!pid) goto out_attach; } else { /* * If the owner died bit is not set, then the pi_state * must have an owner. [7] */ if (!pi_state->owner) goto out_einval; } /* * Bail out if user space manipulated the futex value. If pi * state exists then the owner TID must be the same as the * user space TID. [9/10] */ if (pid != task_pid_vnr(pi_state->owner)) goto out_einval; out_attach: get_pi_state(pi_state); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); *ps = pi_state; return 0; out_einval: ret = -EINVAL; goto out_error; out_eagain: ret = -EAGAIN; goto out_error; out_efault: ret = -EFAULT; goto out_error; out_error: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); return ret; } static int handle_exit_race(u32 __user *uaddr, u32 uval, struct task_struct *tsk) { u32 uval2; /* * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the * caller that the alleged owner is busy. */ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) return -EBUSY; /* * Reread the user space value to handle the following situation: * * CPU0 CPU1 * * sys_exit() sys_futex() * do_exit() futex_lock_pi() * futex_lock_pi_atomic() * exit_signals(tsk) No waiters: * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID * mm_release(tsk) Set waiter bit * exit_robust_list(tsk) { *uaddr = 0x80000PID; * Set owner died attach_to_pi_owner() { * *uaddr = 0xC0000000; tsk = get_task(PID); * } if (!tsk->flags & PF_EXITING) { * ... 
attach(); * tsk->futex_state = } else { * FUTEX_STATE_DEAD; if (tsk->futex_state != * FUTEX_STATE_DEAD) * return -EAGAIN; * return -ESRCH; <--- FAIL * } * * Returning ESRCH unconditionally is wrong here because the * user space value has been changed by the exiting task. * * The same logic applies to the case where the exiting task is * already gone. */ if (futex_get_value_locked(&uval2, uaddr)) return -EFAULT; /* If the user space value has changed, try again. */ if (uval2 != uval) return -EAGAIN; /* * The exiting task did not have a robust list, the robust list was * corrupted or the user space value in *uaddr is simply bogus. * Give up and tell user space. */ return -ESRCH; } static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key, struct futex_pi_state **ps) { /* * No existing pi state. First waiter. [2] * * This creates pi_state, we have hb->lock held, this means nothing can * observe this state, wait_lock is irrelevant. */ struct futex_pi_state *pi_state = alloc_pi_state(); /* * Initialize the pi_mutex in locked state and make @p * the owner of it: */ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); /* Store the key for possible exit cleanups: */ pi_state->key = *key; WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); /* * Assignment without holding pi_state->pi_mutex.wait_lock is safe * because there is no concurrency as the object is not published yet. */ pi_state->owner = p; *ps = pi_state; } /* * Lookup the task for the TID provided from user space and attach to * it after doing proper sanity checks. */ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, struct futex_pi_state **ps, struct task_struct **exiting) { pid_t pid = uval & FUTEX_TID_MASK; struct task_struct *p; /* * We are the first waiter - try to look up the real owner and attach * the new pi_state to it, but bail out when TID = 0 [1] * * The !pid check is paranoid. None of the call sites should end up * with pid == 0, but better safe than sorry. Let the caller retry */ if (!pid) return -EAGAIN; p = find_get_task_by_vpid(pid); if (!p) return handle_exit_race(uaddr, uval, NULL); if (unlikely(p->flags & PF_KTHREAD)) { put_task_struct(p); return -EPERM; } /* * We need to look at the task state to figure out, whether the * task is exiting. To protect against the change of the task state * in futex_exit_release(), we do this protected by p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->futex_state != FUTEX_STATE_OK)) { /* * The task is on the way out. When the futex state is * FUTEX_STATE_DEAD, we know that the task has finished * the cleanup: */ int ret = handle_exit_race(uaddr, uval, p); raw_spin_unlock_irq(&p->pi_lock); /* * If the owner task is between FUTEX_STATE_EXITING and * FUTEX_STATE_DEAD then store the task pointer and keep * the reference on the task struct. The calling code will * drop all locks, wait for the task to reach * FUTEX_STATE_DEAD and then drop the refcount. This is * required to prevent a live lock when the current task * preempted the exiting task between the two states. 
*/ if (ret == -EBUSY) *exiting = p; else put_task_struct(p); return ret; } __attach_to_pi_owner(p, key, ps); raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return 0; } static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) { int err; u32 curval; if (unlikely(should_fail_futex(true))) return -EFAULT; err = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval); if (unlikely(err)) return err; /* If user space value changed, let the caller retry */ return curval != uval ? -EAGAIN : 0; } /** * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex * @uaddr: the pi futex user address * @hb: the pi futex hash bucket * @key: the futex key associated with uaddr and hb * @ps: the pi_state pointer where we store the result of the * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. * @exiting: Pointer to store the task pointer of the owner task * which is in the middle of exiting * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Return: * - 0 - ready to wait; * - 1 - acquired the lock; * - <0 - error * * The hb->lock must be held by the caller. * * @exiting is only set when the return value is -EBUSY. If so, this holds * a refcount on the exiting task on return and the caller needs to drop it * after waiting for the exit to complete. */ int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task, struct task_struct **exiting, int set_waiters) { u32 uval, newval, vpid = task_pid_vnr(task); struct futex_q *top_waiter; int ret; /* * Read the user space value first so we can validate a few * things before proceeding further. */ if (futex_get_value_locked(&uval, uaddr)) return -EFAULT; if (unlikely(should_fail_futex(true))) return -EFAULT; /* * Detect deadlocks. */ if ((unlikely((uval & FUTEX_TID_MASK) == vpid))) return -EDEADLK; if ((unlikely(should_fail_futex(true)))) return -EDEADLK; /* * Lookup existing state first. If it exists, try to attach to * its pi_state. */ top_waiter = futex_top_waiter(hb, key); if (top_waiter) return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); /* * No waiter and user TID is 0. We are here because the * waiters or the owner died bit is set or called from * requeue_cmp_pi or for whatever reason something took the * syscall. */ if (!(uval & FUTEX_TID_MASK)) { /* * We take over the futex. No other waiters and the user space * TID is 0. We preserve the owner died bit. */ newval = uval & FUTEX_OWNER_DIED; newval |= vpid; /* The futex requeue_pi code can enforce the waiters bit */ if (set_waiters) newval |= FUTEX_WAITERS; ret = lock_pi_update_atomic(uaddr, uval, newval); if (ret) return ret; /* * If the waiter bit was requested the caller also needs PI * state attached to the new owner of the user space futex. * * @task is guaranteed to be alive and it cannot be exiting * because it is either sleeping or waiting in * futex_requeue_pi_wakeup_sync(). * * No need to do the full attach_to_pi_owner() exercise * because @task is known and valid. */ if (set_waiters) { raw_spin_lock_irq(&task->pi_lock); __attach_to_pi_owner(task, key, ps); raw_spin_unlock_irq(&task->pi_lock); } return 1; } /* * First waiter. Set the waiters bit before attaching ourself to * the owner. If owner tries to unlock, it will be forced into * the kernel and blocked on hb->lock. 
*/ newval = uval | FUTEX_WAITERS; ret = lock_pi_update_atomic(uaddr, uval, newval); if (ret) return ret; /* * If the update of the user space value succeeded, we try to * attach to the owner. If that fails, no harm done, we only * set the FUTEX_WAITERS bit in the user space variable. */ return attach_to_pi_owner(uaddr, newval, key, ps, exiting); } /* * Caller must hold a reference on @pi_state. */ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state, struct rt_mutex_waiter *top_waiter) { struct task_struct *new_owner; bool postunlock = false; DEFINE_RT_WAKE_Q(wqh); u32 curval, newval; int ret = 0; new_owner = top_waiter->task; /* * We pass it to the next owner. The WAITERS bit is always kept * enabled while there is PI state around. We cleanup the owner * died bit, because we are the owner. */ newval = FUTEX_WAITERS | task_pid_vnr(new_owner); if (unlikely(should_fail_futex(true))) { ret = -EFAULT; goto out_unlock; } ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval); if (!ret && (curval != uval)) { /* * If a unconditional UNLOCK_PI operation (user space did not * try the TID->0 transition) raced with a waiter setting the * FUTEX_WAITERS flag between get_user() and locking the hash * bucket lock, retry the operation. */ if ((FUTEX_TID_MASK & curval) == uval) ret = -EAGAIN; else ret = -EINVAL; } if (!ret) { /* * This is a point of no return; once we modified the uval * there is no going back and subsequent operations must * not fail. */ pi_state_update_owner(pi_state, new_owner); postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh); } out_unlock: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); if (postunlock) rt_mutex_postunlock(&wqh); return ret; } static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *argowner) { struct futex_pi_state *pi_state = q->pi_state; struct task_struct *oldowner, *newowner; u32 uval, curval, newval, newtid; int err = 0; oldowner = pi_state->owner; /* * We are here because either: * * - we stole the lock and pi_state->owner needs updating to reflect * that (@argowner == current), * * or: * * - someone stole our lock and we need to fix things to point to the * new owner (@argowner == NULL). * * Either way, we have to replace the TID in the user space variable. * This must be atomic as we have to preserve the owner died bit here. * * Note: We write the user space value _before_ changing the pi_state * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory readonly for cow. * * Modifying pi_state _before_ the user space value would leave the * pi_state in an inconsistent state when we fault here, because we * need to drop the locks to handle the fault. This might be observed * in the PID checks when attaching to PI state . */ retry: if (!argowner) { if (oldowner != current) { /* * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ return 0; } if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { /* We got the lock. pi_state is correct. Tell caller. */ return 1; } /* * The trylock just failed, so either there is an owner or * there is a higher priority waiter than this one. */ newowner = rt_mutex_owner(&pi_state->pi_mutex); /* * If the higher priority waiter has not yet taken over the * rtmutex then newowner is NULL. We can't return here with * that state because it's inconsistent vs. the user space * state. So drop the locks and try again. 
It's a valid * situation and not any different from the other retry * conditions. */ if (unlikely(!newowner)) { err = -EAGAIN; goto handle_err; } } else { WARN_ON_ONCE(argowner != current); if (oldowner == current) { /* * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ return 1; } newowner = argowner; } newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; /* Owner died? */ if (!pi_state->owner) newtid |= FUTEX_OWNER_DIED; err = futex_get_value_locked(&uval, uaddr); if (err) goto handle_err; for (;;) { newval = (uval & FUTEX_OWNER_DIED) | newtid; err = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval); if (err) goto handle_err; if (curval == uval) break; uval = curval; } /* * We fixed up user space. Now we need to fix the pi_state * itself. */ pi_state_update_owner(pi_state, newowner); return argowner == current; /* * In order to reschedule or handle a page fault, we need to drop the * locks here. In the case of a fault, this gives the other task * (either the highest priority waiter itself or the task which stole * the rtmutex) the chance to try the fixup of the pi_state. So once we * are back from handling the fault we need to check the pi_state after * reacquiring the locks and before trying to do another fixup. When * the fixup has been done already we simply return. * * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely * drop hb->lock since the caller owns the hb -> futex_q relation. * Dropping the pi_mutex->wait_lock requires the state revalidate. */ handle_err: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); spin_unlock(q->lock_ptr); switch (err) { case -EFAULT: err = fault_in_user_writeable(uaddr); break; case -EAGAIN: cond_resched(); err = 0; break; default: WARN_ON_ONCE(1); break; } futex_q_lockptr_lock(q); raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); /* * Check if someone else fixed it for us: */ if (pi_state->owner != oldowner) return argowner == current; /* Retry if err was -EAGAIN or the fault in succeeded */ if (!err) goto retry; /* * fault_in_user_writeable() failed so user state is immutable. At * best we can make the kernel state consistent but user state will * be most likely hosed and any subsequent unlock operation will be * rejected due to PI futex rule [10]. * * Ensure that the rtmutex owner is also the pi_state owner despite * the user space value claiming something different. There is no * point in unlocking the rtmutex if current is the owner as it * would need to wait until the next waiter has taken the rtmutex * to guarantee consistent state. Keep it simple. Userspace asked * for this wreckaged state. * * The rtmutex has an owner - either current or some other * task. See the EAGAIN loop above. 
*/ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); return err; } static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *argowner) { struct futex_pi_state *pi_state = q->pi_state; int ret; lockdep_assert_held(q->lock_ptr); raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ret = __fixup_pi_state_owner(uaddr, q, argowner); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); return ret; } /** * fixup_pi_owner() - Post lock pi_state and corner case management * @uaddr: user address of the futex * @q: futex_q (contains pi_state and access to the rt_mutex) * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) * * After attempting to lock an rt_mutex, this function is called to cleanup * the pi_state owner as well as handle race conditions that may allow us to * acquire the lock. Must be called with the hb lock held. * * Return: * - 1 - success, lock taken; * - 0 - success, lock not taken; * - <0 - on error (-EFAULT) */ int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked) { if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: * * Speculative pi_state->owner read (we don't hold wait_lock); * since we own the lock pi_state->owner == current is the * stable state, anything else needs more attention. */ if (q->pi_state->owner != current) return fixup_pi_state_owner(uaddr, q, current); return 1; } /* * If we didn't get the lock; check if anybody stole it from us. In * that case, we need to fix up the uval to point to them instead of * us, otherwise bad things happen. [10] * * Another speculative read; pi_state->owner == current is unstable * but needs our attention. */ if (q->pi_state->owner == current) return fixup_pi_state_owner(uaddr, q, NULL); /* * Paranoia check. If we did not take the lock, then we should not be * the owner of the rt_mutex. Warn and establish consistent state. */ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) return fixup_pi_state_owner(uaddr, q, current); return 0; } /* * Userspace tried a 0 -> TID atomic transition of the futex value * and failed. The kernel side here does the whole locking operation: * if there are waiters then it will block as a consequence of relying * on rt-mutexes, it does PI, etc. (Due to races the kernel might see * a 0 value of the futex too.). * * Also serves as futex trylock_pi()'ing, and due semantics. */ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to; struct task_struct *exiting = NULL; struct rt_mutex_waiter rt_waiter; struct futex_q q = futex_q_init; DEFINE_WAKE_Q(wake_q); int res, ret; if (!IS_ENABLED(CONFIG_FUTEX_PI)) return -ENOSYS; if (refill_pi_state_cache()) return -ENOMEM; to = futex_setup_timer(time, &timeout, flags, 0); retry: ret = get_futex_key(uaddr, flags, &q.key, FUTEX_WRITE); if (unlikely(ret != 0)) goto out; retry_private: if (1) { CLASS(hb, hb)(&q.key); futex_q_lock(&q, hb); ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, &exiting, 0); if (unlikely(ret)) { /* * Atomic work succeeded and we got the lock, * or failed. Either way, we do _not_ block. */ switch (ret) { case 1: /* We got the lock. */ ret = 0; goto out_unlock_put_key; case -EFAULT: goto uaddr_faulted; case -EBUSY: case -EAGAIN: /* * Two reasons for this: * - EBUSY: Task is exiting and we just wait for the * exit to complete. * - EAGAIN: The user space value changed. 
			 */
			futex_q_unlock(hb);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__futex_queue(&q, hb, current);

	if (trylock) {
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
		goto no_block;
	}

	/*
	 * Caution; releasing @hb in-scope. The hb->lock is still locked
	 * while the reference is dropped. The reference can not be dropped
	 * after the unlock because if a user-initiated resize is in progress
	 * then we might need to wake it. This can not be done after the
	 * rt_mutex_pre_schedule() invocation. The hb will remain valid because
	 * the thread, performing resize, will block on hb->lock during
	 * the requeue.
	 */
	futex_hash_put(no_free_ptr(hb));

	/*
	 * Must be done before we enqueue the waiter, here is unfortunately
	 * under the hb lock, but that *should* work because it does nothing.
	 */
	rt_mutex_pre_schedule();

	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling __rt_mutex_start_proxy_lock(). This
	 * interleaves with futex_unlock_pi() -- which does a similar lock
	 * handoff -- such that the latter can observe the futex_q::pi_state
	 * before __rt_mutex_start_proxy_lock() is done.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	/*
	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
	 * it sees the futex_q::pi_state.
	 */
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter,
					  current, &wake_q);
	raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);

	if (ret) {
		if (ret == 1)
			ret = 0;
		goto cleanup;
	}

	if (unlikely(to))
		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

cleanup:
	/*
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
	 * unwind the above, however we cannot lock hb->lock because
	 * rt_mutex already has a waiter enqueued and hb->lock can itself try
	 * and enqueue an rt_waiter through rtlock.
	 *
	 * Doing the cleanup without holding hb->lock can cause inconsistent
	 * state between hb and pi_state, but only in the direction of not
	 * seeing a waiter that is leaving.
	 *
	 * See futex_unlock_pi(), it deals with this inconsistency.
	 *
	 * There be dragons here, since we must deal with the inconsistency on
	 * the way out (here), it is impossible to detect/warn about the race
	 * the other way around (missing an incoming waiter).
	 *
	 * What could possibly go wrong...
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

	/*
	 * Now that the rt_waiter has been dequeued, it is safe to use
	 * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
	 * the state.
	 */
	futex_q_lockptr_lock(&q);
	/*
	 * Waiter is unqueued.
*/ rt_mutex_post_schedule(); no_block: /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_pi_owner(uaddr, &q, !ret); /* * If fixup_pi_owner() returned an error, propagate that. If it acquired * the lock, clear our -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; futex_unqueue_pi(&q); spin_unlock(q.lock_ptr); if (q.drop_hb_ref) { CLASS(hb, hb)(&q.key); /* Additional reference from futex_unlock_pi() */ futex_hash_put(hb); } goto out; out_unlock_put_key: futex_q_unlock(hb); goto out; uaddr_faulted: futex_q_unlock(hb); ret = fault_in_user_writeable(uaddr); if (ret) goto out; if (!(flags & FLAGS_SHARED)) goto retry_private; goto retry; } out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret != -EINTR ? ret : -ERESTARTNOINTR; } /* * Userspace attempted a TID -> 0 atomic transition, and failed. * This is the in-kernel slowpath: we look up the PI state (if any), * and do the rt-mutex unlock. */ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { u32 curval, uval, vpid = task_pid_vnr(current); union futex_key key = FUTEX_KEY_INIT; struct futex_q *top_waiter; int ret; if (!IS_ENABLED(CONFIG_FUTEX_PI)) return -ENOSYS; retry: if (get_user(uval, uaddr)) return -EFAULT; /* * We release only a lock we actually own: */ if ((uval & FUTEX_TID_MASK) != vpid) return -EPERM; ret = get_futex_key(uaddr, flags, &key, FUTEX_WRITE); if (ret) return ret; CLASS(hb, hb)(&key); spin_lock(&hb->lock); retry_hb: /* * Check waiters first. We do not trust user space values at * all and we at least want to know if user space fiddled * with the futex value instead of blindly unlocking. */ top_waiter = futex_top_waiter(hb, &key); if (top_waiter) { struct futex_pi_state *pi_state = top_waiter->pi_state; struct rt_mutex_waiter *rt_waiter; ret = -EINVAL; if (!pi_state) goto out_unlock; /* * If current does not own the pi_state then the futex is * inconsistent and user space fiddled with the futex value. */ if (pi_state->owner != current) goto out_unlock; /* * By taking wait_lock while still holding hb->lock, we ensure * there is no point where we hold neither; and thereby * wake_futex_pi() must observe any new waiters. * * Since the cleanup: case in futex_lock_pi() removes the * rt_waiter without holding hb->lock, it is possible for * wake_futex_pi() to not find a waiter while the above does, * in this case the waiter is on the way out and it can be * ignored. * * In particular; this forces __rt_mutex_start_proxy() to * complete such that we're guaranteed to observe the * rt_waiter. */ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); /* * Futex vs rt_mutex waiter state -- if there are no rt_mutex * waiters even though futex thinks there are, then the waiter * is leaving. The entry needs to be removed from the list so a * new futex_lock_pi() is not using this stale PI-state while * the futex is available in user space again. * There can be more than one task on its way out so it needs * to retry. */ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex); if (!rt_waiter) { /* * Acquire a reference for the leaving waiter to ensure * valid futex_q::lock_ptr. */ futex_hash_get(hb); top_waiter->drop_hb_ref = true; __futex_unqueue(top_waiter); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); goto retry_hb; } get_pi_state(pi_state); spin_unlock(&hb->lock); /* drops pi_state->pi_mutex.wait_lock */ ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter); put_pi_state(pi_state); /* * Success, we're done! No tricky corner cases. 
		 */
		if (!ret)
			return ret;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN)
			goto pi_retry;
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		return ret;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if ((ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, 0))) {
		spin_unlock(&hb->lock);
		switch (ret) {
		case -EFAULT:
			goto pi_faulted;

		case -EAGAIN:
			goto pi_retry;

		default:
			WARN_ON_ONCE(1);
			return ret;
		}
	}

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
	return ret;

pi_retry:
	cond_resched();
	goto retry;

pi_faulted:

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}
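For reference, the user-space side of the protocol whose slow paths the two functions above implement looks roughly like the sketch below. The fast paths are plain compare-and-swap transitions on the futex word (0 -> TID to lock, TID -> 0 to unlock); the kernel is entered only when a transition fails, typically because FUTEX_WAITERS is set. This is a minimal illustration, not a reference implementation: the helper names are invented, error handling is reduced to an EINTR retry, and gettid() assumes glibc 2.30 or later.

#include <errno.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Hypothetical helper: 0 -> TID in user space, FUTEX_LOCK_PI on contention. */
static void pi_lock(_Atomic uint32_t *futex)
{
	uint32_t expected = 0;

	if (atomic_compare_exchange_strong(futex, &expected, gettid()))
		return;				/* uncontended fast path */
	while (sys_futex((uint32_t *)futex, FUTEX_LOCK_PI, 0) != 0 &&
	       errno == EINTR)
		;				/* simplified error handling */
}

/* Hypothetical helper: TID -> 0 in user space, FUTEX_UNLOCK_PI if waiters. */
static void pi_unlock(_Atomic uint32_t *futex)
{
	uint32_t expected = gettid();

	if (atomic_compare_exchange_strong(futex, &expected, 0))
		return;				/* no waiters, no kernel entry */
	sys_futex((uint32_t *)futex, FUTEX_UNLOCK_PI, 0);
}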
// SPDX-License-Identifier: GPL-2.0
/*
 * LED Triggers for USB Activity
 *
 * Copyright 2014 Michal Sojka <sojka@merica.cz>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include "common.h"

#define BLINK_DELAY 30

DEFINE_LED_TRIGGER(ledtrig_usb_gadget);
DEFINE_LED_TRIGGER(ledtrig_usb_host);

void usb_led_activity(enum usb_led_event ev)
{
	struct led_trigger *trig = NULL;

	switch (ev) {
	case USB_LED_EVENT_GADGET:
		trig = ledtrig_usb_gadget;
		break;
	case USB_LED_EVENT_HOST:
		trig = ledtrig_usb_host;
		break;
	}
	/* led_trigger_blink_oneshot() handles trig == NULL gracefully */
	led_trigger_blink_oneshot(trig, BLINK_DELAY, BLINK_DELAY, 0);
}
EXPORT_SYMBOL_GPL(usb_led_activity);

void __init ledtrig_usb_init(void)
{
	led_trigger_register_simple("usb-gadget", &ledtrig_usb_gadget);
	led_trigger_register_simple("usb-host", &ledtrig_usb_host);
}

void __exit ledtrig_usb_exit(void)
{
	led_trigger_unregister_simple(ledtrig_usb_gadget);
	led_trigger_unregister_simple(ledtrig_usb_host);
}
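The file above is the whole pattern: a trigger object, a register/unregister pair, and a one-shot blink from the data path. A minimal sketch of the same pattern for a hypothetical driver (all names below are invented for illustration):

#include <linux/init.h>
#include <linux/leds.h>
#include <linux/module.h>

DEFINE_LED_TRIGGER(ledtrig_example);

/* Called from the driver's data path to signal activity. */
static void example_activity(void)
{
	/* one-shot blink: 30 ms on, 30 ms off, no inversion */
	led_trigger_blink_oneshot(ledtrig_example, 30, 30, 0);
}

static int __init example_trig_init(void)
{
	led_trigger_register_simple("example-activity", &ledtrig_example);
	example_activity();	/* demo blink; normally driven by traffic */
	return 0;
}
module_init(example_trig_init);

static void __exit example_trig_exit(void)
{
	led_trigger_unregister_simple(ledtrig_example);
}
module_exit(example_trig_exit);

MODULE_DESCRIPTION("Example LED activity trigger (illustrative only)");
MODULE_LICENSE("GPL");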
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Line 6 Linux USB driver
 *
 * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at)
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/module.h>

#include <sound/core.h>

#include "driver.h"

#define VARIAX_STARTUP_DELAY1 1000
#define VARIAX_STARTUP_DELAY3 100
#define VARIAX_STARTUP_DELAY4 100

/* Stages of Variax startup procedure */
enum {
	VARIAX_STARTUP_VERSIONREQ,
	VARIAX_STARTUP_ACTIVATE,
	VARIAX_STARTUP_SETUP,
};

enum {
	LINE6_PODXTLIVE_VARIAX,
	LINE6_VARIAX
};

struct usb_line6_variax {
	/* Generic Line 6 USB data */
	struct usb_line6 line6;

	/* Buffer for activation code */
	unsigned char *buffer_activate;

	/* Current progress in startup procedure */
	int startup_progress;
};

#define line6_to_variax(x) container_of(x, struct usb_line6_variax, line6)

#define VARIAX_OFFSET_ACTIVATE 7

/* This message is sent by the device during initialization and identifies
   the connected guitar version. */
static const char variax_init_version[] = {
	0xf0, 0x7e, 0x7f, 0x06, 0x02, 0x00, 0x01, 0x0c,
	0x07, 0x00, 0x00, 0x00
};

/* This message is the last one sent by the device during initialization. */
static const char variax_init_done[] = {
	0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x6b
};

static const char variax_activate[] = {
	0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x2a, 0x01,
	0xf7
};

static void variax_activate_async(struct usb_line6_variax *variax, int a)
{
	variax->buffer_activate[VARIAX_OFFSET_ACTIVATE] = a;
	line6_send_raw_message_async(&variax->line6, variax->buffer_activate,
				     sizeof(variax_activate));
}

/*
	Variax startup procedure.
	This is a sequence of functions with special requirements (e.g., must
	not run immediately after initialization, must not run in interrupt
	context). After the last one has finished, the device is ready to use.
*/
static void variax_startup(struct usb_line6 *line6)
{
	struct usb_line6_variax *variax = line6_to_variax(line6);

	switch (variax->startup_progress) {
	case VARIAX_STARTUP_VERSIONREQ:
		/* repeat request until getting the response */
		schedule_delayed_work(&line6->startup_work,
				      msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
		/* request firmware version: */
		line6_version_request_async(line6);
		break;
	case VARIAX_STARTUP_ACTIVATE:
		/* activate device: */
		variax_activate_async(variax, 1);
		variax->startup_progress = VARIAX_STARTUP_SETUP;
		schedule_delayed_work(&line6->startup_work,
				      msecs_to_jiffies(VARIAX_STARTUP_DELAY4));
		break;
	case VARIAX_STARTUP_SETUP:
		/* ALSA audio interface: */
		snd_card_register(variax->line6.card);
		break;
	}
}

/*
	Process a completely received message.
*/ static void line6_variax_process_message(struct usb_line6 *line6) { struct usb_line6_variax *variax = line6_to_variax(line6); const unsigned char *buf = variax->line6.buffer_message; switch (buf[0]) { case LINE6_RESET: dev_info(variax->line6.ifcdev, "VARIAX reset\n"); break; case LINE6_SYSEX_BEGIN: if (memcmp(buf + 1, variax_init_version + 1, sizeof(variax_init_version) - 1) == 0) { if (variax->startup_progress >= VARIAX_STARTUP_ACTIVATE) break; variax->startup_progress = VARIAX_STARTUP_ACTIVATE; cancel_delayed_work(&line6->startup_work); schedule_delayed_work(&line6->startup_work, msecs_to_jiffies(VARIAX_STARTUP_DELAY3)); } else if (memcmp(buf + 1, variax_init_done + 1, sizeof(variax_init_done) - 1) == 0) { /* notify of complete initialization: */ if (variax->startup_progress >= VARIAX_STARTUP_SETUP) break; cancel_delayed_work(&line6->startup_work); schedule_delayed_work(&line6->startup_work, 0); } break; } } /* Variax destructor. */ static void line6_variax_disconnect(struct usb_line6 *line6) { struct usb_line6_variax *variax = line6_to_variax(line6); kfree(variax->buffer_activate); } /* Try to init workbench device. */ static int variax_init(struct usb_line6 *line6, const struct usb_device_id *id) { struct usb_line6_variax *variax = line6_to_variax(line6); line6->process_message = line6_variax_process_message; line6->disconnect = line6_variax_disconnect; line6->startup = variax_startup; /* initialize USB buffers: */ variax->buffer_activate = kmemdup(variax_activate, sizeof(variax_activate), GFP_KERNEL); if (variax->buffer_activate == NULL) return -ENOMEM; /* initiate startup procedure: */ schedule_delayed_work(&line6->startup_work, msecs_to_jiffies(VARIAX_STARTUP_DELAY1)); return 0; } #define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod) #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) /* table of devices that work with this driver */ static const struct usb_device_id variax_id_table[] = { { LINE6_IF_NUM(0x4650, 1), .driver_info = LINE6_PODXTLIVE_VARIAX }, { LINE6_DEVICE(0x534d), .driver_info = LINE6_VARIAX }, {} }; MODULE_DEVICE_TABLE(usb, variax_id_table); static const struct line6_properties variax_properties_table[] = { [LINE6_PODXTLIVE_VARIAX] = { .id = "PODxtLive", .name = "PODxt Live", .capabilities = LINE6_CAP_CONTROL | LINE6_CAP_CONTROL_MIDI, .altsetting = 1, .ep_ctrl_r = 0x86, .ep_ctrl_w = 0x05, .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_VARIAX] = { .id = "Variax", .name = "Variax Workbench", .capabilities = LINE6_CAP_CONTROL | LINE6_CAP_CONTROL_MIDI, .altsetting = 1, .ep_ctrl_r = 0x82, .ep_ctrl_w = 0x01, /* no audio channel */ } }; /* Probe USB device. */ static int variax_probe(struct usb_interface *interface, const struct usb_device_id *id) { return line6_probe(interface, id, "Line6-Variax", &variax_properties_table[id->driver_info], variax_init, sizeof(struct usb_line6_variax)); } static struct usb_driver variax_driver = { .name = KBUILD_MODNAME, .probe = variax_probe, .disconnect = line6_disconnect, #ifdef CONFIG_PM .suspend = line6_suspend, .resume = line6_resume, .reset_resume = line6_resume, #endif .id_table = variax_id_table, }; module_usb_driver(variax_driver); MODULE_DESCRIPTION("Variax Workbench USB driver"); MODULE_LICENSE("GPL");
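One layout detail worth making explicit: variax_probe() passes sizeof(struct usb_line6_variax) to line6_probe(), which allocates that much but hands back a struct usb_line6 *. That works because the generic struct is the first member, so the same allocation aliases both views, and line6_to_variax() recovers the private one via container_of(). A self-contained, user-space-compilable analogue of that contract (hypothetical types, container_of inlined for illustration):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_dev {
	int id;
};

struct private_dev {
	struct generic_dev gen;	/* must stay first: the core allocates the
				 * whole object but only sees this view */
	int extra;
};

/* Analogue of line6_to_variax(): recover the private view. */
static struct private_dev *to_private(struct generic_dev *g)
{
	return container_of(g, struct private_dev, gen);
}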
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}

static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;

	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
skb_probe_transport_header(skb); skb_reset_inner_headers(skb); } static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) { unsigned int cpu = *stored_cpu, cpu_index, i; if (unlikely(cpu >= nr_cpu_ids || !cpumask_test_cpu(cpu, cpu_online_mask))) { cpu_index = id % cpumask_weight(cpu_online_mask); cpu = cpumask_first(cpu_online_mask); for (i = 0; i < cpu_index; ++i) cpu = cpumask_next(cpu, cpu_online_mask); *stored_cpu = cpu; } return cpu; } /* This function is racy, in the sense that it's called while last_cpu is * unlocked, so it could return the same CPU twice. Adding locking or using * atomic sequence numbers is slower though, and the consequences of racing are * harmless, so live with it. */ static inline int wg_cpumask_next_online(int *last_cpu) { int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask); if (cpu >= nr_cpu_ids) cpu = cpumask_first(cpu_online_mask); WRITE_ONCE(*last_cpu, cpu); return cpu; } void wg_prev_queue_init(struct prev_queue *queue); /* Multi producer */ bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); /* Single consumer */ struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); /* Single consumer */ static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) { if (queue->peeked) return queue->peeked; queue->peeked = wg_prev_queue_dequeue(queue); return queue->peeked; } /* Single consumer */ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) { queue->peeked = NULL; } static inline int wg_queue_enqueue_per_device_and_peer( struct crypt_queue *device_queue, struct prev_queue *peer_queue, struct sk_buff *skb, struct workqueue_struct *wq) { int cpu; atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); /* We first queue this up for the peer ingestion, but the consumer * will wait for the state to change to CRYPTED or DEAD before. */ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) return -ENOSPC; /* Then we queue it up in the device queue, which consumes the * packet as soon as it can. */ cpu = wg_cpumask_next_online(&device_queue->last_cpu); if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) return -EPIPE; queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); return 0; } static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) { /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us. */ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); atomic_set_release(&PACKET_CB(skb)->state, state); queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &peer->transmit_packet_work); wg_peer_put(peer); } static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) { /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us. */ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); atomic_set_release(&PACKET_CB(skb)->state, state); napi_schedule(&peer->napi); wg_peer_put(peer); } #ifdef DEBUG bool wg_packet_counter_selftest(void); #endif #endif /* _WG_QUEUEING_H */
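To make the comment in wg_queue_enqueue_per_device_and_peer() concrete: the device ring fans encryption work out across CPUs, while the per-peer prev_queue preserves transmit order, so the consumer drains in order and stops at the first packet whose state is still UNCRYPTED. A simplified sketch of that loop (modeled on what wg_packet_tx_worker() does in send.c; example_send() is hypothetical, and the member name tx_queue is assumed from the .c files):

/* Hypothetical, simplified single-consumer drain loop. */
static void example_tx_worker(struct wg_peer *peer)
{
	struct sk_buff *skb;

	while ((skb = wg_prev_queue_peek(&peer->tx_queue)) != NULL) {
		enum packet_state state =
			atomic_read_acquire(&PACKET_CB(skb)->state);

		if (state == PACKET_STATE_UNCRYPTED)
			break;		/* keep ordering; retry when rescheduled */

		wg_prev_queue_drop_peeked(&peer->tx_queue);
		if (likely(state == PACKET_STATE_CRYPTED))
			example_send(peer, skb);	/* hypothetical */
		else
			kfree_skb(skb);			/* PACKET_STATE_DEAD */
	}
}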
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 */

#ifndef IB_ADDR_H
#define IB_ADDR_H

#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <net/net_namespace.h>

/**
 * struct rdma_dev_addr - Contains resolved RDMA hardware addresses
 * @src_dev_addr: Source MAC address.
 * @dst_dev_addr: Destination MAC address.
 * @broadcast: Broadcast address of the device.
 * @dev_type: The interface hardware type of the device.
 * @bound_dev_if: An optional device interface index.
 * @transport: The transport type used.
 * @net: Network namespace containing the bound_dev_if net_dev.
 * @sgid_attr: GID attribute to use for identified SGID
 */
struct rdma_dev_addr {
	unsigned char src_dev_addr[MAX_ADDR_LEN];
	unsigned char dst_dev_addr[MAX_ADDR_LEN];
	unsigned char broadcast[MAX_ADDR_LEN];
	unsigned short dev_type;
	int bound_dev_if;
	enum rdma_transport_type transport;
	struct net *net;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_network_type network;
	int hoplimit;
};

/**
 * rdma_translate_ip - Translate a local IP address to an RDMA hardware
 *   address.
 *
 * The dev_addr->net field must be initialized.
 */
int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr);

/**
 * rdma_resolve_ip - Resolve source and destination IP addresses to
 *   RDMA hardware addresses.
 * @src_addr: An optional source address to use in the resolution.  If a
 *   source address is not provided, a usable address will be returned via
 *   the callback.
 * @dst_addr: The destination address to resolve.
 * @addr: A reference to a data location that will receive the resolved
 *   addresses.  The data location must remain valid until the callback has
 *   been invoked.  The net field of the addr struct must be valid.
 * @timeout_ms: Amount of time to wait for the address resolution to complete.
 * @callback: Call invoked once address resolution has completed, timed out,
 *   or been canceled.  A status of 0 indicates success.
 * @resolve_by_gid_attr: Resolve the ip based on the GID attribute from
 *   rdma_dev_addr.
 * @context: User-specified context associated with the call.
*/ int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr, struct rdma_dev_addr *addr, unsigned long timeout_ms, void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context), bool resolve_by_gid_attr, void *context); void rdma_addr_cancel(struct rdma_dev_addr *addr); int rdma_addr_size(const struct sockaddr *addr); int rdma_addr_size_in6(struct sockaddr_in6 *addr); int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr); static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) { return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9]; } static inline void ib_addr_set_pkey(struct rdma_dev_addr *dev_addr, u16 pkey) { dev_addr->broadcast[8] = pkey >> 8; dev_addr->broadcast[9] = (unsigned char) pkey; } static inline void ib_addr_get_mgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { memcpy(gid, dev_addr->broadcast + 4, sizeof *gid); } static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr) { return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0; } static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev) { return is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0xffff; } static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) { switch (addr->sa_family) { case AF_INET: ipv6_addr_set_v4mapped(((struct sockaddr_in *) addr)->sin_addr.s_addr, (struct in6_addr *)gid); break; case AF_INET6: *(struct in6_addr *)&gid->raw = ((struct sockaddr_in6 *)addr)->sin6_addr; break; default: return -EINVAL; } return 0; } /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid) { if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { struct sockaddr_in *out_in = (struct sockaddr_in *)out; memset(out_in, 0, sizeof(*out_in)); out_in->sin_family = AF_INET; memcpy(&out_in->sin_addr.s_addr, gid->raw + 12, 4); } else { struct sockaddr_in6 *out_in = (struct sockaddr_in6 *)out; memset(out_in, 0, sizeof(*out_in)); out_in->sin6_family = AF_INET6; memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); } } /* * rdma_get/set_sgid/dgid() APIs are applicable to IB, and iWarp. * They are not applicable to RoCE. * RoCE GIDs are derived from the IP addresses. */ static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(*gid)); } static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { memcpy(dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid); } static inline void rdma_addr_get_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { memcpy(gid, dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid); } static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid); } static inline enum ib_mtu iboe_get_mtu(int mtu) { /* * Reduce IB headers from effective IBoE MTU. 
*/ mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + IB_ICRC_BYTES); if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096)) return IB_MTU_4096; else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048)) return IB_MTU_2048; else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024)) return IB_MTU_1024; else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512)) return IB_MTU_512; else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256)) return IB_MTU_256; else return 0; } static inline int rdma_link_local_addr(struct in6_addr *addr) { if (addr->s6_addr32[0] == htonl(0xfe800000) && addr->s6_addr32[1] == 0) return 1; return 0; } static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac) { memcpy(mac, &addr->s6_addr[8], 3); memcpy(mac + 3, &addr->s6_addr[13], 3); mac[0] ^= 2; } static inline int rdma_is_multicast_addr(struct in6_addr *addr) { __be32 ipv4_addr; if (addr->s6_addr[0] == 0xff) return 1; ipv4_addr = addr->s6_addr32[3]; return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr)); } static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac) { int i; mac[0] = 0x33; mac[1] = 0x33; for (i = 2; i < 6; ++i) mac[i] = addr->s6_addr[i + 10]; } static inline u16 rdma_get_vlan_id(union ib_gid *dgid) { u16 vid; vid = dgid->raw[11] << 8 | dgid->raw[12]; return vid < 0x1000 ? vid : 0xffff; } static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev) { return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : NULL; } #endif /* IB_ADDR_H */
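As a concrete illustration of the GID mapping pair above: rdma_ip2gid() turns an AF_INET address into a v4-mapped GID (::ffff:a.b.c.d), and rdma_gid2ip() inverts it, keyed off ipv6_addr_v4mapped(). A small sketch with an illustrative address (error handling elided):

static void example_gid_roundtrip(void)
{
	struct sockaddr_in in = {
		.sin_family = AF_INET,
		.sin_addr = { .s_addr = cpu_to_be32(0xc0000201) }, /* 192.0.2.1 */
	};
	struct sockaddr_storage out;
	union ib_gid gid;

	rdma_ip2gid((struct sockaddr *)&in, &gid);
	/* gid.raw[10..11] == 0xff,0xff and gid.raw[12..15] == 192.0.2.1 */
	rdma_gid2ip((struct sockaddr *)&out, &gid);
	/* out is AF_INET again, carrying 192.0.2.1 */
}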
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_FILESTREAM_H__
#define __XFS_FILESTREAM_H__

struct xfs_mount;
struct xfs_inode;
struct xfs_bmalloca;
struct xfs_alloc_arg;

int xfs_filestream_mount(struct xfs_mount *mp);
void xfs_filestream_unmount(struct xfs_mount *mp);
void xfs_filestream_deassociate(struct xfs_inode *ip);
int xfs_filestream_select_ag(struct xfs_bmalloca *ap,
		struct xfs_alloc_arg *args, xfs_extlen_t *blen);

static inline int
xfs_inode_is_filestream(
	struct xfs_inode	*ip)
{
	return xfs_has_filestreams(ip->i_mount) ||
		(ip->i_diflags & XFS_DIFLAG_FILESTREAM);
}

#endif /* __XFS_FILESTREAM_H__ */
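For context on how this predicate is consumed: filestream allocation-group selection applies when the filesystem is mounted with the filestreams option (xfs_has_filestreams()) or the inode inherited XFS_DIFLAG_FILESTREAM from its parent directory. A hypothetical allocator-side caller would gate on it roughly like this (a sketch, not the actual xfs_bmap_btalloc() code; example_default_select_ag() is invented):

static int example_select_ag(struct xfs_bmalloca *ap,
			     struct xfs_alloc_arg *args,
			     xfs_extlen_t *blen)
{
	/* Prefer filestream placement when the predicate holds. */
	if (xfs_inode_is_filestream(ap->ip))
		return xfs_filestream_select_ag(ap, args, blen);
	return example_default_select_ag(ap, args, blen);	/* hypothetical */
}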
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <linux/iommu.h>

#include <asm/processor.h>
#include <asm/pkru.h>
#include <asm/fpu/sched.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#include <asm/fred.h>
#include <asm/msr.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
		 const char *log_lvl)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs, log_lvl);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n",
	       log_lvl, regs->ax, regs->bx, regs->cx);
	printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n",
	       log_lvl, regs->dx, regs->si, regs->di);
	printk("%sRBP: %016lx R08: %016lx R09: %016lx\n",
	       log_lvl, regs->bp, regs->r8, regs->r9);
	printk("%sR10: %016lx R11: %016lx R12: %016lx\n",
	       log_lvl, regs->r10, regs->r11, regs->r12);
	printk("%sR13: %016lx R14: %016lx R15: %016lx\n",
	       log_lvl, regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrq(MSR_FS_BASE, fs);
		rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
		printk("%sFS:  %016lx GS:  %016lx\n",
		       log_lvl, fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrq(MSR_FS_BASE, fs);
	rdmsrq(MSR_GS_BASE, gs);
	rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk("%sFS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       log_lvl, fs, fsindex, gs, gsindex, shadowgs);
	printk("%sCS:  %04x DS: %04x ES: %04x CR0: %016lx\n",
	       log_lvl, regs->cs, ds, es, cr0);
	printk("%sCR2: %016lx CR3: %016lx 
CR4: %016lx\n", log_lvl, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); get_debugreg(d6, 6); get_debugreg(d7, 7); /* Only print out debug registers if they are in their non-default state. */ if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && (d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))) { printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n", log_lvl, d0, d1, d2); printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n", log_lvl, d3, d6, d7); } if (cr4 & X86_CR4_PKE) printk("%sPKRU: %08x\n", log_lvl, read_pkru()); } void release_thread(struct task_struct *dead_task) { WARN_ON(dead_task->mm); } enum which_selector { FS, GS }; /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr unsigned long __rdgsbase_inactive(void) { unsigned long gsbase; lockdep_assert_irqs_disabled(); /* * SWAPGS is no longer needed thus NOT allowed with FRED because * FRED transitions ensure that an operating system can _always_ * operate with its own GS base address: * - For events that occur in ring 3, FRED event delivery swaps * the GS base address with the IA32_KERNEL_GS_BASE MSR. * - ERETU (the FRED transition that returns to ring 3) also swaps * the GS base address with the IA32_KERNEL_GS_BASE MSR. * * And the operating system can still setup the GS segment for a * user thread without the need of loading a user thread GS with: * - Using LKGS, available with FRED, to modify other attributes * of the GS segment without compromising its ability always to * operate with its own GS base address. * - Accessing the GS segment base address for a user thread as * before using RDMSR or WRMSR on the IA32_KERNEL_GS_BASE MSR. * * Note, LKGS loads the GS base address into the IA32_KERNEL_GS_BASE * MSR instead of the GS segment’s descriptor cache. As such, the * operating system never changes its runtime GS base address. */ if (!cpu_feature_enabled(X86_FEATURE_FRED) && !cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); gsbase = rdgsbase(); native_swapgs(); } else { instrumentation_begin(); rdmsrq(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } return gsbase; } /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr void __wrgsbase_inactive(unsigned long gsbase) { lockdep_assert_irqs_disabled(); if (!cpu_feature_enabled(X86_FEATURE_FRED) && !cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); wrgsbase(gsbase); native_swapgs(); } else { instrumentation_begin(); wrmsrq(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } } /* * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are * not available. The goal is to be reasonably fast on non-FSGSBASE systems. * It's forcibly inlined because it'll generate better code and this function * is hot. */ static __always_inline void save_base_legacy(struct task_struct *prev_p, unsigned short selector, enum which_selector which) { if (likely(selector == 0)) { /* * On Intel (without X86_BUG_NULL_SEG), the segment base could * be the pre-existing saved base or it could be zero. 
On AMD * (with X86_BUG_NULL_SEG), the segment base could be almost * anything. * * This branch is very hot (it's hit twice on almost every * context switch between 64-bit programs), and avoiding * the RDMSR helps a lot, so we just assume that whatever * value is already saved is correct. This matches historical * Linux behavior, so it won't break existing applications. * * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we * report that the base is zero, it needs to actually be zero: * see the corresponding logic in load_seg_legacy. */ } else { /* * If the selector is 1, 2, or 3, then the base is zero on * !X86_BUG_NULL_SEG CPUs and could be anything on * X86_BUG_NULL_SEG CPUs. In the latter case, Linux * has never attempted to preserve the base across context * switches. * * If selector > 3, then it refers to a real segment, and * saving the base isn't necessary. */ if (which == FS) prev_p->thread.fsbase = 0; else prev_p->thread.gsbase = 0; } } static __always_inline void save_fsgs(struct task_struct *task) { savesegment(fs, task->thread.fsindex); savesegment(gs, task->thread.gsindex); if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* * If FSGSBASE is enabled, we can't make any useful guesses * about the base, and user code expects us to save the current * value. Fortunately, reading the base directly is efficient. */ task->thread.fsbase = rdfsbase(); task->thread.gsbase = __rdgsbase_inactive(); } else { save_base_legacy(task, task->thread.fsindex, FS); save_base_legacy(task, task->thread.gsindex, GS); } } /* * While a process is running,current->thread.fsbase and current->thread.gsbase * may not match the corresponding CPU registers (see save_base_legacy()). */ void current_save_fsgs(void) { unsigned long flags; /* Interrupts need to be off for FSGSBASE */ local_irq_save(flags); save_fsgs(current); local_irq_restore(flags); } #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL_GPL(current_save_fsgs); #endif static __always_inline void loadseg(enum which_selector which, unsigned short sel) { if (which == FS) loadsegment(fs, sel); else load_gs_index(sel); } static __always_inline void load_seg_legacy(unsigned short prev_index, unsigned long prev_base, unsigned short next_index, unsigned long next_base, enum which_selector which) { if (likely(next_index <= 3)) { /* * The next task is using 64-bit TLS, is not using this * segment at all, or is having fun with arcane CPU features. */ if (next_base == 0) { /* * Nasty case: on AMD CPUs, we need to forcibly zero * the base. */ if (static_cpu_has_bug(X86_BUG_NULL_SEG)) { loadseg(which, __USER_DS); loadseg(which, next_index); } else { /* * We could try to exhaustively detect cases * under which we can skip the segment load, * but there's really only one case that matters * for performance: if both the previous and * next states are fully zeroed, we can skip * the load. * * (This assumes that prev_base == 0 has no * false positives. This is the case on * Intel-style CPUs.) */ if (likely(prev_index | next_index | prev_base)) loadseg(which, next_index); } } else { if (prev_index != next_index) loadseg(which, next_index); wrmsrq(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, next_base); } } else { /* * The next task is using a real segment. Loading the selector * is sufficient. */ loadseg(which, next_index); } } /* * Store prev's PKRU value and load next's PKRU value if they differ. PKRU * is not XSTATE managed on context switch because that would require a * lookup in the task's FPU xsave buffer and require to keep that updated * in various places. 
*/ static __always_inline void x86_pkru_load(struct thread_struct *prev, struct thread_struct *next) { if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return; /* Stash the prev task's value: */ prev->pkru = rdpkru(); /* * PKRU writes are slightly expensive. Avoid them when not * strictly necessary: */ if (prev->pkru != next->pkru) wrpkru(next->pkru); } static __always_inline void x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) { if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* Update the FS and GS selectors if they could have changed. */ if (unlikely(prev->fsindex || next->fsindex)) loadseg(FS, next->fsindex); if (unlikely(prev->gsindex || next->gsindex)) loadseg(GS, next->gsindex); /* Update the bases. */ wrfsbase(next->fsbase); __wrgsbase_inactive(next->gsbase); } else { load_seg_legacy(prev->fsindex, prev->fsbase, next->fsindex, next->fsbase, FS); load_seg_legacy(prev->gsindex, prev->gsbase, next->gsindex, next->gsbase, GS); } } unsigned long x86_fsgsbase_read_task(struct task_struct *task, unsigned short selector) { unsigned short idx = selector >> 3; unsigned long base; if (likely((selector & SEGMENT_TI_MASK) == 0)) { if (unlikely(idx >= GDT_ENTRIES)) return 0; /* * There are no user segments in the GDT with nonzero bases * other than the TLS segments. */ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return 0; idx -= GDT_ENTRY_TLS_MIN; base = get_desc_base(&task->thread.tls_array[idx]); } else { #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; /* * If performance here mattered, we could protect the LDT * with RCU. This is a slow path, though, so we can just * take the mutex. */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); mutex_unlock(&task->mm->context.lock); #else base = 0; #endif } return base; } unsigned long x86_gsbase_read_cpu_inactive(void) { unsigned long gsbase; if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); gsbase = __rdgsbase_inactive(); local_irq_restore(flags); } else { rdmsrq(MSR_KERNEL_GS_BASE, gsbase); } return gsbase; } void x86_gsbase_write_cpu_inactive(unsigned long gsbase) { if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); __wrgsbase_inactive(gsbase); local_irq_restore(flags); } else { wrmsrq(MSR_KERNEL_GS_BASE, gsbase); } } unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; if (task == current) fsbase = x86_fsbase_read_cpu(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); return fsbase; } unsigned long x86_gsbase_read_task(struct task_struct *task) { unsigned long gsbase; if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); return gsbase; } void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) { WARN_ON_ONCE(task == current); task->thread.fsbase = fsbase; } void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) { WARN_ON_ONCE(task == current); task->thread.gsbase = gsbase; } static void start_thread_common(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp, u16 _cs, u16 _ss, u16 _ds) { WARN_ON_ONCE(regs != current_pt_regs()); if 
(static_cpu_has(X86_BUG_NULL_SEG)) { /* Loading zero below won't clear the base. */ loadsegment(fs, __USER_DS); load_gs_index(__USER_DS); } reset_thread_features(); loadsegment(fs, 0); loadsegment(es, _ds); loadsegment(ds, _ds); load_gs_index(0); regs->ip = new_ip; regs->sp = new_sp; regs->csx = _cs; regs->ssx = _ss; /* * Allow single-step trap and NMI when starting a new task, thus * once the new task enters user space, single-step trap and NMI * are both enabled immediately. * * Entering a new task is logically speaking a return from a * system call (exec, fork, clone, etc.). As such, if ptrace * enables single stepping a single step exception should be * allowed to trigger immediately upon entering user space. * This is not optional. * * NMI should *never* be disabled in user space. As such, this * is an optional, opportunistic way to catch errors. * * Paranoia: High-order 48 bits above the lowest 16 bit SS are * discarded by the legacy IRET instruction on all Intel, AMD, * and Cyrix/Centaur/VIA CPUs, thus can be set unconditionally, * even when FRED is not enabled. But we choose the safer side * to use these bits only when FRED is enabled. */ if (cpu_feature_enabled(X86_FEATURE_FRED)) { regs->fred_ss.swevent = true; regs->fred_ss.nmi = true; } regs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0); } EXPORT_SYMBOL_GPL(start_thread); #ifdef CONFIG_COMPAT void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32) { start_thread_common(regs, new_ip, new_sp, x32 ? __USER_CS : __USER32_CS, __USER_DS, __USER_DS); } #endif /* * switch_to(x,y) should switch tasks from x to y. * * This could still be optimized: * - fold all the options into a flag word and test it with a single test. * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. * Function graph tracer not supported too. */ __no_kmsan_checks __visible __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(hardirq_stack_inuse)); switch_fpu(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * * (e.g. xen_load_tls()) */ save_fsgs(prev_p); /* * Load TLS before restoring any segments so that segment loads * reference the correct GDT entries. */ load_TLS(next, cpu); /* * Leave lazy mode, flushing any hypercalls made here. This * must be done after loading TLS entries in the GDT but before * loading segments that might reference them. */ arch_end_context_switch(next_p); /* Switch DS and ES. * * Reading them only returns the selectors, but writing them (if * nonzero) loads the full descriptor from the GDT or LDT. The * LDT for next is loaded in switch_mm, and the GDT is loaded * above. * * We therefore need to write new values to the segment * registers on every context switch unless both the new and old * values are zero. * * Note that we don't need to do anything for CS and SS, as * those are saved and restored as part of pt_regs. 
*/ savesegment(es, prev->es); if (unlikely(next->es | prev->es)) loadsegment(es, next->es); savesegment(ds, prev->ds); if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); x86_fsgsbase_load(prev, next); x86_pkru_load(prev, next); /* * Switch the PDA and FPU contexts. */ raw_cpu_write(current_task, next_p); raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); /* Reload sp0. */ update_task_stack(next_p); switch_to_extra(prev_p, next_p); if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) { /* * AMD CPUs have a misfeature: SYSRET sets the SS selector but * does not update the cached descriptor. As a result, if we * do SYSRET while SS is NULL, we'll end up in user mode with * SS apparently equal to __USER_DS but actually unusable. * * The straightforward workaround would be to fix it up just * before SYSRET, but that would slow down the system call * fast paths. Instead, we ensure that SS is never NULL in * system call context. We do this by replacing NULL SS * selectors at every context switch. SYSCALL sets up a valid * SS, so the only way to get NULL is to re-enter the kernel * from CPL 3 through an interrupt. Since that can't happen * in the same task as a running syscall, we are guaranteed to * context switch between every interrupt vector entry and a * subsequent SYSRET. * * We read SS first because SS reads are much faster than * writes. Out of caution, we force SS to __KERNEL_DS even if * it previously had a different non-NULL value. */ unsigned short ss_sel; savesegment(ss, ss_sel); if (ss_sel != __KERNEL_DS) loadsegment(ss, __KERNEL_DS); } /* Load the Intel cache allocation PQR MSR. */ resctrl_arch_sched_in(next_p); return prev_p; } void set_personality_64bit(void) { /* inherit personality from parent */ /* Make sure to be in 64bit mode */ clear_thread_flag(TIF_ADDR32); /* Pretend that this comes from a 64bit execve */ task_pt_regs(current)->orig_ax = __NR_execve; current_thread_info()->status &= ~TS_COMPAT; if (current->mm) __set_bit(MM_CONTEXT_HAS_VSYSCALL, &current->mm->context.flags); /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } static void __set_personality_x32(void) { #ifdef CONFIG_X86_X32_ABI if (current->mm) current->mm->context.flags = 0; current->personality &= ~READ_IMPLIES_EXEC; /* * in_32bit_syscall() uses the presence of the x32 syscall bit * flag to determine compat status. The x86 mmap() code relies on * the syscall bitness so set x32 syscall bit right here to make * in_32bit_syscall() work during exec(). * * Pretend to come from a x32 execve. */ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; current_thread_info()->status &= ~TS_COMPAT; #endif } static void __set_personality_ia32(void) { #ifdef CONFIG_IA32_EMULATION if (current->mm) { /* * uprobes applied to this MM need to know this and * cannot use user_64bit_mode() at that time. 
*/ __set_bit(MM_CONTEXT_UPROBE_IA32, &current->mm->context.flags); } current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; current_thread_info()->status |= TS_COMPAT; #endif } void set_personality_ia32(bool x32) { /* Make sure to be in 32bit mode */ set_thread_flag(TIF_ADDR32); if (x32) __set_personality_x32(); else __set_personality_ia32(); } EXPORT_SYMBOL_GPL(set_personality_ia32); #ifdef CONFIG_CHECKPOINT_RESTORE static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) { int ret; ret = map_vdso_once(image, addr); if (ret) return ret; return (long)image->size; } #endif #ifdef CONFIG_ADDRESS_MASKING #define LAM_U57_BITS 6 static void enable_lam_func(void *__mm) { struct mm_struct *mm = __mm; unsigned long lam; if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { lam = mm_lam_cr3_mask(mm); write_cr3(__read_cr3() | lam); cpu_tlbstate_update_lam(lam, mm_untag_mask(mm)); } } static void mm_enable_lam(struct mm_struct *mm) { mm->context.lam_cr3_mask = X86_CR3_LAM_U57; mm->context.untag_mask = ~GENMASK(62, 57); /* * Even though the process must still be single-threaded at this * point, kernel threads may be using the mm. IPI those kernel * threads if they exist. */ on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); } static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) return -ENODEV; /* PTRACE_ARCH_PRCTL */ if (current->mm != mm) return -EINVAL; if (mm_valid_pasid(mm) && !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags)) return -EINVAL; if (mmap_write_lock_killable(mm)) return -EINTR; /* * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from * being enabled unless the process is single threaded: */ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; } if (!nr_bits || nr_bits > LAM_U57_BITS) { mmap_write_unlock(mm); return -EINVAL; } mm_enable_lam(mm); mmap_write_unlock(mm); return 0; } #endif long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) { int ret = 0; switch (option) { case ARCH_SET_GS: { if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * ARCH_SET_GS has always overwritten the index * and the base. Zero is the most sensible value * to put in the index, and is the only value that * makes any sense if FSGSBASE is unavailable. */ if (task == current) { loadseg(GS, 0); x86_gsbase_write_cpu_inactive(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.gsbase. */ task->thread.gsbase = arg2; } else { task->thread.gsindex = 0; x86_gsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_SET_FS: { /* * Not strictly needed for %fs, but do it for symmetry * with %gs */ if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * Set the selector to 0 for the same reason * as %gs above. */ if (task == current) { loadseg(FS, 0); x86_fsbase_write_cpu(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.fsbase. 
*/ task->thread.fsbase = arg2; } else { task->thread.fsindex = 0; x86_fsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_GET_FS: { unsigned long base = x86_fsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } case ARCH_GET_GS: { unsigned long base = x86_gsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } #ifdef CONFIG_CHECKPOINT_RESTORE # ifdef CONFIG_X86_X32_ABI case ARCH_MAP_VDSO_X32: return prctl_map_vdso(&vdso_image_x32, arg2); # endif # ifdef CONFIG_IA32_EMULATION case ARCH_MAP_VDSO_32: return prctl_map_vdso(&vdso_image_32, arg2); # endif case ARCH_MAP_VDSO_64: return prctl_map_vdso(&vdso_image_64, arg2); #endif #ifdef CONFIG_ADDRESS_MASKING case ARCH_GET_UNTAG_MASK: return put_user(task->mm->context.untag_mask, (unsigned long __user *)arg2); case ARCH_ENABLE_TAGGED_ADDR: return prctl_enable_tagged_addr(task->mm, arg2); case ARCH_FORCE_TAGGED_SVA: if (current != task) return -EINVAL; set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags); return 0; case ARCH_GET_MAX_TAG_BITS: if (!cpu_feature_enabled(X86_FEATURE_LAM)) return put_user(0, (unsigned long __user *)arg2); else return put_user(LAM_U57_BITS, (unsigned long __user *)arg2); #endif case ARCH_SHSTK_ENABLE: case ARCH_SHSTK_DISABLE: case ARCH_SHSTK_LOCK: case ARCH_SHSTK_UNLOCK: case ARCH_SHSTK_STATUS: return shstk_prctl(task, option, arg2); default: ret = -EINVAL; break; } return ret; }
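For context, the ARCH_SET_GS/ARCH_GET_GS pair handled above is reachable from user space through arch_prctl(2). A minimal sketch, assuming an x86-64 Linux system with <asm/prctl.h> available; the base address 0x12345000 is an arbitrary illustrative value below TASK_SIZE_MAX:

#define _GNU_SOURCE
#include <asm/prctl.h>	/* ARCH_SET_GS, ARCH_GET_GS */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long base = 0;

	/* ARCH_SET_GS zeroes the GS selector and installs the new base. */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0x12345000UL))
		return 1;
	/* ARCH_GET_GS copies the saved base back via put_user(). */
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base))
		return 1;
	printf("gsbase = %#lx\n", base);	/* expected: 0x12345000 */
	return 0;
}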
// SPDX-License-Identifier: GPL-2.0-only /* * Landlock - System call implementations and user space interfaces * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI * Copyright © 2021-2025 Microsoft Corporation */ #include <asm/current.h> #include <linux/anon_inodes.h> #include <linux/bitops.h> #include <linux/build_bug.h> #include <linux/capability.h> #include <linux/cleanup.h> #include <linux/compiler_types.h> #include <linux/dcache.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/limits.h> #include <linux/mount.h> #include <linux/path.h> #include <linux/sched.h> #include <linux/security.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/types.h> #include <linux/uaccess.h> #include <uapi/linux/landlock.h> #include "cred.h" #include "domain.h" #include "fs.h" #include "limits.h" #include "net.h" #include "ruleset.h" #include "setup.h" static bool is_initialized(void) { if (likely(landlock_initialized)) return true; pr_warn_once( "Disabled but requested by user space.
" "You should enable Landlock at boot time: " "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n"); return false; } /** * copy_min_struct_from_user - Safe future-proof argument copying * * Extend copy_struct_from_user() to check for consistent user buffer. * * @dst: Kernel space pointer or NULL. * @ksize: Actual size of the data pointed to by @dst. * @ksize_min: Minimal required size to be copied. * @src: User space pointer or NULL. * @usize: (Alleged) size of the data pointed to by @src. */ static __always_inline int copy_min_struct_from_user(void *const dst, const size_t ksize, const size_t ksize_min, const void __user *const src, const size_t usize) { /* Checks buffer inconsistencies. */ BUILD_BUG_ON(!dst); if (!src) return -EFAULT; /* Checks size ranges. */ BUILD_BUG_ON(ksize <= 0); BUILD_BUG_ON(ksize < ksize_min); if (usize < ksize_min) return -EINVAL; if (usize > PAGE_SIZE) return -E2BIG; /* Copies user buffer and fills with zeros. */ return copy_struct_from_user(dst, ksize, src, usize); } /* * This function only contains arithmetic operations with constants, leading to * BUILD_BUG_ON(). The related code is evaluated and checked at build time, * but it is then ignored thanks to compiler optimizations. */ static void build_check_abi(void) { struct landlock_ruleset_attr ruleset_attr; struct landlock_path_beneath_attr path_beneath_attr; struct landlock_net_port_attr net_port_attr; size_t ruleset_size, path_beneath_size, net_port_size; /* * For each user space ABI structures, first checks that there is no * hole in them, then checks that all architectures have the same * struct size. */ ruleset_size = sizeof(ruleset_attr.handled_access_fs); ruleset_size += sizeof(ruleset_attr.handled_access_net); ruleset_size += sizeof(ruleset_attr.scoped); BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size); BUILD_BUG_ON(sizeof(ruleset_attr) != 24); path_beneath_size = sizeof(path_beneath_attr.allowed_access); path_beneath_size += sizeof(path_beneath_attr.parent_fd); BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size); BUILD_BUG_ON(sizeof(path_beneath_attr) != 12); net_port_size = sizeof(net_port_attr.allowed_access); net_port_size += sizeof(net_port_attr.port); BUILD_BUG_ON(sizeof(net_port_attr) != net_port_size); BUILD_BUG_ON(sizeof(net_port_attr) != 16); } /* Ruleset handling */ static int fop_ruleset_release(struct inode *const inode, struct file *const filp) { struct landlock_ruleset *ruleset = filp->private_data; landlock_put_ruleset(ruleset); return 0; } static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf, const size_t size, loff_t *const ppos) { /* Dummy handler to enable FMODE_CAN_READ. */ return -EINVAL; } static ssize_t fop_dummy_write(struct file *const filp, const char __user *const buf, const size_t size, loff_t *const ppos) { /* Dummy handler to enable FMODE_CAN_WRITE. */ return -EINVAL; } /* * A ruleset file descriptor enables to build a ruleset by adding (i.e. * writing) rule after rule, without relying on the task's context. This * reentrant design is also used in a read way to enforce the ruleset on the * current task. */ static const struct file_operations ruleset_fops = { .release = fop_ruleset_release, .read = fop_dummy_read, .write = fop_dummy_write, }; /* * The Landlock ABI version should be incremented for each new Landlock-related * user space visible change (e.g. Landlock syscalls). 
This version should * only be incremented once per Linux release, and the date in * Documentation/userspace-api/landlock.rst should be updated to reflect the * UAPI change. */ const int landlock_abi_version = 7; /** * sys_landlock_create_ruleset - Create a new ruleset * * @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). * @flags: Supported values: * * - %LANDLOCK_CREATE_RULESET_VERSION * - %LANDLOCK_CREATE_RULESET_ERRATA * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. * * If %LANDLOCK_CREATE_RULESET_VERSION or %LANDLOCK_CREATE_RULESET_ERRATA is * set, then @attr must be NULL and @size must be 0. * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EINVAL: unknown @flags, or unknown access, or unknown scope, or too small @size; * - %E2BIG: @attr or @size inconsistencies; * - %EFAULT: @attr or @size inconsistencies; * - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs. * * .. kernel-doc:: include/uapi/linux/landlock.h * :identifiers: landlock_create_ruleset_flags */ SYSCALL_DEFINE3(landlock_create_ruleset, const struct landlock_ruleset_attr __user *const, attr, const size_t, size, const __u32, flags) { struct landlock_ruleset_attr ruleset_attr; struct landlock_ruleset *ruleset; int err, ruleset_fd; /* Build-time checks. */ build_check_abi(); if (!is_initialized()) return -EOPNOTSUPP; if (flags) { if (attr || size) return -EINVAL; if (flags == LANDLOCK_CREATE_RULESET_VERSION) return landlock_abi_version; if (flags == LANDLOCK_CREATE_RULESET_ERRATA) return landlock_errata; return -EINVAL; } /* Copies raw user space buffer. */ err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr), offsetofend(typeof(ruleset_attr), handled_access_fs), attr, size); if (err) return err; /* Checks content (and 32-bits cast). */ if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != LANDLOCK_MASK_ACCESS_FS) return -EINVAL; /* Checks network content (and 32-bits cast). */ if ((ruleset_attr.handled_access_net | LANDLOCK_MASK_ACCESS_NET) != LANDLOCK_MASK_ACCESS_NET) return -EINVAL; /* Checks IPC scoping content (and 32-bits cast). */ if ((ruleset_attr.scoped | LANDLOCK_MASK_SCOPE) != LANDLOCK_MASK_SCOPE) return -EINVAL; /* Checks arguments and transforms to kernel struct. */ ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs, ruleset_attr.handled_access_net, ruleset_attr.scoped); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); /* Creates anonymous FD referring to the ruleset. */ ruleset_fd = anon_inode_getfd("[landlock-ruleset]", &ruleset_fops, ruleset, O_RDWR | O_CLOEXEC); if (ruleset_fd < 0) landlock_put_ruleset(ruleset); return ruleset_fd; } /* * Returns an owned ruleset from a FD. It is thus needed to call * landlock_put_ruleset() on the return value. */ static struct landlock_ruleset *get_ruleset_from_fd(const int fd, const fmode_t mode) { CLASS(fd, ruleset_f)(fd); struct landlock_ruleset *ruleset; if (fd_empty(ruleset_f)) return ERR_PTR(-EBADF); /* Checks FD type and access right. 
*/ if (fd_file(ruleset_f)->f_op != &ruleset_fops) return ERR_PTR(-EBADFD); if (!(fd_file(ruleset_f)->f_mode & mode)) return ERR_PTR(-EPERM); ruleset = fd_file(ruleset_f)->private_data; if (WARN_ON_ONCE(ruleset->num_layers != 1)) return ERR_PTR(-EINVAL); landlock_get_ruleset(ruleset); return ruleset; } /* Path handling */ /* * @path: Must call put_path(@path) after the call if it succeeded. */ static int get_path_from_fd(const s32 fd, struct path *const path) { CLASS(fd_raw, f)(fd); BUILD_BUG_ON(!__same_type( fd, ((struct landlock_path_beneath_attr *)NULL)->parent_fd)); if (fd_empty(f)) return -EBADF; /* * Forbids ruleset FDs, internal filesystems (e.g. nsfs), including * pseudo filesystems that will never be mountable (e.g. sockfs, * pipefs). */ if ((fd_file(f)->f_op == &ruleset_fops) || (fd_file(f)->f_path.mnt->mnt_flags & MNT_INTERNAL) || (fd_file(f)->f_path.dentry->d_sb->s_flags & SB_NOUSER) || d_is_negative(fd_file(f)->f_path.dentry) || IS_PRIVATE(d_backing_inode(fd_file(f)->f_path.dentry))) return -EBADFD; *path = fd_file(f)->f_path; path_get(path); return 0; } static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, const void __user *const rule_attr) { struct landlock_path_beneath_attr path_beneath_attr; struct path path; int res, err; access_mask_t mask; /* Copies raw user space buffer. */ res = copy_from_user(&path_beneath_attr, rule_attr, sizeof(path_beneath_attr)); if (res) return -EFAULT; /* * Informs about useless rule: empty allowed_access (i.e. deny rules) * are ignored in path walks. */ if (!path_beneath_attr.allowed_access) return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. */ mask = ruleset->access_masks[0].fs; if ((path_beneath_attr.allowed_access | mask) != mask) return -EINVAL; /* Gets and checks the new rule. */ err = get_path_from_fd(path_beneath_attr.parent_fd, &path); if (err) return err; /* Imports the new rule. */ err = landlock_append_fs_rule(ruleset, &path, path_beneath_attr.allowed_access); path_put(&path); return err; } static int add_rule_net_port(struct landlock_ruleset *ruleset, const void __user *const rule_attr) { struct landlock_net_port_attr net_port_attr; int res; access_mask_t mask; /* Copies raw user space buffer. */ res = copy_from_user(&net_port_attr, rule_attr, sizeof(net_port_attr)); if (res) return -EFAULT; /* * Informs about useless rule: empty allowed_access (i.e. deny rules) * are ignored by network actions. */ if (!net_port_attr.allowed_access) return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. */ mask = landlock_get_net_access_mask(ruleset, 0); if ((net_port_attr.allowed_access | mask) != mask) return -EINVAL; /* Denies inserting a rule with port greater than 65535. */ if (net_port_attr.port > U16_MAX) return -EINVAL; /* Imports the new rule. */ return landlock_append_net_rule(ruleset, net_port_attr.port, net_port_attr.allowed_access); } /** * sys_landlock_add_rule - Add a new rule to a ruleset * * @ruleset_fd: File descriptor tied to the ruleset that should be extended * with the new rule. * @rule_type: Identify the structure type pointed to by @rule_attr: * %LANDLOCK_RULE_PATH_BENEATH or %LANDLOCK_RULE_NET_PORT. * @rule_attr: Pointer to a rule (matching the @rule_type). * @flags: Must be 0. * * This system call enables to define a new rule and add it to an existing * ruleset. 
* * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EAFNOSUPPORT: @rule_type is %LANDLOCK_RULE_NET_PORT but TCP/IP is not * supported by the running kernel; * - %EINVAL: @flags is not 0; * - %EINVAL: The rule accesses are inconsistent (i.e. * &landlock_path_beneath_attr.allowed_access or * &landlock_net_port_attr.allowed_access is not a subset of the ruleset * handled accesses) * - %EINVAL: &landlock_net_port_attr.port is greater than 65535; * - %ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access is * 0); * - %EBADF: @ruleset_fd is not a file descriptor for the current thread, or a * member of @rule_attr is not a file descriptor as expected; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of * @rule_attr is not the expected file descriptor type; * - %EPERM: @ruleset_fd has no write access to the underlying ruleset; * - %EFAULT: @rule_attr was not a valid address. */ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, const enum landlock_rule_type, rule_type, const void __user *const, rule_attr, const __u32, flags) { struct landlock_ruleset *ruleset __free(landlock_put_ruleset) = NULL; if (!is_initialized()) return -EOPNOTSUPP; /* No flag for now. */ if (flags) return -EINVAL; /* Gets and checks the ruleset. */ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: return add_rule_path_beneath(ruleset, rule_attr); case LANDLOCK_RULE_NET_PORT: return add_rule_net_port(ruleset, rule_attr); default: return -EINVAL; } } /* Enforcement */ /** * sys_landlock_restrict_self - Enforce a ruleset on the calling thread * * @ruleset_fd: File descriptor tied to the ruleset to merge with the target. * @flags: Supported values: * * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF * * This system call enables to enforce a Landlock ruleset on the current * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its * namespace or is running with no_new_privs. This avoids scenarios where * unprivileged tasks can affect the behavior of privileged children. * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EINVAL: @flags contains an unknown bit. * - %EBADF: @ruleset_fd is not a file descriptor for the current thread; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor; * - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the * current thread is not running with no_new_privs, or it doesn't have * %CAP_SYS_ADMIN in its namespace. * - %E2BIG: The maximum number of stacked rulesets is reached for the current * thread. * * .. kernel-doc:: include/uapi/linux/landlock.h * :identifiers: landlock_restrict_self_flags */ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, flags) { struct landlock_ruleset *new_dom, *ruleset __free(landlock_put_ruleset) = NULL; struct cred *new_cred; struct landlock_cred_security *new_llcred; bool __maybe_unused log_same_exec, log_new_exec, log_subdomains, prev_log_subdomains; if (!is_initialized()) return -EOPNOTSUPP; /* * Similar checks as for seccomp(2), except that an -EPERM may be * returned. 
*/ if (!task_no_new_privs(current) && !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; if ((flags | LANDLOCK_MASK_RESTRICT_SELF) != LANDLOCK_MASK_RESTRICT_SELF) return -EINVAL; /* Translates "off" flag to boolean. */ log_same_exec = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF); /* Translates "on" flag to boolean. */ log_new_exec = !!(flags & LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON); /* Translates "off" flag to boolean. */ log_subdomains = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF); /* * It is allowed to set LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF with * -1 as ruleset_fd, but no other flag must be set. */ if (!(ruleset_fd == -1 && flags == LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF)) { /* Gets and checks the ruleset. */ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); } /* Prepares new credentials. */ new_cred = prepare_creds(); if (!new_cred) return -ENOMEM; new_llcred = landlock_cred(new_cred); #ifdef CONFIG_AUDIT prev_log_subdomains = !new_llcred->log_subdomains_off; new_llcred->log_subdomains_off = !prev_log_subdomains || !log_subdomains; #endif /* CONFIG_AUDIT */ /* * The only case when a ruleset may not be set is if * LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF is set and ruleset_fd is -1. * We could optimize this case by not calling commit_creds() if this flag * was already set, but it is not worth the complexity. */ if (!ruleset) return commit_creds(new_cred); /* * There is no possible race condition while copying and manipulating * the current credentials because they are dedicated per thread. */ new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset); if (IS_ERR(new_dom)) { abort_creds(new_cred); return PTR_ERR(new_dom); } #ifdef CONFIG_AUDIT new_dom->hierarchy->log_same_exec = log_same_exec; new_dom->hierarchy->log_new_exec = log_new_exec; if ((!log_same_exec && !log_new_exec) || !prev_log_subdomains) new_dom->hierarchy->log_status = LANDLOCK_LOG_DISABLED; #endif /* CONFIG_AUDIT */ /* Replaces the old (prepared) domain. */ landlock_put_ruleset(new_llcred->domain); new_llcred->domain = new_dom; #ifdef CONFIG_AUDIT new_llcred->domain_exec |= BIT(new_dom->num_layers - 1); #endif /* CONFIG_AUDIT */ return commit_creds(new_cred); }
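Taken together, the three syscalls above compose into the usual create, add rules, then self-restrict sequence. A minimal user-space sketch, assuming a libc recent enough to define the SYS_landlock_* numbers; the /tmp rule and the single handled access are illustrative only:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	int ruleset_fd;

	/* 1. Create a ruleset handling (here) only file write accesses. */
	ruleset_fd = syscall(SYS_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		exit(1);

	/* 2. Allow writes beneath /tmp only. */
	path_beneath.parent_fd = open("/tmp", O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0 ||
	    syscall(SYS_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		exit(1);
	close(path_beneath.parent_fd);

	/* 3. Self-restrict: no_new_privs is required unless the task has
	 * CAP_SYS_ADMIN in its user namespace. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
		exit(1);
	close(ruleset_fd);

	/* From here on, opening a file for writing outside /tmp fails. */
	return 0;
}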
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel module to match various things tied to sockets associated with
 * locally generated outgoing packets.
 *
 * (C) 2000 Marc Boucher <marc@mbsi.ca>
 *
 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/cred.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_owner.h>

static int owner_check(const struct xt_mtchk_param *par)
{
	struct xt_owner_match_info *info = par->matchinfo;
	struct net *net = par->net;

	if (info->match & ~XT_OWNER_MASK)
		return -EINVAL;

	/* Only allow the common case where the userns of the writer
	 * matches the userns of the network namespace.
	 */
	if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
	    (current_user_ns() != net->user_ns))
		return -EINVAL;

	/* Ensure the uids are valid */
	if (info->match & XT_OWNER_UID) {
		kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
		kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);

		if (!uid_valid(uid_min) || !uid_valid(uid_max) ||
		    (info->uid_max < info->uid_min) ||
		    uid_lt(uid_max, uid_min)) {
			return -EINVAL;
		}
	}

	/* Ensure the gids are valid */
	if (info->match & XT_OWNER_GID) {
		kgid_t gid_min = make_kgid(net->user_ns, info->gid_min);
		kgid_t gid_max = make_kgid(net->user_ns, info->gid_max);

		if (!gid_valid(gid_min) || !gid_valid(gid_max) ||
		    (info->gid_max < info->gid_min) ||
		    gid_lt(gid_max, gid_min)) {
			return -EINVAL;
		}
	}

	return 0;
}

static bool owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_owner_match_info *info = par->matchinfo;
	const struct file *filp;
	struct sock *sk = skb_to_full_sk(skb);
	struct net *net = xt_net(par);

	if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
		return (info->match ^ info->invert) == 0;
	else if (info->match & info->invert & XT_OWNER_SOCKET)
		/*
		 * Socket exists but user wanted ! --socket-exists.
		 * (Single ampersands intended.)
		 */
		return false;

	read_lock_bh(&sk->sk_callback_lock);
	filp = sk->sk_socket ?
	       sk->sk_socket->file : NULL;
	if (filp == NULL) {
		read_unlock_bh(&sk->sk_callback_lock);
		return ((info->match ^ info->invert) &
		        (XT_OWNER_UID | XT_OWNER_GID)) == 0;
	}

	if (info->match & XT_OWNER_UID) {
		kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
		kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);

		if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
		     uid_lte(filp->f_cred->fsuid, uid_max)) ^
		    !(info->invert & XT_OWNER_UID)) {
			read_unlock_bh(&sk->sk_callback_lock);
			return false;
		}
	}

	if (info->match & XT_OWNER_GID) {
		unsigned int i, match = false;
		kgid_t gid_min = make_kgid(net->user_ns, info->gid_min);
		kgid_t gid_max = make_kgid(net->user_ns, info->gid_max);
		struct group_info *gi = filp->f_cred->group_info;

		if (gid_gte(filp->f_cred->fsgid, gid_min) &&
		    gid_lte(filp->f_cred->fsgid, gid_max))
			match = true;

		if (!match && (info->match & XT_OWNER_SUPPL_GROUPS) && gi) {
			for (i = 0; i < gi->ngroups; ++i) {
				kgid_t group = gi->gid[i];

				if (gid_gte(group, gid_min) &&
				    gid_lte(group, gid_max)) {
					match = true;
					break;
				}
			}
		}

		if (match ^ !(info->invert & XT_OWNER_GID)) {
			read_unlock_bh(&sk->sk_callback_lock);
			return false;
		}
	}

	read_unlock_bh(&sk->sk_callback_lock);
	return true;
}

static struct xt_match owner_mt_reg __read_mostly = {
	.name       = "owner",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.checkentry = owner_check,
	.match      = owner_mt,
	.matchsize  = sizeof(struct xt_owner_match_info),
	.hooks      = (1 << NF_INET_LOCAL_OUT) |
	              (1 << NF_INET_POST_ROUTING),
	.me         = THIS_MODULE,
};

static int __init owner_mt_init(void)
{
	return xt_register_match(&owner_mt_reg);
}

static void __exit owner_mt_exit(void)
{
	xt_unregister_match(&owner_mt_reg);
}

module_init(owner_mt_init);
module_exit(owner_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: socket owner matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_owner");
MODULE_ALIAS("ip6t_owner");
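The early returns in owner_mt() rely on XOR truth tables that are easy to misread. The sketch below is plain user-space C, with the XT_OWNER_* flags redefined locally for illustration (the values match linux/netfilter/xt_owner.h); it evaluates the no-socket case, (info->match ^ info->invert) == 0, showing that a socketless packet matches only when every selected criterion is inverted:

#include <stdio.h>

/* Local stand-ins for the UAPI flag bits. */
#define XT_OWNER_UID	0x01
#define XT_OWNER_GID	0x02
#define XT_OWNER_SOCKET	0x04

/* The no-socket early return in owner_mt(): the packet matches only if
 * every requested criterion (UID/GID/socket-exists) was inverted. */
static int matches_without_socket(unsigned char match, unsigned char invert)
{
	return (match ^ invert) == 0;
}

int main(void)
{
	/* "-m owner --socket-exists": no socket, so no match. */
	printf("%d\n", matches_without_socket(XT_OWNER_SOCKET, 0));	/* 0 */
	/* "-m owner ! --socket-exists": no socket, so it matches. */
	printf("%d\n", matches_without_socket(XT_OWNER_SOCKET,
					      XT_OWNER_SOCKET));	/* 1 */
	/* "--uid-owner 0 ! --socket-exists": can never match here. */
	printf("%d\n", matches_without_socket(XT_OWNER_UID | XT_OWNER_SOCKET,
					      XT_OWNER_SOCKET));	/* 0 */
	return 0;
}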
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _EYTZINGER_H
#define _EYTZINGER_H

#include <linux/bitops.h>
#include <linux/log2.h>

#ifdef EYTZINGER_DEBUG
#include <linux/bug.h>
#define EYTZINGER_BUG_ON(cond)	BUG_ON(cond)
#else
#define EYTZINGER_BUG_ON(cond)
#endif

/*
 * Traversal for trees in eytzinger layout - a full binary tree laid out in an
 * array.
 *
 * Consider using an eytzinger tree any time you would otherwise be doing binary
 * search over an array. Binary search is a worst case scenario for branch
 * prediction and prefetching, but in an eytzinger tree every node's children
 * are adjacent in memory, thus we can prefetch children before knowing the
 * result of the comparison, assuming multiple nodes fit on a cacheline.
 *
 * Two variants are provided, for one based indexing and zero based indexing.
 *
 * Zero based indexing is more convenient, but one based indexing has better
 * alignment and thus better performance because each new level of the tree
 * starts at a power of two, and thus if element 0 was cacheline aligned, each
 * new level will be as well.
 */

static inline unsigned eytzinger1_child(unsigned i, unsigned child)
{
	EYTZINGER_BUG_ON(child > 1);

	return (i << 1) + child;
}

static inline unsigned eytzinger1_left_child(unsigned i)
{
	return eytzinger1_child(i, 0);
}

static inline unsigned eytzinger1_right_child(unsigned i)
{
	return eytzinger1_child(i, 1);
}

static inline unsigned eytzinger1_first(unsigned size)
{
	return size ? rounddown_pow_of_two(size) : 0;
}

static inline unsigned eytzinger1_last(unsigned size)
{
	return rounddown_pow_of_two(size + 1) - 1;
}

static inline unsigned eytzinger1_next(unsigned i, unsigned size)
{
	EYTZINGER_BUG_ON(i == 0 || i > size);

	if (eytzinger1_right_child(i) <= size) {
		i = eytzinger1_right_child(i);

		i <<= __fls(size) - __fls(i);
		i >>= i > size;
	} else {
		i >>= ffz(i) + 1;
	}

	return i;
}

static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
{
	EYTZINGER_BUG_ON(i == 0 || i > size);

	if (eytzinger1_left_child(i) <= size) {
		i = eytzinger1_left_child(i) + 1;

		i <<= __fls(size) - __fls(i);
		i -= 1;
		i >>= i > size;
	} else {
		i >>= __ffs(i) + 1;
	}

	return i;
}

static inline unsigned eytzinger1_extra(unsigned size)
{
	return size ?
		(size + 1 - rounddown_pow_of_two(size)) << 1 : 0;
}

static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,
					       unsigned extra)
{
	unsigned b = __fls(i);
	unsigned shift = __fls(size) - b;
	int s;

	EYTZINGER_BUG_ON(!i || i > size);

	i ^= 1U << b;
	i <<= 1;
	i |= 1;
	i <<= shift;

	/*
	 * sign bit trick:
	 *
	 * if (i > extra)
	 *	i -= (i - extra) >> 1;
	 */
	s = extra - i;
	i += (s >> 1) & (s >> 31);

	return i;
}

static inline unsigned __inorder_to_eytzinger1(unsigned i, unsigned size,
					       unsigned extra)
{
	unsigned shift;
	int s;

	EYTZINGER_BUG_ON(!i || i > size);

	/*
	 * sign bit trick:
	 *
	 * if (i > extra)
	 *	i += i - extra;
	 */
	s = extra - i;
	i -= s & (s >> 31);

	shift = __ffs(i);
	i >>= shift + 1;
	i |= 1U << (__fls(size) - shift);

	return i;
}

static inline unsigned eytzinger1_to_inorder(unsigned i, unsigned size)
{
	return __eytzinger1_to_inorder(i, size, eytzinger1_extra(size));
}

static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
{
	return __inorder_to_eytzinger1(i, size, eytzinger1_extra(size));
}

#define eytzinger1_for_each(_i, _size)			\
	for (unsigned (_i) = eytzinger1_first((_size));	\
	     (_i) != 0;					\
	     (_i) = eytzinger1_next((_i), (_size)))

/* Zero based indexing version: */

static inline unsigned eytzinger0_child(unsigned i, unsigned child)
{
	EYTZINGER_BUG_ON(child > 1);

	return (i << 1) + 1 + child;
}

static inline unsigned eytzinger0_left_child(unsigned i)
{
	return eytzinger0_child(i, 0);
}

static inline unsigned eytzinger0_right_child(unsigned i)
{
	return eytzinger0_child(i, 1);
}

static inline unsigned eytzinger0_first(unsigned size)
{
	return eytzinger1_first(size) - 1;
}

static inline unsigned eytzinger0_last(unsigned size)
{
	return eytzinger1_last(size) - 1;
}

static inline unsigned eytzinger0_next(unsigned i, unsigned size)
{
	return eytzinger1_next(i + 1, size) - 1;
}

static inline unsigned eytzinger0_prev(unsigned i, unsigned size)
{
	return eytzinger1_prev(i + 1, size) - 1;
}

static inline unsigned eytzinger0_extra(unsigned size)
{
	return eytzinger1_extra(size);
}

static inline unsigned __eytzinger0_to_inorder(unsigned i, unsigned size,
					       unsigned extra)
{
	return __eytzinger1_to_inorder(i + 1, size, extra) - 1;
}

static inline unsigned __inorder_to_eytzinger0(unsigned i, unsigned size,
					       unsigned extra)
{
	return __inorder_to_eytzinger1(i + 1, size, extra) - 1;
}

static inline unsigned eytzinger0_to_inorder(unsigned i, unsigned size)
{
	return __eytzinger0_to_inorder(i, size, eytzinger0_extra(size));
}

static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
{
	return __inorder_to_eytzinger0(i, size, eytzinger0_extra(size));
}

#define eytzinger0_for_each(_i, _size)			\
	for (unsigned (_i) = eytzinger0_first((_size));	\
	     (_i) != -1;				\
	     (_i) = eytzinger0_next((_i), (_size)))

#define eytzinger0_for_each_prev(_i, _size)		\
	for (unsigned (_i) = eytzinger0_last((_size));	\
	     (_i) != -1;				\
	     (_i) = eytzinger0_prev((_i), (_size)))

/* return greatest node <= @search, or -1 if not found */
static inline int eytzinger0_find_le(void *base, size_t nr, size_t size,
				     cmp_func_t cmp, const void *search)
{
	void *base1 = base - size;
	unsigned n = 1;

	while (n <= nr)
		n = eytzinger1_child(n, cmp(base1 + n * size, search) <= 0);
	n >>= __ffs(n) + 1;

	return n - 1;
}

/* return smallest node > @search, or -1 if not found */
static inline int eytzinger0_find_gt(void *base, size_t nr, size_t size,
				     cmp_func_t cmp, const void *search)
{
	void *base1 = base - size;
	unsigned n = 1;

	while (n <= nr)
		n = eytzinger1_child(n, cmp(base1 + n * size, search) <= 0);
	n >>= __ffs(n + 1) + 1;

	return n - 1;
}

/* return smallest
   node >= @search, or -1 if not found */
static inline int eytzinger0_find_ge(void *base, size_t nr, size_t size,
				     cmp_func_t cmp, const void *search)
{
	void *base1 = base - size;
	unsigned n = 1;

	while (n <= nr)
		n = eytzinger1_child(n, cmp(base1 + n * size, search) < 0);
	n >>= __ffs(n + 1) + 1;

	return n - 1;
}

#define eytzinger0_find(base, nr, size, _cmp, search)			\
({									\
	size_t _size = (size);						\
	void *_base1 = (void *)(base) - _size;				\
	const void *_search = (search);					\
	size_t _nr = (nr);						\
	size_t _i = 1;							\
	int _res;							\
									\
	while (_i <= _nr &&						\
	       (_res = _cmp(_search, _base1 + _i * _size)))		\
		_i = eytzinger1_child(_i, _res > 0);			\
	_i - 1;								\
})

void eytzinger0_sort_r(void *, size_t, size_t, cmp_r_func_t, swap_r_func_t,
		       const void *);
void eytzinger0_sort(void *, size_t, size_t, cmp_func_t, swap_func_t);

#endif /* _EYTZINGER_H */
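As a concrete illustration of the descent used by these helpers, here is a stand-alone user-space sketch of eytzinger0_find_le() for an int array (assumed names; __builtin_ctz stands in for the kernel's __ffs). For the sorted values 1..7, the zero-based eytzinger layout is {4, 2, 6, 1, 3, 5, 7}:

#include <stdio.h>

/* User-space rendition of eytzinger0_find_le(): returns the 0-based
 * eytzinger index of the greatest element <= search, or -1 if none.
 * arr1 points one element before the array so the descent can use
 * one-based indexing, where node n's children are 2n and 2n+1. */
static int eytzinger0_find_le_int(const int *arr, unsigned nr, int search)
{
	const int *arr1 = arr - 1;
	unsigned n = 1;

	while (n <= nr)
		n = (n << 1) + (arr1[n] <= search);
	n >>= __builtin_ctz(n) + 1;		/* kernel: __ffs(n) + 1 */

	return (int)n - 1;
}

int main(void)
{
	/* Sorted values 1..7 laid out in zero-based eytzinger order. */
	int arr[] = { 4, 2, 6, 1, 3, 5, 7 };

	printf("%d\n", eytzinger0_find_le_int(arr, 7, 5));	/* 5: arr[5] == 5 */
	printf("%d\n", eytzinger0_find_le_int(arr, 7, 0));	/* -1: nothing <= 0 */
	return 0;
}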
3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 
4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 
5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 
6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>
#include <linux/crc32.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "devmem.h"
#include "netmem_priv.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
					       GRO_MAX_HEAD_PAD))

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE				\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM					\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long as
 * the netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));
static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));
static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
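/* Example (illustrative sketch, not part of the original file): how a
 * subsystem might hook its own drop reasons into the table above. The
 * "foo" strings, list and subsystem ID are hypothetical; only the
 * register/unregister calls are the real API.
 *
 *	static const char * const foo_drop_reasons[] = {
 *		"FOO_BAD_HDR",			// hypothetical reason names
 *		"FOO_QUEUE_FULL",
 *	};
 *
 *	static const struct drop_reason_list foo_drop_reason_list = {
 *		.reasons = foo_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(foo_drop_reasons),
 *	};
 *
 *	// at init time (SKB_DROP_REASON_SUBSYS_FOO is hypothetical):
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_FOO,
 *				     &foo_drop_reason_list);
 *	// at exit time; synchronizes RCU before returning:
 *	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_FOO);
 */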
/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	local_lock_t bh_lock;
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	data = __page_frag_alloc_align(&nc->page, fragsz,
				       GFP_ATOMIC | __GFP_NOWARN, align_mask);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	return data;
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		fragsz = SKB_DATA_ALIGN(fragsz);
		data = __page_frag_alloc_align(nc, fragsz,
					       GFP_ATOMIC | __GFP_NOWARN,
					       align_mask);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag_align(fragsz, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
						      GFP_ATOMIC | __GFP_NOWARN,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count)) {
			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
			return NULL;
		}
	}

	skb = nc->skb_cache[--nc->skb_count];
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	kasan_mempool_unpoison_object(skb,
				      kmem_cache_size(net_hotdata.skbuff_cache));

	return skb;
}
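/* Example (illustrative sketch, not part of the original file): drivers
 * normally consume the per-CPU frag caches above through the inline
 * wrappers in <linux/skbuff.h> (netdev_alloc_frag()/napi_alloc_frag()),
 * pairing each buffer with skb_free_frag() or a later build_skb(). The
 * 1536-byte RX buffer layout below is hypothetical.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + 1536) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(truesize);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... DMA-map buf + NET_SKB_PAD for the NIC; on error:
 *	skb_free_frag(buf);
 */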
/**
 * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
 * @skbs: pointer to an at least @n-sized array to fill with skb pointers
 * @n: number of entries to provide
 *
 * Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes
 * the pointers into the provided array @skbs. If there are fewer entries
 * available, tries to replenish the cache and bulk-allocates the diff from
 * the MM layer if needed.
 * The heads are being zeroed with either memset() or %__GFP_ZERO, so they are
 * ready for {,__}build_skb_around() and don't have any data buffers attached.
 * Must be called *only* from the BH context.
 *
 * Return: number of successfully allocated skbs (@n if no actual allocation
 * needed or kmem_cache_alloc_bulk() didn't fail).
 */
u32 napi_skb_cache_get_bulk(void **skbs, u32 n)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 bulk, total = n;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	if (nc->skb_count >= n)
		goto get;

	/* Not enough cached skbs. Try refilling the cache first */
	bulk = min(NAPI_SKB_CACHE_SIZE - nc->skb_count, NAPI_SKB_CACHE_BULK);
	nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
					       GFP_ATOMIC | __GFP_NOWARN, bulk,
					       &nc->skb_cache[nc->skb_count]);
	if (likely(nc->skb_count >= n))
		goto get;

	/* Still not enough. Bulk-allocate the missing part directly, zeroed */
	n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
				   GFP_ATOMIC | __GFP_ZERO | __GFP_NOWARN,
				   n - nc->skb_count, &skbs[nc->skb_count]);
	if (likely(nc->skb_count >= n))
		goto get;

	/* kmem_cache didn't allocate the number we need, limit the output */
	total -= n - nc->skb_count;
	n = nc->skb_count;

get:
	for (u32 base = nc->skb_count - n, i = 0; i < n; i++) {
		u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache);

		skbs[i] = nc->skb_cache[base + i];

		kasan_mempool_unpoison_object(skbs[i], cache_size);
		memset(skbs[i], 0, offsetof(struct sk_buff, tail));
	}

	nc->skb_count -= n;
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	return total;
}
EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk);

static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
					 unsigned int size)
{
	struct skb_shared_info *shinfo;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
				     unsigned int *size)
{
	void *resized;

	/* Must find the allocation size (and grow it to match). */
	*size = ksize(data);
	/* krealloc() will immediately return "data" when
	 * "ksize(data)" is requested: it is the existing upper
	 * bounds. As a result, GFP_ATOMIC will be ignored. Note
	 * that this "new" pointer needs to be passed back to the
	 * caller for use so the __alloc_size hinting will be
	 * tracked correctly.
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}
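/* Example (illustrative sketch, not part of the original file): wrapping a
 * kmalloc'ed buffer in an skb with slab_build_skb(). The 128-byte length and
 * payload are hypothetical; on failure the caller still owns and must free
 * the buffer.
 *
 *	void *buf = kmalloc(128, GFP_ATOMIC);
 *	struct sk_buff *skb;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, payload, 128);		// hypothetical payload
 *	skb = slab_build_skb(buf);
 *	if (!skb) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	skb_put(skb, 128);
 */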
/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 *
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provide by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);
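/* Example (illustrative sketch, not part of the original file): the RX
 * pattern the Notes above describe, for a hypothetical driver with
 * 1500-byte buffers. Headroom and the shared_info tailroom are reserved
 * when the buffer is posted; build_skb() only attaches the sk_buff after
 * DMA completes.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + 1500) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = napi_alloc_frag(truesize);	// posted to the RX ring
 *
 *	// ... NIC wrote frame_len bytes at buf + NET_SKB_PAD ...
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);	// frame_len is hypothetical
 *	}
 */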
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
					    flags | __GFP_NOMEMALLOC |
					    __GFP_NOWARN,
					    node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
					    flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
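/* Example (illustrative sketch, not part of the original file): the common
 * alloc_skb() pattern built on top of the allocators in this file. The
 * sizes and the 64-byte header copy are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);		// headroom first
 *	skb_put_data(skb, hdr, 64);		// then fill the tail
 */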
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
	} else {
		local_bh_disable();
		local_lock_nested_bh(&napi_alloc_cache.bh_lock);

		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);

		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
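/* Example (illustrative sketch, not part of the original file): a driver RX
 * refill using the netdev_alloc_skb() wrapper over the function above; the
 * 1536-byte ring buffer size is hypothetical. NET_SKB_PAD headroom is
 * already reserved on return.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, 1536);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);		// align the IP header
 *	// ... hand skb->data to the NIC for DMA ...
 */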
/**
 *	napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	nc = this_cpu_ptr(&napi_alloc_cache);

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);

void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
			    int off, int size, unsigned int truesize)
{
	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_fill_netmem_desc(skb, i, netmem, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}
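/* Example (illustrative sketch, not part of the original file): attaching a
 * received page fragment to an skb with the skb_add_rx_frag() wrapper of
 * the netmem helper above. The page, offset and length are hypothetical.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			rx_off, rx_len, PAGE_SIZE);
 *	// skb->len, skb->data_len and skb->truesize are updated for us
 */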
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
		    unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	u32 size, truesize, len, max_head_size, off;
	struct sk_buff *skb = *pskb, *nskb;
	int err, i, head_off;
	void *data;

	/* XDP does not support fraglist so we need to linearize
	 * the skb.
	 */
	if (skb_has_frag_list(skb))
		return -EOPNOTSUPP;

	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
		return -ENOMEM;

	size = min_t(u32, skb->len, max_head_size);
	truesize = SKB_HEAD_ALIGN(size) + headroom;
	data = page_pool_dev_alloc_va(pool, &truesize);
	if (!data)
		return -ENOMEM;

	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_free_va(pool, data, true);
		return -ENOMEM;
	}

	skb_reserve(nskb, headroom);
	skb_copy_header(nskb, skb);
	skb_mark_for_recycle(nskb);

	err = skb_copy_bits(skb, 0, nskb->data, size);
	if (err) {
		consume_skb(nskb);
		return err;
	}
	skb_put(nskb, size);

	head_off = skb_headroom(nskb) - skb_headroom(skb);
	skb_headers_offset_update(nskb, head_off);

	off = size;
	len = skb->len - off;
	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
		struct page *page;
		u32 page_off;

		size = min_t(u32, len, PAGE_SIZE);
		truesize = size;

		page = page_pool_dev_alloc(pool, &page_off, &truesize);
		if (!page) {
			consume_skb(nskb);
			return -ENOMEM;
		}

		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
		err = skb_copy_bits(skb, off, page_address(page) + page_off,
				    size);
		if (err) {
			consume_skb(nskb);
			return err;
		}

		len -= size;
		off += size;
	}

	consume_skb(skb);
	*pskb = nskb;

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);

int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
			 const struct bpf_prog *prog)
{
	if (!prog->aux->xdp_has_frags)
		return -EINVAL;

	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);

#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
	netmem = netmem_compound_head(netmem);

	if (unlikely(!netmem_is_pp(netmem)))
		return false;

	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}
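/* Example (illustrative sketch, not part of the original file): a page_pool
 * driver opting its skbs into the recycling path checked above. "pool" is a
 * hypothetical page_pool created by the driver.
 *
 *	struct page *page = page_pool_dev_alloc_pages(pool);
 *	struct sk_buff *skb;
 *
 *	// ... NIC DMA into the page, then:
 *	skb = build_skb(page_address(page), PAGE_SIZE);
 *	if (skb)
 *		skb_mark_for_recycle(skb);	// sets skb->pp_recycle
 */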
/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb:	page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of a skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	netmem_ref head_netmem;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
		if (likely(netmem_is_pp(head_netmem)))
			page_pool_ref_netmem(head_netmem);
		else
			page_ref_inc(netmem_to_page(head_netmem));
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and it's
	 * dataref set to 0, which will trigger the recycling
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb, reason);
}
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

static __always_inline
bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
			  enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return false;

	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
			       u32_get_bits(reason,
					    SKB_DROP_REASON_SUBSYS_MASK) >=
				SKB_DROP_REASON_SUBSYS_NUM);

	if (reason == SKB_CONSUMED)
		trace_consume_skb(skb, __builtin_return_address(0));
	else
		trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
	return true;
}

/**
 *	sk_skb_reason_drop - free an sk_buff with special reason
 *	@sk: the socket to receive @skb, or NULL if not applicable
 *	@skb: buffer to free
 *	@reason: reason why this skb is dropped
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Meanwhile, pass the receiving socket and drop reason to
 *	'kfree_skb' tracepoint.
 */
void __fix_address
sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
		   enum skb_drop_reason reason)
{
	if (__sk_skb_reason_drop(sk, skb, reason))
		__kfree_skb(skb);
}
EXPORT_SYMBOL(sk_skb_reason_drop);

#define KFREE_SKB_BULK_SIZE	16

struct skb_free_array {
	unsigned int skb_count;
	void *skb_array[KFREE_SKB_BULK_SIZE];
};

static void kfree_skb_add_bulk(struct sk_buff *skb,
			       struct skb_free_array *sa,
			       enum skb_drop_reason reason)
{
	/* if SKB is a clone, don't handle this case */
	if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, reason);
	sa->skb_array[sa->skb_count++] = skb;

	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
		kmem_cache_free_bulk(net_hotdata.skbuff_cache,
				     KFREE_SKB_BULK_SIZE, sa->skb_array);
		sa->skb_count = 0;
	}
}

void __fix_address
kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
{
	struct skb_free_array sa;

	sa.skb_count = 0;

	while (segs) {
		struct sk_buff *next = segs->next;

		if (__sk_skb_reason_drop(NULL, segs, reason)) {
			skb_poison_list(segs);
			kfree_skb_add_bulk(segs, &sa, reason);
		}

		segs = next;
	}

	if (sa.skb_count)
		kmem_cache_free_bulk(net_hotdata.skbuff_cache,
				     sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
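/* Example (illustrative sketch, not part of the original file): dropping a
 * packet with an explicit reason via the kfree_skb_reason() wrapper around
 * the helper above, so 'kfree_skb' tracepoint consumers see why.
 *
 *	if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr)))) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
 *		return NET_RX_DROP;
 *	}
 */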
/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
	       "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
	       "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->mac_len,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
	       skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
	       skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
	       skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
	       skb->inner_network_header, skb->inner_transport_header);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		if (skb_frag_is_net_iov(frag)) {
			printk("%sskb frag %d: not readable\n", level, i);
			len -= skb_frag_size(frag);
			if (!len)
				break;
			continue;
		}

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
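/* Example (illustrative sketch, not part of the original file): the
 * ratelimited debugging pattern the comment above requires, dumping only
 * the headers of a suspicious skb.
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);
 */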
*/ void skb_tx_error(struct sk_buff *skb) { if (skb) { skb_zcopy_downgrade_managed(skb); skb_zcopy_clear(skb, true); } } EXPORT_SYMBOL(skb_tx_error); #ifdef CONFIG_TRACEPOINTS /** * consume_skb - free an skbuff * @skb: buffer to free * * Drop a ref to the buffer and free it if the usage count has hit zero * Functions identically to kfree_skb, but kfree_skb assumes that the frame * is being dropped after a failure and notes that */ void consume_skb(struct sk_buff *skb) { if (!skb_unref(skb)) return; trace_consume_skb(skb, __builtin_return_address(0)); __kfree_skb(skb); } EXPORT_SYMBOL(consume_skb); #endif /** * __consume_stateless_skb - free an skbuff, assuming it is stateless * @skb: buffer to free * * Alike consume_skb(), but this variant assumes that this is the last * skb reference and all the head states have been already dropped */ void __consume_stateless_skb(struct sk_buff *skb) { trace_consume_skb(skb, __builtin_return_address(0)); skb_release_data(skb, SKB_CONSUMED); kfree_skbmem(skb); } static void napi_skb_cache_put(struct sk_buff *skb) { struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); u32 i; if (!kasan_mempool_poison_object(skb)) return; local_lock_nested_bh(&napi_alloc_cache.bh_lock); nc->skb_cache[nc->skb_count++] = skb; if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) kasan_mempool_unpoison_object(nc->skb_cache[i], kmem_cache_size(net_hotdata.skbuff_cache)); kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF, nc->skb_cache + NAPI_SKB_CACHE_HALF); nc->skb_count = NAPI_SKB_CACHE_HALF; } local_unlock_nested_bh(&napi_alloc_cache.bh_lock); } void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) { skb_release_all(skb, reason); napi_skb_cache_put(skb); } void napi_skb_free_stolen_head(struct sk_buff *skb) { if (unlikely(skb->slow_gro)) { nf_reset_ct(skb); skb_dst_drop(skb); skb_ext_put(skb); skb_orphan(skb); skb->slow_gro = 0; } napi_skb_cache_put(skb); } void napi_consume_skb(struct sk_buff *skb, int budget) { /* Zero budget indicate non-NAPI context called us, like netpoll */ if (unlikely(!budget)) { dev_consume_skb_any(skb); return; } DEBUG_NET_WARN_ON_ONCE(!in_softirq()); if (!skb_unref(skb)) return; /* if reaching here SKB is ready to free */ trace_consume_skb(skb, __builtin_return_address(0)); /* if SKB is a clone, don't handle this case */ if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { __kfree_skb(skb); return; } skb_release_all(skb, SKB_CONSUMED); napi_skb_cache_put(skb); } EXPORT_SYMBOL(napi_consume_skb); /* Make sure a field is contained by headers group */ #define CHECK_SKB_FIELD(field) \ BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ offsetof(struct sk_buff, headers.field)); \ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { new->tstamp = old->tstamp; /* We do not copy old->sk */ new->dev = old->dev; memcpy(new->cb, old->cb, sizeof(old->cb)); skb_dst_copy(new, old); __skb_ext_copy(new, old); __nf_copy(new, old, false); /* Note : this field could be in the headers group. 
* It is not yet because we do not want to have a 16 bit hole */ new->queue_mapping = old->queue_mapping; memcpy(&new->headers, &old->headers, sizeof(new->headers)); CHECK_SKB_FIELD(protocol); CHECK_SKB_FIELD(csum); CHECK_SKB_FIELD(hash); CHECK_SKB_FIELD(priority); CHECK_SKB_FIELD(skb_iif); CHECK_SKB_FIELD(vlan_proto); CHECK_SKB_FIELD(vlan_tci); CHECK_SKB_FIELD(transport_header); CHECK_SKB_FIELD(network_header); CHECK_SKB_FIELD(mac_header); CHECK_SKB_FIELD(inner_protocol); CHECK_SKB_FIELD(inner_transport_header); CHECK_SKB_FIELD(inner_network_header); CHECK_SKB_FIELD(inner_mac_header); CHECK_SKB_FIELD(mark); #ifdef CONFIG_NETWORK_SECMARK CHECK_SKB_FIELD(secmark); #endif #ifdef CONFIG_NET_RX_BUSY_POLL CHECK_SKB_FIELD(napi_id); #endif CHECK_SKB_FIELD(alloc_cpu); #ifdef CONFIG_XPS CHECK_SKB_FIELD(sender_cpu); #endif #ifdef CONFIG_NET_SCHED CHECK_SKB_FIELD(tc_index); #endif } /* * You should not add any new code to this function. Add it to * __copy_skb_header above instead. */ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) { #define C(x) n->x = skb->x n->next = n->prev = NULL; n->sk = NULL; __copy_skb_header(n, skb); C(len); C(data_len); C(mac_len); n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; n->cloned = 1; n->nohdr = 0; n->peeked = 0; C(pfmemalloc); C(pp_recycle); n->destructor = NULL; C(tail); C(end); C(head); C(head_frag); C(data); C(truesize); refcount_set(&n->users, 1); atomic_inc(&(skb_shinfo(skb)->dataref)); skb->cloned = 1; return n; #undef C } /** * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg * @first: first sk_buff of the msg */ struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) { struct sk_buff *n; n = alloc_skb(0, GFP_ATOMIC); if (!n) return NULL; n->len = first->len; n->data_len = first->len; n->truesize = first->truesize; skb_shinfo(n)->frag_list = first; __copy_skb_header(n, first); n->destructor = NULL; return n; } EXPORT_SYMBOL_GPL(alloc_skb_for_msg); /** * skb_morph - morph one skb into another * @dst: the skb to receive the contents * @src: the skb to supply the contents * * This is identical to skb_clone except that the target skb is * supplied by the user. * * The target skb is returned upon exit. */ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) { skb_release_all(dst, SKB_CONSUMED); return __skb_clone(dst, src); } EXPORT_SYMBOL_GPL(skb_morph); int mm_account_pinned_pages(struct mmpin *mmp, size_t size) { unsigned long max_pg, num_pg, new_pg, old_pg, rlim; struct user_struct *user; if (capable(CAP_IPC_LOCK) || !size) return 0; rlim = rlimit(RLIMIT_MEMLOCK); if (rlim == RLIM_INFINITY) return 0; num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ max_pg = rlim >> PAGE_SHIFT; user = mmp->user ? 
: current_user(); old_pg = atomic_long_read(&user->locked_vm); do { new_pg = old_pg + num_pg; if (new_pg > max_pg) return -ENOBUFS; } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); if (!mmp->user) { mmp->user = get_uid(user); mmp->num_pg = num_pg; } else { mmp->num_pg += num_pg; } return 0; } EXPORT_SYMBOL_GPL(mm_account_pinned_pages); void mm_unaccount_pinned_pages(struct mmpin *mmp) { if (mmp->user) { atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); free_uid(mmp->user); } } EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size, bool devmem) { struct ubuf_info_msgzc *uarg; struct sk_buff *skb; WARN_ON_ONCE(!in_task()); skb = sock_omalloc(sk, 0, GFP_KERNEL); if (!skb) return NULL; BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); uarg = (void *)skb->cb; uarg->mmp.user = NULL; if (likely(!devmem) && mm_account_pinned_pages(&uarg->mmp, size)) { kfree_skb(skb); return NULL; } uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; uarg->len = 1; uarg->bytelen = size; uarg->zerocopy = 1; uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; refcount_set(&uarg->ubuf.refcnt, 1); sock_hold(sk); return &uarg->ubuf; } static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) { return container_of((void *)uarg, struct sk_buff, cb); } struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, struct ubuf_info *uarg, bool devmem) { if (uarg) { struct ubuf_info_msgzc *uarg_zc; const u32 byte_limit = 1 << 19; /* limit to a few TSO */ u32 bytelen, next; /* there might be non MSG_ZEROCOPY users */ if (uarg->ops != &msg_zerocopy_ubuf_ops) return NULL; /* realloc only when socket is locked (TCP, UDP cork), * so uarg->len and sk_zckey access is serialized */ if (!sock_owned_by_user(sk)) { WARN_ON_ONCE(1); return NULL; } uarg_zc = uarg_to_msgzc(uarg); bytelen = uarg_zc->bytelen + size; if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { /* TCP can create new skb to attach new uarg */ if (sk->sk_type == SOCK_STREAM) goto new_alloc; return NULL; } next = (u32)atomic_read(&sk->sk_zckey); if ((u32)(uarg_zc->id + uarg_zc->len) == next) { if (likely(!devmem) && mm_account_pinned_pages(&uarg_zc->mmp, size)) return NULL; uarg_zc->len++; uarg_zc->bytelen = bytelen; atomic_set(&sk->sk_zckey, ++next); /* no extra ref when appending to datagram (MSG_MORE) */ if (sk->sk_type == SOCK_STREAM) net_zcopy_get(uarg); return uarg; } } new_alloc: return msg_zerocopy_alloc(sk, size, devmem); } EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) { struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); u32 old_lo, old_hi; u64 sum_len; old_lo = serr->ee.ee_info; old_hi = serr->ee.ee_data; sum_len = old_hi - old_lo + 1ULL + len; if (sum_len >= (1ULL << 32)) return false; if (lo != old_hi + 1) return false; serr->ee.ee_data += len; return true; } static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) { struct sk_buff *tail, *skb = skb_from_uarg(uarg); struct sock_exterr_skb *serr; struct sock *sk = skb->sk; struct sk_buff_head *q; unsigned long flags; bool is_zerocopy; u32 lo, hi; u16 len; mm_unaccount_pinned_pages(&uarg->mmp); /* if !len, there was only 1 call, and it was aborted * so do not queue a completion notification */ if (!uarg->len || sock_flag(sk, SOCK_DEAD)) goto release; len = uarg->len; lo = uarg->id; hi = uarg->id + len - 1; is_zerocopy = uarg->zerocopy; serr = SKB_EXT_ERR(skb); 
memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = 0; serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; serr->ee.ee_data = hi; serr->ee.ee_info = lo; if (!is_zerocopy) serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; q = &sk->sk_error_queue; spin_lock_irqsave(&q->lock, flags); tail = skb_peek_tail(q); if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || !skb_zerocopy_notify_extend(tail, lo, len)) { __skb_queue_tail(q, skb); skb = NULL; } spin_unlock_irqrestore(&q->lock, flags); sk_error_report(sk); release: consume_skb(skb); sock_put(sk); } static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, bool success) { struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); uarg_zc->zerocopy = uarg_zc->zerocopy & success; if (refcount_dec_and_test(&uarg->refcnt)) __msg_zerocopy_callback(uarg_zc); } void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) { struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; atomic_dec(&sk->sk_zckey); uarg_to_msgzc(uarg)->len--; if (have_uref) msg_zerocopy_complete(NULL, uarg, true); } EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); const struct ubuf_info_ops msg_zerocopy_ubuf_ops = { .complete = msg_zerocopy_complete, }; EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops); int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg, struct net_devmem_dmabuf_binding *binding) { int err, orig_len = skb->len; if (uarg->ops->link_skb) { err = uarg->ops->link_skb(skb, uarg); if (err) return err; } else { struct ubuf_info *orig_uarg = skb_zcopy(skb); /* An skb can only point to one uarg. This edge case happens * when TCP appends to an skb, but zerocopy_realloc triggered * a new alloc. */ if (orig_uarg && uarg != orig_uarg) return -EEXIST; } err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len, binding); if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { struct sock *save_sk = skb->sk; /* Streams do not free skb on error. Reset to prev state. */ iov_iter_revert(&msg->msg_iter, skb->len - orig_len); skb->sk = sk; ___pskb_trim(skb, orig_len); skb->sk = save_sk; return err; } skb_zcopy_set(skb, uarg, NULL); return skb->len - orig_len; } EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); void __skb_zcopy_downgrade_managed(struct sk_buff *skb) { int i; skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_frag_ref(skb, i); } EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, gfp_t gfp_mask) { if (skb_zcopy(orig)) { if (skb_zcopy(nskb)) { /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ if (!gfp_mask) { WARN_ON_ONCE(1); return -ENOMEM; } if (skb_uarg(nskb) == skb_uarg(orig)) return 0; if (skb_copy_ubufs(nskb, GFP_ATOMIC)) return -EIO; } skb_zcopy_set(nskb, skb_uarg(orig), NULL); } return 0; } /** * skb_copy_ubufs - copy userspace skb frags buffers to kernel * @skb: the skb to modify * @gfp_mask: allocation priority * * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. * It will copy all frags into kernel and drop the reference * to userspace pages. * * If this function is called from an interrupt gfp_mask() must be * %GFP_ATOMIC. * * Returns 0 on success or a negative error code on failure * to allocate kernel memory to copy to. 
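 *
 * Example (illustrative sketch; the "drop" label is hypothetical): a path
 * that is about to write into the frags of a zerocopy skb first takes a
 * private copy of the user pages:
 *
 *	if (skb_zcopy(skb) && skb_copy_ubufs(skb, GFP_ATOMIC))
 *		goto drop;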
*/ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) { int num_frags = skb_shinfo(skb)->nr_frags; struct page *page, *head = NULL; int i, order, psize, new_frags; u32 d_off; if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) return -EINVAL; if (!skb_frags_readable(skb)) return -EFAULT; if (!num_frags) goto release; /* We might have to allocate high order pages, so compute what minimum * page order is needed. */ order = 0; while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) order++; psize = (PAGE_SIZE << order); new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); for (i = 0; i < new_frags; i++) { page = alloc_pages(gfp_mask | __GFP_COMP, order); if (!page) { while (head) { struct page *next = (struct page *)page_private(head); put_page(head); head = next; } return -ENOMEM; } set_page_private(page, (unsigned long)head); head = page; } page = head; d_off = 0; for (i = 0; i < num_frags; i++) { skb_frag_t *f = &skb_shinfo(skb)->frags[i]; u32 p_off, p_len, copied; struct page *p; u8 *vaddr; skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), p, p_off, p_len, copied) { u32 copy, done = 0; vaddr = kmap_atomic(p); while (done < p_len) { if (d_off == psize) { d_off = 0; page = (struct page *)page_private(page); } copy = min_t(u32, psize - d_off, p_len - done); memcpy(page_address(page) + d_off, vaddr + p_off + done, copy); done += copy; d_off += copy; } kunmap_atomic(vaddr); } } /* skb frags release userspace buffers */ for (i = 0; i < num_frags; i++) skb_frag_unref(skb, i); /* skb frags point to kernel buffers */ for (i = 0; i < new_frags - 1; i++) { __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); head = (struct page *)page_private(head); } __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, d_off); skb_shinfo(skb)->nr_frags = new_frags; release: skb_zcopy_clear(skb, false); return 0; } EXPORT_SYMBOL_GPL(skb_copy_ubufs); /** * skb_clone - duplicate an sk_buff * @skb: buffer to clone * @gfp_mask: allocation priority * * Duplicate an &sk_buff. The new one is not owned by a socket. Both * copies share the same packet data but not structure. The new * buffer has a reference count of 1. If the allocation fails the * function returns %NULL otherwise the new buffer is returned. * * If this function is called from an interrupt gfp_mask() must be * %GFP_ATOMIC. 
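 *
 * Example (illustrative sketch; deliver_to_monitor() is hypothetical):
 * handing a private reference to a second consumer while keeping the
 * original skb for the normal path:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		deliver_to_monitor(nskb);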
*/ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff_fclones *fclones = container_of(skb, struct sk_buff_fclones, skb1); struct sk_buff *n; if (skb_orphan_frags(skb, gfp_mask)) return NULL; if (skb->fclone == SKB_FCLONE_ORIG && refcount_read(&fclones->fclone_ref) == 1) { n = &fclones->skb2; refcount_set(&fclones->fclone_ref, 2); n->fclone = SKB_FCLONE_CLONE; } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); if (!n) return NULL; n->fclone = SKB_FCLONE_UNAVAILABLE; } return __skb_clone(n, skb); } EXPORT_SYMBOL(skb_clone); void skb_headers_offset_update(struct sk_buff *skb, int off) { /* Only adjust this if it actually is csum_start rather than csum */ if (skb->ip_summed == CHECKSUM_PARTIAL) skb->csum_start += off; /* {transport,network,mac}_header and tail are relative to skb->head */ skb->transport_header += off; skb->network_header += off; if (skb_mac_header_was_set(skb)) skb->mac_header += off; skb->inner_transport_header += off; skb->inner_network_header += off; skb->inner_mac_header += off; } EXPORT_SYMBOL(skb_headers_offset_update); void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) { __copy_skb_header(new, old); skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; } EXPORT_SYMBOL(skb_copy_header); static inline int skb_alloc_rx_flag(const struct sk_buff *skb) { if (skb_pfmemalloc(skb)) return SKB_ALLOC_RX; return 0; } /** * skb_copy - create private copy of an sk_buff * @skb: buffer to copy * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and its data. This is used when the * caller wishes to modify the data and needs a private copy of the * data to alter. Returns %NULL on failure or the pointer to the buffer * on success. The returned buffer has a reference count of 1. * * As by-product this function converts non-linear &sk_buff to linear * one, so that &sk_buff becomes completely private and caller is allowed * to modify all the data of returned buffer. This means that this * function is not recommended for use in circumstances when only * header is going to be modified. Use pskb_copy() instead. */ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff *n; unsigned int size; int headerlen; if (!skb_frags_readable(skb)) return NULL; if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) return NULL; headerlen = skb_headroom(skb); size = skb_end_offset(skb) + skb->data_len; n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), NUMA_NO_NODE); if (!n) return NULL; /* Set the data pointer */ skb_reserve(n, headerlen); /* Set the tail pointer and length */ skb_put(n, skb->len); BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); skb_copy_header(n, skb); return n; } EXPORT_SYMBOL(skb_copy); /** * __pskb_copy_fclone - create copy of an sk_buff with private head. * @skb: buffer to copy * @headroom: headroom of new skb * @gfp_mask: allocation priority * @fclone: if true allocate the copy of the skb from the fclone * cache instead of the head cache; it is recommended to set this * to true for the cases where the copy will likely be cloned * * Make a copy of both an &sk_buff and part of its data, located * in header. Fragmented data remain shared. This is used when * the caller wishes to modify only header of &sk_buff and needs * private copy of the header to alter. 
Returns %NULL on failure * or the pointer to the buffer on success. * The returned buffer has a reference count of 1. */ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, gfp_t gfp_mask, bool fclone) { unsigned int size = skb_headlen(skb) + headroom; int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); if (!n) goto out; /* Set the data pointer */ skb_reserve(n, headroom); /* Set the tail pointer and length */ skb_put(n, skb_headlen(skb)); /* Copy the bytes */ skb_copy_from_linear_data(skb, n->data, n->len); n->truesize += skb->data_len; n->data_len = skb->data_len; n->len = skb->len; if (skb_shinfo(skb)->nr_frags) { int i; if (skb_orphan_frags(skb, gfp_mask) || skb_zerocopy_clone(n, skb, gfp_mask)) { kfree_skb(n); n = NULL; goto out; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; skb_frag_ref(skb, i); } skb_shinfo(n)->nr_frags = i; } if (skb_has_frag_list(skb)) { skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; skb_clone_fraglist(n); } skb_copy_header(n, skb); out: return n; } EXPORT_SYMBOL(__pskb_copy_fclone); /** * pskb_expand_head - reallocate header of &sk_buff * @skb: buffer to reallocate * @nhead: room to add at head * @ntail: room to add at tail * @gfp_mask: allocation priority * * Expands (or creates identical copy, if @nhead and @ntail are zero) * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have * reference count of 1. Returns zero in the case of success or error, * if expansion failed. In the last case, &sk_buff is not changed. * * All the pointers pointing into skb header may change and must be * reloaded after call to this function. */ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) { unsigned int osize = skb_end_offset(skb); unsigned int size = osize + nhead + ntail; long off; u8 *data; int i; BUG_ON(nhead < 0); BUG_ON(skb_shared(skb)); skb_zcopy_downgrade_managed(skb); if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); if (!data) goto nodata; size = SKB_WITH_OVERHEAD(size); /* Copy only real data... and, alas, header. This should be * optimized for the cases when header is void. */ memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); memcpy((struct skb_shared_info *)(data + size), skb_shinfo(skb), offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); /* * if shinfo is shared we must drop the old head gracefully, but if it * is not we can just drop the old head and let the existing refcount * be since all we did is relocate the values */ if (skb_cloned(skb)) { if (skb_orphan_frags(skb, gfp_mask)) goto nofrags; if (skb_zcopy(skb)) refcount_inc(&skb_uarg(skb)->refcnt); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_frag_ref(skb, i); if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); skb_release_data(skb, SKB_CONSUMED); } else { skb_free_head(skb); } off = (data + nhead) - skb->head; skb->head = data; skb->head_frag = 0; skb->data += off; skb_set_end_offset(skb, size); #ifdef NET_SKBUFF_DATA_USES_OFFSET off = nhead; #endif skb->tail += off; skb_headers_offset_update(skb, nhead); skb->cloned = 0; skb->hdr_len = 0; skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); skb_metadata_clear(skb); /* It is not generally safe to change skb->truesize. * For the moment, we really care of rx path, or * when skb is orphaned (not attached to a socket). 
*/ if (!skb->sk || skb->destructor == sock_edemux) skb->truesize += size - osize; return 0; nofrags: skb_kfree_head(data, size); nodata: return -ENOMEM; } EXPORT_SYMBOL(pskb_expand_head); /* Make private copy of skb with writable head and some headroom */ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) { struct sk_buff *skb2; int delta = headroom - skb_headroom(skb); if (delta <= 0) skb2 = pskb_copy(skb, GFP_ATOMIC); else { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { kfree_skb(skb2); skb2 = NULL; } } return skb2; } EXPORT_SYMBOL(skb_realloc_headroom); /* Note: We plan to rework this in linux-6.4 */ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) { unsigned int saved_end_offset, saved_truesize; struct skb_shared_info *shinfo; int res; saved_end_offset = skb_end_offset(skb); saved_truesize = skb->truesize; res = pskb_expand_head(skb, 0, 0, pri); if (res) return res; skb->truesize = saved_truesize; if (likely(skb_end_offset(skb) == saved_end_offset)) return 0; /* We cannot change skb->end if the original or new value * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). */ if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { /* We think this path should not be taken. * Add a temporary trace to warn us just in case. */ pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", saved_end_offset, skb_end_offset(skb)); WARN_ON_ONCE(1); return 0; } shinfo = skb_shinfo(skb); /* We are about to change back skb->end, * we need to move skb_shinfo() to its new location. */ memmove(skb->head + saved_end_offset, shinfo, offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); skb_set_end_offset(skb, saved_end_offset); return 0; } /** * skb_expand_head - reallocate header of &sk_buff * @skb: buffer to reallocate * @headroom: needed headroom * * Unlike skb_realloc_headroom, this one does not allocate a new skb * if possible; copies skb->sk to new skb as needed * and frees original skb in case of failures. * * It expects increased headroom and generates a warning otherwise. */ struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) { int delta = headroom - skb_headroom(skb); int osize = skb_end_offset(skb); struct sock *sk = skb->sk; if (WARN_ONCE(delta <= 0, "%s is expecting an increase in the headroom", __func__)) return skb; delta = SKB_DATA_ALIGN(delta); /* pskb_expand_head() might crash if skb is shared. */ if (skb_shared(skb) || !is_skb_wmem(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (unlikely(!nskb)) goto fail; if (sk) skb_set_owner_w(nskb, sk); consume_skb(skb); skb = nskb; } if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) goto fail; if (sk && is_skb_wmem(skb)) { delta = skb_end_offset(skb) - osize; refcount_add(delta, &sk->sk_wmem_alloc); skb->truesize += delta; } return skb; fail: kfree_skb(skb); return NULL; } EXPORT_SYMBOL(skb_expand_head); /** * skb_copy_expand - copy and expand sk_buff * @skb: buffer to copy * @newheadroom: new free bytes at head * @newtailroom: new free bytes at tail * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and its data and while doing so * allocate additional space. * * This is used when the caller wishes to modify the data and needs a * private copy of the data to alter as well as more space for new fields. * Returns %NULL on failure or the pointer to the buffer * on success.
The returned buffer has a reference count of 1. * * You must pass %GFP_ATOMIC as the allocation priority if this function * is called from an interrupt. */ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t gfp_mask) { /* * Allocate the copy buffer */ int head_copy_len, head_copy_off; struct sk_buff *n; int oldheadroom; if (!skb_frags_readable(skb)) return NULL; if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) return NULL; oldheadroom = skb_headroom(skb); n = __alloc_skb(newheadroom + skb->len + newtailroom, gfp_mask, skb_alloc_rx_flag(skb), NUMA_NO_NODE); if (!n) return NULL; skb_reserve(n, newheadroom); /* Set the tail pointer and length */ skb_put(n, skb->len); head_copy_len = oldheadroom; head_copy_off = 0; if (newheadroom <= head_copy_len) head_copy_len = newheadroom; else head_copy_off = newheadroom - head_copy_len; /* Copy the linear header and data. */ BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, skb->len + head_copy_len)); skb_copy_header(n, skb); skb_headers_offset_update(n, newheadroom - oldheadroom); return n; } EXPORT_SYMBOL(skb_copy_expand); /** * __skb_pad - zero pad the tail of an skb * @skb: buffer to pad * @pad: space to pad * @free_on_error: free buffer on error * * Ensure that a buffer is followed by a padding area that is zero * filled. Used by network drivers which may DMA or transfer data * beyond the buffer end onto the wire. * * May return error in out of memory cases. The skb is freed on error * if @free_on_error is true. */ int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) { int err; int ntail; /* If the skbuff is non linear tailroom is always zero.. */ if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { memset(skb->data+skb->len, 0, pad); return 0; } ntail = skb->data_len + pad - (skb->end - skb->tail); if (likely(skb_cloned(skb) || ntail > 0)) { err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); if (unlikely(err)) goto free_skb; } /* FIXME: The use of this function with non-linear skb's really needs * to be audited. */ err = skb_linearize(skb); if (unlikely(err)) goto free_skb; memset(skb->data + skb->len, 0, pad); return 0; free_skb: if (free_on_error) kfree_skb(skb); return err; } EXPORT_SYMBOL(__skb_pad); /** * pskb_put - add data to the tail of a potentially fragmented buffer * @skb: start of the buffer to use * @tail: tail fragment of the buffer to use * @len: amount of data to add * * This function extends the used data area of the potentially * fragmented buffer. @tail must be the last fragment of @skb -- or * @skb itself. If this would exceed the total buffer size the kernel * will panic. A pointer to the first byte of the extra data is * returned. */ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) { if (tail != skb) { skb->data_len += len; skb->len += len; } return skb_put(tail, len); } EXPORT_SYMBOL_GPL(pskb_put); /** * skb_put - add data to a buffer * @skb: buffer to use * @len: amount of data to add * * This function extends the used data area of the buffer. If this would * exceed the total buffer size the kernel will panic. A pointer to the * first byte of the extra data is returned. 
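 *
 * Example (illustrative sketch; @payload and @plen are hypothetical):
 * appending @plen bytes to a freshly allocated, empty buffer:
 *
 *	struct sk_buff *skb = alloc_skb(plen, GFP_ATOMIC);
 *
 *	if (skb)
 *		memcpy(skb_put(skb, plen), payload, plen);
 *
 * The same pattern is wrapped by the skb_put_data() helper.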
*/ void *skb_put(struct sk_buff *skb, unsigned int len) { void *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; if (unlikely(skb->tail > skb->end)) skb_over_panic(skb, len, __builtin_return_address(0)); return tmp; } EXPORT_SYMBOL(skb_put); /** * skb_push - add data to the start of a buffer * @skb: buffer to use * @len: amount of data to add * * This function extends the used data area of the buffer at the buffer * start. If this would exceed the total buffer headroom the kernel will * panic. A pointer to the first byte of the extra data is returned. */ void *skb_push(struct sk_buff *skb, unsigned int len) { skb->data -= len; skb->len += len; if (unlikely(skb->data < skb->head)) skb_under_panic(skb, len, __builtin_return_address(0)); return skb->data; } EXPORT_SYMBOL(skb_push); /** * skb_pull - remove data from the start of a buffer * @skb: buffer to use * @len: amount of data to remove * * This function removes data from the start of a buffer, returning * the memory to the headroom. A pointer to the next data in the buffer * is returned. Once the data has been pulled future pushes will overwrite * the old data. */ void *skb_pull(struct sk_buff *skb, unsigned int len) { return skb_pull_inline(skb, len); } EXPORT_SYMBOL(skb_pull); /** * skb_pull_data - remove data from the start of a buffer returning its * original position. * @skb: buffer to use * @len: amount of data to remove * * This function removes data from the start of a buffer, returning * the memory to the headroom. A pointer to the original data in the buffer * is returned after checking if there is enough data to pull. Once the * data has been pulled future pushes will overwrite the old data. */ void *skb_pull_data(struct sk_buff *skb, size_t len) { void *data = skb->data; if (skb->len < len) return NULL; skb_pull(skb, len); return data; } EXPORT_SYMBOL(skb_pull_data); /** * skb_trim - remove end from a buffer * @skb: buffer to alter * @len: new length * * Cut the length of a buffer down by removing data from the tail. If * the buffer is already under the length specified it is not modified. * The skb must be linear. */ void skb_trim(struct sk_buff *skb, unsigned int len) { if (skb->len > len) __skb_trim(skb, len); } EXPORT_SYMBOL(skb_trim); /* Trims skb to length len. It can change skb pointers. 
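 *
 * Callers normally reach this through pskb_trim(), which only falls back
 * to this slow path when the skb is non-linear, e.g. (illustrative
 * sketch; the "drop" label is hypothetical):
 *
 *	if (pskb_trim(skb, new_len))
 *		goto drop;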
*/ int ___pskb_trim(struct sk_buff *skb, unsigned int len) { struct sk_buff **fragp; struct sk_buff *frag; int offset = skb_headlen(skb); int nfrags = skb_shinfo(skb)->nr_frags; int i; int err; if (skb_cloned(skb) && unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) return err; i = 0; if (offset >= len) goto drop_pages; for (; i < nfrags; i++) { int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); if (end < len) { offset = end; continue; } skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); drop_pages: skb_shinfo(skb)->nr_frags = i; for (; i < nfrags; i++) skb_frag_unref(skb, i); if (skb_has_frag_list(skb)) skb_drop_fraglist(skb); goto done; } for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); fragp = &frag->next) { int end = offset + frag->len; if (skb_shared(frag)) { struct sk_buff *nfrag; nfrag = skb_clone(frag, GFP_ATOMIC); if (unlikely(!nfrag)) return -ENOMEM; nfrag->next = frag->next; consume_skb(frag); frag = nfrag; *fragp = frag; } if (end < len) { offset = end; continue; } if (end > len && unlikely((err = pskb_trim(frag, len - offset)))) return err; if (frag->next) skb_drop_list(&frag->next); break; } done: if (len > skb_headlen(skb)) { skb->data_len -= skb->len - len; skb->len = len; } else { skb->len = len; skb->data_len = 0; skb_set_tail_pointer(skb, len); } if (!skb->sk || skb->destructor == sock_edemux) skb_condense(skb); return 0; } EXPORT_SYMBOL(___pskb_trim); /* Note : use pskb_trim_rcsum() instead of calling this directly */ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) { if (skb->ip_summed == CHECKSUM_COMPLETE) { int delta = skb->len - len; skb->csum = csum_block_sub(skb->csum, skb_checksum(skb, len, delta, 0), len); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; int offset = skb_checksum_start_offset(skb) + skb->csum_offset; if (offset + sizeof(__sum16) > hdlen) return -EINVAL; } return __pskb_trim(skb, len); } EXPORT_SYMBOL(pskb_trim_rcsum_slow); /** * __pskb_pull_tail - advance tail of skb header * @skb: buffer to reallocate * @delta: number of bytes to advance tail * * The function only makes sense on a fragmented &sk_buff: it expands * the header, moving its tail forward and copying necessary * data from the fragmented part. * * &sk_buff MUST have reference count of 1. * * Returns %NULL (and &sk_buff does not change) if the pull failed, * or the value of the new tail of the skb on success. * * All the pointers pointing into skb header may change and must be * reloaded after call to this function. */ /* Moves tail of skb head forward, copying data from fragmented part, * when it is necessary. * 1. It may fail due to malloc failure. * 2. It may change skb pointers. * * It is pretty complicated. Luckily, it is called only in exceptional cases. */ void *__pskb_pull_tail(struct sk_buff *skb, int delta) { /* If skb does not have enough free space at the tail, get a new one * plus 128 bytes for future expansions. If we have enough * room at tail, reallocate without expansion only if skb is cloned. */ int i, k, eat = (skb->tail + delta) - skb->end; if (!skb_frags_readable(skb)) return NULL; if (eat > 0 || skb_cloned(skb)) { if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, GFP_ATOMIC)) return NULL; } BUG_ON(skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)); /* Optimization: no fragments, no reasons to preestimate * size of pulled pages. Superb. */ if (!skb_has_frag_list(skb)) goto pull_pages; /* Estimate size of pulled pages.
*/ eat = delta; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (size >= eat) goto pull_pages; eat -= size; } /* If we need to update the frag list, we are in trouble. * Certainly, it is possible to add an offset to skb data, * but taking into account that pulling is expected to * be a very rare operation, it is worth fighting against * further bloating of the skb head and crucifying ourselves here instead. * Pure masochism, indeed. 8)8) */ if (eat) { struct sk_buff *list = skb_shinfo(skb)->frag_list; struct sk_buff *clone = NULL; struct sk_buff *insp = NULL; do { if (list->len <= eat) { /* Eaten as whole. */ eat -= list->len; list = list->next; insp = list; } else { /* Eaten partially. */ if (skb_is_gso(skb) && !list->head_frag && skb_headlen(list)) skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; if (skb_shared(list)) { /* Sucks! We need to fork list. :-( */ clone = skb_clone(list, GFP_ATOMIC); if (!clone) return NULL; insp = list->next; list = clone; } else { /* This may be pulled without * problems. */ insp = list; } if (!pskb_pull(list, eat)) { kfree_skb(clone); return NULL; } break; } } while (eat); /* Free pulled out fragments. */ while ((list = skb_shinfo(skb)->frag_list) != insp) { skb_shinfo(skb)->frag_list = list->next; consume_skb(list); } /* And insert new clone at head. */ if (clone) { clone->next = list; skb_shinfo(skb)->frag_list = clone; } } /* Success! Now we may commit changes to skb data. */ pull_pages: eat = delta; k = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (size <= eat) { skb_frag_unref(skb, i); eat -= size; } else { skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; *frag = skb_shinfo(skb)->frags[i]; if (eat) { skb_frag_off_add(frag, eat); skb_frag_size_sub(frag, eat); if (!i) goto end; eat = 0; } k++; } } skb_shinfo(skb)->nr_frags = k; end: skb->tail += delta; skb->data_len -= delta; if (!skb->data_len) skb_zcopy_clear(skb, false); return skb_tail_pointer(skb); } EXPORT_SYMBOL(__pskb_pull_tail); /** * skb_copy_bits - copy bits from skb to kernel buffer * @skb: source skb * @offset: offset in source * @to: destination buffer * @len: number of bytes to copy * * Copy the specified number of bytes from the source skb to the * destination buffer. * * CAUTION ! : * If its prototype is ever changed, * check arch/{*}/net/{*}.S files, * since it is called from BPF assembly code. */ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) { int start = skb_headlen(skb); struct sk_buff *frag_iter; int i, copy; if (offset > (int)skb->len - len) goto fault; /* Copy header.
*/ if ((copy = start - offset) > 0) { if (copy > len) copy = len; skb_copy_from_linear_data_offset(skb, offset, to, copy); if ((len -= copy) == 0) return 0; offset += copy; to += copy; } if (!skb_frags_readable(skb)) goto fault; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; skb_frag_t *f = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(f); if ((copy = end - offset) > 0) { u32 p_off, p_len, copied; struct page *p; u8 *vaddr; if (copy > len) copy = len; skb_frag_foreach_page(f, skb_frag_off(f) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); memcpy(to + copied, vaddr + p_off, p_len); kunmap_atomic(vaddr); } if ((len -= copy) == 0) return 0; offset += copy; to += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; if (skb_copy_bits(frag_iter, offset - start, to, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; to += copy; } start = end; } if (!len) return 0; fault: return -EFAULT; } EXPORT_SYMBOL(skb_copy_bits); /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. */ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) { put_page(spd->pages[i]); } static struct page *linear_to_page(struct page *page, unsigned int *len, unsigned int *offset, struct sock *sk) { struct page_frag *pfrag = sk_page_frag(sk); if (!sk_page_frag_refill(sk, pfrag)) return NULL; *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); memcpy(page_address(pfrag->page) + pfrag->offset, page_address(page) + *offset, *len); *offset = pfrag->offset; pfrag->offset += *len; return pfrag->page; } static bool spd_can_coalesce(const struct splice_pipe_desc *spd, struct page *page, unsigned int offset) { return spd->nr_pages && spd->pages[spd->nr_pages - 1] == page && (spd->partial[spd->nr_pages - 1].offset + spd->partial[spd->nr_pages - 1].len == offset); } /* * Fill page/offset/length into spd, if it can hold more pages. */ static bool spd_fill_page(struct splice_pipe_desc *spd, struct pipe_inode_info *pipe, struct page *page, unsigned int *len, unsigned int offset, bool linear, struct sock *sk) { if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) return true; if (linear) { page = linear_to_page(page, len, &offset, sk); if (!page) return true; } if (spd_can_coalesce(spd, page, offset)) { spd->partial[spd->nr_pages - 1].len += *len; return false; } get_page(page); spd->pages[spd->nr_pages] = page; spd->partial[spd->nr_pages].len = *len; spd->partial[spd->nr_pages].offset = offset; spd->nr_pages++; return false; } static bool __splice_segment(struct page *page, unsigned int poff, unsigned int plen, unsigned int *off, unsigned int *len, struct splice_pipe_desc *spd, bool linear, struct sock *sk, struct pipe_inode_info *pipe) { if (!*len) return true; /* skip this segment if already processed */ if (*off >= plen) { *off -= plen; return false; } /* ignore any bits we already processed */ poff += *off; plen -= *off; *off = 0; do { unsigned int flen = min(*len, plen); if (spd_fill_page(spd, pipe, page, &flen, poff, linear, sk)) return true; poff += flen; plen -= flen; *len -= flen; } while (*len && plen); return false; } /* * Map linear and fragment data from the skb to spd. It reports true if the * pipe is full or if we already spliced the requested length. 
*/ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, unsigned int *offset, unsigned int *len, struct splice_pipe_desc *spd, struct sock *sk) { int seg; struct sk_buff *iter; /* map the linear part : * If skb->head_frag is set, this 'linear' part is backed by a * fragment, and if the head is not shared with any clones then * we can avoid a copy since we own the head portion of this page. */ if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), offset, len, spd, skb_head_is_locked(skb), sk, pipe)) return true; /* * then map the fragments */ if (!skb_frags_readable(skb)) return false; for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; if (WARN_ON_ONCE(!skb_frag_page(f))) return false; if (__splice_segment(skb_frag_page(f), skb_frag_off(f), skb_frag_size(f), offset, len, spd, false, sk, pipe)) return true; } skb_walk_frags(skb, iter) { if (*offset >= iter->len) { *offset -= iter->len; continue; } /* __skb_splice_bits() only fails if the output has no room * left, so no point in going over the frag_list for the error * case. */ if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) return true; } return false; } /* * Map data from the skb to a pipe. Should handle both the linear part, * the fragments, and the frag list. */ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) { struct partial_page partial[MAX_SKB_FRAGS]; struct page *pages[MAX_SKB_FRAGS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = MAX_SKB_FRAGS, .ops = &nosteal_pipe_buf_ops, .spd_release = sock_spd_release, }; int ret = 0; __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); if (spd.nr_pages) ret = splice_to_pipe(pipe, &spd); return ret; } EXPORT_SYMBOL_GPL(skb_splice_bits); static int sendmsg_locked(struct sock *sk, struct msghdr *msg) { struct socket *sock = sk->sk_socket; size_t size = msg_data_left(msg); if (!sock) return -EINVAL; if (!sock->ops->sendmsg_locked) return sock_no_sendmsg_locked(sk, msg, size); return sock->ops->sendmsg_locked(sk, msg, size); } static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) { struct socket *sock = sk->sk_socket; if (!sock) return -EINVAL; return sock_sendmsg(sock, msg); } typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len, sendmsg_func sendmsg, int flags) { unsigned int orig_len = len; struct sk_buff *head = skb; unsigned short fragidx; int slen, ret; do_frag_list: /* Deal with head data */ while (offset < skb_headlen(skb) && len) { struct kvec kv; struct msghdr msg; slen = min_t(int, len, skb_headlen(skb) - offset); kv.iov_base = skb->data + offset; kv.iov_len = slen; memset(&msg, 0, sizeof(msg)); msg.msg_flags = MSG_DONTWAIT | flags; iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, sendmsg_unlocked, sk, &msg); if (ret <= 0) goto error; offset += ret; len -= ret; } /* All the data was skb head? 
*/ if (!len) goto out; /* Make offset relative to start of frags */ offset -= skb_headlen(skb); /* Find where we are in frag list */ for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; if (offset < skb_frag_size(frag)) break; offset -= skb_frag_size(frag); } for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; slen = min_t(size_t, len, skb_frag_size(frag) - offset); while (slen) { struct bio_vec bvec; struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | flags, }; bvec_set_page(&bvec, skb_frag_page(frag), slen, skb_frag_off(frag) + offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, slen); ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, sendmsg_unlocked, sk, &msg); if (ret <= 0) goto error; len -= ret; offset += ret; slen -= ret; } offset = 0; } if (len) { /* Process any frag lists */ if (skb == head) { if (skb_has_frag_list(skb)) { skb = skb_shinfo(skb)->frag_list; goto do_frag_list; } } else if (skb->next) { skb = skb->next; goto do_frag_list; } } out: return orig_len - len; error: return orig_len == len ? ret : orig_len - len; } /* Send skb data on a socket. Socket must be locked. */ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len) { return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0); } EXPORT_SYMBOL_GPL(skb_send_sock_locked); int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb, int offset, int len, int flags) { return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags); } EXPORT_SYMBOL_GPL(skb_send_sock_locked_with_flags); /* Send skb data on a socket. Socket must be unlocked. */ int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) { return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0); } /** * skb_store_bits - store bits from kernel buffer to skb * @skb: destination buffer * @offset: offset in destination * @from: source buffer * @len: number of bytes to copy * * Copy the specified number of bytes from the source buffer to the * destination skb. This function handles all the messy bits of * traversing fragment lists and such. 
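 *
 * Example (illustrative sketch; @buf, @offset and @len are hypothetical):
 * patching bytes in place regardless of how the skb is fragmented:
 *
 *	if (skb_store_bits(skb, offset, buf, len))
 *		return -EFAULT;
 *
 * (Callers are expected to have made the skb writable first, e.g. via
 * skb_ensure_writable().)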
*/ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) { int start = skb_headlen(skb); struct sk_buff *frag_iter; int i, copy; if (offset > (int)skb->len - len) goto fault; if ((copy = start - offset) > 0) { if (copy > len) copy = len; skb_copy_to_linear_data_offset(skb, offset, from, copy); if ((len -= copy) == 0) return 0; offset += copy; from += copy; } if (!skb_frags_readable(skb)) goto fault; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int end; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { u32 p_off, p_len, copied; struct page *p; u8 *vaddr; if (copy > len) copy = len; skb_frag_foreach_page(frag, skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); memcpy(vaddr + p_off, from + copied, p_len); kunmap_atomic(vaddr); } if ((len -= copy) == 0) return 0; offset += copy; from += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; if (skb_store_bits(frag_iter, offset - start, from, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; from += copy; } start = end; } if (!len) return 0; fault: return -EFAULT; } EXPORT_SYMBOL(skb_store_bits); /* Checksum skb data. */ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int pos = 0; /* Checksum header. */ if (copy > 0) { if (copy > len) copy = len; csum = csum_partial(skb->data + offset, copy, csum); if ((len -= copy) == 0) return csum; offset += copy; pos = copy; } if (WARN_ON_ONCE(!skb_frags_readable(skb))) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { u32 p_off, p_len, copied; struct page *p; __wsum csum2; u8 *vaddr; if (copy > len) copy = len; skb_frag_foreach_page(frag, skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); csum2 = csum_partial(vaddr + p_off, p_len, 0); kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); pos += p_len; } if (!(len -= copy)) return csum; offset += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { __wsum csum2; if (copy > len) copy = len; csum2 = skb_checksum(frag_iter, offset - start, copy, 0); csum = csum_block_add(csum, csum2, pos); if ((len -= copy) == 0) return csum; offset += copy; pos += copy; } start = end; } BUG_ON(len); return csum; } EXPORT_SYMBOL(skb_checksum); /* Both of above in one bottle. */ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int pos = 0; __wsum csum = 0; /* Copy header. 
*/ if (copy > 0) { if (copy > len) copy = len; csum = csum_partial_copy_nocheck(skb->data + offset, to, copy); if ((len -= copy) == 0) return csum; offset += copy; to += copy; pos = copy; } if (!skb_frags_readable(skb)) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; WARN_ON(start > offset + len); end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 p_off, p_len, copied; struct page *p; __wsum csum2; u8 *vaddr; if (copy > len) copy = len; skb_frag_foreach_page(frag, skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); csum2 = csum_partial_copy_nocheck(vaddr + p_off, to + copied, p_len); kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); pos += p_len; } if (!(len -= copy)) return csum; offset += copy; to += copy; } start = end; } skb_walk_frags(skb, frag_iter) { __wsum csum2; int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; csum2 = skb_copy_and_csum_bits(frag_iter, offset - start, to, copy); csum = csum_block_add(csum, csum2, pos); if ((len -= copy) == 0) return csum; offset += copy; to += copy; pos += copy; } start = end; } BUG_ON(len); return csum; } EXPORT_SYMBOL(skb_copy_and_csum_bits); #ifdef CONFIG_NET_CRC32C u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; if (copy > 0) { copy = min(copy, len); crc = crc32c(crc, skb->data + offset, copy); len -= copy; if (len == 0) return crc; offset += copy; } if (WARN_ON_ONCE(!skb_frags_readable(skb))) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); copy = end - offset; if (copy > 0) { u32 p_off, p_len, copied; struct page *p; u8 *vaddr; copy = min(copy, len); skb_frag_foreach_page(frag, skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); crc = crc32c(crc, vaddr + p_off, p_len); kunmap_atomic(vaddr); } len -= copy; if (len == 0) return crc; offset += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; copy = end - offset; if (copy > 0) { copy = min(copy, len); crc = skb_crc32c(frag_iter, offset - start, copy, crc); len -= copy; if (len == 0) return crc; offset += copy; } start = end; } BUG_ON(len); return crc; } EXPORT_SYMBOL(skb_crc32c); #endif /* CONFIG_NET_CRC32C */ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) { __sum16 sum; sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); /* See comments in __skb_checksum_complete(). */ if (likely(!sum)) { if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev, skb); } if (!skb_shared(skb)) skb->csum_valid = !sum; return sum; } EXPORT_SYMBOL(__skb_checksum_complete_head); /* This function assumes skb->csum already holds pseudo header's checksum, * which has been changed from the hardware checksum, for example, by * __skb_checksum_validate_complete(). And, the original skb->csum must * have been validated unsuccessfully for CHECKSUM_COMPLETE case. * * It returns non-zero if the recomputed checksum is still invalid, otherwise * zero. The new checksum is stored back into skb->csum unless the skb is * shared. 
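 *
 * Illustrative sketch (not from this file; the "csum_error" label is
 * hypothetical): most callers reach this function through the
 * skb_checksum_complete() wrapper on the receive path:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;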
*/ __sum16 __skb_checksum_complete(struct sk_buff *skb) { __wsum csum; __sum16 sum; csum = skb_checksum(skb, 0, skb->len, 0); sum = csum_fold(csum_add(skb->csum, csum)); /* This check is inverted, because we already knew the hardware * checksum is invalid before calling this function. So, if the * re-computed checksum is valid instead, then we have a mismatch * between the original skb->csum and skb_checksum(). This means either * the original hardware checksum is incorrect or we screw up skb->csum * when moving skb->data around. */ if (likely(!sum)) { if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev, skb); } if (!skb_shared(skb)) { /* Save full packet checksum */ skb->csum = csum; skb->ip_summed = CHECKSUM_COMPLETE; skb->csum_complete_sw = 1; skb->csum_valid = !sum; } return sum; } EXPORT_SYMBOL(__skb_checksum_complete); /** * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() * @from: source buffer * * Calculates the amount of linear headroom needed in the 'to' skb passed * into skb_zerocopy(). */ unsigned int skb_zerocopy_headlen(const struct sk_buff *from) { unsigned int hlen = 0; if (!from->head_frag || skb_headlen(from) < L1_CACHE_BYTES || skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { hlen = skb_headlen(from); if (!hlen) hlen = from->len; } if (skb_has_frag_list(from)) hlen = from->len; return hlen; } EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); /** * skb_zerocopy - Zero copy skb to skb * @to: destination buffer * @from: source buffer * @len: number of bytes to copy from source buffer * @hlen: size of linear headroom in destination buffer * * Copies up to `len` bytes from `from` to `to` by creating references * to the frags in the source buffer. * * The `hlen` as calculated by skb_zerocopy_headlen() specifies the * headroom in the `to` buffer. 
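 *
 * Example (illustrative sketch; @from and @len are supplied by the
 * caller):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (to && skb_zerocopy(to, from, len, hlen) < 0)
 *		kfree_skb(to);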
* * Return value: * 0: everything is OK * -ENOMEM: couldn't orphan frags of @from due to lack of memory * -EFAULT: skb_copy_bits() found some problem with skb geometry */ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) { int i, j = 0; int plen = 0; /* length of skb->head fragment */ int ret; struct page *page; unsigned int offset; BUG_ON(!from->head_frag && !hlen); /* dont bother with small payloads */ if (len <= skb_tailroom(to)) return skb_copy_bits(from, 0, skb_put(to, len), len); if (hlen) { ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); if (unlikely(ret)) return ret; len -= hlen; } else { plen = min_t(int, skb_headlen(from), len); if (plen) { page = virt_to_head_page(from->head); offset = from->data - (unsigned char *)page_address(page); __skb_fill_netmem_desc(to, 0, page_to_netmem(page), offset, plen); get_page(page); j = 1; len -= plen; } } skb_len_add(to, len + plen); if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { skb_tx_error(from); return -ENOMEM; } skb_zerocopy_clone(to, from, GFP_ATOMIC); for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { int size; if (!len) break; skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), len); skb_frag_size_set(&skb_shinfo(to)->frags[j], size); len -= size; skb_frag_ref(to, j); j++; } skb_shinfo(to)->nr_frags = j; return 0; } EXPORT_SYMBOL_GPL(skb_zerocopy); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) { __wsum csum; long csstart; if (skb->ip_summed == CHECKSUM_PARTIAL) csstart = skb_checksum_start_offset(skb); else csstart = skb_headlen(skb); BUG_ON(csstart > skb_headlen(skb)); skb_copy_from_linear_data(skb, to, csstart); csum = 0; if (csstart != skb->len) csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, skb->len - csstart); if (skb->ip_summed == CHECKSUM_PARTIAL) { long csstuff = csstart + skb->csum_offset; *((__sum16 *)(to + csstuff)) = csum_fold(csum); } } EXPORT_SYMBOL(skb_copy_and_csum_dev); /** * skb_dequeue - remove from the head of the queue * @list: list to dequeue from * * Remove the head of the list. The list lock is taken so the function * may be used safely with other locking list functions. The head item is * returned or %NULL if the list is empty. */ struct sk_buff *skb_dequeue(struct sk_buff_head *list) { unsigned long flags; struct sk_buff *result; spin_lock_irqsave(&list->lock, flags); result = __skb_dequeue(list); spin_unlock_irqrestore(&list->lock, flags); return result; } EXPORT_SYMBOL(skb_dequeue); /** * skb_dequeue_tail - remove from the tail of the queue * @list: list to dequeue from * * Remove the tail of the list. The list lock is taken so the function * may be used safely with other locking list functions. The tail item is * returned or %NULL if the list is empty. */ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) { unsigned long flags; struct sk_buff *result; spin_lock_irqsave(&list->lock, flags); result = __skb_dequeue_tail(list); spin_unlock_irqrestore(&list->lock, flags); return result; } EXPORT_SYMBOL(skb_dequeue_tail); /** * skb_queue_purge_reason - empty a list * @list: list to empty * @reason: drop reason * * Delete all buffers on an &sk_buff list. Each buffer is removed from * the list and one reference dropped. This function takes the list * lock and is atomic with respect to other list locking functions. 
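 *
 * Example (illustrative sketch; the priv->hold_queue field is
 * hypothetical): flushing a holding queue on teardown with an explicit
 * drop reason:
 *
 *	skb_queue_purge_reason(&priv->hold_queue,
 *			       SKB_DROP_REASON_QUEUE_PURGE);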
*/ void skb_queue_purge_reason(struct sk_buff_head *list, enum skb_drop_reason reason) { struct sk_buff_head tmp; unsigned long flags; if (skb_queue_empty_lockless(list)) return; __skb_queue_head_init(&tmp); spin_lock_irqsave(&list->lock, flags); skb_queue_splice_init(list, &tmp); spin_unlock_irqrestore(&list->lock, flags); __skb_queue_purge_reason(&tmp, reason); } EXPORT_SYMBOL(skb_queue_purge_reason); /** * skb_rbtree_purge - empty a skb rbtree * @root: root of the rbtree to empty * Return value: the sum of truesizes of all purged skbs. * * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from * the rbtree and one reference dropped. This function does not take * any lock. Synchronization should be handled by the caller (e.g., TCP * out-of-order queue is protected by the socket lock). */ unsigned int skb_rbtree_purge(struct rb_root *root) { struct rb_node *p = rb_first(root); unsigned int sum = 0; while (p) { struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); p = rb_next(p); rb_erase(&skb->rbnode, root); sum += skb->truesize; kfree_skb(skb); } return sum; } void skb_errqueue_purge(struct sk_buff_head *list) { struct sk_buff *skb, *next; struct sk_buff_head kill; unsigned long flags; __skb_queue_head_init(&kill); spin_lock_irqsave(&list->lock, flags); skb_queue_walk_safe(list, skb, next) { if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) continue; __skb_unlink(skb, list); __skb_queue_tail(&kill, skb); } spin_unlock_irqrestore(&list->lock, flags); __skb_queue_purge(&kill); } EXPORT_SYMBOL(skb_errqueue_purge); /** * skb_queue_head - queue a buffer at the list head * @list: list to use * @newsk: buffer to queue * * Queue a buffer at the start of the list. This function takes the * list lock and can be used safely with other locking &sk_buff functions. * * A buffer cannot be placed on two lists at the same time. */ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); __skb_queue_head(list, newsk); spin_unlock_irqrestore(&list->lock, flags); } EXPORT_SYMBOL(skb_queue_head); /** * skb_queue_tail - queue a buffer at the list tail * @list: list to use * @newsk: buffer to queue * * Queue a buffer at the tail of the list. This function takes the * list lock and can be used safely with other locking &sk_buff functions. * * A buffer cannot be placed on two lists at the same time. */ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); __skb_queue_tail(list, newsk); spin_unlock_irqrestore(&list->lock, flags); } EXPORT_SYMBOL(skb_queue_tail); /** * skb_unlink - remove a buffer from a list * @skb: buffer to remove * @list: list to use * * Remove a packet from a list. The list locks are taken and this * function is atomic with respect to other list locked calls. * * You must know what list the SKB is on. */ void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); __skb_unlink(skb, list); spin_unlock_irqrestore(&list->lock, flags); } EXPORT_SYMBOL(skb_unlink); /** * skb_append - append a buffer * @old: buffer to insert after * @newsk: buffer to insert * @list: list to use * * Place a packet after a given packet in a list. The list locks are taken * and this function is atomic with respect to other list locked calls.
* A buffer cannot be placed on two lists at the same time. */ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); __skb_queue_after(list, old, newsk); spin_unlock_irqrestore(&list->lock, flags); } EXPORT_SYMBOL(skb_append); static inline void skb_split_inside_header(struct sk_buff *skb, struct sk_buff* skb1, const u32 len, const int pos) { int i; skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), pos - len); /* And move data appendix as is. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; skb1->unreadable = skb->unreadable; skb_shinfo(skb)->nr_frags = 0; skb1->data_len = skb->data_len; skb1->len += skb1->data_len; skb->data_len = 0; skb->len = len; skb_set_tail_pointer(skb, len); } static inline void skb_split_no_header(struct sk_buff *skb, struct sk_buff* skb1, const u32 len, int pos) { int i, k = 0; const int nfrags = skb_shinfo(skb)->nr_frags; skb_shinfo(skb)->nr_frags = 0; skb1->len = skb1->data_len = skb->len - len; skb->len = len; skb->data_len = len - pos; for (i = 0; i < nfrags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (pos + size > len) { skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; if (pos < len) { /* Split frag. * We have two options in this case: * 1. Move the whole frag to the second * part, if possible. E.g. this approach * is mandatory for TUX, where splitting * is expensive. * 2. Split accurately. This is what we do here. */ skb_frag_ref(skb, i); skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); skb_shinfo(skb)->nr_frags++; } k++; } else skb_shinfo(skb)->nr_frags++; pos += size; } skb_shinfo(skb1)->nr_frags = k; skb1->unreadable = skb->unreadable; } /** * skb_split - Split fragmented skb to two parts at length len. * @skb: the buffer to split * @skb1: the buffer to receive the second part * @len: new length for skb */ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) { int pos = skb_headlen(skb); const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; skb_zcopy_downgrade_managed(skb); skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; skb_zerocopy_clone(skb1, skb, 0); if (len < pos) /* Split line is inside header. */ skb_split_inside_header(skb, skb1, len, pos); else /* Second chunk has no header, nothing to copy. */ skb_split_no_header(skb, skb1, len, pos); } EXPORT_SYMBOL(skb_split); /* Shifting from/to a cloned skb is a no-go. * * Caller cannot keep skb_shinfo related pointers past calling here! */ static int skb_prepare_for_shift(struct sk_buff *skb) { return skb_unclone_keeptruesize(skb, GFP_ATOMIC); } /** * skb_shift - Shifts paged data partially from skb to another * @tgt: buffer into which tail data gets added * @skb: buffer from which the paged data comes * @shiftlen: shift up to this many bytes * * Attempts to shift up to @shiftlen worth of bytes, which may be less than * the length of the skb, from @skb to @tgt. Returns the number of bytes * shifted. It's up to the caller to free @skb if everything was shifted. * * If @tgt runs out of frags, the whole operation is aborted. * * @skb cannot include anything other than paged data, while @tgt is allowed * to have non-paged data as well.
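 *
 * Typical caller pattern (a sketch, in the spirit of TCP's retransmit
 * collapsing):
 *
 *	shifted = skb_shift(prev, skb, skb->len);
 *	if (shifted == skb->len)
 *		... unlink and free @skb, everything was moved ...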
* * TODO: full-sized shift could be optimized but that would need * a specialized skb free'er to handle frags without up-to-date nr_frags. */ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) { int from, to, merge, todo; skb_frag_t *fragfrom, *fragto; BUG_ON(shiftlen > skb->len); if (skb_headlen(skb)) return 0; if (skb_zcopy(tgt) || skb_zcopy(skb)) return 0; DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); todo = shiftlen; from = 0; to = skb_shinfo(tgt)->nr_frags; fragfrom = &skb_shinfo(skb)->frags[from]; /* Actual merge is delayed until the point when we know we can * commit all, so that we don't have to undo partial changes. */ if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), skb_frag_off(fragfrom))) { merge = -1; } else { merge = to - 1; todo -= skb_frag_size(fragfrom); if (todo < 0) { if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) return 0; /* All previous frag pointers might be stale! */ fragfrom = &skb_shinfo(skb)->frags[from]; fragto = &skb_shinfo(tgt)->frags[merge]; skb_frag_size_add(fragto, shiftlen); skb_frag_size_sub(fragfrom, shiftlen); skb_frag_off_add(fragfrom, shiftlen); goto onlymerged; } from++; } /* Skip full, not-fitting skb to avoid expensive operations */ if ((shiftlen == skb->len) && (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) return 0; if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) return 0; while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { if (to == MAX_SKB_FRAGS) return 0; fragfrom = &skb_shinfo(skb)->frags[from]; fragto = &skb_shinfo(tgt)->frags[to]; if (todo >= skb_frag_size(fragfrom)) { *fragto = *fragfrom; todo -= skb_frag_size(fragfrom); from++; to++; } else { __skb_frag_ref(fragfrom); skb_frag_page_copy(fragto, fragfrom); skb_frag_off_copy(fragto, fragfrom); skb_frag_size_set(fragto, todo); skb_frag_off_add(fragfrom, todo); skb_frag_size_sub(fragfrom, todo); todo = 0; to++; break; } } /* Ready to "commit" this state change to tgt */ skb_shinfo(tgt)->nr_frags = to; if (merge >= 0) { fragfrom = &skb_shinfo(skb)->frags[0]; fragto = &skb_shinfo(tgt)->frags[merge]; skb_frag_size_add(fragto, skb_frag_size(fragfrom)); __skb_frag_unref(fragfrom, skb->pp_recycle); } /* Reposition in the original skb */ to = 0; while (from < skb_shinfo(skb)->nr_frags) skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; skb_shinfo(skb)->nr_frags = to; BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); onlymerged: /* Most likely the tgt won't ever need its checksum anymore; the skb, * on the other hand, might need it if it has to be resent. */ tgt->ip_summed = CHECKSUM_PARTIAL; skb->ip_summed = CHECKSUM_PARTIAL; skb_len_add(skb, -shiftlen); skb_len_add(tgt, shiftlen); return shiftlen; } /** * skb_prepare_seq_read - Prepare a sequential read of skb data * @skb: the buffer to read * @from: lower offset of data to be read * @to: upper offset of data to be read * @st: state variable * * Initializes the specified state variable. Must be called before * invoking skb_seq_read() for the first time.
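 *
 * Canonical read loop (a sketch; consume() is a hypothetical callback):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		consume(data, len);
 *		consumed += len;
 *	}
 *
 * skb_abort_seq_read(&st) is only required if the loop is left before
 * skb_seq_read() has returned 0.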
*/ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, unsigned int to, struct skb_seq_state *st) { st->lower_offset = from; st->upper_offset = to; st->root_skb = st->cur_skb = skb; st->frag_idx = st->stepped_offset = 0; st->frag_data = NULL; st->frag_off = 0; } EXPORT_SYMBOL(skb_prepare_seq_read); /** * skb_seq_read - Sequentially read skb data * @consumed: number of bytes consumed by the caller so far * @data: destination pointer for data to be returned * @st: state variable * * Reads a block of skb data at @consumed relative to the * lower offset specified to skb_prepare_seq_read(). Assigns * the head of the data block to @data and returns the length * of the block or 0 if the end of the skb data or the upper * offset has been reached. * * The caller is not required to consume all of the data * returned, i.e. @consumed is typically set to the number * of bytes already consumed and the next call to * skb_seq_read() will return the remaining part of the block. * * Note 1: The size of each block of data returned can be arbitrary; * this limitation is the cost of zerocopy sequential * reads of potentially non-linear data. * * Note 2: Fragment lists within fragments are not implemented * at the moment; state->root_skb could be replaced with * a stack for this purpose. */ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, struct skb_seq_state *st) { unsigned int block_limit, abs_offset = consumed + st->lower_offset; skb_frag_t *frag; if (unlikely(abs_offset >= st->upper_offset)) { if (st->frag_data) { kunmap_atomic(st->frag_data); st->frag_data = NULL; } return 0; } next_skb: block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; if (abs_offset < block_limit && !st->frag_data) { *data = st->cur_skb->data + (abs_offset - st->stepped_offset); return block_limit - abs_offset; } if (!skb_frags_readable(st->cur_skb)) return 0; if (st->frag_idx == 0 && !st->frag_data) st->stepped_offset += skb_headlen(st->cur_skb); while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { unsigned int pg_idx, pg_off, pg_sz; frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; pg_idx = 0; pg_off = skb_frag_off(frag); pg_sz = skb_frag_size(frag); if (skb_frag_must_loop(skb_frag_page(frag))) { pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; pg_off = offset_in_page(pg_off + st->frag_off); pg_sz = min_t(unsigned int, pg_sz - st->frag_off, PAGE_SIZE - pg_off); } block_limit = pg_sz + st->stepped_offset; if (abs_offset < block_limit) { if (!st->frag_data) st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); *data = (u8 *)st->frag_data + pg_off + (abs_offset - st->stepped_offset); return block_limit - abs_offset; } if (st->frag_data) { kunmap_atomic(st->frag_data); st->frag_data = NULL; } st->stepped_offset += pg_sz; st->frag_off += pg_sz; if (st->frag_off == skb_frag_size(frag)) { st->frag_off = 0; st->frag_idx++; } } if (st->frag_data) { kunmap_atomic(st->frag_data); st->frag_data = NULL; } if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { st->cur_skb = skb_shinfo(st->root_skb)->frag_list; st->frag_idx = 0; goto next_skb; } else if (st->cur_skb->next) { st->cur_skb = st->cur_skb->next; st->frag_idx = 0; goto next_skb; } return 0; } EXPORT_SYMBOL(skb_seq_read); /** * skb_abort_seq_read - Abort a sequential read of skb data * @st: state variable * * Must be called if the sequential read was abandoned before * skb_seq_read() returned 0.
*/ void skb_abort_seq_read(struct skb_seq_state *st) { if (st->frag_data) kunmap_atomic(st->frag_data); } EXPORT_SYMBOL(skb_abort_seq_read); /** * skb_copy_seq_read() - copy from a skb_seq_state to a buffer * @st: source skb_seq_state * @offset: offset in source * @to: destination buffer * @len: number of bytes to copy * * Copy @len bytes from @offset bytes into the source @st to the destination * buffer @to. @offset should increase (or be unchanged) with each subsequent * call to this function. If @offset needs to decrease from the previous call, * @st should be reset first. * * Return: 0 on success or -EINVAL if the copy ended early */ int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len) { const u8 *data; u32 sqlen; for (;;) { sqlen = skb_seq_read(offset, &data, st); if (sqlen == 0) return -EINVAL; if (sqlen >= len) { memcpy(to, data, len); return 0; } memcpy(to, data, sqlen); to += sqlen; offset += sqlen; len -= sqlen; } } EXPORT_SYMBOL(skb_copy_seq_read); #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, struct ts_config *conf, struct ts_state *state) { return skb_seq_read(offset, text, TS_SKB_CB(state)); } static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) { skb_abort_seq_read(TS_SKB_CB(state)); } /** * skb_find_text - Find a text pattern in skb data * @skb: the buffer to look in * @from: search offset * @to: search limit * @config: textsearch configuration * * Finds a pattern in the skb data according to the specified * textsearch configuration. Use textsearch_next() to retrieve * subsequent occurrences of the pattern. Returns the offset * to the first occurrence or UINT_MAX if no match was found. */ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config) { unsigned int patlen = config->ops->get_pattern_len(config); struct ts_state state; unsigned int ret; BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); config->get_next_block = skb_ts_get_next_block; config->finish = skb_ts_finish; skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); ret = textsearch_find(config, &state); return (ret + patlen <= to - from ? ret : UINT_MAX); } EXPORT_SYMBOL(skb_find_text); int skb_append_pagefrags(struct sk_buff *skb, struct page *page, int offset, size_t size, size_t max_frags) { int i = skb_shinfo(skb)->nr_frags; if (skb_can_coalesce(skb, i, page, offset)) { skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); } else if (i < max_frags) { skb_zcopy_downgrade_managed(skb); get_page(page); skb_fill_page_desc_noacc(skb, i, page, offset, size); } else { return -EMSGSIZE; } return 0; } EXPORT_SYMBOL_GPL(skb_append_pagefrags); /** * skb_pull_rcsum - pull skb and update receive checksum * @skb: buffer to update * @len: length of data pulled * * This function performs an skb_pull on the packet and updates * the CHECKSUM_COMPLETE checksum. It should be used in * receive path processing instead of skb_pull unless you know * that the checksum difference is zero (e.g., a valid IP header) * or you are setting ip_summed to CHECKSUM_NONE.
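 *
 * Example (a sketch; hdr_len is a hypothetical, already validated
 * encapsulation header length):
 *
 *	skb_pull_rcsum(skb, hdr_len);
 *
 * After the call, skb->data has advanced by hdr_len and a
 * CHECKSUM_COMPLETE skb->csum remains consistent.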
*/ void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) { unsigned char *data = skb->data; BUG_ON(len > skb->len); __skb_pull(skb, len); skb_postpull_rcsum(skb, data, len); return skb->data; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) { skb_frag_t head_frag; struct page *page; page = virt_to_head_page(frag_skb->head); skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - (unsigned char *)page_address(page), skb_headlen(frag_skb)); return head_frag; } struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, unsigned int offset) { struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; unsigned int tnl_hlen = skb_tnl_header_len(skb); unsigned int delta_truesize = 0; unsigned int delta_len = 0; struct sk_buff *tail = NULL; struct sk_buff *nskb, *tmp; int len_diff, err; skb_push(skb, -skb_network_offset(skb) + offset); /* Ensure the head is writeable before touching the shared info */ err = skb_unclone(skb, GFP_ATOMIC); if (err) goto err_linearize; skb_shinfo(skb)->frag_list = NULL; while (list_skb) { nskb = list_skb; list_skb = list_skb->next; err = 0; delta_truesize += nskb->truesize; if (skb_shared(nskb)) { tmp = skb_clone(nskb, GFP_ATOMIC); if (tmp) { consume_skb(nskb); nskb = tmp; err = skb_unclone(nskb, GFP_ATOMIC); } else { err = -ENOMEM; } } if (!tail) skb->next = nskb; else tail->next = nskb; if (unlikely(err)) { nskb->next = list_skb; goto err_linearize; } tail = nskb; delta_len += nskb->len; skb_push(nskb, -skb_network_offset(nskb) + offset); skb_release_head_state(nskb); len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); __copy_skb_header(nskb, skb); skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); nskb->transport_header += len_diff; skb_copy_from_linear_data_offset(skb, -tnl_hlen, nskb->data - tnl_hlen, offset + tnl_hlen); if (skb_needs_linearize(nskb, features) && __skb_linearize(nskb)) goto err_linearize; } skb->truesize = skb->truesize - delta_truesize; skb->data_len = skb->data_len - delta_len; skb->len = skb->len - delta_len; skb_gso_reset(skb); skb->prev = tail; if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) goto err_linearize; skb_get(skb); return skb; err_linearize: kfree_skb_list(skb->next); skb->next = NULL; return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(skb_segment_list); /** * skb_segment - Perform protocol segmentation on skb. * @head_skb: buffer to segment * @features: features for the output path (see dev->features) * * This function performs segmentation on the given skb. It returns * a pointer to the first in a list of new skbs for the segments. * In case of error it returns ERR_PTR(err). 
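 *
 * Typical use in a transmit path (a sketch; error handling abbreviated):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	... transmit each skb on the 'segs' list ...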
*/ struct sk_buff *skb_segment(struct sk_buff *head_skb, netdev_features_t features) { struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; unsigned int mss = skb_shinfo(head_skb)->gso_size; unsigned int doffset = head_skb->data - skb_mac_header(head_skb); unsigned int offset = doffset; unsigned int tnl_hlen = skb_tnl_header_len(head_skb); unsigned int partial_segs = 0; unsigned int headroom; unsigned int len = head_skb->len; struct sk_buff *frag_skb; skb_frag_t *frag; __be16 proto; bool csum, sg; int err = -ENOMEM; int i = 0; int nfrags, pos; if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { struct sk_buff *check_skb; for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { if (skb_headlen(check_skb) && !check_skb->head_frag) { /* gso_size is untrusted, and we have a frag_list with * a linear non head_frag item. * * If head_skb's headlen does not fit the requested gso_size, * it means that the frag_list members do NOT terminate * on exact gso_size boundaries. Hence we cannot perform * skb_frag_t page sharing. Therefore we must fall back to * copying the frag_list skbs; we do so by disabling SG. */ features &= ~NETIF_F_SG; break; } } } __skb_push(head_skb, doffset); proto = skb_network_protocol(head_skb, NULL); if (unlikely(!proto)) return ERR_PTR(-EINVAL); sg = !!(features & NETIF_F_SG); csum = !!can_checksum_protocol(features, proto); if (sg && csum && (mss != GSO_BY_FRAGS)) { if (!(features & NETIF_F_GSO_PARTIAL)) { struct sk_buff *iter; unsigned int frag_len; if (!list_skb || !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) goto normal; /* If we get here then all the required * GSO features except frag_list are supported. * Try to split the SKB into multiple GSO SKBs * with no frag_list. * Currently we can do that only when the buffers don't * have a linear part and all the buffers except * the last are of the same length. */ frag_len = list_skb->len; skb_walk_frags(head_skb, iter) { if (frag_len != iter->len && iter->next) goto normal; if (skb_headlen(iter) && !iter->head_frag) goto normal; len -= iter->len; } if (len != frag_len) goto normal; } /* GSO partial only requires that we trim off any excess that * doesn't fit into an MSS-sized block, so take care of that * now. * Cap len to not accidentally hit GSO_BY_FRAGS.
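 * For example (illustrative numbers only): len = 61440 and mss = 1448
 * give partial_segs = 42, so mss becomes 60816 and the tail segment
 * carries the remaining 624 bytes.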
*/ partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; if (partial_segs > 1) mss *= partial_segs; else partial_segs = 0; } normal: headroom = skb_headroom(head_skb); pos = skb_headlen(head_skb); if (skb_orphan_frags(head_skb, GFP_ATOMIC)) return ERR_PTR(-ENOMEM); nfrags = skb_shinfo(head_skb)->nr_frags; frag = skb_shinfo(head_skb)->frags; frag_skb = head_skb; do { struct sk_buff *nskb; skb_frag_t *nskb_frag; int hsize; int size; if (unlikely(mss == GSO_BY_FRAGS)) { len = list_skb->len; } else { len = head_skb->len - offset; if (len > mss) len = mss; } hsize = skb_headlen(head_skb) - offset; if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && (skb_headlen(list_skb) == len || sg)) { BUG_ON(skb_headlen(list_skb) > len); nskb = skb_clone(list_skb, GFP_ATOMIC); if (unlikely(!nskb)) goto err; i = 0; nfrags = skb_shinfo(list_skb)->nr_frags; frag = skb_shinfo(list_skb)->frags; frag_skb = list_skb; pos += skb_headlen(list_skb); while (pos < offset + len) { BUG_ON(i >= nfrags); size = skb_frag_size(frag); if (pos + size > offset + len) break; i++; pos += size; frag++; } list_skb = list_skb->next; if (unlikely(pskb_trim(nskb, len))) { kfree_skb(nskb); goto err; } hsize = skb_end_offset(nskb); if (skb_cow_head(nskb, doffset + headroom)) { kfree_skb(nskb); goto err; } nskb->truesize += skb_end_offset(nskb) - hsize; skb_release_head_state(nskb); __skb_push(nskb, doffset); } else { if (hsize < 0) hsize = 0; if (hsize > len || !sg) hsize = len; nskb = __alloc_skb(hsize + doffset + headroom, GFP_ATOMIC, skb_alloc_rx_flag(head_skb), NUMA_NO_NODE); if (unlikely(!nskb)) goto err; skb_reserve(nskb, headroom); __skb_put(nskb, doffset); } if (segs) tail->next = nskb; else segs = nskb; tail = nskb; __copy_skb_header(nskb, head_skb); skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); skb_reset_mac_len(nskb); skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, nskb->data - tnl_hlen, doffset + tnl_hlen); if (nskb->len == len + doffset) goto perform_csum_check; if (!sg) { if (!csum) { if (!nskb->remcsum_offload) nskb->ip_summed = CHECKSUM_NONE; SKB_GSO_CB(nskb)->csum = skb_copy_and_csum_bits(head_skb, offset, skb_put(nskb, len), len); SKB_GSO_CB(nskb)->csum_start = skb_headroom(nskb) + doffset; } else { if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) goto err; } continue; } nskb_frag = skb_shinfo(nskb)->frags; skb_copy_from_linear_data_offset(head_skb, offset, skb_put(nskb, hsize), hsize); skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & SKBFL_SHARED_FRAG; if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) goto err; while (pos < offset + len) { if (i >= nfrags) { if (skb_orphan_frags(list_skb, GFP_ATOMIC) || skb_zerocopy_clone(nskb, list_skb, GFP_ATOMIC)) goto err; i = 0; nfrags = skb_shinfo(list_skb)->nr_frags; frag = skb_shinfo(list_skb)->frags; frag_skb = list_skb; if (!skb_headlen(list_skb)) { BUG_ON(!nfrags); } else { BUG_ON(!list_skb->head_frag); /* to make room for head_frag. */ i--; frag--; } list_skb = list_skb->next; } if (unlikely(skb_shinfo(nskb)->nr_frags >= MAX_SKB_FRAGS)) { net_warn_ratelimited( "skb_segment: too many frags: %u %u\n", pos, mss); err = -EINVAL; goto err; } *nskb_frag = (i < 0) ? 
skb_head_frag_to_page_desc(frag_skb) : *frag; __skb_frag_ref(nskb_frag); size = skb_frag_size(nskb_frag); if (pos < offset) { skb_frag_off_add(nskb_frag, offset - pos); skb_frag_size_sub(nskb_frag, offset - pos); } skb_shinfo(nskb)->nr_frags++; if (pos + size <= offset + len) { i++; frag++; pos += size; } else { skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); goto skip_fraglist; } nskb_frag++; } skip_fraglist: nskb->data_len = len - hsize; nskb->len += nskb->data_len; nskb->truesize += nskb->data_len; perform_csum_check: if (!csum) { if (skb_has_shared_frag(nskb) && __skb_linearize(nskb)) goto err; if (!nskb->remcsum_offload) nskb->ip_summed = CHECKSUM_NONE; SKB_GSO_CB(nskb)->csum = skb_checksum(nskb, doffset, nskb->len - doffset, 0); SKB_GSO_CB(nskb)->csum_start = skb_headroom(nskb) + doffset; } } while ((offset += len) < head_skb->len); /* Some callers want to get the end of the list. * Put it in segs->prev to avoid walking the list. * (see validate_xmit_skb_list() for example) */ segs->prev = tail; if (partial_segs) { struct sk_buff *iter; int type = skb_shinfo(head_skb)->gso_type; unsigned short gso_size = skb_shinfo(head_skb)->gso_size; /* Update type to add partial and then remove dodgy if set */ type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; type &= ~SKB_GSO_DODGY; /* Update GSO info and prepare to start updating headers on * our way back down the stack of protocols. */ for (iter = segs; iter; iter = iter->next) { skb_shinfo(iter)->gso_size = gso_size; skb_shinfo(iter)->gso_segs = partial_segs; skb_shinfo(iter)->gso_type = type; SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; } if (tail->len - doffset <= gso_size) skb_shinfo(tail)->gso_size = 0; else if (tail != segs) skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); } /* The following permits correct backpressure for protocols * using skb_set_owner_w(). * The idea is to transfer ownership from head_skb to the last segment.
*/ if (head_skb->destructor == sock_wfree) { swap(tail->truesize, head_skb->truesize); swap(tail->destructor, head_skb->destructor); swap(tail->sk, head_skb->sk); } return segs; err: kfree_skb_list(segs); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(skb_segment); #ifdef CONFIG_SKB_EXTENSIONS #define SKB_EXT_ALIGN_VALUE 8 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) static const u8 skb_ext_type_len[] = { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), #endif #ifdef CONFIG_XFRM [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), #endif #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), #endif #if IS_ENABLED(CONFIG_MPTCP) [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), #endif #if IS_ENABLED(CONFIG_MCTP_FLOWS) [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), #endif }; static __always_inline unsigned int skb_ext_total_length(void) { unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); int i; for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) l += skb_ext_type_len[i]; return l; } static void skb_extensions_init(void) { BUILD_BUG_ON(SKB_EXT_NUM >= 8); #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) BUILD_BUG_ON(skb_ext_total_length() > 255); #endif skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } #else static void skb_extensions_init(void) {} #endif /* The SKB kmem_cache slab is critical for network performance. Never * merge/alias the slab with similar sized objects. This avoids fragmentation * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. */ #ifndef CONFIG_SLUB_TINY #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ #define FLAG_SKB_NO_MERGE 0 #endif void __init skb_init(void) { net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", sizeof(struct sk_buff), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC| FLAG_SKB_NO_MERGE, offsetof(struct sk_buff, cb), sizeof_field(struct sk_buff, cb), NULL); net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", sizeof(struct sk_buff_fclones), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. * struct skb_shared_info is located at the end of skb->head, * and should not be copied to/from user. 
*/ net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", SKB_SMALL_HEAD_CACHE_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 0, SKB_SMALL_HEAD_HEADROOM, NULL); skb_extensions_init(); } static int __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, unsigned int recursion_level) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int elt = 0; if (unlikely(recursion_level >= 24)) return -EMSGSIZE; if (copy > 0) { if (copy > len) copy = len; sg_set_buf(sg, skb->data + offset, copy); elt++; if ((len -= copy) == 0) return elt; offset += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; WARN_ON(start > offset + len); end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (unlikely(elt && sg_is_last(&sg[elt - 1]))) return -EMSGSIZE; if (copy > len) copy = len; sg_set_page(&sg[elt], skb_frag_page(frag), copy, skb_frag_off(frag) + offset - start); elt++; if (!(len -= copy)) return elt; offset += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end, ret; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (unlikely(elt && sg_is_last(&sg[elt - 1]))) return -EMSGSIZE; if (copy > len) copy = len; ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, copy, recursion_level + 1); if (unlikely(ret < 0)) return ret; elt += ret; if ((len -= copy) == 0) return elt; offset += copy; } start = end; } BUG_ON(len); return elt; } /** * skb_to_sgvec - Fill a scatter-gather list from a socket buffer * @skb: Socket buffer containing the buffers to be mapped * @sg: The scatter-gather list to map into * @offset: The offset into the buffer's contents to start mapping * @len: Length of buffer space to be mapped * * Fill the specified scatter-gather list with mappings/pointers into a * region of the buffer space attached to a socket buffer. Returns either * the number of scatterlist items used, or -EMSGSIZE if the contents * could not fit. */ int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); if (nsg <= 0) return nsg; sg_mark_end(&sg[nsg - 1]); return nsg; } EXPORT_SYMBOL_GPL(skb_to_sgvec); /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the * given sglist without marking the sg entry that contains the last skb data as * the end. So the caller can manipulate the sg list at will when padding new * data after the first call, without calling sg_unmark_end to expand the sg * list. * * Scenario to use skb_to_sgvec_nomark: * 1. sg_init_table * 2. skb_to_sgvec_nomark(payload1) * 3. skb_to_sgvec_nomark(payload2) * * This is equivalent to: * 1. sg_init_table * 2. skb_to_sgvec(payload1) * 3. sg_unmark_end * 4. skb_to_sgvec(payload2) * * When conditionally mapping multiple payloads, skb_to_sgvec_nomark is * preferable. */ int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { return __skb_to_sgvec(skb, sg, offset, len, 0); } EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); /** * skb_cow_data - Check that a socket buffer's data buffers are writable * @skb: The socket buffer to check. * @tailbits: Amount of trailing space to be added * @trailer: Returned pointer to the skb where the @tailbits space begins * * Make sure that the data buffers attached to a socket buffer are * writable.
If they are not, private copies are made of the data buffers * and the socket buffer is set to use these instead. * * If @tailbits is given, make sure that there is space to write @tailbits * bytes of data beyond current end of socket buffer. @trailer will be * set to point to the skb in which this space begins. * * The number of scatterlist elements required to completely map the * COW'd and extended socket buffer will be returned. */ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) { int copyflag; int elt; struct sk_buff *skb1, **skb_p; /* If skb is cloned or its head is paged, reallocate * head pulling out all the pages (pages are considered not writable * at the moment even if they are anonymous). */ if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && !__pskb_pull_tail(skb, __skb_pagelen(skb))) return -ENOMEM; /* Easy case. Most packets will go this way. */ if (!skb_has_frag_list(skb)) { /* A little trouble: not enough space for the trailer. * This should not happen when the stack is tuned to generate * good frames. OK, on a miss we reallocate and reserve even more * space; 128 bytes is fair. */ if (skb_tailroom(skb) < tailbits && pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) return -ENOMEM; /* Voila! */ *trailer = skb; return 1; } /* Misery. We are in trouble; time to mince the fragments... */ elt = 1; skb_p = &skb_shinfo(skb)->frag_list; copyflag = 0; while ((skb1 = *skb_p) != NULL) { int ntail = 0; /* The fragment is partially pulled by someone; * this can happen on input. Copy it and everything * after it. */ if (skb_shared(skb1)) copyflag = 1; /* If the skb is the last, worry about trailer. */ if (skb1->next == NULL && tailbits) { if (skb_shinfo(skb1)->nr_frags || skb_has_frag_list(skb1) || skb_tailroom(skb1) < tailbits) ntail = tailbits + 128; } if (copyflag || skb_cloned(skb1) || ntail || skb_shinfo(skb1)->nr_frags || skb_has_frag_list(skb1)) { struct sk_buff *skb2; /* Out of luck: a private copy is unavoidable... */ if (ntail == 0) skb2 = skb_copy(skb1, GFP_ATOMIC); else skb2 = skb_copy_expand(skb1, skb_headroom(skb1), ntail, GFP_ATOMIC); if (unlikely(skb2 == NULL)) return -ENOMEM; if (skb1->sk) skb_set_owner_w(skb2, skb1->sk); /* Still alive: link the new skb and drop the old one. */ skb2->next = skb1->next; *skb_p = skb2; kfree_skb(skb1); skb1 = skb2; } elt++; *trailer = skb1; skb_p = &skb1->next; } return elt; } EXPORT_SYMBOL_GPL(skb_cow_data); static void sock_rmem_free(struct sk_buff *skb) { struct sock *sk = skb->sk; atomic_sub(skb->truesize, &sk->sk_rmem_alloc); } static void skb_set_err_queue(struct sk_buff *skb) { /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. * So, it is safe to (mis)use it to mark skbs on the error queue.
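 * Receive-side code can therefore tell error-queue skbs apart with a
 * test such as (sketch):
 *
 *	if (skb->pkt_type == PACKET_OUTGOING)
 *		... skb was queued via sock_queue_err_skb() ...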
*/ skb->pkt_type = PACKET_OUTGOING; BUILD_BUG_ON(PACKET_OUTGOING == 0); } /* * Note: We don't memory-charge error packets (no sk_forward_alloc changes) */ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) { if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned int)READ_ONCE(sk->sk_rcvbuf)) return -ENOMEM; skb_orphan(skb); skb->sk = sk; skb->destructor = sock_rmem_free; atomic_add(skb->truesize, &sk->sk_rmem_alloc); skb_set_err_queue(skb); /* before exiting rcu section, make sure dst is refcounted */ skb_dst_force(skb); skb_queue_tail(&sk->sk_error_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk_error_report(sk); return 0; } EXPORT_SYMBOL(sock_queue_err_skb); static bool is_icmp_err_skb(const struct sk_buff *skb) { return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); } struct sk_buff *sock_dequeue_err_skb(struct sock *sk) { struct sk_buff_head *q = &sk->sk_error_queue; struct sk_buff *skb, *skb_next = NULL; bool icmp_next = false; unsigned long flags; if (skb_queue_empty_lockless(q)) return NULL; spin_lock_irqsave(&q->lock, flags); skb = __skb_dequeue(q); if (skb && (skb_next = skb_peek(q))) { icmp_next = is_icmp_err_skb(skb_next); if (icmp_next) sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; } spin_unlock_irqrestore(&q->lock, flags); if (is_icmp_err_skb(skb) && !icmp_next) sk->sk_err = 0; if (skb_next) sk_error_report(sk); return skb; } EXPORT_SYMBOL(sock_dequeue_err_skb); /** * skb_clone_sk - create clone of skb, and take reference to socket * @skb: the skb to clone * * This function creates a clone of a buffer that holds a reference on * sk_refcnt. Buffers created via this function are meant to be * returned using sock_queue_err_skb, or freed via kfree_skb. * * When passing buffers allocated with this function to sock_queue_err_skb, * it is necessary to wrap the call with sock_hold/sock_put in order to * prevent the socket from being released prior to being enqueued on * the sk_error_queue. */ struct sk_buff *skb_clone_sk(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sk_buff *clone; if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) return NULL; clone = skb_clone(skb, GFP_ATOMIC); if (!clone) { sock_put(sk); return NULL; } clone->sk = sk; clone->destructor = sock_efree; return clone; } EXPORT_SYMBOL(skb_clone_sk); static void __skb_complete_tx_timestamp(struct sk_buff *skb, struct sock *sk, int tstype, bool opt_stats) { struct sock_exterr_skb *serr; int err; BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; serr->ee.ee_info = tstype; serr->opt_stats = opt_stats; serr->header.h4.iif = skb->dev ?
skb->dev->ifindex : 0; if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { serr->ee.ee_data = skb_shinfo(skb)->tskey; if (sk_is_tcp(sk)) serr->ee.ee_data -= atomic_read(&sk->sk_tskey); } err = sock_queue_err_skb(sk, skb); if (err) kfree_skb(skb); } static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) { bool ret; if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data))) return true; read_lock_bh(&sk->sk_callback_lock); ret = sk->sk_socket && sk->sk_socket->file && file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); read_unlock_bh(&sk->sk_callback_lock); return ret; } void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps) { struct sock *sk = skb->sk; if (!skb_may_tx_timestamp(sk, false)) goto err; /* Take a reference to prevent skb_orphan() from freeing the socket, * but only if the socket refcount is not zero. */ if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { *skb_hwtstamps(skb) = *hwtstamps; __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); sock_put(sk); return; } err: kfree_skb(skb); } EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps, int tstype) { switch (tstype) { case SCM_TSTAMP_SCHED: return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; case SCM_TSTAMP_SND: return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF : SKBTX_SW_TSTAMP); case SCM_TSTAMP_ACK: return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; case SCM_TSTAMP_COMPLETION: return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; } return false; } static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps, struct sock *sk, int tstype) { int op; switch (tstype) { case SCM_TSTAMP_SCHED: op = BPF_SOCK_OPS_TSTAMP_SCHED_CB; break; case SCM_TSTAMP_SND: if (hwtstamps) { op = BPF_SOCK_OPS_TSTAMP_SND_HW_CB; *skb_hwtstamps(skb) = *hwtstamps; } else { op = BPF_SOCK_OPS_TSTAMP_SND_SW_CB; } break; case SCM_TSTAMP_ACK: op = BPF_SOCK_OPS_TSTAMP_ACK_CB; break; default: return; } bpf_skops_tx_timestamping(sk, skb, op); } void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, struct skb_shared_hwtstamps *hwtstamps, struct sock *sk, int tstype) { struct sk_buff *skb; bool tsonly, opt_stats = false; u32 tsflags; if (!sk) return; if (skb_shinfo(orig_skb)->tx_flags & SKBTX_BPF) skb_tstamp_tx_report_bpf_timestamping(orig_skb, hwtstamps, sk, tstype); if (!skb_tstamp_tx_report_so_timestamping(orig_skb, hwtstamps, tstype)) return; tsflags = READ_ONCE(sk->sk_tsflags); if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) return; tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; if (!skb_may_tx_timestamp(sk, tsonly)) return; if (tsonly) { #ifdef CONFIG_INET if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && sk_is_tcp(sk)) { skb = tcp_get_timestamping_opt_stats(sk, orig_skb, ack_skb); opt_stats = true; } else #endif skb = alloc_skb(0, GFP_ATOMIC); } else { skb = skb_clone(orig_skb, GFP_ATOMIC); if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { kfree_skb(skb); return; } } if (!skb) return; if (tsonly) { skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & SKBTX_ANY_TSTAMP; skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; } if (hwtstamps) *skb_hwtstamps(skb) = *hwtstamps; else __net_timestamp(skb); __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); } EXPORT_SYMBOL_GPL(__skb_tstamp_tx); void 
skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps) { return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, SCM_TSTAMP_SND); } EXPORT_SYMBOL_GPL(skb_tstamp_tx); #ifdef CONFIG_WIRELESS void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) { struct sock *sk = skb->sk; struct sock_exterr_skb *serr; int err = 1; skb->wifi_acked_valid = 1; skb->wifi_acked = acked; serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; /* Take a reference to prevent skb_orphan() from freeing the socket, * but only if the socket refcount is not zero. */ if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { err = sock_queue_err_skb(sk, skb); sock_put(sk); } if (err) kfree_skb(skb); } EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); #endif /* CONFIG_WIRELESS */ /** * skb_partial_csum_set - set up and verify partial csum values for packet * @skb: the skb to set * @start: the number of bytes after skb->data to start checksumming. * @off: the offset from start to place the checksum. * * For untrusted partially-checksummed packets, we need to make sure the values * for skb->csum_start and skb->csum_offset are valid so we don't oops. * * This function checks and sets those values and skb->ip_summed: if this * returns false you should drop the packet. */ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) { u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); u32 csum_start = skb_headroom(skb) + (u32)start; if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", start, off, skb_headroom(skb), skb_headlen(skb)); return false; } skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = csum_start; skb->csum_offset = off; skb->transport_header = csum_start; return true; } EXPORT_SYMBOL_GPL(skb_partial_csum_set); static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, unsigned int max) { if (skb_headlen(skb) >= len) return 0; /* If we need to pullup then pullup to the max, so we * won't need to do it again. */ if (max > skb->len) max = skb->len; if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) return -ENOMEM; if (skb_headlen(skb) < len) return -EPROTO; return 0; } #define MAX_TCP_HDR_LEN (15 * 4) static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, typeof(IPPROTO_IP) proto, unsigned int off) { int err; switch (proto) { case IPPROTO_TCP: err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), off + MAX_TCP_HDR_LEN); if (!err && !skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) err = -EPROTO; return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; case IPPROTO_UDP: err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), off + sizeof(struct udphdr)); if (!err && !skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) err = -EPROTO; return err ? ERR_PTR(err) : &udp_hdr(skb)->check; } return ERR_PTR(-EPROTO); } /* This value should be large enough to cover a tagged ethernet header plus * maximally sized IP and TCP or UDP headers. 
*/ #define MAX_IP_HDR_LEN 128 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) { unsigned int off; bool fragment; __sum16 *csum; int err; fragment = false; err = skb_maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN); if (err < 0) goto out; if (ip_is_fragment(ip_hdr(skb))) fragment = true; off = ip_hdrlen(skb); err = -EPROTO; if (fragment) goto out; csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); if (IS_ERR(csum)) return PTR_ERR(csum); if (recalculate) *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - off, ip_hdr(skb)->protocol, 0); err = 0; out: return err; } /* This value should be large enough to cover a tagged ethernet header plus * an IPv6 header, all options, and a maximal TCP or UDP header. */ #define MAX_IPV6_HDR_LEN 256 #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) { int err; u8 nexthdr; unsigned int off; unsigned int len; bool fragment; bool done; __sum16 *csum; fragment = false; done = false; off = sizeof(struct ipv6hdr); err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); if (err < 0) goto out; nexthdr = ipv6_hdr(skb)->nexthdr; len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); while (off <= len && !done) { switch (nexthdr) { case IPPROTO_DSTOPTS: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: { struct ipv6_opt_hdr *hp; err = skb_maybe_pull_tail(skb, off + sizeof(struct ipv6_opt_hdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); nexthdr = hp->nexthdr; off += ipv6_optlen(hp); break; } case IPPROTO_AH: { struct ip_auth_hdr *hp; err = skb_maybe_pull_tail(skb, off + sizeof(struct ip_auth_hdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; hp = OPT_HDR(struct ip_auth_hdr, skb, off); nexthdr = hp->nexthdr; off += ipv6_authlen(hp); break; } case IPPROTO_FRAGMENT: { struct frag_hdr *hp; err = skb_maybe_pull_tail(skb, off + sizeof(struct frag_hdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; hp = OPT_HDR(struct frag_hdr, skb, off); if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) fragment = true; nexthdr = hp->nexthdr; off += sizeof(struct frag_hdr); break; } default: done = true; break; } } err = -EPROTO; if (!done || fragment) goto out; csum = skb_checksum_setup_ip(skb, nexthdr, off); if (IS_ERR(csum)) return PTR_ERR(csum); if (recalculate) *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - off, nexthdr, 0); err = 0; out: return err; } /** * skb_checksum_setup - set up partial checksum offset * @skb: the skb to set up * @recalculate: if true the pseudo-header checksum will be recalculated */ int skb_checksum_setup(struct sk_buff *skb, bool recalculate) { int err; switch (skb->protocol) { case htons(ETH_P_IP): err = skb_checksum_setup_ipv4(skb, recalculate); break; case htons(ETH_P_IPV6): err = skb_checksum_setup_ipv6(skb, recalculate); break; default: err = -EPROTO; break; } return err; } EXPORT_SYMBOL(skb_checksum_setup); /** * skb_checksum_maybe_trim - maybe trims the given skb * @skb: the skb to check * @transport_len: the data length beyond the network header * * Checks whether the given skb has data beyond the given transport length. * If so, returns a cloned skb trimmed to this transport length. * Otherwise returns the provided skb. Returns NULL in error cases * (e.g. transport_len exceeds skb length or out-of-memory). 
* * Caller needs to set the skb transport header and free any returned skb if it * differs from the provided skb. */ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, unsigned int transport_len) { struct sk_buff *skb_chk; unsigned int len = skb_transport_offset(skb) + transport_len; int ret; if (skb->len < len) return NULL; else if (skb->len == len) return skb; skb_chk = skb_clone(skb, GFP_ATOMIC); if (!skb_chk) return NULL; ret = pskb_trim_rcsum(skb_chk, len); if (ret) { kfree_skb(skb_chk); return NULL; } return skb_chk; } /** * skb_checksum_trimmed - validate checksum of an skb * @skb: the skb to check * @transport_len: the data length beyond the network header * @skb_chkf: checksum function to use * * Applies the given checksum function skb_chkf to the provided skb. * Returns a checked and maybe trimmed skb. Returns NULL on error. * * If the skb has data beyond the given transport length, then a * trimmed & cloned skb is checked and returned. * * Caller needs to set the skb transport header and free any returned skb if it * differs from the provided skb. */ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, unsigned int transport_len, __sum16(*skb_chkf)(struct sk_buff *skb)) { struct sk_buff *skb_chk; unsigned int offset = skb_transport_offset(skb); __sum16 ret; skb_chk = skb_checksum_maybe_trim(skb, transport_len); if (!skb_chk) goto err; if (!pskb_may_pull(skb_chk, offset)) goto err; skb_pull_rcsum(skb_chk, offset); ret = skb_chkf(skb_chk); skb_push_rcsum(skb_chk, offset); if (ret) goto err; return skb_chk; err: if (skb_chk && skb_chk != skb) kfree_skb(skb_chk); return NULL; } EXPORT_SYMBOL(skb_checksum_trimmed); void __skb_warn_lro_forwarding(const struct sk_buff *skb) { net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", skb->dev->name); } EXPORT_SYMBOL(__skb_warn_lro_forwarding); void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) { if (head_stolen) { skb_release_head_state(skb); kmem_cache_free(net_hotdata.skbuff_cache, skb); } else { __kfree_skb(skb); } } EXPORT_SYMBOL(kfree_skb_partial); /** * skb_try_coalesce - try to merge skb to prior one * @to: prior buffer * @from: buffer to add * @fragstolen: pointer to boolean * @delta_truesize: how much more was allocated than was requested */ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize) { struct skb_shared_info *to_shinfo, *from_shinfo; int i, delta, len = from->len; *fragstolen = false; if (skb_cloned(to)) return false; /* In general, avoid mixing page_pool and non-page_pool allocated * pages within the same SKB. In theory we could take full * references if @from is cloned and !@to->pp_recycle, but it's * tricky (due to a potential race with the clone disappearing) and * rare, so not worth dealing with.
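 * The practical consequence is simply that mixed buffers fail the
 * coalesce attempt below and remain separate skbs.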
*/ if (to->pp_recycle != from->pp_recycle) return false; if (skb_frags_readable(from) != skb_frags_readable(to)) return false; if (len <= skb_tailroom(to) && skb_frags_readable(from)) { if (len) BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } to_shinfo = skb_shinfo(to); from_shinfo = skb_shinfo(from); if (to_shinfo->frag_list || from_shinfo->frag_list) return false; if (skb_zcopy(to) || skb_zcopy(from)) return false; if (skb_headlen(from) != 0) { struct page *page; unsigned int offset; if (to_shinfo->nr_frags + from_shinfo->nr_frags >= MAX_SKB_FRAGS) return false; if (skb_head_is_locked(from)) return false; delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); page = virt_to_head_page(from->head); offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, to_shinfo->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; } else { if (to_shinfo->nr_frags + from_shinfo->nr_frags > MAX_SKB_FRAGS) return false; delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); } WARN_ON_ONCE(delta < len); memcpy(to_shinfo->frags + to_shinfo->nr_frags, from_shinfo->frags, from_shinfo->nr_frags * sizeof(skb_frag_t)); to_shinfo->nr_frags += from_shinfo->nr_frags; if (!skb_cloned(from)) from_shinfo->nr_frags = 0; /* if the skb is not cloned this does nothing * since we set nr_frags to 0. */ if (skb_pp_frag_ref(from)) { for (i = 0; i < from_shinfo->nr_frags; i++) __skb_frag_ref(&from_shinfo->frags[i]); } to->truesize += delta; to->len += len; to->data_len += len; *delta_truesize = delta; return true; } EXPORT_SYMBOL(skb_try_coalesce); /** * skb_scrub_packet - scrub an skb * * @skb: buffer to clean * @xnet: packet is crossing netns * * skb_scrub_packet can be used after encapsulating or decapsulating a packet * into/from a tunnel. Some information has to be cleared during these * operations. * skb_scrub_packet can also be used to clean a skb before injecting it into * another namespace (@xnet == true). We have to clear all information in the * skb that could impact namespace isolation. */ void skb_scrub_packet(struct sk_buff *skb, bool xnet) { skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; skb->ignore_df = 0; skb_dst_drop(skb); skb_ext_reset(skb); nf_reset_ct(skb); nf_reset_trace(skb); #ifdef CONFIG_NET_SWITCHDEV skb->offload_fwd_mark = 0; skb->offload_l3_fwd_mark = 0; #endif ipvs_reset(skb); if (!xnet) return; skb->mark = 0; skb_clear_tstamp(skb); } EXPORT_SYMBOL_GPL(skb_scrub_packet); static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) { int mac_len, meta_len; void *meta; if (skb_cow(skb, skb_headroom(skb)) < 0) { kfree_skb(skb); return NULL; } mac_len = skb->data - skb_mac_header(skb); if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), mac_len - VLAN_HLEN - ETH_TLEN); } meta_len = skb_metadata_len(skb); if (meta_len) { meta = skb_metadata_end(skb) - meta_len; memmove(meta + VLAN_HLEN, meta, meta_len); } skb->mac_header += VLAN_HLEN; return skb; } struct sk_buff *skb_vlan_untag(struct sk_buff *skb) { struct vlan_hdr *vhdr; u16 vlan_tci; if (unlikely(skb_vlan_tag_present(skb))) { /* vlan_tci is already set-up so leave this for another time */ return skb; } skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto().
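 * Hence the pull below asks for VLAN_HLEN (4) plus sizeof(unsigned
 * short) (2), i.e. 6 linear bytes.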
*/ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) goto err_free; vhdr = (struct vlan_hdr *)skb->data; vlan_tci = ntohs(vhdr->h_vlan_TCI); __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); skb_pull_rcsum(skb, VLAN_HLEN); vlan_set_encap_proto(skb, vhdr); skb = skb_reorder_vlan_header(skb); if (unlikely(!skb)) goto err_free; skb_reset_network_header(skb); if (!skb_transport_header_was_set(skb)) skb_reset_transport_header(skb); skb_reset_mac_len(skb); return skb; err_free: kfree_skb(skb); return NULL; } EXPORT_SYMBOL(skb_vlan_untag); int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) { if (!pskb_may_pull(skb, write_len)) return -ENOMEM; if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) return 0; return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } EXPORT_SYMBOL(skb_ensure_writable); int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) { int needed_headroom = dev->needed_headroom; int needed_tailroom = dev->needed_tailroom; /* For tail taggers, we need to pad short frames ourselves, to ensure * that the tail tag does not fail at its role of being at the end of * the packet, once the conduit interface pads the frame. Account for * that pad length here, and pad later. */ if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) needed_tailroom += ETH_ZLEN - skb->len; /* skb_headroom() returns unsigned int... */ needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) /* No reallocation needed, yay! */ return 0; return pskb_expand_head(skb, needed_headroom, needed_tailroom, GFP_ATOMIC); } EXPORT_SYMBOL(skb_ensure_writable_head_tail); /* Remove the VLAN header from the packet and update the csum accordingly. * Expects a non-skb_vlan_tag_present skb with a VLAN tag payload. */ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) { int offset = skb->data - skb_mac_header(skb); int err; if (WARN_ONCE(offset, "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", offset)) { return -EINVAL; } err = skb_ensure_writable(skb, VLAN_ETH_HLEN); if (unlikely(err)) return err; skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); vlan_remove_tag(skb, vlan_tci); skb->mac_header += VLAN_HLEN; if (skb_network_offset(skb) < ETH_HLEN) skb_set_network_header(skb, ETH_HLEN); skb_reset_mac_len(skb); return err; } EXPORT_SYMBOL(__skb_vlan_pop); /* Pop a vlan tag either from hwaccel or from payload. * Expects skb->data at mac header. */ int skb_vlan_pop(struct sk_buff *skb) { u16 vlan_tci; __be16 vlan_proto; int err; if (likely(skb_vlan_tag_present(skb))) { __vlan_hwaccel_clear_tag(skb); } else { if (unlikely(!eth_type_vlan(skb->protocol))) return 0; err = __skb_vlan_pop(skb, &vlan_tci); if (err) return err; } /* move next vlan tag to hw accel tag */ if (likely(!eth_type_vlan(skb->protocol))) return 0; vlan_proto = skb->protocol; err = __skb_vlan_pop(skb, &vlan_tci); if (unlikely(err)) return err; __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); return 0; } EXPORT_SYMBOL(skb_vlan_pop); /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). * Expects skb->data at mac header.
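 *
 * Example (a sketch, in the spirit of an act_vlan-style caller): push an
 * 802.1Q tag carrying VLAN ID 10:
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), 10);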
*/ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { if (skb_vlan_tag_present(skb)) { int offset = skb->data - skb_mac_header(skb); int err; if (WARN_ONCE(offset, "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", offset)) { return -EINVAL; } err = __vlan_insert_tag(skb, skb->vlan_proto, skb_vlan_tag_get(skb)); if (err) return err; skb->protocol = skb->vlan_proto; skb->network_header -= VLAN_HLEN; skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); } __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); return 0; } EXPORT_SYMBOL(skb_vlan_push); /** * skb_eth_pop() - Drop the Ethernet header at the head of a packet * * @skb: Socket buffer to modify * * Drop the Ethernet header of @skb. * * Expects that skb->data points to the mac header and that no VLAN tags are * present. * * Returns 0 on success, -errno otherwise. */ int skb_eth_pop(struct sk_buff *skb) { if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || skb_network_offset(skb) < ETH_HLEN) return -EPROTO; skb_pull_rcsum(skb, ETH_HLEN); skb_reset_mac_header(skb); skb_reset_mac_len(skb); return 0; } EXPORT_SYMBOL(skb_eth_pop); /** * skb_eth_push() - Add a new Ethernet header at the head of a packet * * @skb: Socket buffer to modify * @dst: Destination MAC address of the new header * @src: Source MAC address of the new header * * Prepend @skb with a new Ethernet header. * * Expects that skb->data points to the mac header, which must be empty. * * Returns 0 on success, -errno otherwise. */ int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, const unsigned char *src) { struct ethhdr *eth; int err; if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) return -EPROTO; err = skb_cow_head(skb, sizeof(*eth)); if (err < 0) return err; skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); skb_reset_mac_len(skb); eth = eth_hdr(skb); ether_addr_copy(eth->h_dest, dst); ether_addr_copy(eth->h_source, src); eth->h_proto = skb->protocol; skb_postpush_rcsum(skb, eth, sizeof(*eth)); return 0; } EXPORT_SYMBOL(skb_eth_push); /* Update the ethertype of hdr and the skb csum value if required. */ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, __be16 ethertype) { if (skb->ip_summed == CHECKSUM_COMPLETE) { __be16 diff[] = { ~hdr->h_proto, ethertype }; skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); } hdr->h_proto = ethertype; } /** * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of * the packet * * @skb: buffer * @mpls_lse: MPLS label stack entry to push * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) * @mac_len: length of the MAC header * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is * ethernet * * Expects skb->data at mac header. * * Returns 0 on success, -errno otherwise. */ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, int mac_len, bool ethernet) { struct mpls_shim_hdr *lse; int err; if (unlikely(!eth_p_mpls(mpls_proto))) return -EINVAL; /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
*/ if (skb->encapsulation) return -EINVAL; err = skb_cow_head(skb, MPLS_HLEN); if (unlikely(err)) return err; if (!skb->inner_protocol) { skb_set_inner_network_header(skb, skb_network_offset(skb)); skb_set_inner_protocol(skb, skb->protocol); } skb_push(skb, MPLS_HLEN); memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), mac_len); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_reset_mac_len(skb); lse = mpls_hdr(skb); lse->label_stack_entry = mpls_lse; skb_postpush_rcsum(skb, lse, MPLS_HLEN); if (ethernet && mac_len >= ETH_HLEN) skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); skb->protocol = mpls_proto; return 0; } EXPORT_SYMBOL_GPL(skb_mpls_push); /** * skb_mpls_pop() - pop the outermost MPLS header * * @skb: buffer * @next_proto: ethertype of header after popped MPLS header * @mac_len: length of the MAC header * @ethernet: flag to indicate if the packet is ethernet * * Expects skb->data at mac header. * * Returns 0 on success, -errno otherwise. */ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, bool ethernet) { int err; if (unlikely(!eth_p_mpls(skb->protocol))) return 0; err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); if (unlikely(err)) return err; skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), mac_len); __skb_pull(skb, MPLS_HLEN); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); if (ethernet && mac_len >= ETH_HLEN) { struct ethhdr *hdr; /* use mpls_hdr() to get ethertype to account for VLANs. */ hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); skb_mod_eth_type(skb, hdr, next_proto); } skb->protocol = next_proto; return 0; } EXPORT_SYMBOL_GPL(skb_mpls_pop); /** * skb_mpls_update_lse() - modify outermost MPLS header and update csum * * @skb: buffer * @mpls_lse: new MPLS label stack entry to update to * * Expects skb->data at mac header. * * Returns 0 on success, -errno otherwise. */ int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) { int err; if (unlikely(!eth_p_mpls(skb->protocol))) return -EINVAL; err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); if (unlikely(err)) return err; if (skb->ip_summed == CHECKSUM_COMPLETE) { __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); } mpls_hdr(skb)->label_stack_entry = mpls_lse; return 0; } EXPORT_SYMBOL_GPL(skb_mpls_update_lse); /** * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header * * @skb: buffer * * Expects skb->data at mac header. * * Returns 0 on success, -errno otherwise. */ int skb_mpls_dec_ttl(struct sk_buff *skb) { u32 lse; u8 ttl; if (unlikely(!eth_p_mpls(skb->protocol))) return -EINVAL; if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) return -ENOMEM; lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; if (!--ttl) return -EINVAL; lse &= ~MPLS_LS_TTL_MASK; lse |= ttl << MPLS_LS_TTL_SHIFT; return skb_mpls_update_lse(skb, cpu_to_be32(lse)); } EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); /** * alloc_skb_with_frags - allocate skb with page frags * * @header_len: size of linear part * @data_len: needed length in frags * @order: max page order desired. * @errcode: pointer to error code if any * @gfp_mask: allocation mask * * This can be used to allocate a paged skb, given a maximal order for frags. 
*/ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, unsigned long data_len, int order, int *errcode, gfp_t gfp_mask) { unsigned long chunk; struct sk_buff *skb; struct page *page; int nr_frags = 0; *errcode = -EMSGSIZE; if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) return NULL; *errcode = -ENOBUFS; skb = alloc_skb(header_len, gfp_mask); if (!skb) return NULL; while (data_len) { if (nr_frags == MAX_SKB_FRAGS - 1) goto failure; while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) order--; if (order) { page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | __GFP_NOWARN, order); if (!page) { order--; continue; } } else { page = alloc_page(gfp_mask); if (!page) goto failure; } chunk = min_t(unsigned long, data_len, PAGE_SIZE << order); skb_fill_page_desc(skb, nr_frags, page, 0, chunk); nr_frags++; skb->truesize += (PAGE_SIZE << order); data_len -= chunk; } return skb; failure: kfree_skb(skb); return NULL; } EXPORT_SYMBOL(alloc_skb_with_frags); /* carve out the first off bytes from skb when off < headlen */ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, const int headlen, gfp_t gfp_mask) { int i; unsigned int size = skb_end_offset(skb); int new_hlen = headlen - off; u8 *data; if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); if (!data) return -ENOMEM; size = SKB_WITH_OVERHEAD(size); /* Copy real data, and all frags */ skb_copy_from_linear_data_offset(skb, off, data, new_hlen); skb->len -= off; memcpy((struct skb_shared_info *)(data + size), skb_shinfo(skb), offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); if (skb_cloned(skb)) { /* drop the old head gracefully */ if (skb_orphan_frags(skb, gfp_mask)) { skb_kfree_head(data, size); return -ENOMEM; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_frag_ref(skb, i); if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); skb_release_data(skb, SKB_CONSUMED); } else { /* we can reuse existing recount- all we did was * relocate values */ skb_free_head(skb); } skb->head = data; skb->data = data; skb->head_frag = 0; skb_set_end_offset(skb, size); skb_set_tail_pointer(skb, skb_headlen(skb)); skb_headers_offset_update(skb, 0); skb->cloned = 0; skb->hdr_len = 0; skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); return 0; } static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); /* carve out the first eat bytes from skb's frag_list. May recurse into * pskb_carve() */ static int pskb_carve_frag_list(struct sk_buff *skb, struct skb_shared_info *shinfo, int eat, gfp_t gfp_mask) { struct sk_buff *list = shinfo->frag_list; struct sk_buff *clone = NULL; struct sk_buff *insp = NULL; do { if (!list) { pr_err("Not enough bytes to eat. Want %d\n", eat); return -EFAULT; } if (list->len <= eat) { /* Eaten as whole. */ eat -= list->len; list = list->next; insp = list; } else { /* Eaten partially. */ if (skb_shared(list)) { clone = skb_clone(list, gfp_mask); if (!clone) return -ENOMEM; insp = list->next; list = clone; } else { /* This may be pulled without problems. */ insp = list; } if (pskb_carve(list, eat, gfp_mask) < 0) { kfree_skb(clone); return -ENOMEM; } break; } } while (eat); /* Free pulled out fragments. */ while ((list = shinfo->frag_list) != insp) { shinfo->frag_list = list->next; consume_skb(list); } /* And insert new clone at head. */ if (clone) { clone->next = list; shinfo->frag_list = clone; } return 0; } /* carve off first len bytes from skb. 
Split line (off) is in the * non-linear part of skb */ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, int pos, gfp_t gfp_mask) { int i, k = 0; unsigned int size = skb_end_offset(skb); u8 *data; const int nfrags = skb_shinfo(skb)->nr_frags; struct skb_shared_info *shinfo; if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); if (!data) return -ENOMEM; size = SKB_WITH_OVERHEAD(size); memcpy((struct skb_shared_info *)(data + size), skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); if (skb_orphan_frags(skb, gfp_mask)) { skb_kfree_head(data, size); return -ENOMEM; } shinfo = (struct skb_shared_info *)(data + size); for (i = 0; i < nfrags; i++) { int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (pos + fsize > off) { shinfo->frags[k] = skb_shinfo(skb)->frags[i]; if (pos < off) { /* Split frag. * We have two variants in this case: * 1. Move all the frag to the second * part, if it is possible. F.e. * this approach is mandatory for TUX, * where splitting is expensive. * 2. Split is accurately. We make this. */ skb_frag_off_add(&shinfo->frags[0], off - pos); skb_frag_size_sub(&shinfo->frags[0], off - pos); } skb_frag_ref(skb, i); k++; } pos += fsize; } shinfo->nr_frags = k; if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); /* split line is in frag list */ if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ if (skb_has_frag_list(skb)) kfree_skb_list(skb_shinfo(skb)->frag_list); skb_kfree_head(data, size); return -ENOMEM; } skb_release_data(skb, SKB_CONSUMED); skb->head = data; skb->head_frag = 0; skb->data = data; skb_set_end_offset(skb, size); skb_reset_tail_pointer(skb); skb_headers_offset_update(skb, 0); skb->cloned = 0; skb->hdr_len = 0; skb->nohdr = 0; skb->len -= off; skb->data_len = skb->len; atomic_set(&skb_shinfo(skb)->dataref, 1); return 0; } /* remove len bytes from the beginning of the skb */ static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) { int headlen = skb_headlen(skb); if (len < headlen) return pskb_carve_inside_header(skb, len, headlen, gfp); else return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); } /* Extract to_copy bytes starting at off from skb, and return this in * a new skb */ struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp) { struct sk_buff *clone = skb_clone(skb, gfp); if (!clone) return NULL; if (pskb_carve(clone, off, gfp) < 0 || pskb_trim(clone, to_copy)) { kfree_skb(clone); return NULL; } return clone; } EXPORT_SYMBOL(pskb_extract); /** * skb_condense - try to get rid of fragments/frag_list if possible * @skb: buffer * * Can be used to save memory before skb is added to a busy queue. * If packet has bytes in frags and enough tail room in skb->head, * pull all of them, so that we can free the frags right now and adjust * truesize. * Notes: * We do not reallocate skb->head thus can not fail. * Caller must re-evaluate skb->truesize if needed. */ void skb_condense(struct sk_buff *skb) { if (skb->data_len) { if (skb->data_len > skb->end - skb->tail || skb_cloned(skb) || !skb_frags_readable(skb)) return; /* Nice, we can free page frag(s) right now */ __pskb_pull_tail(skb, skb->data_len); } /* At this point, skb->truesize might be over estimated, * because skb had a fragment, and fragments do not tell * their truesize. 
 * When we pulled its content into skb->head, fragment
 * was freed, but __pskb_pull_tail() could not possibly
 * adjust skb->truesize, not knowing the frag truesize.
 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
EXPORT_SYMBOL(skb_condense);

#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be attached to a
 * skb via __skb_ext_set().
 * Note: caller must handle the skb_ext as opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
#ifdef CONFIG_MCTP_FLOWS
	if (old_active & (1 << SKB_EXT_MCTP)) {
		struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);

		if (flow->key)
			refcount_inc(&flow->key->refs);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}

/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
*/ void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) { struct skb_ext *new, *old = NULL; unsigned int newlen, newoff; if (skb->active_extensions) { old = skb->extensions; new = skb_ext_maybe_cow(old, skb->active_extensions); if (!new) return NULL; if (__skb_ext_exist(new, id)) goto set_active; newoff = new->chunks; } else { newoff = SKB_EXT_CHUNKSIZEOF(*new); new = __skb_ext_alloc(GFP_ATOMIC); if (!new) return NULL; } newlen = newoff + skb_ext_type_len[id]; new->chunks = newlen; new->offset[id] = newoff; set_active: skb->slow_gro = 1; skb->extensions = new; skb->active_extensions |= 1 << id; return skb_ext_get_ptr(new, id); } EXPORT_SYMBOL(skb_ext_add); #ifdef CONFIG_XFRM static void skb_ext_put_sp(struct sec_path *sp) { unsigned int i; for (i = 0; i < sp->len; i++) xfrm_state_put(sp->xvec[i]); } #endif #ifdef CONFIG_MCTP_FLOWS static void skb_ext_put_mctp(struct mctp_flow *flow) { if (flow->key) mctp_key_unref(flow->key); } #endif void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) { struct skb_ext *ext = skb->extensions; skb->active_extensions &= ~(1 << id); if (skb->active_extensions == 0) { skb->extensions = NULL; __skb_ext_put(ext); #ifdef CONFIG_XFRM } else if (id == SKB_EXT_SEC_PATH && refcount_read(&ext->refcnt) == 1) { struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); skb_ext_put_sp(sp); sp->len = 0; #endif } } EXPORT_SYMBOL(__skb_ext_del); void __skb_ext_put(struct skb_ext *ext) { /* If this is last clone, nothing can increment * it after check passes. Avoids one atomic op. */ if (refcount_read(&ext->refcnt) == 1) goto free_now; if (!refcount_dec_and_test(&ext->refcnt)) return; free_now: #ifdef CONFIG_XFRM if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); #endif #ifdef CONFIG_MCTP_FLOWS if (__skb_ext_exist(ext, SKB_EXT_MCTP)) skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); #endif kmem_cache_free(skbuff_ext_cache, ext); } EXPORT_SYMBOL(__skb_ext_put); #endif /* CONFIG_SKB_EXTENSIONS */ static void kfree_skb_napi_cache(struct sk_buff *skb) { /* if SKB is a clone, don't handle this case */ if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { __kfree_skb(skb); return; } local_bh_disable(); __napi_kfree_skb(skb, SKB_CONSUMED); local_bh_enable(); } /** * skb_attempt_defer_free - queue skb for remote freeing * @skb: buffer * * Put @skb in a per-cpu list, using the cpu which * allocated the skb/pages to reduce false sharing * and memory zone spinlock contention. */ void skb_attempt_defer_free(struct sk_buff *skb) { int cpu = skb->alloc_cpu; struct softnet_data *sd; unsigned int defer_max; bool kick; if (cpu == raw_smp_processor_id() || WARN_ON_ONCE(cpu >= nr_cpu_ids) || !cpu_online(cpu)) { nodefer: kfree_skb_napi_cache(skb); return; } DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); DEBUG_NET_WARN_ON_ONCE(skb->destructor); sd = &per_cpu(softnet_data, cpu); defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max); if (READ_ONCE(sd->defer_count) >= defer_max) goto nodefer; spin_lock_bh(&sd->defer_lock); /* Send an IPI every time queue reaches half capacity. */ kick = sd->defer_count == (defer_max >> 1); /* Paired with the READ_ONCE() few lines above */ WRITE_ONCE(sd->defer_count, sd->defer_count + 1); skb->next = sd->defer_list; /* Paired with READ_ONCE() in skb_defer_free_flush() */ WRITE_ONCE(sd->defer_list, skb); spin_unlock_bh(&sd->defer_lock); /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU * if we are unlucky enough (this seems very unlikely). 
*/ if (unlikely(kick)) kick_defer_list_purge(sd, cpu); } static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, size_t offset, size_t len) { const char *kaddr; __wsum csum; kaddr = kmap_local_page(page); csum = csum_partial(kaddr + offset, len, 0); kunmap_local(kaddr); skb->csum = csum_block_add(skb->csum, csum, skb->len); } /** * skb_splice_from_iter - Splice (or copy) pages to skbuff * @skb: The buffer to add pages to * @iter: Iterator representing the pages to be added * @maxsize: Maximum amount of pages to be added * @gfp: Allocation flags * * This is a common helper function for supporting MSG_SPLICE_PAGES. It * extracts pages from an iterator and adds them to the socket buffer if * possible, copying them to fragments if not possible (such as if they're slab * pages). * * Returns the amount of data spliced/copied or -EMSGSIZE if there's * insufficient space in the buffer to transfer anything. */ ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, ssize_t maxsize, gfp_t gfp) { size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags); struct page *pages[8], **ppages = pages; ssize_t spliced = 0, ret = 0; unsigned int i; while (iter->count > 0) { ssize_t space, nr, len; size_t off; ret = -EMSGSIZE; space = frag_limit - skb_shinfo(skb)->nr_frags; if (space < 0) break; /* We might be able to coalesce without increasing nr_frags */ nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); if (len <= 0) { ret = len ?: -EIO; break; } i = 0; do { struct page *page = pages[i++]; size_t part = min_t(size_t, PAGE_SIZE - off, len); ret = -EIO; if (WARN_ON_ONCE(!sendpage_ok(page))) goto out; ret = skb_append_pagefrags(skb, page, off, part, frag_limit); if (ret < 0) { iov_iter_revert(iter, len); goto out; } if (skb->ip_summed == CHECKSUM_NONE) skb_splice_csum_page(skb, page, off, part); off = 0; spliced += part; maxsize -= part; len -= part; } while (len > 0); if (maxsize <= 0) break; } out: skb_len_add(skb, spliced); return spliced ?: ret; } EXPORT_SYMBOL(skb_splice_from_iter); static __always_inline size_t memcpy_from_iter_csum(void *iter_from, size_t progress, size_t len, void *to, void *priv2) { __wsum *csum = priv2; __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len); *csum = csum_block_add(*csum, next, progress); return 0; } static __always_inline size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress, size_t len, void *to, void *priv2) { __wsum next, *csum = priv2; next = csum_and_copy_from_user(iter_from, to + progress, len); *csum = csum_block_add(*csum, next, progress); return next ? 
0 : len; } bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i) { size_t copied; if (WARN_ON_ONCE(!i->data_source)) return false; copied = iterate_and_advance2(i, bytes, addr, csum, copy_from_user_iter_csum, memcpy_from_iter_csum); if (likely(copied == bytes)) return true; iov_iter_revert(i, copied); return false; } EXPORT_SYMBOL(csum_and_copy_from_iter_full); void get_netmem(netmem_ref netmem) { struct net_iov *niov; if (netmem_is_net_iov(netmem)) { niov = netmem_to_net_iov(netmem); if (net_is_devmem_iov(niov)) net_devmem_get_net_iov(netmem_to_net_iov(netmem)); return; } get_page(netmem_to_page(netmem)); } EXPORT_SYMBOL(get_netmem); void put_netmem(netmem_ref netmem) { struct net_iov *niov; if (netmem_is_net_iov(netmem)) { niov = netmem_to_net_iov(netmem); if (net_is_devmem_iov(niov)) net_devmem_put_net_iov(netmem_to_net_iov(netmem)); return; } put_page(netmem_to_page(netmem)); } EXPORT_SYMBOL(put_netmem);
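/*
 * Example (editor's sketch, not part of the original file): how a caller
 * might combine the MPLS helpers defined above. The values used here
 * (label 100, TTL 64) and the function name "example_mpls_encap" are
 * illustrative only; the sketch shows the expected calling convention
 * (skb->data at the mac header) and error handling, not a kernel API.
 */
static int example_mpls_encap(struct sk_buff *skb)
{
	/* Label-stack entry: label 100, bottom-of-stack set, TTL 64. */
	__be32 lse = cpu_to_be32((100u << MPLS_LS_LABEL_SHIFT) |
				 (1u << MPLS_LS_S_SHIFT) |
				 (64u << MPLS_LS_TTL_SHIFT));
	int err;

	err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC), skb->mac_len, true);
	if (err)
		return err;

	/* On a forwarding hop the TTL is decremented; skb_mpls_dec_ttl()
	 * returns -EINVAL once the TTL reaches zero, and the caller is
	 * then expected to drop the packet.
	 */
	return skb_mpls_dec_ttl(skb);
}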
// SPDX-License-Identifier: GPL-2.0
/*
 * driver.c - centralized device driver management
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007 Novell Inc.
 */

#include <linux/device/driver.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include "base.h"

static struct device *next_device(struct klist_iter *i)
{
	struct klist_node *n = klist_next(i);
	struct device *dev = NULL;
	struct device_private *dev_prv;

	if (n) {
		dev_prv = to_device_private_driver(n);
		dev = dev_prv->device;
	}
	return dev;
}

/**
 * driver_set_override() - Helper to set or clear driver override.
 * @dev: Device to change
 * @override: Address of string to change (e.g. &device->driver_override);
 *            The contents will be freed and hold newly allocated override.
 * @s: NUL-terminated string, new driver name to force a match, pass empty
 *     string to clear it ("" or "\n", where the latter is only for sysfs
 *     interface).
 * @len: length of @s
 *
 * Helper to set or clear driver override in a device, intended for the cases
 * when the driver_override field is allocated by driver/bus code.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int driver_set_override(struct device *dev, const char **override,
			const char *s, size_t len)
{
	const char *new, *old;
	char *cp;

	if (!override || !s)
		return -EINVAL;

	/*
	 * The stored value will be used in sysfs show callback (sysfs_emit()),
	 * which has a length limit of PAGE_SIZE and adds a trailing newline.
	 * Thus we can store one character less to avoid truncation during sysfs
	 * show.
	 */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Compute the real length of the string in case userspace sends us a
	 * bunch of \0 characters like python likes to do.
	 */
	len = strlen(s);
	if (!len) {
		/* Empty string passed - clear override */
		device_lock(dev);
		old = *override;
		*override = NULL;
		device_unlock(dev);
		kfree(old);

		return 0;
	}

	cp = strnchr(s, len, '\n');
	if (cp)
		len = cp - s;

	new = kstrndup(s, len, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	device_lock(dev);
	old = *override;
	if (cp != s) {
		*override = new;
	} else {
		/* "\n" passed - clear override */
		kfree(new);
		*override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return 0;
}
EXPORT_SYMBOL_GPL(driver_set_override);

/**
 * driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating. * @start: Device to begin with * @data: Data to pass to the callback. * @fn: Function to call for each device. * * Iterate over the @drv's list of devices calling @fn for each one. */ int driver_for_each_device(struct device_driver *drv, struct device *start, void *data, device_iter_t fn) { struct klist_iter i; struct device *dev; int error = 0; if (!drv) return -EINVAL; klist_iter_init_node(&drv->p->klist_devices, &i, start ? &start->p->knode_driver : NULL); while (!error && (dev = next_device(&i))) error = fn(dev, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(driver_for_each_device); /** * driver_find_device - device iterator for locating a particular device. * @drv: The device's driver * @start: Device to begin with * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the driver_for_each_device() function above, but * it returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. If the callback returns non-zero, this function will * return to the caller and not iterate over any more devices. */ struct device *driver_find_device(const struct device_driver *drv, struct device *start, const void *data, device_match_t match) { struct klist_iter i; struct device *dev; if (!drv || !drv->p) return NULL; klist_iter_init_node(&drv->p->klist_devices, &i, (start ? &start->p->knode_driver : NULL)); while ((dev = next_device(&i))) { if (match(dev, data)) { get_device(dev); break; } } klist_iter_exit(&i); return dev; } EXPORT_SYMBOL_GPL(driver_find_device); /** * driver_create_file - create sysfs file for driver. * @drv: driver. * @attr: driver attribute descriptor. */ int driver_create_file(const struct device_driver *drv, const struct driver_attribute *attr) { int error; if (drv) error = sysfs_create_file(&drv->p->kobj, &attr->attr); else error = -EINVAL; return error; } EXPORT_SYMBOL_GPL(driver_create_file); /** * driver_remove_file - remove sysfs file for driver. * @drv: driver. * @attr: driver attribute descriptor. */ void driver_remove_file(const struct device_driver *drv, const struct driver_attribute *attr) { if (drv) sysfs_remove_file(&drv->p->kobj, &attr->attr); } EXPORT_SYMBOL_GPL(driver_remove_file); int driver_add_groups(const struct device_driver *drv, const struct attribute_group **groups) { return sysfs_create_groups(&drv->p->kobj, groups); } void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups) { sysfs_remove_groups(&drv->p->kobj, groups); } /** * driver_register - register driver with bus * @drv: driver to register * * We pass off most of the work to the bus_add_driver() call, * since most of the things we have to do deal with the bus * structures. 
*/ int driver_register(struct device_driver *drv) { int ret; struct device_driver *other; if (!bus_is_registered(drv->bus)) { pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n", drv->name, drv->bus->name); return -EINVAL; } if ((drv->bus->probe && drv->probe) || (drv->bus->remove && drv->remove) || (drv->bus->shutdown && drv->shutdown)) pr_warn("Driver '%s' needs updating - please use " "bus_type methods\n", drv->name); other = driver_find(drv->name, drv->bus); if (other) { pr_err("Error: Driver '%s' is already registered, " "aborting...\n", drv->name); return -EBUSY; } ret = bus_add_driver(drv); if (ret) return ret; ret = driver_add_groups(drv, drv->groups); if (ret) { bus_remove_driver(drv); return ret; } kobject_uevent(&drv->p->kobj, KOBJ_ADD); deferred_probe_extend_timeout(); return ret; } EXPORT_SYMBOL_GPL(driver_register); /** * driver_unregister - remove driver from system. * @drv: driver. * * Again, we pass off most of the work to the bus-level call. */ void driver_unregister(struct device_driver *drv) { if (!drv || !drv->p) { WARN(1, "Unexpected driver unregister!\n"); return; } driver_remove_groups(drv, drv->groups); bus_remove_driver(drv); } EXPORT_SYMBOL_GPL(driver_unregister);
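/*
 * Example (editor's sketch, not part of the original file): a minimal use
 * of driver_for_each_device() above. "example_count_one" and
 * "example_count_bound" are hypothetical names; the callback follows the
 * device_iter_t signature the iterator expects, and returning non-zero
 * from it would stop the walk early.
 */
static int example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* keep iterating */
}

static int example_count_bound(struct device_driver *drv)
{
	int count = 0;
	int err;

	err = driver_for_each_device(drv, NULL, &count, example_count_one);
	return err ? err : count;
}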
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Network Service Header
 *
 * Copyright (c) 2017 Red Hat, Inc. -- Jiri Benc <jbenc@redhat.com>
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gso.h>
#include <net/nsh.h>
#include <net/tun_proto.h>

int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh)
{
	struct nshhdr *nh;
	size_t length = nsh_hdr_len(pushed_nh);
	u8 next_proto;

	if (skb->mac_len) {
		next_proto = TUN_P_ETHERNET;
	} else {
		next_proto = tun_p_from_eth_p(skb->protocol);
		if (!next_proto)
			return -EAFNOSUPPORT;
	}

	/* Add the NSH header */
	if (skb_cow_head(skb, length) < 0)
		return -ENOMEM;

	skb_push(skb, length);
	nh = (struct nshhdr *)(skb->data);
	memcpy(nh, pushed_nh, length);
	nh->np = next_proto;
	skb_postpush_rcsum(skb, nh, length);

	skb->protocol = htons(ETH_P_NSH);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(nsh_push);

int nsh_pop(struct sk_buff *skb)
{
	struct nshhdr *nh;
	size_t length;
	__be16 inner_proto;

	if (!pskb_may_pull(skb, NSH_BASE_HDR_LEN))
		return -ENOMEM;
	nh = (struct nshhdr *)(skb->data);
	length = nsh_hdr_len(nh);
	if (length < NSH_BASE_HDR_LEN)
		return -EINVAL;
	inner_proto = tun_p_to_eth_p(nh->np);
	if (!pskb_may_pull(skb, length))
		return -ENOMEM;

	if (!inner_proto)
		return -EAFNOSUPPORT;

	skb_pull_rcsum(skb, length);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	skb->protocol = inner_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(nsh_pop);

static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	unsigned int outer_hlen, mac_len, nsh_len;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 outer_proto, proto;

	skb_reset_network_header(skb);

	outer_proto = skb->protocol;
	outer_hlen = skb_mac_header_len(skb);
	mac_len = skb->mac_len;

	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
		goto out;
	nsh_len = nsh_hdr_len(nsh_hdr(skb));
	if (nsh_len < NSH_BASE_HDR_LEN)
		goto out;
	if (unlikely(!pskb_may_pull(skb, nsh_len)))
		goto out;

	proto = tun_p_to_eth_p(nsh_hdr(skb)->np);
	if (!proto)
		goto out;

	__skb_pull(skb, nsh_len);

	skb_reset_mac_header(skb);
	skb->mac_len = proto == htons(ETH_P_TEB) ?
ETH_HLEN : 0; skb->protocol = proto; features &= NETIF_F_SG; segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len, mac_offset, mac_len); goto out; } for (skb = segs; skb; skb = skb->next) { skb->protocol = outer_proto; __skb_push(skb, nsh_len + outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, outer_hlen); skb->mac_len = mac_len; } out: return segs; } static struct packet_offload nsh_packet_offload __read_mostly = { .type = htons(ETH_P_NSH), .priority = 15, .callbacks = { .gso_segment = nsh_gso_segment, }, }; static int __init nsh_init_module(void) { dev_add_offload(&nsh_packet_offload); return 0; } static void __exit nsh_cleanup_module(void) { dev_remove_offload(&nsh_packet_offload); } module_init(nsh_init_module); module_exit(nsh_cleanup_module); MODULE_AUTHOR("Jiri Benc <jbenc@redhat.com>"); MODULE_DESCRIPTION("NSH protocol"); MODULE_LICENSE("GPL v2");
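/*
 * Example (editor's sketch, not part of this module): decapsulating an
 * NSH packet with nsh_pop() above. "example_nsh_decap" is a hypothetical
 * helper; it only illustrates that on success skb->protocol carries the
 * inner ethertype and the mac/network headers have been reset, so the
 * caller can hand the inner packet back to the stack.
 */
static int example_nsh_decap(struct sk_buff *skb)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;	/* -ENOMEM, -EINVAL or -EAFNOSUPPORT */

	/* skb->protocol is now the inner protocol, e.g. ETH_P_TEB for an
	 * inner Ethernet frame; further processing is up to the caller.
	 */
	return 0;
}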
// SPDX-License-Identifier: GPL-2.0
#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"

void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool private);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);

enum kvm_tdp_mmu_root_types {
	KVM_INVALID_ROOTS = BIT(0),
	KVM_DIRECT_ROOTS = BIT(1),
	KVM_MIRROR_ROOTS = BIT(2),
	KVM_VALID_ROOTS = KVM_DIRECT_ROOTS | KVM_MIRROR_ROOTS,
	KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
};

static inline enum kvm_tdp_mmu_root_types
kvm_gfn_range_filter_to_root_types(struct kvm *kvm,
				   enum kvm_gfn_range_filter process)
{
	enum kvm_tdp_mmu_root_types ret = 0;

	if (!kvm_has_mirrored_tdp(kvm))
		return KVM_DIRECT_ROOTS;

	if (process & KVM_FILTER_PRIVATE)
		ret |= KVM_MIRROR_ROOTS;
	if (process & KVM_FILTER_SHARED)
		ret |= KVM_DIRECT_ROOTS;

	WARN_ON_ONCE(!ret);

	return ret;
}

static inline struct kvm_mmu_page *tdp_mmu_get_root_for_fault(struct kvm_vcpu *vcpu,
							      struct kvm_page_fault *fault)
{
	if (unlikely(!kvm_is_addr_direct(vcpu->kvm, fault->addr)))
		return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);

	return root_to_sp(vcpu->arch.mmu->root.hpa);
}

static inline struct kvm_mmu_page *tdp_mmu_get_root(struct kvm_vcpu *vcpu,
						    enum kvm_tdp_mmu_root_types type)
{
	if (unlikely(type == KVM_MIRROR_ROOTS))
		return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);

	return root_to_sp(vcpu->arch.mmu->root.hpa);
}

bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
				  enum kvm_tdp_mmu_root_types root_types);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_recover_huge_pages(struct kvm *kvm,
				    const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int
kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level); u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *spte); #ifdef CONFIG_X86_64 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } #else static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } #endif #endif /* __KVM_X86_MMU_TDP_MMU_H */
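/*
 * Example (editor's sketch, not part of this header): the lockless-walk
 * helpers above bracket a page-table walk with RCU. A sketch of how
 * kvm_tdp_mmu_get_walk() might be called; "example_walk_addr" is
 * hypothetical, and PT64_ROOT_MAX_LEVEL is assumed to come from the KVM
 * MMU headers.
 */
static inline int example_walk_addr(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf;

	kvm_tdp_mmu_walk_lockless_begin();
	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();

	/* leaf indexes the deepest SPTE recorded in sptes[], or is
	 * negative if nothing was walked for this address.
	 */
	return leaf;
}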
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * procfs namespace bits
 */
#ifndef _LINUX_PROC_NS_H
#define _LINUX_PROC_NS_H

#include <linux/ns_common.h>

struct pid_namespace;
struct nsset;
struct path;
struct task_struct;
struct inode;

struct proc_ns_operations {
	const char *name;
	const char *real_ns_name;
	int type;
	struct ns_common *(*get)(struct task_struct *task);
	void (*put)(struct ns_common *ns);
	int (*install)(struct nsset *nsset, struct ns_common *ns);
	struct user_namespace *(*owner)(struct ns_common *ns);
	struct ns_common *(*get_parent)(struct ns_common *ns);
} __randomize_layout;

extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
extern const struct proc_ns_operations pidns_operations;
extern const struct proc_ns_operations pidns_for_children_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
extern const struct proc_ns_operations cgroupns_operations;
extern const struct proc_ns_operations timens_operations;
extern const struct proc_ns_operations timens_for_children_operations;

/*
 * We always define these enumerators
 */
enum {
	PROC_ROOT_INO		= 1,
	PROC_IPC_INIT_INO	= 0xEFFFFFFFU,
	PROC_UTS_INIT_INO	= 0xEFFFFFFEU,
	PROC_USER_INIT_INO	= 0xEFFFFFFDU,
	PROC_PID_INIT_INO	= 0xEFFFFFFCU,
	PROC_CGROUP_INIT_INO	= 0xEFFFFFFBU,
	PROC_TIME_INIT_INO	= 0xEFFFFFFAU,
};

#ifdef CONFIG_PROC_FS
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
#else /* CONFIG_PROC_FS */
static inline int proc_alloc_inum(unsigned int *inum)
{
	*inum = 1;
	return 0;
}
static inline void proc_free_inum(unsigned int inum) {}
#endif /* CONFIG_PROC_FS */

static inline int ns_alloc_inum(struct ns_common *ns)
{
	WRITE_ONCE(ns->stashed, NULL);
	return proc_alloc_inum(&ns->inum);
}

#define ns_free_inum(ns) proc_free_inum((ns)->inum)

#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
extern int ns_get_path(struct path *path, struct task_struct *task,
		       const struct proc_ns_operations *ns_ops);
typedef struct ns_common *ns_get_path_helper_t(void *);
extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
			  void *private_data);

extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);

extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
		       const struct proc_ns_operations *ns_ops);
extern void nsfs_init(void);

#endif /* _LINUX_PROC_NS_H */
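/*
 * Example (editor's sketch, not part of this header): every namespace
 * allocates its procfs inode number through ns_alloc_inum() above at
 * creation time and releases it with ns_free_inum() on teardown.
 * "example_ns_setup" is a hypothetical helper showing that pattern.
 */
static inline int example_ns_setup(struct ns_common *ns)
{
	int err;

	/* Clears ns->stashed and assigns ns->inum (or 1 without procfs). */
	err = ns_alloc_inum(ns);
	if (err)
		return err;

	/* ... namespace-specific initialization would go here ... */
	return 0;
}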
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Generic RTC interface.
 * This version contains the part of the user interface to the Real Time Clock
 * service. It is used with both the legacy mc146818 and also EFI
 * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out
 * from <linux/mc146818rtc.h> to this file for 2.4 kernels.
 *
 * Copyright (C) 1999 Hewlett-Packard Co.
 * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
 */
#ifndef _LINUX_RTC_H_
#define _LINUX_RTC_H_

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/nvmem-provider.h>
#include <uapi/linux/rtc.h>

extern int rtc_month_days(unsigned int month, unsigned int year);
extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year);
extern int rtc_valid_tm(struct rtc_time *tm);
extern time64_t rtc_tm_to_time64(struct rtc_time *tm);
extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
ktime_t rtc_tm_to_ktime(struct rtc_time tm);
struct rtc_time rtc_ktime_to_tm(ktime_t kt);

/*
 * rtc_tm_sub - Return the difference in seconds.
 */
static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
{
	return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
}

#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/timerqueue.h>
#include <linux/workqueue.h>

extern const struct class rtc_class;

/*
 * For these RTC methods the device parameter is the physical device
 * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which
 * was passed to rtc_device_register(). Its driver_data normally holds
 * device state, including the rtc_device pointer for the RTC.
 *
 * Most of these methods are called with rtc_device.ops_lock held,
 * through the rtc_*(struct rtc_device *, ...) calls.
* * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs */ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); int (*read_time)(struct device *, struct rtc_time *); int (*set_time)(struct device *, struct rtc_time *); int (*read_alarm)(struct device *, struct rtc_wkalrm *); int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); int (*alarm_irq_enable)(struct device *, unsigned int enabled); int (*read_offset)(struct device *, long *offset); int (*set_offset)(struct device *, long offset); int (*param_get)(struct device *, struct rtc_param *param); int (*param_set)(struct device *, struct rtc_param *param); }; struct rtc_device; struct rtc_timer { struct timerqueue_node node; ktime_t period; void (*func)(struct rtc_device *rtc); struct rtc_device *rtc; int enabled; }; /* flags */ #define RTC_DEV_BUSY 0 #define RTC_NO_CDEV 1 struct rtc_device { struct device dev; struct module *owner; int id; const struct rtc_class_ops *ops; struct mutex ops_lock; struct cdev char_dev; unsigned long flags; unsigned long irq_data; spinlock_t irq_lock; wait_queue_head_t irq_queue; struct fasync_struct *async_queue; int irq_freq; int max_user_freq; struct timerqueue_head timerqueue; struct rtc_timer aie_timer; struct rtc_timer uie_rtctimer; struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; /* * This offset specifies the update timing of the RTC. * * tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds * * The offset defines how tsched is computed so that the write to * the RTC (t2.tv_sec - 1sec) is correct versus the time required * for the transport of the write and the time which the RTC needs * to increment seconds the first time after the write (t2). * * For direct accessible RTCs tsched ~= t1 because the write time * is negligible. For RTCs behind slow busses the transport time is * significant and has to be taken into account. * * The time between the write (t1) and the first increment after * the write (t2) is RTC specific. For a MC146818 RTC it's 500ms, * for many others it's exactly 1 second. Consult the datasheet. * * The value of this offset is also used to calculate the to be * written value (t2.tv_sec - 1sec) at tsched. * * The default value for this is NSEC_PER_SEC + 10 msec default * transport time. 
The offset can be adjusted by drivers so the * calculation for the to be written value at tsched becomes * correct: * * newval = tsched + set_offset_nsec - NSEC_PER_SEC * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0 */ unsigned long set_offset_nsec; unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)]; time64_t range_min; timeu64_t range_max; timeu64_t alarm_offset_max; time64_t start_secs; time64_t offset_secs; bool set_start_time; #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; struct timer_list uie_timer; /* Those fields are protected by rtc->irq_lock */ unsigned int oldsecs; unsigned int uie_irq_active:1; unsigned int stop_uie_polling:1; unsigned int uie_task_active:1; unsigned int uie_timer_active:1; #endif }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) #define rtc_lock(d) mutex_lock(&d->ops_lock) #define rtc_unlock(d) mutex_unlock(&d->ops_lock) /* useful timestamps */ #define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */ #define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */ #define RTC_TIMESTAMP_EPOCH_GPS 315964800LL /* 1980-01-06 00:00:00 */ #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ #define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2199 7258118399LL /* 2199-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */ extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events); extern struct rtc_device *rtc_class_open(const char *name); extern void rtc_class_close(struct rtc_device *rtc); extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled); extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled); void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); void rtc_aie_update_irq(struct rtc_device *rtc); void rtc_uie_update_irq(struct rtc_device *rtc); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), struct rtc_device *rtc); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); int rtc_read_offset(struct rtc_device *rtc, long *offset); int rtc_set_offset(struct rtc_device *rtc, long offset); void rtc_timer_do_work(struct 
work_struct *work); static inline bool is_leap_year(unsigned int year) { return (!(year % 4) && (year % 100)) || !(year % 400); } /** * rtc_bound_alarmtime() - Return alarm time bound by rtc limit * @rtc: Pointer to rtc device structure * @requested: Requested alarm timeout * * Return: Alarm timeout bound by maximum alarm time supported by rtc. */ static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc, ktime_t requested) { if (rtc->alarm_offset_max && rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested)) return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC); return requested; } #define devm_rtc_register_device(device) \ __devm_rtc_register_device(THIS_MODULE, device) #ifdef CONFIG_RTC_HCTOSYS_DEVICE extern int rtc_hctosys_ret; #else #define rtc_hctosys_ret -ENODEV #endif #ifdef CONFIG_RTC_NVMEM int devm_rtc_nvmem_register(struct rtc_device *rtc, struct nvmem_config *nvmem_config); #else static inline int devm_rtc_nvmem_register(struct rtc_device *rtc, struct nvmem_config *nvmem_config) { return 0; } #endif #ifdef CONFIG_RTC_INTF_SYSFS int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); #else static inline int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) { return 0; } static inline int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) { return 0; } #endif #endif /* _LINUX_RTC_H_ */
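/*
 * Example (editor's sketch, not part of this header): reading the time
 * from an RTC through the class interface declared above, in the style
 * of in-kernel users such as the hctosys code. "example_read_rtc" is a
 * hypothetical helper.
 */
static int example_read_rtc(const char *name, struct rtc_time *tm)
{
	struct rtc_device *rtc = rtc_class_open(name);
	int err;

	if (!rtc)
		return -ENODEV;

	/* Takes rtc->ops_lock and calls the driver's ->read_time(). */
	err = rtc_read_time(rtc, tm);
	rtc_class_close(rtc);
	return err;
}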
2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 
2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
*/ #define pr_fmt(fmt) "TCP: " fmt #include <linux/bottom_half.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/random.h> #include <linux/cache.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/times.h> #include <linux/slab.h> #include <linux/sched.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> #include <net/tcp.h> #include <net/transp_v6.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/inet_ecn.h> #include <net/timewait_sock.h> #include <net/xfrm.h> #include <net/secure_seq.h> #include <net/busy_poll.h> #include <net/rstreason.h> #include <linux/inet.h> #include <linux/ipv6.h> #include <linux/stddef.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/inetdevice.h> #include <linux/btf_ids.h> #include <linux/skbuff_ref.h> #include <crypto/hash.h> #include <linux/scatterlist.h> #include <trace/events/tcp.h> #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); #endif struct inet_hashinfo tcp_hashinfo; static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = { .bh_lock = INIT_LOCAL_LOCK(bh_lock), }; static DEFINE_MUTEX(tcp_exit_batch_mutex); static u32 tcp_v4_init_seq(const struct sk_buff *skb) { return secure_tcp_seq(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); } static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb) { return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr); } int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) { int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse); const struct inet_timewait_sock *tw = inet_twsk(sktw); const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); int ts_recent_stamp; u32 reuse_thresh; if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) reuse = 0; if (reuse == 2) { /* Still does not detect *everything* that goes through * lo, since we require a loopback src or dst address * or direct binding to 'lo' interface. */ bool loopback = false; if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX) loopback = true; #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == AF_INET6) { if (ipv6_addr_loopback(&tw->tw_v6_daddr) || ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) || ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) || ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr)) loopback = true; } else #endif { if (ipv4_is_loopback(tw->tw_daddr) || ipv4_is_loopback(tw->tw_rcv_saddr)) loopback = true; } if (!loopback) reuse = 0; } /* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it is safe provided sequence spaces do not overlap i.e. at data rates <= 80Mbit/sec. Actually, the idea is close to VJ's one, only timestamp cache is held not per host, but per port pair and TW bucket is used as state holder. If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table. */ ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp); reuse_thresh = READ_ONCE(tw->tw_entry_stamp) + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse_delay); if (ts_recent_stamp && (!twp || (reuse && time_after32(tcp_clock_ms(), reuse_thresh)))) { /* inet_twsk_hashdance_schedule() sets sk_refcnt after putting twsk * and releasing the bucket lock. 
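 *
 * Note the guard just below: refcount_inc_not_zero() can fail if another
 * CPU already dropped the timewait socket's last reference, in which case
 * we decline the reuse rather than resurrect a dying socket.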
*/ if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt))) return 0; /* In case of repair and re-using TIME-WAIT sockets we still * want to be sure that it is safe as above but honor the * sequence numbers and time stamps set as part of the repair * process. * * Without this check re-using a TIME-WAIT socket with TCP * repair would accumulate a -1 on the repair assigned * sequence number. The first time it is reused the sequence * is -1, the second time -2, etc. This fixes that issue * without appearing to create any others. */ if (likely(!tp->repair)) { u32 seq = tcptw->tw_snd_nxt + 65535 + 2; if (!seq) seq = 1; WRITE_ONCE(tp->write_seq, seq); tp->rx_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent); tp->rx_opt.ts_recent_stamp = ts_recent_stamp; } return 1; } return 0; } EXPORT_IPV6_MOD_GPL(tcp_twsk_unique); static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { /* This check is replicated from tcp_v4_connect() and intended to * prevent BPF program called below from accessing bytes that are out * of the bound specified by user in addr_len. */ if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; sock_owned_by_me(sk); return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, &addr_len); } /* This will initiate an outgoing connection. */ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_timewait_death_row *tcp_death_row; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); struct ip_options_rcu *inet_opt; struct net *net = sock_net(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 *fl4; struct rtable *rt; int err; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); if (inet_opt && inet_opt->opt.srr) { if (!daddr) return -EINVAL; nexthop = inet_opt->opt.faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; fl4 = &inet->cork.fl.u.ip4; rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport, orig_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); return err; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet_opt || !inet_opt->opt.srr) daddr = fl4->daddr; tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; if (!inet->inet_saddr) { err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET); if (err) { ip_rt_put(rt); return err; } } else { sk_rcv_saddr_set(sk, inet->inet_saddr); } if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; if (likely(!tp->repair)) WRITE_ONCE(tp->write_seq, 0); } inet->inet_dport = usin->sin_port; sk_daddr_set(sk, daddr); inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. 
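 *
 * In other words, flip the state first, then let inet_hash_connect()
 * choose the ephemeral port and hash the socket; the failure path below
 * undoes this by unhashing and returning the socket to TCP_CLOSE.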
*/ tcp_set_state(sk, TCP_SYN_SENT); err = inet_hash_connect(tcp_death_row, sk); if (err) goto failure; sk_set_txhash(sk); rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto failure; } tp->tcp_usec_ts = dst_tcp_usec_ts(&rt->dst); /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); rt = NULL; if (likely(!tp->repair)) { if (!tp->write_seq) WRITE_ONCE(tp->write_seq, secure_tcp_seq(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, usin->sin_port)); WRITE_ONCE(tp->tsoffset, secure_tcp_ts_off(net, inet->inet_saddr, inet->inet_daddr)); } atomic_set(&inet->inet_id, get_random_u16()); if (tcp_fastopen_defer_connect(sk, &err)) return err; if (err) goto failure; err = tcp_connect(sk); if (err) goto failure; return 0; failure: /* * This unhashes the socket and releases the local port, * if necessary. */ tcp_set_state(sk, TCP_CLOSE); inet_bhash2_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; return err; } EXPORT_IPV6_MOD(tcp_v4_connect); /* * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191. * It can be called through tcp_release_cb() if socket was owned by user * at the time tcp_v4_err() was called to handle ICMP message. */ void tcp_v4_mtu_reduced(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct dst_entry *dst; u32 mtu; if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; mtu = READ_ONCE(tcp_sk(sk)->mtu_info); dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. */ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) WRITE_ONCE(sk->sk_err_soft, EMSGSIZE); mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && ip_sk_accept_pmtu(sk) && inet_csk(sk)->icsk_pmtu_cookie > mtu) { tcp_sync_mss(sk, mtu); /* Resend the TCP packet because it's * clear that the old packet has been * dropped. This is the new "fast" path mtu * discovery. */ tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ } EXPORT_IPV6_MOD(tcp_v4_mtu_reduced); static void do_redirect(struct sk_buff *skb, struct sock *sk) { struct dst_entry *dst = __sk_dst_check(sk, 0); if (dst) dst->ops->redirect(dst, sk, skb); } /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */ void tcp_req_err(struct sock *sk, u32 seq, bool abort) { struct request_sock *req = inet_reqsk(sk); struct net *net = sock_net(sk); /* ICMPs are not backlogged, hence we cannot get * an established socket here. */ if (seq != tcp_rsk(req)->snt_isn) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); } else if (abort) { /* * Still in SYN_RECV, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ inet_csk_reqsk_queue_drop(req->rsk_listener, req); tcp_listendrop(req->rsk_listener); } reqsk_put(req); } EXPORT_IPV6_MOD(tcp_req_err); /* TCP-LD (RFC 6069) logic */ void tcp_ld_RTO_revert(struct sock *sk, u32 seq) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; s32 remaining; u32 delta_us; if (sock_owned_by_user(sk)) return; if (seq != tp->snd_una || !icsk->icsk_retransmits || !icsk->icsk_backoff) return; skb = tcp_rtx_queue_head(sk); if (WARN_ON_ONCE(!skb)) return; icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? 
__tcp_set_rto(tp) : TCP_TIMEOUT_INIT; icsk->icsk_rto = inet_csk_rto_backoff(icsk, tcp_rto_max(sk)); tcp_mstamp_refresh(tp); delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us); if (remaining > 0) { tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, false); } else { /* RTO revert clocked out retransmission. * Will retransmit now. */ tcp_retransmit_timer(sk); } } EXPORT_IPV6_MOD(tcp_ld_RTO_revert); /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. After adjustment * header points to the first 8 bytes of the tcp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. * */ int tcp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)skb->data; struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); struct net *net = dev_net_rcu(skb->dev); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct request_sock *fastopen; struct tcp_sock *tp; u32 seq, snd_una; struct sock *sk; int err; sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, iph->daddr, th->dest, iph->saddr, ntohs(th->source), inet_iif(skb), 0); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return -ENOENT; } if (sk->sk_state == TCP_TIME_WAIT) { /* To increase the counter of ignored icmps for TCP-AO */ tcp_ao_ignore_icmp(sk, AF_INET, type, code); inet_twsk_put(inet_twsk(sk)); return 0; } seq = ntohl(th->seq); if (sk->sk_state == TCP_NEW_SYN_RECV) { tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB || type == ICMP_TIME_EXCEEDED || (type == ICMP_DEST_UNREACH && (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))); return 0; } if (tcp_ao_ignore_icmp(sk, AF_INET, type, code)) { sock_put(sk); return 0; } bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. * We do take care of PMTU discovery (RFC1191) special case : * we can receive locally generated ICMP messages while socket is held. */ if (sock_owned_by_user(sk)) { if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); } if (sk->sk_state == TCP_CLOSE) goto out; if (static_branch_unlikely(&ip4_min_ttl)) { /* min_ttl can be changed concurrently from do_ip_setsockopt() */ if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } } tp = tcp_sk(sk); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = rcu_dereference(tp->fastopen_rsk); snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_REDIRECT: if (!sock_owned_by_user(sk)) do_redirect(skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. 
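 * ICMP Source Quench was formally deprecated by RFC 6633, which requires
 * hosts to silently discard it, so dropping it here is mandated behaviour.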
*/ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). */ if (sk->sk_state == TCP_LISTEN) goto out; WRITE_ONCE(tp->mtu_info, info); if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); } else { if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags)) sock_hold(sk); } goto out; } err = icmp_err_convert[code].errno; /* check if this ICMP message allows revert of backoff. * (see RFC 6069) */ if (!fastopen && (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH)) tcp_ld_RTO_revert(sk, seq); break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * already accepted it is treated as a connected one below. */ if (fastopen && !fastopen->sk) break; ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th); if (!sock_owned_by_user(sk)) tcp_done_with_error(sk, err); else WRITE_ONCE(sk->sk_err_soft, err); goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ if (!sock_owned_by_user(sk) && inet_test_bit(RECVERR, sk)) { WRITE_ONCE(sk->sk_err, err); sk_error_report(sk); } else { /* Only an error on timeout */ WRITE_ONCE(sk->sk_err_soft, err); } out: bh_unlock_sock(sk); sock_put(sk); return 0; } void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct tcphdr *th = tcp_hdr(skb); th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } /* This routine computes an IPv4 TCP checksum. */ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) { const struct inet_sock *inet = inet_sk(sk); __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); } EXPORT_IPV6_MOD(tcp_v4_send_check); #define REPLY_OPTIONS_LEN (MAX_TCP_OPTION_SPACE / sizeof(__be32)) static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb, const struct tcp_ao_hdr *aoh, struct ip_reply_arg *arg, struct tcphdr *reply, __be32 reply_options[REPLY_OPTIONS_LEN]) { #ifdef CONFIG_TCP_AO int sdif = tcp_v4_sdif(skb); int dif = inet_iif(skb); int l3index = sdif ? 
dif : 0; bool allocated_traffic_key; struct tcp_ao_key *key; char *traffic_key; bool drop = true; u32 ao_sne = 0; u8 keyid; rcu_read_lock(); if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, ntohl(reply->seq), &key, &traffic_key, &allocated_traffic_key, &keyid, &ao_sne)) goto out; reply_options[0] = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key) << 16) | (aoh->rnext_keyid << 8) | keyid); arg->iov[0].iov_len += tcp_ao_len_aligned(key); reply->doff = arg->iov[0].iov_len / 4; if (tcp_ao_hash_hdr(AF_INET, (char *)&reply_options[1], key, traffic_key, (union tcp_ao_addr *)&ip_hdr(skb)->saddr, (union tcp_ao_addr *)&ip_hdr(skb)->daddr, reply, ao_sne)) goto out; drop = false; out: rcu_read_unlock(); if (allocated_traffic_key) kfree(traffic_key); return drop; #else return true; #endif } /* * This routine will send an RST to the other tcp. * * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.) * for reset. * Answer: if a packet caused RST, it is not for a socket * existing in our system, if it is matched to a socket, * it is just duplicate segment or bug in other side's TCP. * So that we build reply only basing on parameters * arrived with segment. * Exception: precedence violation. We do not implement it in any case. */ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, enum sk_rst_reason reason) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; __be32 opt[REPLY_OPTIONS_LEN]; } rep; const __u8 *md5_hash_location = NULL; const struct tcp_ao_hdr *aoh; struct ip_reply_arg arg; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key = NULL; unsigned char newhash[16]; struct sock *sk1 = NULL; int genhash; #endif u64 transmit_time = 0; struct sock *ctl_sk; struct net *net; u32 txhash = 0; /* Never send a reset in response to a reset. */ if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ memset(&rep, 0, sizeof(rep)); rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = sizeof(struct tcphdr) / 4; rep.th.rst = 1; if (th->ack) { rep.th.seq = th->ack_seq; } else { rep.th.ack = 1; rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2)); } memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev); /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh)) return; if (aoh && tcp_v4_ao_sign_reset(sk, skb, aoh, &arg, &rep.th, rep.opt)) return; #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); if (sk && sk_fullsock(sk)) { const union tcp_md5_addr *addr; int l3index; /* sdif set, means packet ingressed via a device * in an L3 domain and inet_iif is set to it. */ l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0; addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr; key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET); } else if (md5_hash_location) { const union tcp_md5_addr *addr; int sdif = tcp_v4_sdif(skb); int dif = inet_iif(skb); int l3index; /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. 
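 *
 * Net effect: a segment that matched no established socket is checked
 * against the listener's key for its source address; if the hash does not
 * verify we drop it below and never answer, so forged segments cannot be
 * used to elicit RSTs from MD5-protected listeners.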
*/ sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo, NULL, 0, ip_hdr(skb)->saddr, th->source, ip_hdr(skb)->daddr, ntohs(th->source), dif, sdif); /* don't send rst if it can't find key */ if (!sk1) goto out; /* sdif set, means packet ingressed via a device * in an L3 domain and dif is set to it. */ l3index = sdif ? dif : 0; addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr; key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET); if (!key) goto out; genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(md5_hash_location, newhash, 16) != 0) goto out; } if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* Update length and the length the header thinks exists */ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len / 4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1], key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif /* Can't co-exist with TCPMD5, hence check rep.opt[0] */ if (rep.opt[0] == 0) { __be32 mrst = mptcp_reset_option(skb); if (mrst) { rep.opt[0] = mrst; arg.iov[0].iov_len += sizeof(mrst); rep.th.doff = arg.iov[0].iov_len / 4; } } arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0; /* When socket is gone, all binding information is lost. * routing might fail in this case. No choice here, if we choose to force * input interface, we will misroute in case of asymmetric route. */ if (sk) arg.bound_dev_if = sk->sk_bound_dev_if; trace_tcp_send_reset(sk, skb, reason); BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) != offsetof(struct inet_timewait_sock, tw_bound_dev_if)); /* ECN bits of TW reset are cleared */ arg.tos = ip_hdr(skb)->tos & ~INET_ECN_MASK; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); local_lock_nested_bh(&ipv4_tcp_sk.bh_lock); ctl_sk = this_cpu_read(ipv4_tcp_sk.sock); sock_net_set(ctl_sk, net); if (sk) { ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark); ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ? inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority); transmit_time = tcp_transmit_time(sk); xfrm_sk_clone_policy(ctl_sk, sk); txhash = (sk->sk_state == TCP_TIME_WAIT) ? inet_twsk(sk)->tw_txhash : sk->sk_txhash; } else { ctl_sk->sk_mark = 0; ctl_sk->sk_priority = 0; } ip_send_unicast_reply(ctl_sk, sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len, transmit_time, txhash); xfrm_sk_free_policy(ctl_sk); sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock); local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG out: rcu_read_unlock(); #endif } /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states outside socket context is ugly, certainly. What can I do? 
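 *
 * Neither a timewait nor a request socket is a full socket, so the reply
 * has to be assembled by hand from the incoming skb and transmitted via
 * the per-CPU control socket, just like tcp_v4_send_reset() above.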
*/ static void tcp_v4_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_key *key, int reply_flags, u8 tos, u32 txhash) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; __be32 opt[(MAX_TCP_OPTION_SPACE >> 2)]; } rep; struct net *net = sock_net(sk); struct ip_reply_arg arg; struct sock *ctl_sk; u64 transmit_time; memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); if (tsecr) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); rep.opt[1] = htonl(tsval); rep.opt[2] = htonl(tsecr); arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; } /* Swap the send and the receive. */ rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = arg.iov[0].iov_len / 4; rep.th.seq = htonl(seq); rep.th.ack_seq = htonl(ack); rep.th.ack = 1; rep.th.window = htons(win); #ifdef CONFIG_TCP_MD5SIG if (tcp_key_is_md5(key)) { int offset = (tsecr) ? 3 : 0; rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len/4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset], key->md5_key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif #ifdef CONFIG_TCP_AO if (tcp_key_is_ao(key)) { int offset = (tsecr) ? 3 : 0; rep.opt[offset++] = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key->ao_key) << 16) | (key->ao_key->sndid << 8) | key->rcv_next); arg.iov[0].iov_len += tcp_ao_len_aligned(key->ao_key); rep.th.doff = arg.iov[0].iov_len / 4; tcp_ao_hash_hdr(AF_INET, (char *)&rep.opt[offset], key->ao_key, key->traffic_key, (union tcp_ao_addr *)&ip_hdr(skb)->saddr, (union tcp_ao_addr *)&ip_hdr(skb)->daddr, &rep.th, key->sne); } #endif arg.flags = reply_flags; arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; if (oif) arg.bound_dev_if = oif; arg.tos = tos; arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); local_bh_disable(); local_lock_nested_bh(&ipv4_tcp_sk.bh_lock); ctl_sk = this_cpu_read(ipv4_tcp_sk.sock); sock_net_set(ctl_sk, net); ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark); ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ? 
inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority); transmit_time = tcp_transmit_time(sk); ip_send_unicast_reply(ctl_sk, sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len, transmit_time, txhash); sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock); local_bh_enable(); } static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb, enum tcp_tw_status tw_status) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); struct tcp_key key = {}; u8 tos = tw->tw_tos; /* Cleaning only ECN bits of TW ACKs of oow data or is paws_reject, * while not cleaning ECN bits of other TW ACKs to avoid these ACKs * being placed in a different service queues (Classic rather than L4S) */ if (tw_status == TCP_TW_ACK_OOW) tos &= ~INET_ECN_MASK; #ifdef CONFIG_TCP_AO struct tcp_ao_info *ao_info; if (static_branch_unlikely(&tcp_ao_needed.key)) { /* FIXME: the segment to-be-acked is not verified yet */ ao_info = rcu_dereference(tcptw->ao_info); if (ao_info) { const struct tcp_ao_hdr *aoh; if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) { inet_twsk_put(tw); return; } if (aoh) key.ao_key = tcp_ao_established_key(sk, ao_info, aoh->rnext_keyid, -1); } } if (key.ao_key) { struct tcp_ao_key *rnext_key; key.traffic_key = snd_other_key(key.ao_key); key.sne = READ_ONCE(ao_info->snd_sne); rnext_key = READ_ONCE(ao_info->rnext_key); key.rcv_next = rnext_key->rcvid; key.type = TCP_KEY_AO; #else if (0) { #endif } else if (static_branch_tcp_md5()) { key.md5_key = tcp_twsk_md5_key(tcptw); if (key.md5_key) key.type = TCP_KEY_MD5; } tcp_v4_send_ack(sk, skb, tcptw->tw_snd_nxt, READ_ONCE(tcptw->tw_rcv_nxt), tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_tw_tsval(tcptw), READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if, &key, tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0, tos, tw->tw_txhash); inet_twsk_put(tw); } static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { struct tcp_key key = {}; /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt; #ifdef CONFIG_TCP_AO if (static_branch_unlikely(&tcp_ao_needed.key) && tcp_rsk_used_ao(req)) { const union tcp_md5_addr *addr; const struct tcp_ao_hdr *aoh; int l3index; /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) return; if (!aoh) return; addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr; l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0; key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, aoh->rnext_keyid, -1); if (unlikely(!key.ao_key)) { /* Send ACK with any matching MKT for the peer */ key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, -1, -1); /* Matching key disappeared (user removed the key?) * let the handshake timeout. 
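 * Timing out is the conservative option here: an ACK signed with a
 * non-matching MKT would only fail verification on the peer anyway.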
*/ if (!key.ao_key) { net_info_ratelimited("TCP-AO key for (%pI4, %d)->(%pI4, %d) suddenly disappeared, won't ACK new connection\n", addr, ntohs(tcp_hdr(skb)->source), &ip_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest)); return; } } key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC); if (!key.traffic_key) return; key.type = TCP_KEY_AO; key.rcv_next = aoh->keyid; tcp_v4_ao_calc_key_rsk(key.ao_key, key.traffic_key, req); #else if (0) { #endif } else if (static_branch_tcp_md5()) { const union tcp_md5_addr *addr; int l3index; addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr; l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0; key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET); if (key.md5_key) key.type = TCP_KEY_MD5; } /* Cleaning ECN bits of TW ACKs of oow data or is paws_reject */ tcp_v4_send_ack(sk, skb, seq, tcp_rsk(req)->rcv_nxt, tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale, tcp_rsk_tsval(tcp_rsk(req)), req->ts_recent, 0, &key, inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, ip_hdr(skb)->tos & ~INET_ECN_MASK, READ_ONCE(tcp_rsk(req)->txhash)); if (tcp_key_is_ao(&key)) kfree(key.traffic_key); } /* * Send a SYN-ACK after having received a SYN. * This still operates on a request_sock only, not on a big * socket. */ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type, struct sk_buff *syn_skb) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; int err = -1; struct sk_buff *skb; u8 tos; /* First, grab a route. */ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb); if (skb) { __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); tos = READ_ONCE(inet_sk(sk)->tos); if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) tos = (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | (tos & INET_ECN_MASK); if (!INET_ECN_is_capable(tos) && tcp_bpf_ca_needs_ecn((struct sock *)req)) tos |= INET_ECN_ECT_0; rcu_read_lock(); err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, rcu_dereference(ireq->ireq_opt), tos); rcu_read_unlock(); err = net_xmit_eval(err); } return err; } /* * IPv4 request_sock destructor. */ static void tcp_v4_reqsk_destructor(struct request_sock *req) { kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); } #ifdef CONFIG_TCP_MD5SIG /* * RFC2385 MD5 checksumming requires a mapping of * IP address->MD5 Key. * We need to maintain these in the sk structure. */ DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ); EXPORT_IPV6_MOD(tcp_md5_needed); static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new) { if (!old) return true; /* l3index always overrides non-l3index */ if (old->l3index && new->l3index == 0) return false; if (old->l3index == 0 && new->l3index) return true; return old->prefixlen < new->prefixlen; } /* Find the Key structure for an address. 
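 * Best-match semantics follow better_md5_match() above: a key bound to an
 * L3 domain always beats an unbound one, and otherwise the longest
 * matching prefix wins, so the most specific configured key is returned.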
*/ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index, const union tcp_md5_addr *addr, int family, bool any_l3index) { const struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; const struct tcp_md5sig_info *md5sig; __be32 mask; struct tcp_md5sig_key *best_match = NULL; bool match; /* caller either holds rcu_read_lock() or socket lock */ md5sig = rcu_dereference_check(tp->md5sig_info, lockdep_sock_is_held(sk)); if (!md5sig) return NULL; hlist_for_each_entry_rcu(key, &md5sig->head, node, lockdep_sock_is_held(sk)) { if (key->family != family) continue; if (!any_l3index && key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index) continue; if (family == AF_INET) { mask = inet_make_mask(key->prefixlen); match = (key->addr.a4.s_addr & mask) == (addr->a4.s_addr & mask); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { match = ipv6_prefix_equal(&key->addr.a6, &addr->a6, key->prefixlen); #endif } else { match = false; } if (match && better_md5_match(best_match, key)) best_match = key; } return best_match; } EXPORT_IPV6_MOD(__tcp_md5_do_lookup); static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags) { const struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; unsigned int size = sizeof(struct in_addr); const struct tcp_md5sig_info *md5sig; /* caller either holds rcu_read_lock() or socket lock */ md5sig = rcu_dereference_check(tp->md5sig_info, lockdep_sock_is_held(sk)); if (!md5sig) return NULL; #if IS_ENABLED(CONFIG_IPV6) if (family == AF_INET6) size = sizeof(struct in6_addr); #endif hlist_for_each_entry_rcu(key, &md5sig->head, node, lockdep_sock_is_held(sk)) { if (key->family != family) continue; if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX)) continue; if (key->l3index != l3index) continue; if (!memcmp(&key->addr, addr, size) && key->prefixlen == prefixlen) return key; } return NULL; } struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, const struct sock *addr_sk) { const union tcp_md5_addr *addr; int l3index; l3index = l3mdev_master_ifindex_by_index(sock_net(sk), addr_sk->sk_bound_dev_if); addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr; return tcp_md5_do_lookup(sk, l3index, addr, AF_INET); } EXPORT_IPV6_MOD(tcp_v4_md5_lookup); static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig; md5sig = kmalloc(sizeof(*md5sig), gfp); if (!md5sig) return -ENOMEM; sk_gso_disable(sk); INIT_HLIST_HEAD(&md5sig->head); rcu_assign_pointer(tp->md5sig_info, md5sig); return 0; } /* This can be called on a newly created socket, from other files */ static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags, const u8 *newkey, u8 newkeylen, gfp_t gfp) { /* Add Key to the list */ struct tcp_md5sig_key *key; struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig; key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags); if (key) { /* Pre-existing entry - just update that one. * Note that the key might be used concurrently. * data_race() is telling kcsan that we do not care of * key mismatches, since changing MD5 key on live flows * can lead to packet drops. */ data_race(memcpy(key->key, newkey, newkeylen)); /* Pairs with READ_ONCE() in tcp_md5_hash_key(). 
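 * (WRITE_ONCE()/READ_ONCE() only keep key->keylen itself from being torn;
 * a briefly inconsistent key/keylen pairing is tolerated by design, see
 * the data_race() annotation above.)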
* Also note that a reader could catch new key->keylen value * but old key->key[], this is the reason we use __GFP_ZERO * at sock_kmalloc() time below these lines. */ WRITE_ONCE(key->keylen, newkeylen); return 0; } md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk)); key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); if (!key) return -ENOMEM; memcpy(key->key, newkey, newkeylen); key->keylen = newkeylen; key->family = family; key->prefixlen = prefixlen; key->l3index = l3index; key->flags = flags; memcpy(&key->addr, addr, (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) : sizeof(struct in_addr)); hlist_add_head_rcu(&key->node, &md5sig->head); return 0; } int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags, const u8 *newkey, u8 newkeylen) { struct tcp_sock *tp = tcp_sk(sk); if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) { if (tcp_md5_alloc_sigpool()) return -ENOMEM; if (tcp_md5sig_info_add(sk, GFP_KERNEL)) { tcp_md5_release_sigpool(); return -ENOMEM; } if (!static_branch_inc(&tcp_md5_needed.key)) { struct tcp_md5sig_info *md5sig; md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk)); rcu_assign_pointer(tp->md5sig_info, NULL); kfree_rcu(md5sig, rcu); tcp_md5_release_sigpool(); return -EUSERS; } } return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags, newkey, newkeylen, GFP_KERNEL); } EXPORT_IPV6_MOD(tcp_md5_do_add); int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, struct tcp_md5sig_key *key) { struct tcp_sock *tp = tcp_sk(sk); if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) { tcp_md5_add_sigpool(); if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC))) { tcp_md5_release_sigpool(); return -ENOMEM; } if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) { struct tcp_md5sig_info *md5sig; md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk)); net_warn_ratelimited("Too many TCP-MD5 keys in the system\n"); rcu_assign_pointer(tp->md5sig_info, NULL); kfree_rcu(md5sig, rcu); tcp_md5_release_sigpool(); return -EUSERS; } } return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, key->flags, key->key, key->keylen, sk_gfp_mask(sk, GFP_ATOMIC)); } EXPORT_IPV6_MOD(tcp_md5_key_copy); int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags) { struct tcp_md5sig_key *key; key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags); if (!key) return -ENOENT; hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); kfree_rcu(key, rcu); return 0; } EXPORT_IPV6_MOD(tcp_md5_do_del); void tcp_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; struct hlist_node *n; struct tcp_md5sig_info *md5sig; md5sig = rcu_dereference_protected(tp->md5sig_info, 1); hlist_for_each_entry_safe(key, n, &md5sig->head, node) { hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); kfree_rcu(key, rcu); } } static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; const union tcp_md5_addr *addr; u8 prefixlen = 32; int l3index = 0; bool l3flag; u8 flags; if (optlen < sizeof(cmd)) return -EINVAL; if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; 
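	/*
	 * For reference, a minimal (hypothetical) userspace sequence that
	 * lands here; "peer" and the key bytes are illustrative only:
	 *
	 *	struct sockaddr_in peer = { .sin_family = AF_INET };
	 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
	 *
	 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
	 *	memcpy(md5.tcpm_key, "secret", 6);
	 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
	 */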
if (sin->sin_family != AF_INET) return -EINVAL; flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; if (optname == TCP_MD5SIG_EXT && cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) { prefixlen = cmd.tcpm_prefixlen; if (prefixlen > 32) return -EINVAL; } if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex && cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex); if (dev && netif_is_l3_master(dev)) l3index = dev->ifindex; rcu_read_unlock(); /* ok to reference set/not set outside of rcu; * right now device MUST be an L3 master */ if (!dev || !l3index) return -EINVAL; } addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr; if (!cmd.tcpm_keylen) return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags); if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL; /* Don't allow keys for peers that have a matching TCP-AO key. * See the comment in tcp_ao_add_cmd() */ if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false)) return -EKEYREJECTED; return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags, cmd.tcpm_key, cmd.tcpm_keylen); } static int tcp_v4_md5_hash_headers(struct tcp_sigpool *hp, __be32 daddr, __be32 saddr, const struct tcphdr *th, int nbytes) { struct tcp4_pseudohdr *bp; struct scatterlist sg; struct tcphdr *_th; bp = hp->scratch; bp->saddr = saddr; bp->daddr = daddr; bp->pad = 0; bp->protocol = IPPROTO_TCP; bp->len = cpu_to_be16(nbytes); _th = (struct tcphdr *)(bp + 1); memcpy(_th, th, sizeof(*th)); _th->check = 0; sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); ahash_request_set_crypt(hp->req, &sg, NULL, sizeof(*bp) + sizeof(*th)); return crypto_ahash_update(hp->req); } static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th) { struct tcp_sigpool hp; if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) goto clear_hash_nostart; if (crypto_ahash_init(hp.req)) goto clear_hash; if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(&hp, key)) goto clear_hash; ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); if (crypto_ahash_final(hp.req)) goto clear_hash; tcp_sigpool_end(&hp); return 0; clear_hash: tcp_sigpool_end(&hp); clear_hash_nostart: memset(md5_hash, 0, 16); return 1; } int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct tcp_sigpool hp; __be32 saddr, daddr; if (sk) { /* valid for establish/request sockets */ saddr = sk->sk_rcv_saddr; daddr = sk->sk_daddr; } else { const struct iphdr *iph = ip_hdr(skb); saddr = iph->saddr; daddr = iph->daddr; } if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) goto clear_hash_nostart; if (crypto_ahash_init(hp.req)) goto clear_hash; if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, skb->len)) goto clear_hash; if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(&hp, key)) goto clear_hash; ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); if (crypto_ahash_final(hp.req)) goto clear_hash; tcp_sigpool_end(&hp); return 0; clear_hash: tcp_sigpool_end(&hp); clear_hash_nostart: memset(md5_hash, 0, 16); return 1; } EXPORT_IPV6_MOD(tcp_v4_md5_hash_skb); #endif static void tcp_v4_init_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb) { struct inet_request_sock *ireq = inet_rsk(req); 
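	/* The request mirrors the incoming SYN: our local address is the
	 * packet's destination, the peer address is its source.
	 */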
struct net *net = sock_net(sk_listener); sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb)); } static struct dst_entry *tcp_v4_route_req(const struct sock *sk, struct sk_buff *skb, struct flowi *fl, struct request_sock *req, u32 tw_isn) { tcp_v4_init_req(req, sk, skb); if (security_inet_conn_request(sk, skb, req)) return NULL; return inet_csk_route_req(sk, &fl->u.ip4, req); } struct request_sock_ops tcp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct tcp_request_sock), .rtx_syn_ack = tcp_rtx_synack, .send_ack = tcp_v4_reqsk_send_ack, .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, .syn_ack_timeout = tcp_syn_ack_timeout, }; const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { .mss_clamp = TCP_MSS_DEFAULT, #ifdef CONFIG_TCP_MD5SIG .req_md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v4_ao_lookup_rsk, .ao_calc_key = tcp_v4_ao_calc_key_rsk, .ao_synack_hash = tcp_v4_ao_synack_hash, #endif #ifdef CONFIG_SYN_COOKIES .cookie_init_seq = cookie_v4_init_sequence, #endif .route_req = tcp_v4_route_req, .init_seq = tcp_v4_init_seq, .init_ts_off = tcp_v4_init_ts_off, .send_synack = tcp_v4_send_synack, }; int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; return tcp_conn_request(&tcp_request_sock_ops, &tcp_request_sock_ipv4_ops, sk, skb); drop: tcp_listendrop(sk); return 0; } EXPORT_IPV6_MOD(tcp_v4_conn_request); /* * The three way handshake has completed - we got a valid synack - * now create the new socket. */ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; bool found_dup_sk = false; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG const union tcp_md5_addr *addr; struct tcp_md5sig_key *key; int l3index; #endif struct ip_options_rcu *inet_opt; if (sk_acceptq_is_full(sk)) goto exit_overflow; newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto exit_nonewsk; newsk->sk_gso_type = SKB_GSO_TCPV4; inet_sk_rx_dst_set(newsk, skb); newtp = tcp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); inet_opt = rcu_dereference(ireq->ireq_opt); RCU_INIT_POINTER(newinet->inet_opt, inet_opt); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->rcv_tos = ip_hdr(skb)->tos; inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; atomic_set(&newinet->inet_id, get_random_u16()); /* Set ToS of the new socket based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer(). 
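 * Only the DSCP bits of the SYN's TOS are reflected; masking with
 * ~INET_ECN_MASK below keeps the ECN codepoint under local control.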
*/ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; if (!dst) { dst = inet_csk_route_child_sock(sk, newsk, req); if (!dst) goto put_and_exit; } else { /* syncookie case: see end of cookie_v4_check() */ } sk_setup_caps(newsk, dst); tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); tcp_initialize_rcv_mss(newsk); #ifdef CONFIG_TCP_MD5SIG l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); /* Copy over the MD5 key from the original socket */ addr = (union tcp_md5_addr *)&newinet->inet_daddr; key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET); if (key && !tcp_rsk_used_ao(req)) { if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key)) goto put_and_exit; sk_gso_disable(newsk); } #endif #ifdef CONFIG_TCP_AO if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET)) goto put_and_exit; /* OOM, release back memory */ #endif if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), &found_dup_sk); if (likely(*own_req)) { tcp_move_syn(newtp, req); ireq->ireq_opt = NULL; } else { newinet->inet_opt = NULL; if (!req_unhash && found_dup_sk) { /* This code path should be executed only in the * syncookie case */ bh_unlock_sock(newsk); sock_put(newsk); newsk = NULL; } } return newsk; exit_overflow: NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: tcp_listendrop(sk); return NULL; put_and_exit: newinet->inet_opt = NULL; inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto exit; } EXPORT_IPV6_MOD(tcp_v4_syn_recv_sock); static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb) { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); if (!th->syn) sk = cookie_v4_check(sk, skb); #endif return sk; } u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph, struct tcphdr *th, u32 *cookie) { u16 mss = 0; #ifdef CONFIG_SYN_COOKIES mss = tcp_get_syncookie_mss(&tcp_request_sock_ops, &tcp_request_sock_ipv4_ops, sk, th); if (mss) { *cookie = __cookie_v4_init_sequence(iph, th, &mss); tcp_synq_overflow(sk); } #endif return mss; } INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, u32)); /* The socket must have its spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held.
*/ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) { enum skb_drop_reason reason; struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst; dst = rcu_dereference_protected(sk->sk_rx_dst, lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (sk->sk_rx_dst_ifindex != skb->skb_iif || !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check, dst, 0)) { RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); } } tcp_rcv_established(sk, skb); return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v4_cookie_check(sk, skb); if (!nsk) return 0; if (nsk != sk) { reason = tcp_child_process(sk, nsk, skb); if (reason) { rsk = nsk; goto reset; } return 0; } } else sock_rps_save_rxhash(sk, skb); reason = tcp_rcv_state_process(sk, skb); if (reason) { rsk = sk; goto reset; } return 0; reset: tcp_v4_send_reset(rsk, skb, sk_rst_convert_drop_reason(reason)); discard: sk_skb_reason_drop(sk, skb, reason); /* Be careful here. If this function gets more complicated and * gcc suffers from register pressure on the x86, sk (in %ebx) * might be destroyed here. This current version compiles correctly, * but you have been warned. */ return 0; csum_err: reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; } EXPORT_SYMBOL(tcp_v4_do_rcv); int tcp_v4_early_demux(struct sk_buff *skb) { struct net *net = dev_net_rcu(skb->dev); const struct iphdr *iph; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return 0; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return 0; iph = ip_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return 0; sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, iph->saddr, th->source, iph->daddr, ntohs(th->dest), skb->skb_iif, inet_sdif(skb)); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); if (dst && sk->sk_rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } return 0; } bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason) { u32 tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; struct sk_buff *tail; unsigned int hdrlen; bool fragstolen; u32 gso_segs; u32 gso_size; u64 limit; int delta; /* In case all data was pulled from skb frags (in __pskb_pull_tail()), * we can fix skb->truesize to its real value to avoid future drops. * This is valid because skb is not yet charged to the socket. * It has been noticed pure SACK packets were sometimes dropped * (if cooked by drivers without copybreak feature). */ skb_condense(skb); tcp_cleanup_skb(skb); if (unlikely(tcp_checksum_complete(skb))) { bh_unlock_sock(sk); trace_tcp_bad_csum(skb); *reason = SKB_DROP_REASON_TCP_CSUM; __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); return true; } /* Attempt coalescing to last skb in backlog, even if we are * above the limits. * This is okay because skb capacity is limited to MAX_SKB_FRAGS. 
*/ th = (const struct tcphdr *)skb->data; hdrlen = th->doff * 4; tail = sk->sk_backlog.tail; if (!tail) goto no_coalesce; thtail = (struct tcphdr *)tail->data; if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq || TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield || ((TCP_SKB_CB(tail)->tcp_flags | TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) || !((TCP_SKB_CB(tail)->tcp_flags & TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) || ((TCP_SKB_CB(tail)->tcp_flags ^ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)) || !tcp_skb_can_collapse_rx(tail, skb) || thtail->doff != th->doff || memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th))) goto no_coalesce; __skb_pull(skb, hdrlen); shinfo = skb_shinfo(skb); gso_size = shinfo->gso_size ?: skb->len; gso_segs = shinfo->gso_segs ?: 1; shinfo = skb_shinfo(tail); tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen); tail_gso_segs = shinfo->gso_segs ?: 1; if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) { TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq; if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) { TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq; thtail->window = th->window; } /* We have to update both TCP_SKB_CB(tail)->tcp_flags and * thtail->fin, so that the fast path in tcp_rcv_established() * is not entered if we append a packet with a FIN. * SYN, RST, URG are not present. * ACK is set on both packets. * PSH : we do not really care in TCP stack, * at least for 'GRO' packets. */ thtail->fin |= th->fin; TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; if (TCP_SKB_CB(skb)->has_rxtstamp) { TCP_SKB_CB(tail)->has_rxtstamp = true; tail->tstamp = skb->tstamp; skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp; } /* Not as strict as GRO. We only need to carry mss max value */ shinfo->gso_size = max(gso_size, tail_gso_size); shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF); sk->sk_backlog.len += delta; __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGCOALESCE); kfree_skb_partial(skb, fragstolen); return false; } __skb_push(skb, hdrlen); no_coalesce: /* sk->sk_backlog.len is reset only at the end of __release_sock(). * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach * sk_rcvbuf in normal conditions. */ limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1; limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1; /* Only socket owner can try to collapse/prune rx queues * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ limit += 64 * 1024; limit = min_t(u64, limit, UINT_MAX); if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); *reason = SKB_DROP_REASON_SOCKET_BACKLOG; __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); return true; } return false; } EXPORT_IPV6_MOD(tcp_add_backlog); int tcp_filter(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = (struct tcphdr *)skb->data; return sk_filter_trim_cap(sk, skb, th->doff * 4); } EXPORT_IPV6_MOD(tcp_filter); static void tcp_v4_restore_cb(struct sk_buff *skb) { memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4, sizeof(struct inet_skb_parm)); } static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, const struct tcphdr *th) { /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() * barrier() makes sure compiler wont play fool^Waliasing games. 
*/ memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), sizeof(struct inet_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flags_ntohs(th); TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; TCP_SKB_CB(skb)->has_rxtstamp = skb->tstamp || skb_hwtstamps(skb)->hwtstamp; } /* * From tcp_input.c */ int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net_rcu(skb->dev); enum skb_drop_reason drop_reason; enum tcp_tw_status tw_status; int sdif = inet_sdif(skb); int dif = inet_iif(skb); const struct iphdr *iph; const struct tcphdr *th; struct sock *sk = NULL; bool refcounted; int ret; u32 isn; drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (skb->pkt_type != PACKET_HOST) goto discard_it; /* Count it even if it's bad */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) { drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; goto bad_packet; } if (!pskb_may_pull(skb, th->doff * 4)) goto discard_it; /* An explanation is required here, I think. * Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated. * So, we defer the checks. */ if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); lookup: sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, sdif, &refcounted); if (!sk) goto no_tcp_socket; if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); bool req_stolen = false; struct sock *nsk; sk = req->rsk_listener; if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) drop_reason = SKB_DROP_REASON_XFRM_POLICY; else drop_reason = tcp_inbound_hash(sk, req, skb, &iph->saddr, &iph->daddr, AF_INET, dif, sdif); if (unlikely(drop_reason)) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (tcp_checksum_complete(skb)) { reqsk_put(req); goto csum_error; } if (unlikely(sk->sk_state != TCP_LISTEN)) { nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb); if (!nsk) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } sk = nsk; /* reuseport_migrate_sock() has already held one sk_refcnt * before returning. */ } else { /* We own a reference on the listener, increase it again * as we might lose it too soon. */ sock_hold(sk); } refcounted = true; nsk = NULL; if (!tcp_filter(sk, skb)) { th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); tcp_v4_fill_cb(skb, iph, th); nsk = tcp_check_req(sk, skb, req, false, &req_stolen, &drop_reason); } else { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; } if (!nsk) { reqsk_put(req); if (req_stolen) { /* Another cpu got exclusive access to req * and created a full blown socket. * Try to feed this packet to this socket * instead of discarding it. 
*/ tcp_v4_restore_cb(skb); sock_put(sk); goto lookup; } goto discard_and_relse; } nf_reset_ct(skb); if (nsk == sk) { reqsk_put(req); tcp_v4_restore_cb(skb); } else { drop_reason = tcp_child_process(sk, nsk, skb); if (drop_reason) { enum sk_rst_reason rst_reason; rst_reason = sk_rst_convert_drop_reason(drop_reason); tcp_v4_send_reset(nsk, skb, rst_reason); goto discard_and_relse; } sock_put(sk); return 0; } } process: if (static_branch_unlikely(&ip4_min_ttl)) { /* min_ttl can be changed concurrently from do_ip_setsockopt() */ if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); drop_reason = SKB_DROP_REASON_TCP_MINTTL; goto discard_and_relse; } } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { drop_reason = SKB_DROP_REASON_XFRM_POLICY; goto discard_and_relse; } drop_reason = tcp_inbound_hash(sk, NULL, skb, &iph->saddr, &iph->daddr, AF_INET, dif, sdif); if (drop_reason) goto discard_and_relse; nf_reset_ct(skb); if (tcp_filter(sk, skb)) { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto discard_and_relse; } th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); tcp_v4_fill_cb(skb, iph, th); skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v4_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { ret = tcp_v4_do_rcv(sk, skb); } else { if (tcp_add_backlog(sk, skb, &drop_reason)) goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret; no_tcp_socket: drop_reason = SKB_DROP_REASON_NO_SOCKET; if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; tcp_v4_fill_cb(skb, iph, th); if (tcp_checksum_complete(skb)) { csum_error: drop_reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v4_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason)); } discard_it: SKB_DR_OR(drop_reason, NOT_SPECIFIED); /* Discard frame. 
*/ sk_skb_reason_drop(sk, skb, drop_reason); return 0; discard_and_relse: sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { drop_reason = SKB_DROP_REASON_XFRM_POLICY; inet_twsk_put(inet_twsk(sk)); goto discard_it; } tcp_v4_fill_cb(skb, iph, th); if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } tw_status = tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn, &drop_reason); switch (tw_status) { case TCP_TW_SYN: { struct sock *sk2 = inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th), iph->saddr, th->source, iph->daddr, th->dest, inet_iif(skb), sdif); if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; tcp_v4_restore_cb(skb); refcounted = false; __this_cpu_write(tcp_tw_isn, isn); goto process; } } /* to ACK */ fallthrough; case TCP_TW_ACK: case TCP_TW_ACK_OOW: tcp_v4_timewait_ack(sk, skb, tw_status); break; case TCP_TW_RST: tcp_v4_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS:; } goto discard_it; } static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp_timewait_sock), .twsk_destructor= tcp_twsk_destructor, }; void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { rcu_assign_pointer(sk->sk_rx_dst, dst); sk->sk_rx_dst_ifindex = skb->skb_iif; } } EXPORT_IPV6_MOD(inet_sk_rx_dst_set); const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v4_conn_request, .syn_recv_sock = tcp_v4_syn_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .mtu_reduced = tcp_v4_mtu_reduced, }; EXPORT_IPV6_MOD(ipv4_specific); #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v4_parse_md5_keys, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v4_ao_lookup, .calc_ao_hash = tcp_v4_ao_hash_skb, .ao_parse = tcp_v4_parse_ao, .ao_calc_key_sk = tcp_v4_ao_calc_key_sk, #endif }; #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. 
*/ static int tcp_v4_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv4_specific; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; #endif return 0; } #ifdef CONFIG_TCP_MD5SIG static void tcp_md5sig_info_free_rcu(struct rcu_head *head) { struct tcp_md5sig_info *md5sig; md5sig = container_of(head, struct tcp_md5sig_info, rcu); kfree(md5sig); static_branch_slow_dec_deferred(&tcp_md5_needed); tcp_md5_release_sigpool(); } #endif static void tcp_release_user_frags(struct sock *sk) { #ifdef CONFIG_PAGE_POOL unsigned long index; void *netmem; xa_for_each(&sk->sk_user_frags, index, netmem) WARN_ON_ONCE(!napi_pp_put_page((__force netmem_ref)netmem)); #endif } void tcp_v4_destroy_sock(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tcp_release_user_frags(sk); xa_destroy(&sk->sk_user_frags); trace_tcp_destroy_sock(sk); tcp_clear_xmit_timers(sk); tcp_cleanup_congestion_control(sk); tcp_cleanup_ulp(sk); /* Clean up the write buffer. */ tcp_write_queue_purge(sk); /* Check if we want to disable active TFO */ tcp_fastopen_active_disable_ofo_check(sk); /* Cleans up our, hopefully empty, out_of_order_queue. */ skb_rbtree_purge(&tp->out_of_order_queue); #ifdef CONFIG_TCP_MD5SIG /* Clean up the MD5 key list, if any */ if (tp->md5sig_info) { struct tcp_md5sig_info *md5sig; md5sig = rcu_dereference_protected(tp->md5sig_info, 1); tcp_clear_md5_list(sk); call_rcu(&md5sig->rcu, tcp_md5sig_info_free_rcu); rcu_assign_pointer(tp->md5sig_info, NULL); } #endif tcp_ao_destroy_sock(sk, false); /* Clean up a referenced TCP bind bucket. */ if (inet_csk(sk)->icsk_bind_hash) inet_put_port(sk); BUG_ON(rcu_access_pointer(tp->fastopen_rsk)); /* If socket is aborted during connect operation */ tcp_free_fastopen_req(tp); tcp_fastopen_destroy_cipher(sk); tcp_saved_syn_free(tp); sk_sockets_allocated_dec(sk); } EXPORT_IPV6_MOD(tcp_v4_destroy_sock); #ifdef CONFIG_PROC_FS /* Proc filesystem TCP sock list dumping. */ static unsigned short seq_file_family(const struct seq_file *seq); static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) { unsigned short family = seq_file_family(seq); /* AF_UNSPEC is used as a match-all */ return ((family == AF_UNSPEC || family == sk->sk_family) && net_eq(sock_net(sk), seq_file_net(seq))); } /* Find a non-empty bucket (starting from st->bucket) * and return the first sk from it. */ static void *listening_get_first(struct seq_file *seq) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct tcp_iter_state *st = seq->private; st->offset = 0; for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) { struct inet_listen_hashbucket *ilb2; struct hlist_nulls_node *node; struct sock *sk; ilb2 = &hinfo->lhash2[st->bucket]; if (hlist_nulls_empty(&ilb2->nulls_head)) continue; spin_lock(&ilb2->lock); sk_nulls_for_each(sk, node, &ilb2->nulls_head) { if (seq_sk_match(seq, sk)) return sk; } spin_unlock(&ilb2->lock); } return NULL; } /* Find the next sk of "cur" within the same bucket (i.e. st->bucket). * If "cur" is the last one in the st->bucket, * call listening_get_first() to return the first sk of the next * non-empty bucket.
*/ static void *listening_get_next(struct seq_file *seq, void *cur) { struct tcp_iter_state *st = seq->private; struct inet_listen_hashbucket *ilb2; struct hlist_nulls_node *node; struct inet_hashinfo *hinfo; struct sock *sk = cur; ++st->num; ++st->offset; sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { if (seq_sk_match(seq, sk)) return sk; } hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; ilb2 = &hinfo->lhash2[st->bucket]; spin_unlock(&ilb2->lock); ++st->bucket; return listening_get_first(seq); } static void *listening_get_idx(struct seq_file *seq, loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc; st->bucket = 0; st->offset = 0; rc = listening_get_first(seq); while (rc && *pos) { rc = listening_get_next(seq, rc); --*pos; } return rc; } static inline bool empty_bucket(struct inet_hashinfo *hinfo, const struct tcp_iter_state *st) { return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain); } /* * Get first established socket starting from bucket given in st->bucket. * If st->bucket is zero, the very first socket in the hash is returned. */ static void *established_get_first(struct seq_file *seq) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct tcp_iter_state *st = seq->private; st->offset = 0; for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) { struct sock *sk; struct hlist_nulls_node *node; spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket); cond_resched(); /* Lockless fast path for the common case of empty buckets */ if (empty_bucket(hinfo, st)) continue; spin_lock_bh(lock); sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) { if (seq_sk_match(seq, sk)) return sk; } spin_unlock_bh(lock); } return NULL; } static void *established_get_next(struct seq_file *seq, void *cur) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct tcp_iter_state *st = seq->private; struct hlist_nulls_node *node; struct sock *sk = cur; ++st->num; ++st->offset; sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { if (seq_sk_match(seq, sk)) return sk; } spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket)); ++st->bucket; return established_get_first(seq); } static void *established_get_idx(struct seq_file *seq, loff_t pos) { struct tcp_iter_state *st = seq->private; void *rc; st->bucket = 0; rc = established_get_first(seq); while (rc && pos) { rc = established_get_next(seq, rc); --pos; } return rc; } static void *tcp_get_idx(struct seq_file *seq, loff_t pos) { void *rc; struct tcp_iter_state *st = seq->private; st->state = TCP_SEQ_STATE_LISTENING; rc = listening_get_idx(seq, &pos); if (!rc) { st->state = TCP_SEQ_STATE_ESTABLISHED; rc = established_get_idx(seq, pos); } return rc; } static void *tcp_seek_last_pos(struct seq_file *seq) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct tcp_iter_state *st = seq->private; int bucket = st->bucket; int offset = st->offset; int orig_num = st->num; void *rc = NULL; switch (st->state) { case TCP_SEQ_STATE_LISTENING: if (st->bucket > hinfo->lhash2_mask) break; rc = listening_get_first(seq); while (offset-- && rc && bucket == st->bucket) rc = listening_get_next(seq, rc); if (rc) break; st->bucket = 0; st->state = TCP_SEQ_STATE_ESTABLISHED; fallthrough; case TCP_SEQ_STATE_ESTABLISHED: if (st->bucket > hinfo->ehash_mask) break; rc = established_get_first(seq); while (offset-- && rc && bucket == st->bucket) rc = established_get_next(seq, rc); } st->num = orig_num; return rc; } void *tcp_seq_start(struct seq_file *seq, 
loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc; if (*pos && *pos == st->last_pos) { rc = tcp_seek_last_pos(seq); if (rc) goto out; } st->state = TCP_SEQ_STATE_LISTENING; st->num = 0; st->bucket = 0; st->offset = 0; rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; out: st->last_pos = *pos; return rc; } EXPORT_IPV6_MOD(tcp_seq_start); void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc = NULL; if (v == SEQ_START_TOKEN) { rc = tcp_get_idx(seq, 0); goto out; } switch (st->state) { case TCP_SEQ_STATE_LISTENING: rc = listening_get_next(seq, v); if (!rc) { st->state = TCP_SEQ_STATE_ESTABLISHED; st->bucket = 0; st->offset = 0; rc = established_get_first(seq); } break; case TCP_SEQ_STATE_ESTABLISHED: rc = established_get_next(seq, v); break; } out: ++*pos; st->last_pos = *pos; return rc; } EXPORT_IPV6_MOD(tcp_seq_next); void tcp_seq_stop(struct seq_file *seq, void *v) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct tcp_iter_state *st = seq->private; switch (st->state) { case TCP_SEQ_STATE_LISTENING: if (v != SEQ_START_TOKEN) spin_unlock(&hinfo->lhash2[st->bucket].lock); break; case TCP_SEQ_STATE_ESTABLISHED: if (v) spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket)); break; } } EXPORT_IPV6_MOD(tcp_seq_stop); static void get_openreq4(const struct request_sock *req, struct seq_file *f, int i) { const struct inet_request_sock *ireq = inet_rsk(req); long delta = req->rsk_timer.expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK", i, ireq->ir_loc_addr, ireq->ir_num, ireq->ir_rmt_addr, ntohs(ireq->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. */ 1, /* timers active (only the expire timer) */ jiffies_delta_to_clock_t(delta), req->num_timeout, from_kuid_munged(seq_user_ns(f), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) { int timer_active; unsigned long timer_expires; const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_sock *inet = inet_sk(sk); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); u8 icsk_pending; int rx_queue; int state; icsk_pending = smp_load_acquire(&icsk->icsk_pending); if (icsk_pending == ICSK_TIME_RETRANS || icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk_timeout(icsk); } else if (icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk_timeout(icsk); } else if (timer_pending(&sk->sk_timer)) { timer_active = 2; timer_expires = sk->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } state = inet_sk_state_load(sk); if (state == TCP_LISTEN) rx_queue = READ_ONCE(sk->sk_ack_backlog); else /* Because we don't lock the socket, * we might find a transient negative value. 
*/ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq), 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", i, src, srcp, dest, destp, state, READ_ONCE(tp->write_seq) - tp->snd_una, rx_queue, timer_active, jiffies_delta_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), icsk->icsk_probes_out, sock_i_ino(sk), refcount_read(&sk->sk_refcnt), sk, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk), tcp_snd_cwnd(tp), state == TCP_LISTEN ? fastopenq->max_qlen : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); } static void get_timewait4_sock(const struct inet_timewait_sock *tw, struct seq_file *f, int i) { long delta = tw->tw_timer.expires - jiffies; __be32 dest, src; __u16 destp, srcp; dest = tw->tw_daddr; src = tw->tw_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK", i, src, srcp, dest, destp, READ_ONCE(tw->tw_substate), 0, 0, 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, refcount_read(&tw->tw_refcnt), tw); } #define TMPSZ 150 static int tcp4_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; struct sock *sk = v; seq_setwidth(seq, TMPSZ - 1); if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode"); goto out; } st = seq->private; if (sk->sk_state == TCP_TIME_WAIT) get_timewait4_sock(v, seq, st->num); else if (sk->sk_state == TCP_NEW_SYN_RECV) get_openreq4(v, seq, st->num); else get_tcp4_sock(v, seq, st->num); out: seq_pad(seq, '\n'); return 0; } #ifdef CONFIG_BPF_SYSCALL struct bpf_tcp_iter_state { struct tcp_iter_state state; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; struct sock **batch; bool st_bucket_done; }; struct bpf_iter__tcp { __bpf_md_ptr(struct bpf_iter_meta *, meta); __bpf_md_ptr(struct sock_common *, sk_common); uid_t uid __aligned(8); }; static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, struct sock_common *sk_common, uid_t uid) { struct bpf_iter__tcp ctx; meta->seq_num--; /* skip SEQ_START_TOKEN */ ctx.meta = meta; ctx.sk_common = sk_common; ctx.uid = uid; return bpf_iter_run_prog(prog, &ctx); } static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter) { while (iter->cur_sk < iter->end_sk) sock_gen_put(iter->batch[iter->cur_sk++]); } static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, unsigned int new_batch_sz) { struct sock **new_batch; new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, GFP_USER | __GFP_NOWARN); if (!new_batch) return -ENOMEM; bpf_iter_tcp_put_batch(iter); kvfree(iter->batch); iter->batch = new_batch; iter->max_sk = new_batch_sz; return 0; } static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq, struct sock *start_sk) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; struct tcp_iter_state *st = &iter->state; struct hlist_nulls_node *node; unsigned int expected = 1; struct sock *sk; sock_hold(start_sk); iter->batch[iter->end_sk++] = start_sk; sk = sk_nulls_next(start_sk); sk_nulls_for_each_from(sk, node) { if (seq_sk_match(seq, sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); iter->batch[iter->end_sk++] = sk; } expected++; } } 
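/* Note: "expected" keeps counting matches even once the batch array is
 * full; bpf_iter_tcp_batch() compares it with end_sk and, if the batch
 * came up short, grows the array and walks this bucket again. */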
spin_unlock(&hinfo->lhash2[st->bucket].lock); return expected; } static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq, struct sock *start_sk) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; struct tcp_iter_state *st = &iter->state; struct hlist_nulls_node *node; unsigned int expected = 1; struct sock *sk; sock_hold(start_sk); iter->batch[iter->end_sk++] = start_sk; sk = sk_nulls_next(start_sk); sk_nulls_for_each_from(sk, node) { if (seq_sk_match(seq, sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); iter->batch[iter->end_sk++] = sk; } expected++; } } spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket)); return expected; } static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; struct tcp_iter_state *st = &iter->state; unsigned int expected; bool resized = false; struct sock *sk; /* The st->bucket is done. Directly advance to the next * bucket instead of having the tcp_seek_last_pos() to skip * one by one in the current bucket and eventually find out * it has to advance to the next bucket. */ if (iter->st_bucket_done) { st->offset = 0; st->bucket++; if (st->state == TCP_SEQ_STATE_LISTENING && st->bucket > hinfo->lhash2_mask) { st->state = TCP_SEQ_STATE_ESTABLISHED; st->bucket = 0; } } again: /* Get a new batch */ iter->cur_sk = 0; iter->end_sk = 0; iter->st_bucket_done = false; sk = tcp_seek_last_pos(seq); if (!sk) return NULL; /* Done */ if (st->state == TCP_SEQ_STATE_LISTENING) expected = bpf_iter_tcp_listening_batch(seq, sk); else expected = bpf_iter_tcp_established_batch(seq, sk); if (iter->end_sk == expected) { iter->st_bucket_done = true; return sk; } if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) { resized = true; goto again; } return sk; } static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos) { /* bpf iter does not support lseek, so it always * continue from where it was stop()-ped. */ if (*pos) return bpf_iter_tcp_batch(seq); return SEQ_START_TOKEN; } static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bpf_tcp_iter_state *iter = seq->private; struct tcp_iter_state *st = &iter->state; struct sock *sk; /* Whenever seq_next() is called, the iter->cur_sk is * done with seq_show(), so advance to the next sk in * the batch. */ if (iter->cur_sk < iter->end_sk) { /* Keeping st->num consistent in tcp_iter_state. * bpf_iter_tcp does not use st->num. * meta.seq_num is used instead. */ st->num++; /* Move st->offset to the next sk in the bucket such that * the future start() will resume at st->offset in * st->bucket. See tcp_seek_last_pos(). */ st->offset++; sock_gen_put(iter->batch[iter->cur_sk++]); } if (iter->cur_sk < iter->end_sk) sk = iter->batch[iter->cur_sk]; else sk = bpf_iter_tcp_batch(seq); ++*pos; /* Keeping st->last_pos consistent in tcp_iter_state. * bpf iter does not do lseek, so st->last_pos always equals to *pos. 
*/ st->last_pos = *pos; return sk; } static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) { struct bpf_iter_meta meta; struct bpf_prog *prog; struct sock *sk = v; uid_t uid; int ret; if (v == SEQ_START_TOKEN) return 0; if (sk_fullsock(sk)) lock_sock(sk); if (unlikely(sk_unhashed(sk))) { ret = SEQ_SKIP; goto unlock; } if (sk->sk_state == TCP_TIME_WAIT) { uid = 0; } else if (sk->sk_state == TCP_NEW_SYN_RECV) { const struct request_sock *req = v; uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(req->rsk_listener)); } else { uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); } meta.seq = seq; prog = bpf_iter_get_info(&meta, false); ret = tcp_prog_seq_show(prog, &meta, v, uid); unlock: if (sk_fullsock(sk)) release_sock(sk); return ret; } static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v) { struct bpf_tcp_iter_state *iter = seq->private; struct bpf_iter_meta meta; struct bpf_prog *prog; if (!v) { meta.seq = seq; prog = bpf_iter_get_info(&meta, true); if (prog) (void)tcp_prog_seq_show(prog, &meta, v, 0); } if (iter->cur_sk < iter->end_sk) { bpf_iter_tcp_put_batch(iter); iter->st_bucket_done = false; } } static const struct seq_operations bpf_iter_tcp_seq_ops = { .show = bpf_iter_tcp_seq_show, .start = bpf_iter_tcp_seq_start, .next = bpf_iter_tcp_seq_next, .stop = bpf_iter_tcp_seq_stop, }; #endif static unsigned short seq_file_family(const struct seq_file *seq) { const struct tcp_seq_afinfo *afinfo; #ifdef CONFIG_BPF_SYSCALL /* Iterated from bpf_iter. Let the bpf prog filter instead. */ if (seq->op == &bpf_iter_tcp_seq_ops) return AF_UNSPEC; #endif /* Iterated from proc fs */ afinfo = pde_data(file_inode(seq->file)); return afinfo->family; } static const struct seq_operations tcp4_seq_ops = { .show = tcp4_seq_show, .start = tcp_seq_start, .next = tcp_seq_next, .stop = tcp_seq_stop, }; static struct tcp_seq_afinfo tcp4_seq_afinfo = { .family = AF_INET, }; static int __net_init tcp4_proc_init_net(struct net *net) { if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops, sizeof(struct tcp_iter_state), &tcp4_seq_afinfo)) return -ENOMEM; return 0; } static void __net_exit tcp4_proc_exit_net(struct net *net) { remove_proc_entry("tcp", net->proc_net); } static struct pernet_operations tcp4_net_ops = { .init = tcp4_proc_init_net, .exit = tcp4_proc_exit_net, }; int __init tcp4_proc_init(void) { return register_pernet_subsys(&tcp4_net_ops); } void tcp4_proc_exit(void) { unregister_pernet_subsys(&tcp4_net_ops); } #endif /* CONFIG_PROC_FS */ /* @wake is one when sk_stream_write_space() calls us. * This sends EPOLLOUT only if notsent_bytes is below half the limit. * This mimics the strategy used in sock_def_write_space().
*/ bool tcp_stream_memory_free(const struct sock *sk, int wake) { const struct tcp_sock *tp = tcp_sk(sk); u32 notsent_bytes = READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt); return (notsent_bytes << wake) < tcp_notsent_lowat(tp); } EXPORT_SYMBOL(tcp_stream_memory_free); struct proto tcp_prot = { .name = "TCP", .owner = THIS_MODULE, .close = tcp_close, .pre_connect = tcp_v4_pre_connect, .connect = tcp_v4_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v4_init_sock, .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt, .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .splice_eof = tcp_splice_eof, .backlog_rcv = tcp_v4_do_rcv, .release_cb = tcp_release_cb, .hash = inet_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .put_port = inet_put_port, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = tcp_bpf_update_proto, #endif .enter_memory_pressure = tcp_enter_memory_pressure, .leave_memory_pressure = tcp_leave_memory_pressure, .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .orphan_count = &tcp_orphan_count, .memory_allocated = &tcp_memory_allocated, .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, .memory_pressure = &tcp_memory_pressure, .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp_sock), .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp_timewait_sock_ops, .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = NULL, .no_autobind = true, .diag_destroy = tcp_abort, }; EXPORT_SYMBOL(tcp_prot); static void __net_exit tcp_sk_exit(struct net *net) { if (net->ipv4.tcp_congestion_control) bpf_module_put(net->ipv4.tcp_congestion_control, net->ipv4.tcp_congestion_control->owner); } static void __net_init tcp_set_hashinfo(struct net *net) { struct inet_hashinfo *hinfo; unsigned int ehash_entries; struct net *old_net; if (net_eq(net, &init_net)) goto fallback; old_net = current->nsproxy->net_ns; ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries); if (!ehash_entries) goto fallback; ehash_entries = roundup_pow_of_two(ehash_entries); hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries); if (!hinfo) { pr_warn("Failed to allocate TCP ehash (entries: %u) " "for a netns, fallback to the global one\n", ehash_entries); fallback: hinfo = &tcp_hashinfo; ehash_entries = tcp_hashinfo.ehash_mask + 1; } net->ipv4.tcp_death_row.hashinfo = hinfo; net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2; net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128); } static int __net_init tcp_sk_init(struct net *net) { net->ipv4.sysctl_tcp_ecn = 2; net->ipv4.sysctl_tcp_ecn_fallback = 1; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS; net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS; net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME; net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES; net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL; net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES; net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES; 
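/* SYN cookies default to enabled below; they only engage once a
 * listener's request queue overflows (see tcp_syn_flood_action()). */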
net->ipv4.sysctl_tcp_syncookies = 1; net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH; net->ipv4.sysctl_tcp_retries1 = TCP_RETR1; net->ipv4.sysctl_tcp_retries2 = TCP_RETR2; net->ipv4.sysctl_tcp_orphan_retries = 0; net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; net->ipv4.sysctl_tcp_tw_reuse = 2; net->ipv4.sysctl_tcp_tw_reuse_delay = 1 * MSEC_PER_SEC; net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1; refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1); tcp_set_hashinfo(net); net->ipv4.sysctl_tcp_sack = 1; net->ipv4.sysctl_tcp_window_scaling = 1; net->ipv4.sysctl_tcp_timestamps = 1; net->ipv4.sysctl_tcp_early_retrans = 3; net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION; net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */ net->ipv4.sysctl_tcp_retrans_collapse = 1; net->ipv4.sysctl_tcp_max_reordering = 300; net->ipv4.sysctl_tcp_dsack = 1; net->ipv4.sysctl_tcp_app_win = 31; net->ipv4.sysctl_tcp_adv_win_scale = 1; net->ipv4.sysctl_tcp_frto = 2; net->ipv4.sysctl_tcp_moderate_rcvbuf = 1; /* This limits the percentage of the congestion window which we * will allow a single TSO frame to consume. Building TSO frames * which are too large can cause TCP streams to be bursty. */ net->ipv4.sysctl_tcp_tso_win_divisor = 3; /* Default TSQ limit of 4 MB */ net->ipv4.sysctl_tcp_limit_output_bytes = 4 << 20; /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */ net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX; net->ipv4.sysctl_tcp_min_tso_segs = 2; net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */ net->ipv4.sysctl_tcp_min_rtt_wlen = 300; net->ipv4.sysctl_tcp_autocorking = 1; net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2; net->ipv4.sysctl_tcp_pacing_ss_ratio = 200; net->ipv4.sysctl_tcp_pacing_ca_ratio = 120; if (net != &init_net) { memcpy(net->ipv4.sysctl_tcp_rmem, init_net.ipv4.sysctl_tcp_rmem, sizeof(init_net.ipv4.sysctl_tcp_rmem)); memcpy(net->ipv4.sysctl_tcp_wmem, init_net.ipv4.sysctl_tcp_wmem, sizeof(init_net.ipv4.sysctl_tcp_wmem)); } net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC; net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC; net->ipv4.sysctl_tcp_comp_sack_nr = 44; net->ipv4.sysctl_tcp_backlog_ack_defer = 1; net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE; net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0; atomic_set(&net->ipv4.tfo_active_disable_times, 0); /* Set default values for PLB */ net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */ net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3; net->ipv4.sysctl_tcp_plb_rehash_rounds = 12; net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60; /* Default congestion threshold for PLB to mark a round is 50% */ net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2; /* Reno is always built in */ if (!net_eq(net, &init_net) && bpf_try_module_get(init_net.ipv4.tcp_congestion_control, init_net.ipv4.tcp_congestion_control->owner)) net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control; else net->ipv4.tcp_congestion_control = &tcp_reno; net->ipv4.sysctl_tcp_syn_linear_timeouts = 4; net->ipv4.sysctl_tcp_shrink_window = 0; net->ipv4.sysctl_tcp_pingpong_thresh = 1; net->ipv4.sysctl_tcp_rto_min_us = jiffies_to_usecs(TCP_RTO_MIN); net->ipv4.sysctl_tcp_rto_max_ms = TCP_RTO_MAX_SEC * MSEC_PER_SEC; return 0; } static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) { struct net *net; /* make sure concurrent calls to tcp_sk_exit_batch from net_cleanup_work * and failed 
setup_net error unwinding path are serialized. * * tcp_twsk_purge() handles twsk in any dead netns, not just those in * net_exit_list, the thread that dismantles a particular twsk must * do so without other thread progressing to refcount_dec_and_test() of * tcp_death_row.tw_refcount. */ mutex_lock(&tcp_exit_batch_mutex); tcp_twsk_purge(net_exit_list); list_for_each_entry(net, net_exit_list, exit_list) { inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo); WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount)); tcp_fastopen_ctx_destroy(net); } mutex_unlock(&tcp_exit_batch_mutex); } static struct pernet_operations __net_initdata tcp_sk_ops = { .init = tcp_sk_init, .exit = tcp_sk_exit, .exit_batch = tcp_sk_exit_batch, }; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta, struct sock_common *sk_common, uid_t uid) #define INIT_BATCH_SZ 16 static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux) { struct bpf_tcp_iter_state *iter = priv_data; int err; err = bpf_iter_init_seq_net(priv_data, aux); if (err) return err; err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ); if (err) { bpf_iter_fini_seq_net(priv_data); return err; } return 0; } static void bpf_iter_fini_tcp(void *priv_data) { struct bpf_tcp_iter_state *iter = priv_data; bpf_iter_fini_seq_net(priv_data); kvfree(iter->batch); } static const struct bpf_iter_seq_info tcp_seq_info = { .seq_ops = &bpf_iter_tcp_seq_ops, .init_seq_private = bpf_iter_init_tcp, .fini_seq_private = bpf_iter_fini_tcp, .seq_priv_size = sizeof(struct bpf_tcp_iter_state), }; static const struct bpf_func_proto * bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_setsockopt: return &bpf_sk_setsockopt_proto; case BPF_FUNC_getsockopt: return &bpf_sk_getsockopt_proto; default: return NULL; } } static struct bpf_iter_reg tcp_reg_info = { .target = "tcp", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__tcp, sk_common), PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, }, .get_func_proto = bpf_iter_tcp_get_func_proto, .seq_info = &tcp_seq_info, }; static void __init bpf_iter_register(void) { tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON]; if (bpf_iter_reg_target(&tcp_reg_info)) pr_warn("Warning: could not register bpf iterator tcp\n"); } #endif void __init tcp_v4_init(void) { int cpu, res; for_each_possible_cpu(cpu) { struct sock *sk; res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, IPPROTO_TCP, &init_net); if (res) panic("Failed to create the TCP control socket.\n"); sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); /* Please enforce IP_DF and IPID==0 for RST and * ACK sent in SYN-RECV and TIME-WAIT state. */ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO; sk->sk_clockid = CLOCK_MONOTONIC; per_cpu(ipv4_tcp_sk.sock, cpu) = sk; } if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) bpf_iter_register(); #endif }
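/* For illustration, a minimal user-space sketch of the socket option
 * that tcp_v4_parse_md5_keys() above services: it installs an MD5 key
 * for a /24 worth of peers via TCP_MD5SIG_EXT. The address, key, and
 * prefix length are placeholder values, error handling is reduced to
 * perror(), and <linux/tcp.h> is assumed to provide struct tcp_md5sig.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&cmd, 0, sizeof(cmd));
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = htonl(0xc0a80100); /* placeholder peer prefix 192.168.1.0 */
        cmd.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
        cmd.tcpm_prefixlen = 24;  /* must not exceed 32 for AF_INET, as checked above */
        cmd.tcpm_keylen = 6;      /* a zero keylen would delete the key instead */
        memcpy(cmd.tcpm_key, "secret", 6); /* at most TCP_MD5SIG_MAXKEYLEN bytes */
        if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &cmd, sizeof(cmd)) < 0)
                perror("setsockopt(TCP_MD5SIG_EXT)");
        close(fd);
        return 0;
}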
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. * https://www.huawei.com/ * Copyright (C) 2022, Alibaba Cloud */ #include "internal.h" static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, void *dentry_blk, struct erofs_dirent *de, unsigned int nameoff0, unsigned int maxsize) { const struct erofs_dirent *end = dentry_blk + nameoff0; while (de < end) { unsigned char d_type = fs_ftype_to_dtype(de->file_type); unsigned int nameoff = le16_to_cpu(de->nameoff); const char *de_name = (char *)dentry_blk + nameoff; unsigned int de_namelen; /* the last dirent in the block? */ if (de + 1 >= end) de_namelen = strnlen(de_name, maxsize - nameoff); else de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; /* a corrupted entry is found */ if (nameoff + de_namelen > maxsize || de_namelen > EROFS_NAME_LEN) { erofs_err(dir->i_sb, "bogus dirent @ nid %llu", EROFS_I(dir)->nid); DBG_BUGON(1); return -EFSCORRUPTED; } if (!dir_emit(ctx, de_name, de_namelen, le64_to_cpu(de->nid), d_type)) return 1; ++de; ctx->pos += sizeof(struct erofs_dirent); } return 0; } static int erofs_readdir(struct file *f, struct dir_context *ctx) { struct inode *dir = file_inode(f); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct super_block *sb = dir->i_sb; unsigned long bsz = sb->s_blocksize; unsigned int ofs = erofs_blkoff(sb, ctx->pos); int err = 0; bool initial = true; buf.mapping = dir->i_mapping; while (ctx->pos < dir->i_size) { erofs_off_t dbstart = ctx->pos - ofs; struct erofs_dirent *de; unsigned int nameoff, maxsize; de = erofs_bread(&buf, dbstart, true); if (IS_ERR(de)) { erofs_err(sb, "failed to readdir of logical block %llu of nid %llu", erofs_blknr(sb, dbstart), EROFS_I(dir)->nid); err = PTR_ERR(de); break; } nameoff = le16_to_cpu(de->nameoff); if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) { erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu", nameoff, EROFS_I(dir)->nid); err = -EFSCORRUPTED; break; } maxsize = min_t(unsigned int, dir->i_size - dbstart, bsz); /* search dirents at the arbitrary position */ if (initial) { initial = false; ofs = roundup(ofs, sizeof(struct erofs_dirent)); ctx->pos = dbstart + ofs; } err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs, nameoff, maxsize); if (err) break; ctx->pos = dbstart + maxsize; ofs = 0; } erofs_put_metabuf(&buf); if (EROFS_I(dir)->dot_omitted && ctx->pos == dir->i_size) { if (!dir_emit_dot(f, ctx)) return 0; ++ctx->pos; } return err < 0 ? err : 0; } const struct file_operations erofs_dir_fops = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = erofs_readdir, };
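/* For reference, the consuming side of the dir_emit() calls made by
 * erofs_readdir() above, as seen from user space: a plain POSIX
 * directory walk. This is a generic sketch; "/mnt/erofs" is a
 * hypothetical mount point.
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
        DIR *d = opendir("/mnt/erofs");
        struct dirent *de;

        if (!d) {
                perror("opendir");
                return 1;
        }
        /* each entry below was produced by one dir_emit() call; d_type
         * originates from fs_ftype_to_dtype() */
        while ((de = readdir(d)))
                printf("%8lu %2u %s\n", (unsigned long)de->d_ino,
                       (unsigned int)de->d_type, de->d_name);
        closedir(d);
        return 0;
}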
#ifndef __LINUX_MROUTE_BASE_H #define __LINUX_MROUTE_BASE_H #include <linux/netdevice.h> #include <linux/rhashtable-types.h> #include <linux/spinlock.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/fib_notifier.h> #include <net/ip_fib.h> /** * struct vif_device - interface representor for multicast routing * @dev: network device being used * @dev_tracker: refcount tracker for @dev reference * @bytes_in: statistic; bytes ingressing * @bytes_out: statistic; bytes egressing * @pkt_in: statistic; packets ingressing * @pkt_out: statistic; packets egressing * @rate_limit: Traffic shaping (NI) * @threshold: TTL threshold * @flags: Control flags * @link: Physical interface index * @dev_parent_id: device parent id * @local: Local address * @remote: Remote address for tunnels */ struct vif_device { struct net_device __rcu *dev; netdevice_tracker dev_tracker; unsigned long bytes_in, bytes_out; unsigned long pkt_in, pkt_out; unsigned long rate_limit; unsigned char threshold; unsigned short flags; int link; /* Currently only used by ipmr */ struct netdev_phys_item_id dev_parent_id; __be32 local, remote; }; struct vif_entry_notifier_info { struct fib_notifier_info info; struct net_device *dev; unsigned short vif_index; unsigned short vif_flags; u32 tb_id; }; static inline int mr_call_vif_notifier(struct notifier_block *nb, unsigned short family, enum fib_event_type event_type, struct vif_device *vif, struct net_device *vif_dev, unsigned short vif_index, u32 tb_id, struct netlink_ext_ack *extack) { struct vif_entry_notifier_info info = { .info = { .family = family, .extack = extack, }, .dev = vif_dev, .vif_index = vif_index, .vif_flags = vif->flags, .tb_id = tb_id, }; return
call_fib_notifier(nb, event_type, &info.info); } static inline int mr_call_vif_notifiers(struct net *net, unsigned short family, enum fib_event_type event_type, struct vif_device *vif, struct net_device *vif_dev, unsigned short vif_index, u32 tb_id, unsigned int *ipmr_seq) { struct vif_entry_notifier_info info = { .info = { .family = family, }, .dev = vif_dev, .vif_index = vif_index, .vif_flags = vif->flags, .tb_id = tb_id, }; ASSERT_RTNL(); (*ipmr_seq)++; return call_fib_notifiers(net, event_type, &info.info); } #ifndef MAXVIFS /* This one is nasty; the value is defined in uapi using different symbols for * mroute and mroute6 but both map to the same value, 32. */ #define MAXVIFS 32 #endif /* Note: This helper is deprecated. */ #define VIF_EXISTS(_mrt, _idx) (!!rcu_access_pointer((_mrt)->vif_table[_idx].dev)) /* mfc_flags: * MFC_STATIC - the entry was added statically (not by a routing daemon) * MFC_OFFLOAD - the entry was offloaded to the hardware */ enum { MFC_STATIC = BIT(0), MFC_OFFLOAD = BIT(1), }; /** * struct mr_mfc - common multicast routing entries * @mnode: rhashtable list * @mfc_parent: source interface (iif) * @mfc_flags: entry flags * @expires: unresolved entry expire time * @unresolved: unresolved cached skbs * @last_assert: time of last assert * @minvif: minimum VIF id * @maxvif: maximum VIF id * @bytes: bytes that have passed for this entry * @pkt: packets that have passed for this entry * @wrong_if: number of wrong source interface hits * @lastuse: time of last use of the group (traffic or update) * @ttls: OIF TTL threshold array * @refcount: reference count for this entry * @list: global entry list * @rcu: used for entry destruction * @free: Operation used for freeing an entry under RCU */ struct mr_mfc { struct rhlist_head mnode; unsigned short mfc_parent; int mfc_flags; union { struct { unsigned long expires; struct sk_buff_head unresolved; } unres; struct { unsigned long last_assert; int minvif; int maxvif; atomic_long_t bytes; atomic_long_t pkt; atomic_long_t wrong_if; unsigned long lastuse; unsigned char ttls[MAXVIFS]; refcount_t refcount; } res; } mfc_un; struct list_head list; struct rcu_head rcu; void (*free)(struct rcu_head *head); }; static inline void mr_cache_put(struct mr_mfc *c) { if (refcount_dec_and_test(&c->mfc_un.res.refcount)) call_rcu(&c->rcu, c->free); } static inline void mr_cache_hold(struct mr_mfc *c) { refcount_inc(&c->mfc_un.res.refcount); } struct mfc_entry_notifier_info { struct fib_notifier_info info; struct mr_mfc *mfc; u32 tb_id; }; static inline int mr_call_mfc_notifier(struct notifier_block *nb, unsigned short family, enum fib_event_type event_type, struct mr_mfc *mfc, u32 tb_id, struct netlink_ext_ack *extack) { struct mfc_entry_notifier_info info = { .info = { .family = family, .extack = extack, }, .mfc = mfc, .tb_id = tb_id }; return call_fib_notifier(nb, event_type, &info.info); } static inline int mr_call_mfc_notifiers(struct net *net, unsigned short family, enum fib_event_type event_type, struct mr_mfc *mfc, u32 tb_id, unsigned int *ipmr_seq) { struct mfc_entry_notifier_info info = { .info = { .family = family, }, .mfc = mfc, .tb_id = tb_id }; ASSERT_RTNL(); (*ipmr_seq)++; return call_fib_notifiers(net, event_type, &info.info); } struct mr_table; /** * struct mr_table_ops - callbacks and info for protocol-specific ops * @rht_params: parameters for accessing the MFC hash * @cmparg_any: a hash key to be used for matching on (*,*) routes */ struct mr_table_ops { const struct rhashtable_params *rht_params; void *cmparg_any; }; /** * struct mr_table
- a multicast routing table * @list: entry within a list of multicast routing tables * @net: net where this table belongs * @ops: protocol specific operations * @id: identifier of the table * @mroute_sk: socket associated with the table * @ipmr_expire_timer: timer for handling unresolved routes * @mfc_unres_queue: list of unresolved MFC entries * @vif_table: array containing all possible vifs * @mfc_hash: Hash table of all resolved routes for easy lookup * @mfc_cache_list: list of resolved routes for possible traversal * @maxvif: Identifier of highest value vif currently in use * @cache_resolve_queue_len: current size of unresolved queue * @mroute_do_assert: Whether to inform userspace on wrong ingress * @mroute_do_pim: Whether to receive IGMP PIMv1 * @mroute_reg_vif_num: PIM-device vif index */ struct mr_table { struct list_head list; possible_net_t net; struct mr_table_ops ops; u32 id; struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; struct vif_device vif_table[MAXVIFS]; struct rhltable mfc_hash; struct list_head mfc_cache_list; int maxvif; atomic_t cache_resolve_queue_len; bool mroute_do_assert; bool mroute_do_pim; bool mroute_do_wrvifwhole; int mroute_reg_vif_num; }; static inline bool mr_can_free_table(struct net *net) { return !check_net(net) || !net_initialized(net); } #ifdef CONFIG_IP_MROUTE_COMMON void vif_device_init(struct vif_device *v, struct net_device *dev, unsigned long rate_limit, unsigned char threshold, unsigned short flags, unsigned short get_iflink_mask); struct mr_table * mr_table_alloc(struct net *net, u32 id, struct mr_table_ops *ops, void (*expire_func)(struct timer_list *t), void (*table_set)(struct mr_table *mrt, struct net *net)); /* These actually return 'struct mr_mfc *', but to avoid need for explicit * castings they simply return void.
*/ void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent); void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi); void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg); int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mr_mfc *c, struct rtmsg *rtm); int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb, struct netlink_callback *cb, int (*fill)(struct mr_table *mrt, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), spinlock_t *lock, struct fib_dump_filter *filter); int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct mr_table *(*iter)(struct net *net, struct mr_table *mrt), int (*fill)(struct mr_table *mrt, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), spinlock_t *lock, struct fib_dump_filter *filter); int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, int (*rules_dump)(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack), struct mr_table *(*mr_iter)(struct net *net, struct mr_table *mrt), struct netlink_ext_ack *extack); #else static inline void vif_device_init(struct vif_device *v, struct net_device *dev, unsigned long rate_limit, unsigned char threshold, unsigned short flags, unsigned short get_iflink_mask) { } static inline void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent) { return NULL; } static inline void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi) { return NULL; } static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg) { return NULL; } static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mr_mfc *c, struct rtmsg *rtm) { return -EINVAL; } static inline int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct mr_table *(*iter)(struct net *net, struct mr_table *mrt), int (*fill)(struct mr_table *mrt, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), spinlock_t *lock, struct fib_dump_filter *filter) { return -EINVAL; } static inline int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, int (*rules_dump)(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack), struct mr_table *(*mr_iter)(struct net *net, struct mr_table *mrt), struct netlink_ext_ack *extack) { return -EINVAL; } #endif static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg) { return mr_mfc_find_parent(mrt, hasharg, -1); } #ifdef CONFIG_PROC_FS struct mr_vif_iter { struct seq_net_private p; struct mr_table *mrt; int ct; }; struct mr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; /* Lock protecting the mr_table's unresolved queue */ spinlock_t *lock; }; #ifdef CONFIG_IP_MROUTE_COMMON void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos); void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos); static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) { return *pos ? mr_vif_seq_idx(seq_file_net(seq), seq->private, *pos - 1) : SEQ_START_TOKEN; } /* These actually return 'struct mr_mfc *', but to avoid need for explicit * castings they simply return void. 
*/ void *mr_mfc_seq_idx(struct net *net, struct mr_mfc_iter *it, loff_t pos); void *mr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos); static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, struct mr_table *mrt, spinlock_t *lock) { struct mr_mfc_iter *it = seq->private; it->mrt = mrt; it->cache = NULL; it->lock = lock; return *pos ? mr_mfc_seq_idx(seq_file_net(seq), seq->private, *pos - 1) : SEQ_START_TOKEN; } static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) { struct mr_mfc_iter *it = seq->private; struct mr_table *mrt = it->mrt; if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(it->lock); else if (it->cache == &mrt->mfc_cache_list) rcu_read_unlock(); } #else static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos) { return NULL; } static inline void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return NULL; } static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) { return NULL; } static inline void *mr_mfc_seq_idx(struct net *net, struct mr_mfc_iter *it, loff_t pos) { return NULL; } static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return NULL; } static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, struct mr_table *mrt, spinlock_t *lock) { return NULL; } static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) { } #endif #endif #endif
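/*
 * Editor's note -- illustrative sketch, not part of mroute_base.h: a
 * protocol using this header (as ipmr and ip6mr do) typically embeds
 * struct mr_mfc at the start of its own cache entry and points ->free at
 * a callback that recovers the outer structure, so that mr_cache_put()
 * can defer the actual free to an RCU grace period. All "demo_*" names
 * below are hypothetical.
 *
 *	struct demo_mfc_cache {
 *		struct mr_mfc _c;	(must be first, the generic helpers
 *					 cast between the two types)
 *		__be32 origin;
 *		__be32 mcastgrp;
 *	};
 *
 *	static void demo_mfc_free(struct rcu_head *head)
 *	{
 *		struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
 *
 *		kfree(container_of(c, struct demo_mfc_cache, _c));
 *	}
 *
 *	static struct demo_mfc_cache *demo_mfc_alloc(void)
 *	{
 *		struct demo_mfc_cache *c = kzalloc(sizeof(*c), GFP_KERNEL);
 *
 *		if (!c)
 *			return NULL;
 *		c->_c.free = demo_mfc_free;
 *		refcount_set(&c->_c.mfc_un.res.refcount, 1);
 *		return c;
 *	}
 *
 * Readers take a reference with mr_cache_hold(); the entry is only freed,
 * after a grace period, once the last mr_cache_put() drops the refcount
 * to zero.
 */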
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API * * ARC4 Cipher Algorithm * * Jon Oberheide <jon@oberheide.org> */ #include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #define ARC4_ALIGN __alignof__(struct arc4_ctx) static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key, unsigned int key_len) { struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); return arc4_setkey(ctx, in_key, key_len); } static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned nbytes, u8 *siv, u32 flags) { struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); if (!(flags & CRYPTO_LSKCIPHER_FLAG_CONT)) memcpy(siv, ctx, sizeof(*ctx)); ctx = (struct arc4_ctx *)siv; arc4_crypt(ctx, dst, src, nbytes); return 0; } static int crypto_arc4_init(struct crypto_lskcipher *tfm) { pr_warn_ratelimited("\"%s\" (%lu) uses obsolete ecb(arc4) skcipher\n", current->comm, (unsigned long)current->pid); return 0; } static struct lskcipher_alg arc4_alg = { .co.base.cra_name = "arc4", .co.base.cra_driver_name = "arc4-generic", .co.base.cra_priority = 100, .co.base.cra_blocksize = ARC4_BLOCK_SIZE, .co.base.cra_ctxsize = sizeof(struct arc4_ctx), .co.base.cra_alignmask = ARC4_ALIGN - 1, .co.base.cra_module = THIS_MODULE, .co.min_keysize = ARC4_MIN_KEY_SIZE, .co.max_keysize = ARC4_MAX_KEY_SIZE, .co.statesize = sizeof(struct arc4_ctx), .setkey = crypto_arc4_setkey, .encrypt = crypto_arc4_crypt, .decrypt = crypto_arc4_crypt, .init = crypto_arc4_init, }; static int __init arc4_init(void) { return crypto_register_lskcipher(&arc4_alg); } static void __exit arc4_exit(void) { crypto_unregister_lskcipher(&arc4_alg); } module_init(arc4_init); module_exit(arc4_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>"); MODULE_ALIAS_CRYPTO("ecb(arc4)");
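/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * this module is only a thin lskcipher wrapper around the library code in
 * lib/crypto/arc4.c, so in-kernel users that genuinely need RC4 for a
 * legacy protocol can call that library API directly instead of
 * allocating a crypto tfm. demo_arc4_inplace() is a hypothetical helper.
 *
 *	#include <crypto/arc4.h>
 *	#include <linux/string.h>
 *
 *	static int demo_arc4_inplace(const u8 *key, unsigned int keylen,
 *				     u8 *buf, unsigned int len)
 *	{
 *		struct arc4_ctx ctx;
 *		int err;
 *
 *		err = arc4_setkey(&ctx, key, keylen);
 *		if (err)
 *			return err;
 *		arc4_crypt(&ctx, buf, buf, len);
 *		memzero_explicit(&ctx, sizeof(ctx));
 *		return 0;
 *	}
 *
 * RC4 is a stream cipher, so the same call both encrypts and decrypts;
 * a keyed context produces a single keystream and must not be reused
 * across messages.
 */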
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2003-2005 Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2015 Intel Deutschland GmbH * Copyright (C) 2021-2023 Intel Corporation */ #include <linux/kobject.h> #include <linux/slab.h> #include "ieee80211_i.h" #include "key.h" #include "debugfs.h" #include "debugfs_key.h" #define KEY_READ(name, prop, format_string) \ static ssize_t key_##name##_read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct ieee80211_key *key = file->private_data; \ return mac80211_format_buffer(userbuf, count, ppos, \ format_string, key->prop); \ } #define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n") #define KEY_OPS(name) \ static const struct debugfs_short_fops key_ ##name## _ops = { \ .read = key_##name##_read, \ .llseek = generic_file_llseek, \ } #define KEY_OPS_W(name) \ static const struct debugfs_short_fops key_ ##name## _ops = { \ .read = key_##name##_read, \ .write = key_##name##_write, \ .llseek = generic_file_llseek, \ } #define KEY_FILE(name, format) \ KEY_READ_##format(name) \ KEY_OPS(name) #define KEY_CONF_READ(name, format_string) \ KEY_READ(conf_##name, conf.name, format_string) #define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n") #define KEY_CONF_OPS(name) \ static const struct debugfs_short_fops key_ ##name## _ops = { \ .read = key_conf_##name##_read, \ .llseek = generic_file_llseek, \ } #define KEY_CONF_FILE(name, format) \ KEY_CONF_READ_##format(name) \ KEY_CONF_OPS(name) KEY_CONF_FILE(keylen, D); KEY_CONF_FILE(keyidx, D); KEY_CONF_FILE(hw_key_idx, D); KEY_FILE(flags, X); KEY_READ(ifindex, sdata->name, "%s\n"); KEY_OPS(ifindex); static ssize_t key_algorithm_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[15]; struct ieee80211_key *key = file->private_data; u32 c =
key->conf.cipher; sprintf(buf, "%.2x-%.2x-%.2x:%d\n", c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff); return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); } KEY_OPS(algorithm); static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; u64 pn; int ret; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return -EINVAL; case WLAN_CIPHER_SUITE_TKIP: /* not supported yet */ return -EOPNOTSUPP; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ret = kstrtou64_from_user(userbuf, count, 16, &pn); if (ret) return ret; /* PN is a 48-bit counter */ if (pn >= (1ULL << 48)) return -ERANGE; atomic64_set(&key->conf.tx_pn, pn); return count; default: return 0; } } static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { u64 pn; char buf[20]; int len; struct ieee80211_key *key = file->private_data; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: len = scnprintf(buf, sizeof(buf), "\n"); break; case WLAN_CIPHER_SUITE_TKIP: pn = atomic64_read(&key->conf.tx_pn); len = scnprintf(buf, sizeof(buf), "%08x %04x\n", TKIP_PN_TO_IV32(pn), TKIP_PN_TO_IV16(pn)); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: pn = atomic64_read(&key->conf.tx_pn); len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24), (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn); break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS_W(tx_spec); static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[14*IEEE80211_NUM_TIDS+1], *p = buf; int i, len; const u8 *rpn; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: len = scnprintf(buf, sizeof(buf), "\n"); break; case WLAN_CIPHER_SUITE_TKIP: for (i = 0; i < IEEE80211_NUM_TIDS; i++) p += scnprintf(p, sizeof(buf)+buf-p, "%08x %04x\n", key->u.tkip.rx[i].iv32, key->u.tkip.rx[i].iv16); len = p - buf; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { rpn = key->u.ccmp.rx_pn[i]; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); } len = p - buf; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: rpn = key->u.aes_cmac.rx_pn; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); len = p - buf; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: rpn = key->u.aes_gmac.rx_pn; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); len = p - buf; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { rpn = key->u.gcmp.rx_pn[i]; p += 
scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5]); } len = p - buf; break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(rx_spec); static ssize_t key_replays_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[20]; int len; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_cmac.replays); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_gmac.replays); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays); break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(replays); static ssize_t key_icverrors_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[20]; int len; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_cmac.icverrors); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_gmac.icverrors); break; default: return 0; } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(icverrors); static ssize_t key_mic_failures_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; char buf[20]; int len; if (key->conf.cipher != WLAN_CIPHER_SUITE_TKIP) return -EINVAL; len = scnprintf(buf, sizeof(buf), "%u\n", key->u.tkip.mic_failures); return simple_read_from_buffer(userbuf, count, ppos, buf, len); } KEY_OPS(mic_failures); static ssize_t key_key_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; int i, bufsize = 2 * key->conf.keylen + 2; char *buf = kmalloc(bufsize, GFP_KERNEL); char *p = buf; ssize_t res; if (!buf) return -ENOMEM; for (i = 0; i < key->conf.keylen; i++) p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); p += scnprintf(p, bufsize+buf-p, "\n"); res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); kfree(buf); return res; } KEY_OPS(key); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, key->debugfs.dir, \ key, &key_##name##_ops) #define DEBUGFS_ADD_W(name) \ debugfs_create_file(#name, 0600, key->debugfs.dir, \ key, &key_##name##_ops); void ieee80211_debugfs_key_add(struct ieee80211_key *key) { static int keycount; char buf[100]; struct sta_info *sta; if (!key->local->debugfs.keys) return; sprintf(buf, "%d", keycount); key->debugfs.cnt = keycount; keycount++; key->debugfs.dir = debugfs_create_dir(buf, key->local->debugfs.keys); sta = key->sta; if (sta) { sprintf(buf, "../../netdev:%s/stations/%pM", sta->sdata->name, sta->sta.addr); key->debugfs.stalink = debugfs_create_symlink("station", key->debugfs.dir, buf); } DEBUGFS_ADD(keylen); DEBUGFS_ADD(flags); DEBUGFS_ADD(keyidx); DEBUGFS_ADD(hw_key_idx); DEBUGFS_ADD(algorithm); DEBUGFS_ADD_W(tx_spec); DEBUGFS_ADD(rx_spec); DEBUGFS_ADD(replays); 
DEBUGFS_ADD(icverrors); DEBUGFS_ADD(mic_failures); DEBUGFS_ADD(key); DEBUGFS_ADD(ifindex); } void ieee80211_debugfs_key_remove(struct ieee80211_key *key) { if (!key) return; debugfs_remove_recursive(key->debugfs.dir); key->debugfs.dir = NULL; } void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) { char buf[50]; struct ieee80211_key *key; if (!sdata->vif.debugfs_dir) return; lockdep_assert_wiphy(sdata->local->hw.wiphy); debugfs_remove(sdata->debugfs.default_unicast_key); sdata->debugfs.default_unicast_key = NULL; if (sdata->default_unicast_key) { key = wiphy_dereference(sdata->local->hw.wiphy, sdata->default_unicast_key); sprintf(buf, "../keys/%d", key->debugfs.cnt); sdata->debugfs.default_unicast_key = debugfs_create_symlink("default_unicast_key", sdata->vif.debugfs_dir, buf); } debugfs_remove(sdata->debugfs.default_multicast_key); sdata->debugfs.default_multicast_key = NULL; if (sdata->deflink.default_multicast_key) { key = wiphy_dereference(sdata->local->hw.wiphy, sdata->deflink.default_multicast_key); sprintf(buf, "../keys/%d", key->debugfs.cnt); sdata->debugfs.default_multicast_key = debugfs_create_symlink("default_multicast_key", sdata->vif.debugfs_dir, buf); } } void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata) { if (!sdata) return; debugfs_remove(sdata->debugfs.default_mgmt_key); sdata->debugfs.default_mgmt_key = NULL; } void ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata) { if (!sdata) return; debugfs_remove(sdata->debugfs.default_beacon_key); sdata->debugfs.default_beacon_key = NULL; }
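/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * expanded by hand, KEY_CONF_FILE(keylen, D) above boils down to roughly
 * the shape below ("foo" is a hypothetical attribute name, and
 * scnprintf() + simple_read_from_buffer() stand in for what
 * mac80211_format_buffer() does internally). Each key attribute is a
 * tiny read-only debugfs file that formats one field of the key into a
 * user buffer.
 *
 *	static ssize_t key_foo_read(struct file *file, char __user *userbuf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		struct ieee80211_key *key = file->private_data;
 *		char buf[16];
 *		int len = scnprintf(buf, sizeof(buf), "%d\n",
 *				    key->conf.keylen);
 *
 *		return simple_read_from_buffer(userbuf, count, ppos, buf, len);
 *	}
 *
 *	static const struct debugfs_short_fops key_foo_ops = {
 *		.read = key_foo_read,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * ieee80211_debugfs_key_add() then publishes it under the per-key
 * directory with DEBUGFS_ADD(foo), i.e. debugfs_create_file("foo", 0400,
 * key->debugfs.dir, key, &key_foo_ops).
 */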
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/nfs/dir.c * * Copyright (C) 1992 Rick Sladkey * * nfs directory handling functions * * 10 Apr 1996 Added silly rename for unlink --okir * 28 Sep 1996 Improved directory cache --okir * 23 Aug 1997 Claus Heine claus@momo.math.rwth-aachen.de * Re-implemented silly rename for unlink, newly implemented * silly rename for nfs_rename() following the suggestions * of Olaf Kirch (okir) found in this file. * Following Linus comments on my original hack, this version * depends only on the dcache stuff and doesn't touch the inode * layer (iput() and friends). * 6 Jun 1999 Cache readdir lookups in the page cache.
-DaveM */ #include <linux/compat.h> #include <linux/module.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/swap.h> #include <linux/sched.h> #include <linux/kmemleak.h> #include <linux/xattr.h> #include <linux/hash.h> #include "delegation.h" #include "iostat.h" #include "internal.h" #include "fscache.h" #include "nfstrace.h" /* #define NFS_DEBUG_VERBOSE 1 */ static int nfs_opendir(struct inode *, struct file *); static int nfs_closedir(struct inode *, struct file *); static int nfs_readdir(struct file *, struct dir_context *); static int nfs_fsync_dir(struct file *, loff_t, loff_t, int); static loff_t nfs_llseek_dir(struct file *, loff_t, int); static void nfs_readdir_clear_array(struct folio *); static int nfs_do_create(struct inode *dir, struct dentry *dentry, umode_t mode, int open_flags); const struct file_operations nfs_dir_operations = { .llseek = nfs_llseek_dir, .read = generic_read_dir, .iterate_shared = nfs_readdir, .open = nfs_opendir, .release = nfs_closedir, .fsync = nfs_fsync_dir, }; const struct address_space_operations nfs_dir_aops = { .free_folio = nfs_readdir_clear_array, }; #define NFS_INIT_DTSIZE PAGE_SIZE static struct nfs_open_dir_context * alloc_nfs_open_dir_context(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); struct nfs_open_dir_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (ctx != NULL) { ctx->attr_gencount = nfsi->attr_gencount; ctx->dtsize = NFS_INIT_DTSIZE; spin_lock(&dir->i_lock); if (list_empty(&nfsi->open_files) && (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)) nfs_set_cache_invalid(dir, NFS_INO_INVALID_DATA | NFS_INO_REVAL_FORCED); list_add_tail_rcu(&ctx->list, &nfsi->open_files); memcpy(ctx->verf, nfsi->cookieverf, sizeof(ctx->verf)); spin_unlock(&dir->i_lock); return ctx; } return ERR_PTR(-ENOMEM); } static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx) { spin_lock(&dir->i_lock); list_del_rcu(&ctx->list); spin_unlock(&dir->i_lock); kfree_rcu(ctx, rcu_head); } /* * Open file */ static int nfs_opendir(struct inode *inode, struct file *filp) { int res = 0; struct nfs_open_dir_context *ctx; dfprintk(FILE, "NFS: open dir(%pD2)\n", filp); nfs_inc_stats(inode, NFSIOS_VFSOPEN); ctx = alloc_nfs_open_dir_context(inode); if (IS_ERR(ctx)) { res = PTR_ERR(ctx); goto out; } filp->private_data = ctx; out: return res; } static int nfs_closedir(struct inode *inode, struct file *filp) { put_nfs_open_dir_context(file_inode(filp), filp->private_data); return 0; } struct nfs_cache_array_entry { u64 cookie; u64 ino; const char *name; unsigned int name_len; unsigned char d_type; }; struct nfs_cache_array { u64 change_attr; u64 last_cookie; unsigned int size; unsigned char folio_full : 1, folio_is_eof : 1, cookies_are_ordered : 1; struct nfs_cache_array_entry array[] __counted_by(size); }; struct nfs_readdir_descriptor { struct file *file; struct folio *folio; struct dir_context *ctx; pgoff_t folio_index; pgoff_t folio_index_max; u64 dir_cookie; u64 last_cookie; loff_t current_index; __be32 verf[NFS_DIR_VERIFIER_SIZE]; unsigned long dir_verifier; unsigned long timestamp; unsigned long gencount; unsigned long attr_gencount; unsigned int cache_entry_index; 
unsigned int buffer_fills; unsigned int dtsize; bool clear_cache; bool plus; bool eob; bool eof; }; static void nfs_set_dtsize(struct nfs_readdir_descriptor *desc, unsigned int sz) { struct nfs_server *server = NFS_SERVER(file_inode(desc->file)); unsigned int maxsize = server->dtsize; if (sz > maxsize) sz = maxsize; if (sz < NFS_MIN_FILE_IO_SIZE) sz = NFS_MIN_FILE_IO_SIZE; desc->dtsize = sz; } static void nfs_shrink_dtsize(struct nfs_readdir_descriptor *desc) { nfs_set_dtsize(desc, desc->dtsize >> 1); } static void nfs_grow_dtsize(struct nfs_readdir_descriptor *desc) { nfs_set_dtsize(desc, desc->dtsize << 1); } static void nfs_readdir_folio_init_array(struct folio *folio, u64 last_cookie, u64 change_attr) { struct nfs_cache_array *array; array = kmap_local_folio(folio, 0); array->change_attr = change_attr; array->last_cookie = last_cookie; array->size = 0; array->folio_full = 0; array->folio_is_eof = 0; array->cookies_are_ordered = 1; kunmap_local(array); } /* * we are freeing strings created by nfs_readdir_folio_array_append() */ static void nfs_readdir_clear_array(struct folio *folio) { struct nfs_cache_array *array; unsigned int i; array = kmap_local_folio(folio, 0); for (i = 0; i < array->size; i++) kfree(array->array[i].name); array->size = 0; kunmap_local(array); } static void nfs_readdir_folio_reinit_array(struct folio *folio, u64 last_cookie, u64 change_attr) { nfs_readdir_clear_array(folio); nfs_readdir_folio_init_array(folio, last_cookie, change_attr); } static struct folio * nfs_readdir_folio_array_alloc(u64 last_cookie, gfp_t gfp_flags) { struct folio *folio = folio_alloc(gfp_flags, 0); if (folio) nfs_readdir_folio_init_array(folio, last_cookie, 0); return folio; } static void nfs_readdir_folio_array_free(struct folio *folio) { if (folio) { nfs_readdir_clear_array(folio); folio_put(folio); } } static u64 nfs_readdir_array_index_cookie(struct nfs_cache_array *array) { return array->size == 0 ? array->last_cookie : array->array[0].cookie; } static void nfs_readdir_array_set_eof(struct nfs_cache_array *array) { array->folio_is_eof = 1; array->folio_full = 1; } static bool nfs_readdir_array_is_full(struct nfs_cache_array *array) { return array->folio_full; } /* * the caller is responsible for freeing the returned name; when called * via nfs_readdir_folio_array_append(), the strings will be freed in * nfs_readdir_clear_array() */ static const char *nfs_readdir_copy_name(const char *name, unsigned int len) { const char *ret = kmemdup_nul(name, len, GFP_KERNEL); /* * Avoid a kmemleak false positive. The pointer to the name is stored * in a page cache page which kmemleak does not scan.
*/ if (ret != NULL) kmemleak_not_leak(ret); return ret; } static size_t nfs_readdir_array_maxentries(void) { return (PAGE_SIZE - sizeof(struct nfs_cache_array)) / sizeof(struct nfs_cache_array_entry); } /* * Check that the next array entry lies entirely within the page bounds */ static int nfs_readdir_array_can_expand(struct nfs_cache_array *array) { if (array->folio_full) return -ENOSPC; if (array->size == nfs_readdir_array_maxentries()) { array->folio_full = 1; return -ENOSPC; } return 0; } static int nfs_readdir_folio_array_append(struct folio *folio, const struct nfs_entry *entry, u64 *cookie) { struct nfs_cache_array *array; struct nfs_cache_array_entry *cache_entry; const char *name; int ret = -ENOMEM; name = nfs_readdir_copy_name(entry->name, entry->len); array = kmap_local_folio(folio, 0); if (!name) goto out; ret = nfs_readdir_array_can_expand(array); if (ret) { kfree(name); goto out; } array->size++; cache_entry = &array->array[array->size - 1]; cache_entry->cookie = array->last_cookie; cache_entry->ino = entry->ino; cache_entry->d_type = entry->d_type; cache_entry->name_len = entry->len; cache_entry->name = name; array->last_cookie = entry->cookie; if (array->last_cookie <= cache_entry->cookie) array->cookies_are_ordered = 0; if (entry->eof != 0) nfs_readdir_array_set_eof(array); out: *cookie = array->last_cookie; kunmap_local(array); return ret; } #define NFS_READDIR_COOKIE_MASK (U32_MAX >> 14) /* * Hash algorithm allowing content addressable access to sequences * of directory cookies. Content is addressed by the value of the * cookie index of the first readdir entry in a page. * * We select only the first 18 bits to avoid issues with excessive * memory use for the page cache XArray. 18 bits should allow the caching * of 262144 pages of sequences of readdir entries. Since each page holds * 127 readdir entries for a typical 64-bit system, that works out to a * cache of ~ 33 million entries per directory.
*/ static pgoff_t nfs_readdir_folio_cookie_hash(u64 cookie) { if (cookie == 0) return 0; return hash_64(cookie, 18); } static bool nfs_readdir_folio_validate(struct folio *folio, u64 last_cookie, u64 change_attr) { struct nfs_cache_array *array = kmap_local_folio(folio, 0); int ret = true; if (array->change_attr != change_attr) ret = false; if (nfs_readdir_array_index_cookie(array) != last_cookie) ret = false; kunmap_local(array); return ret; } static void nfs_readdir_folio_unlock_and_put(struct folio *folio) { folio_unlock(folio); folio_put(folio); } static void nfs_readdir_folio_init_and_validate(struct folio *folio, u64 cookie, u64 change_attr) { if (folio_test_uptodate(folio)) { if (nfs_readdir_folio_validate(folio, cookie, change_attr)) return; nfs_readdir_clear_array(folio); } nfs_readdir_folio_init_array(folio, cookie, change_attr); folio_mark_uptodate(folio); } static struct folio *nfs_readdir_folio_get_locked(struct address_space *mapping, u64 cookie, u64 change_attr) { pgoff_t index = nfs_readdir_folio_cookie_hash(cookie); struct folio *folio; folio = filemap_grab_folio(mapping, index); if (IS_ERR(folio)) return NULL; nfs_readdir_folio_init_and_validate(folio, cookie, change_attr); return folio; } static u64 nfs_readdir_folio_last_cookie(struct folio *folio) { struct nfs_cache_array *array; u64 ret; array = kmap_local_folio(folio, 0); ret = array->last_cookie; kunmap_local(array); return ret; } static bool nfs_readdir_folio_needs_filling(struct folio *folio) { struct nfs_cache_array *array; bool ret; array = kmap_local_folio(folio, 0); ret = !nfs_readdir_array_is_full(array); kunmap_local(array); return ret; } static void nfs_readdir_folio_set_eof(struct folio *folio) { struct nfs_cache_array *array; array = kmap_local_folio(folio, 0); nfs_readdir_array_set_eof(array); kunmap_local(array); } static struct folio *nfs_readdir_folio_get_next(struct address_space *mapping, u64 cookie, u64 change_attr) { pgoff_t index = nfs_readdir_folio_cookie_hash(cookie); struct folio *folio; folio = __filemap_get_folio(mapping, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, mapping_gfp_mask(mapping)); if (IS_ERR(folio)) return NULL; nfs_readdir_folio_init_and_validate(folio, cookie, change_attr); if (nfs_readdir_folio_last_cookie(folio) != cookie) nfs_readdir_folio_reinit_array(folio, cookie, change_attr); return folio; } static inline int is_32bit_api(void) { #ifdef CONFIG_COMPAT return in_compat_syscall(); #else return (BITS_PER_LONG == 32); #endif } static bool nfs_readdir_use_cookie(const struct file *filp) { if ((filp->f_mode & FMODE_32BITHASH) || (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api())) return false; return true; } static void nfs_readdir_seek_next_array(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { if (array->folio_full) { desc->last_cookie = array->last_cookie; desc->current_index += array->size; desc->cache_entry_index = 0; desc->folio_index++; } else desc->last_cookie = nfs_readdir_array_index_cookie(array); } static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc) { desc->current_index = 0; desc->last_cookie = 0; desc->folio_index = 0; } static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { loff_t diff = desc->ctx->pos - desc->current_index; unsigned int index; if (diff < 0) goto out_eof; if (diff >= array->size) { if (array->folio_is_eof) goto out_eof; nfs_readdir_seek_next_array(array, desc); return -EAGAIN; } index = (unsigned int)diff; desc->dir_cookie = 
array->array[index].cookie; desc->cache_entry_index = index; return 0; out_eof: desc->eof = true; return -EBADCOOKIE; } static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array, u64 cookie) { if (!array->cookies_are_ordered) return true; /* Optimisation for monotonically increasing cookies */ if (cookie >= array->last_cookie) return false; if (array->size && cookie < array->array[0].cookie) return false; return true; } static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { unsigned int i; int status = -EAGAIN; if (!nfs_readdir_array_cookie_in_range(array, desc->dir_cookie)) goto check_eof; for (i = 0; i < array->size; i++) { if (array->array[i].cookie == desc->dir_cookie) { if (nfs_readdir_use_cookie(desc->file)) desc->ctx->pos = desc->dir_cookie; else desc->ctx->pos = desc->current_index + i; desc->cache_entry_index = i; return 0; } } check_eof: if (array->folio_is_eof) { status = -EBADCOOKIE; if (desc->dir_cookie == array->last_cookie) desc->eof = true; } else nfs_readdir_seek_next_array(array, desc); return status; } static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc) { struct nfs_cache_array *array; int status; array = kmap_local_folio(desc->folio, 0); if (desc->dir_cookie == 0) status = nfs_readdir_search_for_pos(array, desc); else status = nfs_readdir_search_for_cookie(array, desc); kunmap_local(array); return status; } /* Fill a page with xdr information before transferring to the cache page */ static int nfs_readdir_xdr_filler(struct nfs_readdir_descriptor *desc, __be32 *verf, u64 cookie, struct page **pages, size_t bufsize, __be32 *verf_res) { struct inode *inode = file_inode(desc->file); struct nfs_readdir_arg arg = { .dentry = file_dentry(desc->file), .cred = desc->file->f_cred, .verf = verf, .cookie = cookie, .pages = pages, .page_len = bufsize, .plus = desc->plus, }; struct nfs_readdir_res res = { .verf = verf_res, }; unsigned long timestamp, gencount; int error; again: timestamp = jiffies; gencount = nfs_inc_attr_generation_counter(); desc->dir_verifier = nfs_save_change_attribute(inode); error = NFS_PROTO(inode)->readdir(&arg, &res); if (error < 0) { /* We requested READDIRPLUS, but the server doesn't grok it */ if (error == -ENOTSUPP && desc->plus) { NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS; desc->plus = arg.plus = false; goto again; } goto error; } desc->timestamp = timestamp; desc->gencount = gencount; error: return error; } static int xdr_decode(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry, struct xdr_stream *xdr) { struct inode *inode = file_inode(desc->file); int error; error = NFS_PROTO(inode)->decode_dirent(xdr, entry, desc->plus); if (error) return error; entry->fattr->time_start = desc->timestamp; entry->fattr->gencount = desc->gencount; return 0; } /* Match file and dirent using either filehandle or fileid * Note: caller is responsible for checking the fsid */ static int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) { struct inode *inode; struct nfs_inode *nfsi; if (d_really_is_negative(dentry)) return 0; inode = d_inode(dentry); if (is_bad_inode(inode) || NFS_STALE(inode)) return 0; nfsi = NFS_I(inode); if (entry->fattr->fileid != nfsi->fileid) return 0; if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0) return 0; return 1; } #define NFS_READDIR_CACHE_USAGE_THRESHOLD (8UL) static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx, unsigned int cache_hits, unsigned int cache_misses) { if 
(!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) return false; if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS) return true; if (ctx->pos == 0 || cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD) return true; return false; } /* * This function is called by the getattr code to request the * use of readdirplus to accelerate any future lookups in the same * directory. */ void nfs_readdir_record_entry_cache_hit(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); struct nfs_open_dir_context *ctx; if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) && S_ISDIR(dir->i_mode)) { rcu_read_lock(); list_for_each_entry_rcu (ctx, &nfsi->open_files, list) atomic_inc(&ctx->cache_hits); rcu_read_unlock(); } } /* * This function is mainly for use by nfs_getattr(). * * If this is an 'ls -l', we want to force use of readdirplus. */ void nfs_readdir_record_entry_cache_miss(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); struct nfs_open_dir_context *ctx; if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) && S_ISDIR(dir->i_mode)) { rcu_read_lock(); list_for_each_entry_rcu (ctx, &nfsi->open_files, list) atomic_inc(&ctx->cache_misses); rcu_read_unlock(); } } static void nfs_lookup_advise_force_readdirplus(struct inode *dir, unsigned int flags) { if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) return; if (flags & (LOOKUP_EXCL | LOOKUP_PARENT | LOOKUP_REVAL)) return; nfs_readdir_record_entry_cache_miss(dir); } static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry, unsigned long dir_verifier) { struct qstr filename = QSTR_INIT(entry->name, entry->len); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); struct dentry *dentry; struct dentry *alias; struct inode *inode; int status; if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID)) return; if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID)) return; if (filename.len == 0) return; /* Validate that the name doesn't contain any illegal '\0' */ if (strnlen(filename.name, filename.len) != filename.len) return; /* ...or '/' */ if (strnchr(filename.name, filename.len, '/')) return; if (filename.name[0] == '.') { if (filename.len == 1) return; if (filename.len == 2 && filename.name[1] == '.') return; } filename.hash = full_name_hash(parent, filename.name, filename.len); dentry = d_lookup(parent, &filename); again: if (!dentry) { dentry = d_alloc_parallel(parent, &filename, &wq); if (IS_ERR(dentry)) return; } if (!d_in_lookup(dentry)) { /* Is there a mountpoint here? 
If so, just exit */ if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid, &entry->fattr->fsid)) goto out; if (nfs_same_file(dentry, entry)) { if (!entry->fh->size) goto out; nfs_set_verifier(dentry, dir_verifier); status = nfs_refresh_inode(d_inode(dentry), entry->fattr); if (!status) nfs_setsecurity(d_inode(dentry), entry->fattr); trace_nfs_readdir_lookup_revalidate(d_inode(parent), dentry, 0, status); goto out; } else { trace_nfs_readdir_lookup_revalidate_failed( d_inode(parent), dentry, 0); d_invalidate(dentry); dput(dentry); dentry = NULL; goto again; } } if (!entry->fh->size) { d_lookup_done(dentry); goto out; } inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr); alias = d_splice_alias(inode, dentry); d_lookup_done(dentry); if (alias) { if (IS_ERR(alias)) goto out; dput(dentry); dentry = alias; } nfs_set_verifier(dentry, dir_verifier); trace_nfs_readdir_lookup(d_inode(parent), dentry, 0); out: dput(dentry); } static int nfs_readdir_entry_decode(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry, struct xdr_stream *stream) { int ret; if (entry->fattr->label) entry->fattr->label->len = NFS4_MAXLABELLEN; ret = xdr_decode(desc, entry, stream); if (ret || !desc->plus) return ret; nfs_prime_dcache(file_dentry(desc->file), entry, desc->dir_verifier); return 0; } /* Perform conversion from xdr to cache array */ static int nfs_readdir_folio_filler(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry, struct page **xdr_pages, unsigned int buflen, struct folio **arrays, size_t narrays, u64 change_attr) { struct address_space *mapping = desc->file->f_mapping; struct folio *new, *folio = *arrays; struct xdr_stream stream; struct page *scratch; struct xdr_buf buf; u64 cookie; int status; scratch = alloc_page(GFP_KERNEL); if (scratch == NULL) return -ENOMEM; xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen); xdr_set_scratch_page(&stream, scratch); do { status = nfs_readdir_entry_decode(desc, entry, &stream); if (status != 0) break; status = nfs_readdir_folio_array_append(folio, entry, &cookie); if (status != -ENOSPC) continue; if (folio->mapping != mapping) { if (!--narrays) break; new = nfs_readdir_folio_array_alloc(cookie, GFP_KERNEL); if (!new) break; arrays++; *arrays = folio = new; } else { new = nfs_readdir_folio_get_next(mapping, cookie, change_attr); if (!new) break; if (folio != *arrays) nfs_readdir_folio_unlock_and_put(folio); folio = new; } desc->folio_index_max++; status = nfs_readdir_folio_array_append(folio, entry, &cookie); } while (!status && !entry->eof); switch (status) { case -EBADCOOKIE: if (!entry->eof) break; nfs_readdir_folio_set_eof(folio); fallthrough; case -EAGAIN: status = 0; break; case -ENOSPC: status = 0; if (!desc->plus) break; while (!nfs_readdir_entry_decode(desc, entry, &stream)) ; } if (folio != *arrays) nfs_readdir_folio_unlock_and_put(folio); put_page(scratch); return status; } static void nfs_readdir_free_pages(struct page **pages, size_t npages) { while (npages--) put_page(pages[npages]); kfree(pages); } /* * nfs_readdir_alloc_pages() will allocate pages that must be freed with a call * to nfs_readdir_free_pages() */ static struct page **nfs_readdir_alloc_pages(size_t npages) { struct page **pages; size_t i; pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; for (i = 0; i < npages; i++) { struct page *page = alloc_page(GFP_KERNEL); if (page == NULL) goto out_freepages; pages[i] = page; } return pages; out_freepages: nfs_readdir_free_pages(pages, i); return NULL; } static int 
nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, __be32 *verf_arg, __be32 *verf_res, struct folio **arrays, size_t narrays) { u64 change_attr; struct page **pages; struct folio *folio = *arrays; struct nfs_entry *entry; size_t array_size; struct inode *inode = file_inode(desc->file); unsigned int dtsize = desc->dtsize; unsigned int pglen; int status = -ENOMEM; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->cookie = nfs_readdir_folio_last_cookie(folio); entry->fh = nfs_alloc_fhandle(); entry->fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); entry->server = NFS_SERVER(inode); if (entry->fh == NULL || entry->fattr == NULL) goto out; array_size = (dtsize + PAGE_SIZE - 1) >> PAGE_SHIFT; pages = nfs_readdir_alloc_pages(array_size); if (!pages) goto out; change_attr = inode_peek_iversion_raw(inode); status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, pages, dtsize, verf_res); if (status < 0) goto free_pages; pglen = status; if (pglen != 0) status = nfs_readdir_folio_filler(desc, entry, pages, pglen, arrays, narrays, change_attr); else nfs_readdir_folio_set_eof(folio); desc->buffer_fills++; free_pages: nfs_readdir_free_pages(pages, array_size); out: nfs_free_fattr(entry->fattr); nfs_free_fhandle(entry->fh); kfree(entry); return status; } static void nfs_readdir_folio_put(struct nfs_readdir_descriptor *desc) { folio_put(desc->folio); desc->folio = NULL; } static void nfs_readdir_folio_unlock_and_put_cached(struct nfs_readdir_descriptor *desc) { folio_unlock(desc->folio); nfs_readdir_folio_put(desc); } static struct folio * nfs_readdir_folio_get_cached(struct nfs_readdir_descriptor *desc) { struct address_space *mapping = desc->file->f_mapping; u64 change_attr = inode_peek_iversion_raw(mapping->host); u64 cookie = desc->last_cookie; struct folio *folio; folio = nfs_readdir_folio_get_locked(mapping, cookie, change_attr); if (!folio) return NULL; if (desc->clear_cache && !nfs_readdir_folio_needs_filling(folio)) nfs_readdir_folio_reinit_array(folio, cookie, change_attr); return folio; } /* * Returns 0 if desc->dir_cookie was found on folio desc->folio_index * and locks the folio to prevent removal from the page cache.
*/ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) { struct inode *inode = file_inode(desc->file); struct nfs_inode *nfsi = NFS_I(inode); __be32 verf[NFS_DIR_VERIFIER_SIZE]; int res; desc->folio = nfs_readdir_folio_get_cached(desc); if (!desc->folio) return -ENOMEM; if (nfs_readdir_folio_needs_filling(desc->folio)) { /* Grow the dtsize if we had to go back for more pages */ if (desc->folio_index == desc->folio_index_max) nfs_grow_dtsize(desc); desc->folio_index_max = desc->folio_index; trace_nfs_readdir_cache_fill(desc->file, nfsi->cookieverf, desc->last_cookie, desc->folio->index, desc->dtsize); res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf, &desc->folio, 1); if (res < 0) { nfs_readdir_folio_unlock_and_put_cached(desc); trace_nfs_readdir_cache_fill_done(inode, res); if (res == -EBADCOOKIE || res == -ENOTSYNC) { invalidate_inode_pages2(desc->file->f_mapping); nfs_readdir_rewind_search(desc); trace_nfs_readdir_invalidate_cache_range( inode, 0, MAX_LFS_FILESIZE); return -EAGAIN; } return res; } /* * Set the cookie verifier if the page cache was empty */ if (desc->last_cookie == 0 && memcmp(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf))) { memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf)); invalidate_inode_pages2_range(desc->file->f_mapping, 1, -1); trace_nfs_readdir_invalidate_cache_range( inode, 1, MAX_LFS_FILESIZE); } desc->clear_cache = false; } res = nfs_readdir_search_array(desc); if (res == 0) return 0; nfs_readdir_folio_unlock_and_put_cached(desc); return res; } /* Search for desc->dir_cookie from the beginning of the page cache */ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc) { int res; do { res = find_and_lock_cache_page(desc); } while (res == -EAGAIN); return res; } #define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL) /* * Once we've found the start of the dirent within a page: fill 'er up... */ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, const __be32 *verf) { struct file *file = desc->file; struct nfs_cache_array *array; unsigned int i; bool first_emit = !desc->dir_cookie; array = kmap_local_folio(desc->folio, 0); for (i = desc->cache_entry_index; i < array->size; i++) { struct nfs_cache_array_entry *ent; /* * nfs_readdir_handle_cache_misses() returns force_clear once * cache_misses > NFS_READDIR_CACHE_MISS_THRESHOLD as part of the * readdirplus heuristic, so at least NFS_READDIR_CACHE_MISS_THRESHOLD + 1 * entries need to be emitted here. */ if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 2) { desc->eob = true; break; } ent = &array->array[i]; if (!dir_emit(desc->ctx, ent->name, ent->name_len, nfs_compat_user_ino64(ent->ino), ent->d_type)) { desc->eob = true; break; } memcpy(desc->verf, verf, sizeof(desc->verf)); if (i == array->size - 1) { desc->dir_cookie = array->last_cookie; nfs_readdir_seek_next_array(array, desc); } else { desc->dir_cookie = array->array[i + 1].cookie; desc->last_cookie = array->array[0].cookie; } if (nfs_readdir_use_cookie(file)) desc->ctx->pos = desc->dir_cookie; else desc->ctx->pos++; } if (array->folio_is_eof) desc->eof = !desc->eob; kunmap_local(array); dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n", (unsigned long long)desc->dir_cookie); } /* * If we cannot find a cookie in our cache, we suspect that this is * because it points to a deleted file, so we ask the server to return * whatever it thinks is the next entry. We then feed this to filldir.
* If all goes well, we should then be able to find our way round the * cache on the next call to readdir_search_pagecache(); * * NOTE: we cannot add the anonymous page to the pagecache because * the data it contains might not be page aligned. Besides, * we should already have a complete representation of the * directory in the page cache by the time we get here. */ static int uncached_readdir(struct nfs_readdir_descriptor *desc) { struct folio **arrays; size_t i, sz = 512; __be32 verf[NFS_DIR_VERIFIER_SIZE]; int status = -ENOMEM; dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %llu\n", (unsigned long long)desc->dir_cookie); arrays = kcalloc(sz, sizeof(*arrays), GFP_KERNEL); if (!arrays) goto out; arrays[0] = nfs_readdir_folio_array_alloc(desc->dir_cookie, GFP_KERNEL); if (!arrays[0]) goto out; desc->folio_index = 0; desc->cache_entry_index = 0; desc->last_cookie = desc->dir_cookie; desc->folio_index_max = 0; trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie, -1, desc->dtsize); status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz); if (status < 0) { trace_nfs_readdir_uncached_done(file_inode(desc->file), status); goto out_free; } for (i = 0; !desc->eob && i < sz && arrays[i]; i++) { desc->folio = arrays[i]; nfs_do_filldir(desc, verf); } desc->folio = NULL; /* * Grow the dtsize if we have to go back for more pages, * or shrink it if we're reading too many. */ if (!desc->eof) { if (!desc->eob) nfs_grow_dtsize(desc); else if (desc->buffer_fills == 1 && i < (desc->folio_index_max >> 1)) nfs_shrink_dtsize(desc); } out_free: for (i = 0; i < sz && arrays[i]; i++) nfs_readdir_folio_array_free(arrays[i]); out: if (!nfs_readdir_use_cookie(desc->file)) nfs_readdir_rewind_search(desc); desc->folio_index_max = -1; kfree(arrays); dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status); return status; } static bool nfs_readdir_handle_cache_misses(struct inode *inode, struct nfs_readdir_descriptor *desc, unsigned int cache_misses, bool force_clear) { if (desc->ctx->pos == 0 || !desc->plus) return false; if (cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD && !force_clear) return false; trace_nfs_readdir_force_readdirplus(inode); return true; } /* The file offset position represents the dirent entry number. A last cookie cache takes care of the common case of reading the whole directory. */ static int nfs_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; unsigned int cache_hits, cache_misses; bool force_clear; int res; dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", file, (long long)ctx->pos); nfs_inc_stats(inode, NFSIOS_VFSGETDENTS); /* * ctx->pos points to the dirent entry number. * *desc->dir_cookie has the cookie for the next entry. We have * to either find the entry with the appropriate number or * revalidate the cookie. 
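 *
 * (Illustrative note: when nfs_readdir_use_cookie() is true, f_pos
 * carries the raw READDIR cookie itself, so nfs_llseek_dir() below can
 * seek by storing the offset straight into dir_cookie; otherwise f_pos
 * is an entry number and the matching cookie has to be rediscovered by
 * walking the cache.)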
*/ nfs_revalidate_mapping(inode, file->f_mapping); res = -ENOMEM; desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) goto out; desc->file = file; desc->ctx = ctx; desc->folio_index_max = -1; spin_lock(&file->f_lock); desc->dir_cookie = dir_ctx->dir_cookie; desc->folio_index = dir_ctx->page_index; desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; nfs_set_dtsize(desc, dir_ctx->dtsize); memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0); cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0); force_clear = dir_ctx->force_clear; spin_unlock(&file->f_lock); if (desc->eof) { res = 0; goto out_free; } desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); force_clear = nfs_readdir_handle_cache_misses(inode, desc, cache_misses, force_clear); desc->clear_cache = force_clear; do { res = readdir_search_pagecache(desc); if (res == -EBADCOOKIE) { res = 0; /* This means either end of directory */ if (desc->dir_cookie && !desc->eof) { /* Or that the server has 'lost' a cookie */ res = uncached_readdir(desc); if (res == 0) continue; if (res == -EBADCOOKIE || res == -ENOTSYNC) res = 0; } break; } if (res == -ETOOSMALL && desc->plus) { nfs_zap_caches(inode); desc->plus = false; desc->eof = false; continue; } if (res < 0) break; nfs_do_filldir(desc, nfsi->cookieverf); nfs_readdir_folio_unlock_and_put_cached(desc); if (desc->folio_index == desc->folio_index_max) desc->clear_cache = force_clear; } while (!desc->eob && !desc->eof); spin_lock(&file->f_lock); dir_ctx->dir_cookie = desc->dir_cookie; dir_ctx->last_cookie = desc->last_cookie; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->folio_index; dir_ctx->force_clear = force_clear; dir_ctx->eof = desc->eof; dir_ctx->dtsize = desc->dtsize; memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf)); spin_unlock(&file->f_lock); out_free: kfree(desc); out: dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res); return res; } static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) { struct nfs_open_dir_context *dir_ctx = filp->private_data; dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n", filp, offset, whence); switch (whence) { default: return -EINVAL; case SEEK_SET: if (offset < 0) return -EINVAL; spin_lock(&filp->f_lock); break; case SEEK_CUR: if (offset == 0) return filp->f_pos; spin_lock(&filp->f_lock); offset += filp->f_pos; if (offset < 0) { spin_unlock(&filp->f_lock); return -EINVAL; } } if (offset != filp->f_pos) { filp->f_pos = offset; dir_ctx->page_index = 0; if (!nfs_readdir_use_cookie(filp)) { dir_ctx->dir_cookie = 0; dir_ctx->last_cookie = 0; } else { dir_ctx->dir_cookie = offset; dir_ctx->last_cookie = offset; } dir_ctx->eof = false; } spin_unlock(&filp->f_lock); return offset; } /* * All directory operations under NFS are synchronous, so fsync() * is a dummy operation. */ static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end, int datasync) { dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync); nfs_inc_stats(file_inode(filp), NFSIOS_VFSFSYNC); return 0; } /** * nfs_force_lookup_revalidate - Mark the directory as having changed * @dir: pointer to directory inode * * This forces the revalidation code in nfs_lookup_revalidate() to do a * full lookup on all child dentries of 'dir' whenever a change occurs * on the server that might have invalidated our dcache. 
* * Note that we reserve bit '0' as a tag to let us know when a dentry * was revalidated while holding a delegation on its inode. * * The caller should be holding dir->i_lock */ void nfs_force_lookup_revalidate(struct inode *dir) { NFS_I(dir)->cache_change_attribute += 2; } EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate); /** * nfs_verify_change_attribute - Detects NFS remote directory changes * @dir: pointer to parent directory inode * @verf: previously saved change attribute * * Return "false" if the verifier doesn't match the change attribute. * This would usually indicate that the directory contents have changed on * the server, and that any dentries need revalidating. */ static bool nfs_verify_change_attribute(struct inode *dir, unsigned long verf) { return (verf & ~1UL) == nfs_save_change_attribute(dir); } static void nfs_set_verifier_delegated(unsigned long *verf) { *verf |= 1UL; } #if IS_ENABLED(CONFIG_NFS_V4) static void nfs_unset_verifier_delegated(unsigned long *verf) { *verf &= ~1UL; } #endif /* IS_ENABLED(CONFIG_NFS_V4) */ static bool nfs_test_verifier_delegated(unsigned long verf) { return verf & 1; } static bool nfs_verifier_is_delegated(struct dentry *dentry) { return nfs_test_verifier_delegated(dentry->d_time); } static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf) { struct inode *inode = d_inode(dentry); struct inode *dir = d_inode_rcu(dentry->d_parent); if (!dir || !nfs_verify_change_attribute(dir, verf)) return; if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0)) nfs_set_verifier_delegated(&verf); dentry->d_time = verf; } /** * nfs_set_verifier - save a parent directory verifier in the dentry * @dentry: pointer to dentry * @verf: verifier to save * * Saves the parent directory verifier in @dentry. If the inode has * a delegation, we also tag the dentry as having been revalidated * while holding a delegation so that we know we don't have to * look it up again after a directory change. */ void nfs_set_verifier(struct dentry *dentry, unsigned long verf) { spin_lock(&dentry->d_lock); nfs_set_verifier_locked(dentry, verf); spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL_GPL(nfs_set_verifier); #if IS_ENABLED(CONFIG_NFS_V4) /** * nfs_clear_verifier_delegated - clear the dir verifier delegation tag * @inode: pointer to inode * * Iterates through the dentries in the inode alias list and clears * the tag used to indicate that the dentry has been revalidated * while holding a delegation. * This function is intended for use when the delegation is being * returned or revoked. */ void nfs_clear_verifier_delegated(struct inode *inode) { struct dentry *alias; if (!inode) return; spin_lock(&inode->i_lock); hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { spin_lock(&alias->d_lock); nfs_unset_verifier_delegated(&alias->d_time); spin_unlock(&alias->d_lock); } spin_unlock(&inode->i_lock); } EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated); #endif /* IS_ENABLED(CONFIG_NFS_V4) */ static int nfs_dentry_verify_change(struct inode *dir, struct dentry *dentry) { if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE) && d_really_is_negative(dentry)) return dentry->d_time == inode_peek_iversion_raw(dir); return nfs_verify_change_attribute(dir, dentry->d_time); } /* * A check for whether or not the parent directory has changed. * In the case it has, we assume that the dentries are untrustworthy * and may need to be looked up again. * If rcu_walk prevents us from performing a full check, return 0.
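 */

/*
 * Illustrative sketch only (nfs_example_verifier_ok is not part of the
 * client): how the delegation tag in bit 0 of dentry->d_time interacts
 * with the saved change attribute. nfs_force_lookup_revalidate() bumps
 * the attribute by 2 precisely so that bit 0 stays free for this tag.
 */
static inline bool nfs_example_verifier_ok(unsigned long d_time,
					   unsigned long saved_attr)
{
	/* bit 0 set: dentry was revalidated while holding a delegation */
	if (d_time & 1UL)
		return true;
	/* otherwise compare with the tag bit masked off */
	return (d_time & ~1UL) == saved_attr;
}

/*
 * The check the client actually uses: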
*/ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry, int rcu_walk) { if (IS_ROOT(dentry)) return 1; if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE) return 0; if (!nfs_dentry_verify_change(dir, dentry)) return 0; /* Revalidate nfsi->cache_change_attribute before we declare a match */ if (nfs_mapping_need_revalidate_inode(dir)) { if (rcu_walk) return 0; if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0) return 0; } if (!nfs_dentry_verify_change(dir, dentry)) return 0; return 1; } /* * Use intent information to check whether or not we're going to do * an O_EXCL create using this path component. */ static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags) { if (NFS_PROTO(dir)->version == 2) return 0; return (flags & (LOOKUP_CREATE | LOOKUP_EXCL)) == (LOOKUP_CREATE | LOOKUP_EXCL); } /* * Inode and filehandle revalidation for lookups. * * We force revalidation in the cases where the VFS sets LOOKUP_REVAL, * or if the intent information indicates that we're about to open this * particular file and the "nocto" mount flag is not set. * */ static int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) { struct nfs_server *server = NFS_SERVER(inode); int ret; if (IS_AUTOMOUNT(inode)) return 0; if (flags & LOOKUP_OPEN) { switch (inode->i_mode & S_IFMT) { case S_IFREG: /* A NFSv4 OPEN will revalidate later */ if (server->caps & NFS_CAP_ATOMIC_OPEN) goto out; fallthrough; case S_IFDIR: if (server->flags & NFS_MOUNT_NOCTO) break; /* NFS close-to-open cache consistency validation */ goto out_force; } } /* VFS wants an on-the-wire revalidation */ if (flags & LOOKUP_REVAL) goto out_force; out: if (inode->i_nlink > 0 || (inode->i_nlink == 0 && test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(inode)->flags))) return 0; else return -ESTALE; out_force: if (flags & LOOKUP_RCU) return -ECHILD; ret = __nfs_revalidate_inode(server, inode); if (ret != 0) return ret; goto out; } static void nfs_mark_dir_for_revalidate(struct inode *inode) { spin_lock(&inode->i_lock); nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE); spin_unlock(&inode->i_lock); } /* * We judge how long we want to trust negative * dentries by looking at the parent inode mtime. * * If parent mtime has changed, we revalidate, else we wait for a * period corresponding to the parent's attribute cache timeout value. * * If LOOKUP_RCU prevents us from performing a full check, return 1 * suggesting a reval is needed. * * Note that when creating a new file, or looking up a rename target, * then it shouldn't be necessary to revalidate a negative dentry. */ static inline int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, unsigned int flags) { if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) return 1; /* Case insensitive server? Revalidate negative dentries */ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) return 1; return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU); } static int nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry, struct inode *inode, int error) { switch (error) { case 1: break; case -ETIMEDOUT: if (inode && (IS_ROOT(dentry) || NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)) error = 1; break; case -ESTALE: case -ENOENT: error = 0; fallthrough; default: /* * We can't d_drop the root of a disconnected tree: * its d_hash is on the s_anon list and d_drop() would hide * it from shrink_dcache_for_unmount(), leading to busy * inodes on unmount and further oopses. 
*/ if (inode && IS_ROOT(dentry)) error = 1; break; } trace_nfs_lookup_revalidate_exit(dir, dentry, 0, error); return error; } static int nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry, unsigned int flags) { int ret = 1; if (nfs_neg_need_reval(dir, dentry, flags)) { if (flags & LOOKUP_RCU) return -ECHILD; ret = 0; } return nfs_lookup_revalidate_done(dir, dentry, NULL, ret); } static int nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry, struct inode *inode) { nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); return nfs_lookup_revalidate_done(dir, dentry, inode, 1); } static int nfs_lookup_revalidate_dentry(struct inode *dir, const struct qstr *name, struct dentry *dentry, struct inode *inode, unsigned int flags) { struct nfs_fh *fhandle; struct nfs_fattr *fattr; unsigned long dir_verifier; int ret; trace_nfs_lookup_revalidate_enter(dir, dentry, flags); ret = -ENOMEM; fhandle = nfs_alloc_fhandle(); fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); if (fhandle == NULL || fattr == NULL) goto out; dir_verifier = nfs_save_change_attribute(dir); ret = NFS_PROTO(dir)->lookup(dir, dentry, name, fhandle, fattr); if (ret < 0) goto out; /* Request help from readdirplus */ nfs_lookup_advise_force_readdirplus(dir, flags); ret = 0; if (nfs_compare_fh(NFS_FH(inode), fhandle)) goto out; if (nfs_refresh_inode(inode, fattr) < 0) goto out; nfs_setsecurity(inode, fattr); nfs_set_verifier(dentry, dir_verifier); ret = 1; out: nfs_free_fattr(fattr); nfs_free_fhandle(fhandle); /* * If the lookup failed despite the dentry change attribute being * a match, then we should revalidate the directory cache. */ if (!ret && nfs_dentry_verify_change(dir, dentry)) nfs_mark_dir_for_revalidate(dir); return nfs_lookup_revalidate_done(dir, dentry, inode, ret); } /* * This is called every time the dcache has a lookup hit, * and we should check whether we can really trust that * lookup. * * NOTE! The hit can be a negative hit too, don't assume * we have an inode! * * If the parent directory is seen to have changed, we throw out the * cached dentry and do a new lookup. 
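 *
 * (Contract reminder, illustrative: a d_revalidate implementation
 * returns 1 for a valid dentry, 0 to invalidate it, and -ECHILD to make
 * the VFS retry the path walk outside of RCU mode.)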
*/ static int nfs_do_lookup_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { struct inode *inode; int error = 0; nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); inode = d_inode(dentry); if (!inode) return nfs_lookup_revalidate_negative(dir, dentry, flags); if (is_bad_inode(inode)) { dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n", __func__, dentry); goto out_bad; } if ((flags & LOOKUP_RENAME_TARGET) && d_count(dentry) < 2 && nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) goto out_bad; if (nfs_verifier_is_delegated(dentry)) return nfs_lookup_revalidate_delegated(dir, dentry, inode); /* Force a full look up iff the parent directory has changed */ if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) && nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) { error = nfs_lookup_verify_inode(inode, flags); if (error) { if (error == -ESTALE) nfs_mark_dir_for_revalidate(dir); goto out_bad; } goto out_valid; } if (flags & LOOKUP_RCU) return -ECHILD; if (NFS_STALE(inode)) goto out_bad; return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags); out_valid: return nfs_lookup_revalidate_done(dir, dentry, inode, 1); out_bad: if (flags & LOOKUP_RCU) return -ECHILD; return nfs_lookup_revalidate_done(dir, dentry, inode, error); } static int __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) { if (dentry->d_fsdata == NFS_FSDATA_BLOCKED) return -ECHILD; } else { /* Wait for unlink to complete - see unblock_revalidate() */ wait_var_event(&dentry->d_fsdata, smp_load_acquire(&dentry->d_fsdata) != NFS_FSDATA_BLOCKED); } return 0; } static int nfs_lookup_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { if (__nfs_lookup_revalidate(dentry, flags)) return -ECHILD; return nfs_do_lookup_revalidate(dir, name, dentry, flags); } static void block_revalidate(struct dentry *dentry) { /* old devname - just in case */ kfree(dentry->d_fsdata); /* Any new reference that could lead to an open * will take ->d_lock in lookup_open() -> d_lookup(). * Holding this lock ensures we cannot race with * __nfs_lookup_revalidate() and removes any need * for further barriers. */ lockdep_assert_held(&dentry->d_lock); dentry->d_fsdata = NFS_FSDATA_BLOCKED; } static void unblock_revalidate(struct dentry *dentry) { /* store_release ensures wait_var_event() sees the update */ smp_store_release(&dentry->d_fsdata, NULL); wake_up_var(&dentry->d_fsdata); } /* * A weaker form of d_revalidate for revalidating just the d_inode(dentry) * when we don't really care about the dentry name. This is called when a * pathwalk ends on a dentry that was not found via a normal lookup in the * parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals). * * In this situation, we just want to verify that the inode itself is OK * since the dentry might have changed on the server. */ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags) { struct inode *inode = d_inode(dentry); int error = 0; /* * I believe we can only get a negative dentry here in the case of a * procfs-style symlink. Just assume it's correct for now, but we may * eventually need to do something more here.
*/ if (!inode) { dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n", __func__, dentry); return 1; } if (is_bad_inode(inode)) { dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n", __func__, dentry); return 0; } error = nfs_lookup_verify_inode(inode, flags); dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n", __func__, inode->i_ino, error ? "invalid" : "valid"); return !error; } /* * This is called from dput() when d_count is going to 0. */ static int nfs_dentry_delete(const struct dentry *dentry) { dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n", dentry, dentry->d_flags); /* Unhash any dentry with a stale inode */ if (d_really_is_positive(dentry) && NFS_STALE(d_inode(dentry))) return 1; if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { /* Unhash it, so that ->d_iput() would be called */ return 1; } if (!(dentry->d_sb->s_flags & SB_ACTIVE)) { /* Unhash it, so that ancestors of killed async unlink * files will be cleaned up during umount */ return 1; } return 0; } /* Ensure that we revalidate inode->i_nlink */ static void nfs_drop_nlink(struct inode *inode) { spin_lock(&inode->i_lock); /* drop the inode if we're reasonably sure this is the last link */ if (inode->i_nlink > 0) drop_nlink(inode); NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter(); nfs_set_cache_invalid( inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | NFS_INO_INVALID_NLINK); spin_unlock(&inode->i_lock); } /* * Called when the dentry loses inode. * We use it to clean up silly-renamed files. */ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) { if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { nfs_complete_unlink(dentry, inode); nfs_drop_nlink(inode); } iput(inode); } static void nfs_d_release(struct dentry *dentry) { /* free cached devname value, if it survived that far */ if (unlikely(dentry->d_fsdata)) { if (dentry->d_flags & DCACHE_NFSFS_RENAMED) WARN_ON(1); else kfree(dentry->d_fsdata); } } const struct dentry_operations nfs_dentry_operations = { .d_revalidate = nfs_lookup_revalidate, .d_weak_revalidate = nfs_weak_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, .d_automount = nfs_d_automount, .d_release = nfs_d_release, }; EXPORT_SYMBOL_GPL(nfs_dentry_operations); struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) { struct dentry *res; struct inode *inode = NULL; struct nfs_fh *fhandle = NULL; struct nfs_fattr *fattr = NULL; unsigned long dir_verifier; int error; dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry); nfs_inc_stats(dir, NFSIOS_VFSLOOKUP); if (unlikely(dentry->d_name.len > NFS_SERVER(dir)->namelen)) return ERR_PTR(-ENAMETOOLONG); /* * If we're doing an exclusive create, optimize away the lookup * but don't hash the dentry. 
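 *
 * (Illustrative note: returning NULL from ->lookup tells the VFS to
 * keep using the dentry it passed in; the O_EXCL create that follows
 * will instantiate it, so a LOOKUP round trip here would be wasted.)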
*/ if (nfs_is_exclusive_create(dir, flags) || flags & LOOKUP_RENAME_TARGET) return NULL; res = ERR_PTR(-ENOMEM); fhandle = nfs_alloc_fhandle(); fattr = nfs_alloc_fattr_with_label(NFS_SERVER(dir)); if (fhandle == NULL || fattr == NULL) goto out; dir_verifier = nfs_save_change_attribute(dir); trace_nfs_lookup_enter(dir, dentry, flags); error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name, fhandle, fattr); if (error == -ENOENT) { if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) dir_verifier = inode_peek_iversion_raw(dir); goto no_entry; } if (error < 0) { res = ERR_PTR(error); goto out; } inode = nfs_fhget(dentry->d_sb, fhandle, fattr); res = ERR_CAST(inode); if (IS_ERR(res)) goto out; /* Notify readdir to use READDIRPLUS */ nfs_lookup_advise_force_readdirplus(dir, flags); no_entry: res = d_splice_alias(inode, dentry); if (res != NULL) { if (IS_ERR(res)) goto out; dentry = res; } nfs_set_verifier(dentry, dir_verifier); out: trace_nfs_lookup_exit(dir, dentry, flags, PTR_ERR_OR_ZERO(res)); nfs_free_fattr(fattr); nfs_free_fhandle(fhandle); return res; } EXPORT_SYMBOL_GPL(nfs_lookup); void nfs_d_prune_case_insensitive_aliases(struct inode *inode) { /* Case insensitive server? Revalidate dentries */ if (inode && nfs_server_capable(inode, NFS_CAP_CASE_INSENSITIVE)) d_prune_aliases(inode); } EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases); #if IS_ENABLED(CONFIG_NFS_V4) static int nfs4_lookup_revalidate(struct inode *, const struct qstr *, struct dentry *, unsigned int); const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs4_lookup_revalidate, .d_weak_revalidate = nfs_weak_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, .d_automount = nfs_d_automount, .d_release = nfs_d_release, }; EXPORT_SYMBOL_GPL(nfs4_dentry_operations); static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp) { return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp); } static int do_open(struct inode *inode, struct file *filp) { nfs_fscache_open_file(inode, filp); return 0; } static int nfs_finish_open(struct nfs_open_context *ctx, struct dentry *dentry, struct file *file, unsigned open_flags) { int err; err = finish_open(file, dentry, do_open); if (err) goto out; if (S_ISREG(file_inode(file)->i_mode)) nfs_file_set_open_context(file, ctx); else err = -EOPENSTALE; out: return err; } int nfs_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned open_flags, umode_t mode) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); struct nfs_open_context *ctx; struct dentry *res; struct iattr attr = { .ia_valid = ATTR_OPEN }; struct inode *inode; unsigned int lookup_flags = 0; unsigned long dir_verifier; bool switched = false; int created = 0; int err; /* Expect a negative dentry */ BUG_ON(d_inode(dentry)); dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n", dir->i_sb->s_id, dir->i_ino, dentry); err = nfs_check_flags(open_flags); if (err) return err; /* NFS only supports OPEN on regular files */ if ((open_flags & O_DIRECTORY)) { if (!d_in_lookup(dentry)) { /* * Hashed negative dentry with O_DIRECTORY: dentry was * revalidated and is fine, no need to perform lookup * again */ return -ENOENT; } lookup_flags = LOOKUP_OPEN|LOOKUP_DIRECTORY; goto no_open; } if (dentry->d_name.len > NFS_SERVER(dir)->namelen) return -ENAMETOOLONG; if (open_flags & O_CREAT) { struct nfs_server *server = NFS_SERVER(dir); if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) mode &= ~current_umask(); attr.ia_valid 
|= ATTR_MODE; attr.ia_mode = mode; } if (open_flags & O_TRUNC) { attr.ia_valid |= ATTR_SIZE; attr.ia_size = 0; } if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) { d_drop(dentry); switched = true; dentry = d_alloc_parallel(dentry->d_parent, &dentry->d_name, &wq); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (unlikely(!d_in_lookup(dentry))) return finish_no_open(file, dentry); } ctx = create_nfs_open_context(dentry, open_flags, file); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; trace_nfs_atomic_open_enter(dir, ctx, open_flags); inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created); if (created) file->f_mode |= FMODE_CREATED; if (IS_ERR(inode)) { err = PTR_ERR(inode); trace_nfs_atomic_open_exit(dir, ctx, open_flags, err); put_nfs_open_context(ctx); d_drop(dentry); switch (err) { case -ENOENT: d_splice_alias(NULL, dentry); if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) dir_verifier = inode_peek_iversion_raw(dir); else dir_verifier = nfs_save_change_attribute(dir); nfs_set_verifier(dentry, dir_verifier); break; case -EISDIR: case -ENOTDIR: goto no_open; case -ELOOP: if (!(open_flags & O_NOFOLLOW)) goto no_open; break; /* case -EINVAL: */ default: break; } goto out; } file->f_mode |= FMODE_CAN_ODIRECT; err = nfs_finish_open(ctx, ctx->dentry, file, open_flags); trace_nfs_atomic_open_exit(dir, ctx, open_flags, err); put_nfs_open_context(ctx); out: if (unlikely(switched)) { d_lookup_done(dentry); dput(dentry); } return err; no_open: res = nfs_lookup(dir, dentry, lookup_flags); if (!res) { inode = d_inode(dentry); if ((lookup_flags & LOOKUP_DIRECTORY) && inode && !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) res = ERR_PTR(-ENOTDIR); else if (inode && S_ISREG(inode->i_mode)) res = ERR_PTR(-EOPENSTALE); } else if (!IS_ERR(res)) { inode = d_inode(res); if ((lookup_flags & LOOKUP_DIRECTORY) && inode && !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) { dput(res); res = ERR_PTR(-ENOTDIR); } else if (inode && S_ISREG(inode->i_mode)) { dput(res); res = ERR_PTR(-EOPENSTALE); } } if (switched) { d_lookup_done(dentry); if (!res) res = dentry; else dput(dentry); } if (IS_ERR(res)) return PTR_ERR(res); return finish_no_open(file, res); } EXPORT_SYMBOL_GPL(nfs_atomic_open); static int nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { struct inode *inode; if (__nfs_lookup_revalidate(dentry, flags)) return -ECHILD; trace_nfs_lookup_revalidate_enter(dir, dentry, flags); if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY)) goto full_reval; if (d_mountpoint(dentry)) goto full_reval; inode = d_inode(dentry); /* We can't create new files in nfs_open_revalidate(), so we * optimize away revalidation of negative dentries. 
*/ if (inode == NULL) goto full_reval; if (nfs_verifier_is_delegated(dentry)) return nfs_lookup_revalidate_delegated(dir, dentry, inode); /* NFS only supports OPEN on regular files */ if (!S_ISREG(inode->i_mode)) goto full_reval; /* We cannot do exclusive creation on a positive dentry */ if (flags & (LOOKUP_EXCL | LOOKUP_REVAL)) goto reval_dentry; /* Check if the directory changed */ if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) goto reval_dentry; /* Let f_op->open() actually open (and revalidate) the file */ return 1; reval_dentry: if (flags & LOOKUP_RCU) return -ECHILD; return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags); full_reval: return nfs_do_lookup_revalidate(dir, name, dentry, flags); } #endif /* CONFIG_NFSV4 */ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry, struct file *file, unsigned int open_flags, umode_t mode) { /* Same as look+open from lookup_open(), but with different O_TRUNC * handling. */ int error = 0; if (dentry->d_name.len > NFS_SERVER(dir)->namelen) return -ENAMETOOLONG; if (open_flags & O_CREAT) { file->f_mode |= FMODE_CREATED; error = nfs_do_create(dir, dentry, mode, open_flags); if (error) return error; return finish_open(file, dentry, NULL); } else if (d_in_lookup(dentry)) { /* The only flags nfs_lookup considers are * LOOKUP_EXCL and LOOKUP_RENAME_TARGET, and * we want those to be zero so the lookup isn't skipped. */ struct dentry *res = nfs_lookup(dir, dentry, 0); d_lookup_done(dentry); if (unlikely(res)) { if (IS_ERR(res)) return PTR_ERR(res); return finish_no_open(file, res); } } return finish_no_open(file, NULL); } EXPORT_SYMBOL_GPL(nfs_atomic_open_v23); struct dentry * nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct dentry *parent = dget_parent(dentry); struct inode *dir = d_inode(parent); struct inode *inode; struct dentry *d; int error; d_drop(dentry); if (fhandle->size == 0) { error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name, fhandle, fattr); if (error) goto out_error; } nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); if (!(fattr->valid & NFS_ATTR_FATTR)) { struct nfs_server *server = NFS_SB(dentry->d_sb); error = server->nfs_client->rpc_ops->getattr(server, fhandle, fattr, NULL); if (error < 0) goto out_error; } inode = nfs_fhget(dentry->d_sb, fhandle, fattr); d = d_splice_alias(inode, dentry); out: dput(parent); return d; out_error: d = ERR_PTR(error); goto out; } EXPORT_SYMBOL_GPL(nfs_add_or_obtain); /* * Code common to create, mkdir, and mknod. */ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct dentry *d; d = nfs_add_or_obtain(dentry, fhandle, fattr); if (IS_ERR(d)) return PTR_ERR(d); /* Callers don't care */ dput(d); return 0; } EXPORT_SYMBOL_GPL(nfs_instantiate); /* * Following a failed create operation, we drop the dentry rather * than retain a negative dentry. This avoids a problem in the event * that the operation succeeded on the server, but an error in the * reply path made it appear to have failed. 
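 *
 * (Illustrative timeline: the CREATE succeeds on the server but the
 * reply is lost, so the client reports failure; a retained negative
 * dentry would then contradict the file that actually exists, whereas
 * dropping the dentry forces a fresh lookup on the next access.)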
*/ static int nfs_do_create(struct inode *dir, struct dentry *dentry, umode_t mode, int open_flags) { struct iattr attr; int error; open_flags |= O_CREAT; dfprintk(VFS, "NFS: create(%s/%lu), %pd\n", dir->i_sb->s_id, dir->i_ino, dentry); attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; if (open_flags & O_TRUNC) { attr.ia_size = 0; attr.ia_valid |= ATTR_SIZE; } trace_nfs_create_enter(dir, dentry, open_flags); error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags); trace_nfs_create_exit(dir, dentry, open_flags, error); if (error != 0) goto out_err; return 0; out_err: d_drop(dentry); return error; } int nfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return nfs_do_create(dir, dentry, mode, excl ? O_EXCL : 0); } EXPORT_SYMBOL_GPL(nfs_create); /* * See comments for nfs_proc_create regarding failed operations. */ int nfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct iattr attr; int status; dfprintk(VFS, "NFS: mknod(%s/%lu), %pd\n", dir->i_sb->s_id, dir->i_ino, dentry); attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; trace_nfs_mknod_enter(dir, dentry); status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev); trace_nfs_mknod_exit(dir, dentry, status); if (status != 0) goto out_err; return 0; out_err: d_drop(dentry); return status; } EXPORT_SYMBOL_GPL(nfs_mknod); /* * See comments for nfs_proc_create regarding failed operations. */ struct dentry *nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct iattr attr; struct dentry *ret; dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n", dir->i_sb->s_id, dir->i_ino, dentry); attr.ia_valid = ATTR_MODE; attr.ia_mode = mode | S_IFDIR; trace_nfs_mkdir_enter(dir, dentry); ret = NFS_PROTO(dir)->mkdir(dir, dentry, &attr); trace_nfs_mkdir_exit(dir, dentry, PTR_ERR_OR_ZERO(ret)); return ret; } EXPORT_SYMBOL_GPL(nfs_mkdir); static void nfs_dentry_handle_enoent(struct dentry *dentry) { if (simple_positive(dentry)) d_delete(dentry); } static void nfs_dentry_remove_handle_error(struct inode *dir, struct dentry *dentry, int error) { switch (error) { case -ENOENT: if (d_really_is_positive(dentry)) d_delete(dentry); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); break; case 0: nfs_d_prune_case_insensitive_aliases(d_inode(dentry)); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); } } int nfs_rmdir(struct inode *dir, struct dentry *dentry) { int error; dfprintk(VFS, "NFS: rmdir(%s/%lu), %pd\n", dir->i_sb->s_id, dir->i_ino, dentry); trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { down_write(&NFS_I(d_inode(dentry))->rmdir_sem); error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { case 0: clear_nlink(d_inode(dentry)); break; case -ENOENT: nfs_dentry_handle_enoent(dentry); } up_write(&NFS_I(d_inode(dentry))->rmdir_sem); } else error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); nfs_dentry_remove_handle_error(dir, dentry, error); trace_nfs_rmdir_exit(dir, dentry, error); return error; } EXPORT_SYMBOL_GPL(nfs_rmdir); /* * Remove a file after making sure there are no pending writes, * and after checking that the file has only one user. * * We invalidate the attribute cache and free the inode prior to the operation * to avoid possible races if the server reuses the inode. 
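 *
 * (Background, illustrative: when the file is still in use, the unlink
 * path below silly-renames it to a ".nfsXXXX"-style name instead, and
 * the deferred REMOVE is sent once the last local user closes it.)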
*/ static int nfs_safe_remove(struct dentry *dentry) { struct inode *dir = d_inode(dentry->d_parent); struct inode *inode = d_inode(dentry); int error = -EBUSY; dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry); /* If the dentry was sillyrenamed, we simply call d_delete() */ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { error = 0; goto out; } trace_nfs_remove_enter(dir, dentry); if (inode != NULL) { error = NFS_PROTO(dir)->remove(dir, dentry); if (error == 0) nfs_drop_nlink(inode); } else error = NFS_PROTO(dir)->remove(dir, dentry); if (error == -ENOENT) nfs_dentry_handle_enoent(dentry); trace_nfs_remove_exit(dir, dentry, error); out: return error; } /* We do silly rename. In case sillyrename() returns -EBUSY, the inode * belongs to an active ".nfs..." file and we return -EBUSY. * * If sillyrename() returns 0, we do nothing, otherwise we unlink. */ int nfs_unlink(struct inode *dir, struct dentry *dentry) { int error; dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id, dir->i_ino, dentry); trace_nfs_unlink_enter(dir, dentry); spin_lock(&dentry->d_lock); if (d_count(dentry) > 1 && !test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(d_inode(dentry))->flags)) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ write_inode_now(d_inode(dentry), 0); error = nfs_sillyrename(dir, dentry); goto out; } /* We must prevent any concurrent open until the unlink * completes. ->d_revalidate will wait for ->d_fsdata * to clear. We set it here to ensure no lookup succeeds until * the unlink is complete on the server. */ error = -ETXTBSY; if (WARN_ON(dentry->d_flags & DCACHE_NFSFS_RENAMED) || WARN_ON(dentry->d_fsdata == NFS_FSDATA_BLOCKED)) { spin_unlock(&dentry->d_lock); goto out; } block_revalidate(dentry); spin_unlock(&dentry->d_lock); error = nfs_safe_remove(dentry); nfs_dentry_remove_handle_error(dir, dentry, error); unblock_revalidate(dentry); out: trace_nfs_unlink_exit(dir, dentry, error); return error; } EXPORT_SYMBOL_GPL(nfs_unlink); /* * To create a symbolic link, most file systems instantiate a new inode, * add a page to it containing the path, then write it out to the disk * using prepare_write/commit_write. * * Unfortunately the NFS client can't create the in-core inode first * because it needs a file handle to create an in-core inode (see * fs/nfs/inode.c:nfs_fhget). We only have a file handle *after* the * symlink request has completed on the server. * * So instead we allocate a raw page, copy the symname into it, then do * the SYMLINK request with the page as the buffer. If it succeeds, we * now have a new file handle and can instantiate an in-core NFS inode * and move the raw page into its mapping. 
*/ int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct folio *folio; char *kaddr; struct iattr attr; unsigned int pathlen = strlen(symname); int error; dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s)\n", dir->i_sb->s_id, dir->i_ino, dentry, symname); if (pathlen > PAGE_SIZE) return -ENAMETOOLONG; attr.ia_mode = S_IFLNK | S_IRWXUGO; attr.ia_valid = ATTR_MODE; folio = folio_alloc(GFP_USER, 0); if (!folio) return -ENOMEM; kaddr = folio_address(folio); memcpy(kaddr, symname, pathlen); if (pathlen < PAGE_SIZE) memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen); trace_nfs_symlink_enter(dir, dentry); error = NFS_PROTO(dir)->symlink(dir, dentry, folio, pathlen, &attr); trace_nfs_symlink_exit(dir, dentry, error); if (error != 0) { dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n", dir->i_sb->s_id, dir->i_ino, dentry, symname, error); d_drop(dentry); folio_put(folio); return error; } nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); /* * No big deal if we can't add this page to the page cache here. * READLINK will get the missing page from the server if needed. */ if (filemap_add_folio(d_inode(dentry)->i_mapping, folio, 0, GFP_KERNEL) == 0) { folio_mark_uptodate(folio); folio_unlock(folio); } folio_put(folio); return 0; } EXPORT_SYMBOL_GPL(nfs_symlink); int nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); int error; dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n", old_dentry, dentry); trace_nfs_link_enter(inode, dir, dentry); d_drop(dentry); if (S_ISREG(inode->i_mode)) nfs_sync_inode(inode); error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name); if (error == 0) { nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); ihold(inode); d_add(dentry, inode); } trace_nfs_link_exit(inode, dir, dentry, error); return error; } EXPORT_SYMBOL_GPL(nfs_link); static void nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data) { struct dentry *new_dentry = data->new_dentry; unblock_revalidate(new_dentry); } static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry, struct dentry *new_dentry) { struct nfs_server *server = NFS_SB(old_dentry->d_sb); if (old_dentry->d_parent != new_dentry->d_parent) return false; if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE) return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN); return true; } /* * RENAME * FIXME: Some nfsds, like the Linux user space nfsd, may generate a * different file handle for the same inode after a rename (e.g. when * moving to a different directory). A fail-safe method to do so would * be to look up old_dir/old_name, create a link to new_dir/new_name and * rename the old file using the sillyrename stuff. This way, the original * file in old_dir will go away when the last process iput()s the inode. * * FIXED. * * It actually works quite well. One needs to have the possibility for * at least one ".nfs..." file in each directory the file ever gets * moved or linked to which happens automagically with the new * implementation that only depends on the dcache stuff instead of * using the inode layer * * Unfortunately, things are a little more complicated than indicated * above. For a cross-directory move, we want to make sure we can get * rid of the old inode after the operation. This means there must be * no pending writes (if it's a file), and the use count must be 1. * If these conditions are met, we can drop the dentries before doing * the rename. 
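 *
 * (Illustrative note: a rename target that still has extra references,
 * typically an open file, cannot simply be unlinked, so nfs_rename()
 * below silly-renames it first and continues with a freshly allocated
 * copy of the target dentry's name.)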
*/ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct dentry *dentry = NULL; struct rpc_task *task; bool must_unblock = false; int error = -EBUSY; if (flags) return -EINVAL; dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n", old_dentry, new_dentry, d_count(new_dentry)); trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry); /* * For non-directories, check whether the target is busy and if so, * make a copy of the dentry and then do a silly-rename. If the * silly-rename succeeds, the copied dentry is hashed and becomes * the new target. */ if (new_inode && !S_ISDIR(new_inode->i_mode)) { /* We must prevent any concurrent open until the unlink * completes. ->d_revalidate will wait for ->d_fsdata * to clear. We set it here to ensure no lookup succeeds until * the unlink is complete on the server. */ error = -ETXTBSY; if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) || WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED)) goto out; spin_lock(&new_dentry->d_lock); if (d_count(new_dentry) > 2) { int err; spin_unlock(&new_dentry->d_lock); /* copy the target dentry's name */ dentry = d_alloc(new_dentry->d_parent, &new_dentry->d_name); if (!dentry) goto out; /* silly-rename the existing target ... */ err = nfs_sillyrename(new_dir, new_dentry); if (err) goto out; new_dentry = dentry; new_inode = NULL; } else { block_revalidate(new_dentry); must_unblock = true; spin_unlock(&new_dentry->d_lock); } } if (S_ISREG(old_inode->i_mode) && nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry)) nfs_sync_inode(old_inode); task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, must_unblock ? nfs_unblock_rename : NULL); if (IS_ERR(task)) { if (must_unblock) unblock_revalidate(new_dentry); error = PTR_ERR(task); goto out; } error = rpc_wait_for_completion_task(task); if (error != 0) { ((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1; /* Paired with the atomic_dec_and_test() barrier in rpc_do_put_task() */ smp_wmb(); } else error = task->tk_status; rpc_put_task(task); /* Ensure the inode attributes are revalidated */ if (error == 0) { spin_lock(&old_inode->i_lock); NFS_I(old_inode)->attr_gencount = nfs_inc_attr_generation_counter(); nfs_set_cache_invalid(old_inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | NFS_INO_REVAL_FORCED); spin_unlock(&old_inode->i_lock); } out: trace_nfs_rename_exit(old_dir, old_dentry, new_dir, new_dentry, error); if (!error) { if (new_inode != NULL) nfs_drop_nlink(new_inode); /* * The d_move() should be here instead of in an async RPC completion * handler because we need the proper locks to move the dentry. If * we're interrupted by a signal, the async RPC completion handler * should mark the directories for revalidation. */ d_move(old_dentry, new_dentry); nfs_set_verifier(old_dentry, nfs_save_change_attribute(new_dir)); } else if (error == -ENOENT) nfs_dentry_handle_enoent(old_dentry); /* new dentry created? 
*/ if (dentry) dput(dentry); return error; } EXPORT_SYMBOL_GPL(nfs_rename); static DEFINE_SPINLOCK(nfs_access_lru_lock); static LIST_HEAD(nfs_access_lru_list); static atomic_long_t nfs_access_nr_entries; static unsigned long nfs_access_max_cachesize = 4*1024*1024; module_param(nfs_access_max_cachesize, ulong, 0644); MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length"); static void nfs_access_free_entry(struct nfs_access_entry *entry) { put_group_info(entry->group_info); kfree_rcu(entry, rcu_head); smp_mb__before_atomic(); atomic_long_dec(&nfs_access_nr_entries); smp_mb__after_atomic(); } static void nfs_access_free_list(struct list_head *head) { struct nfs_access_entry *cache; while (!list_empty(head)) { cache = list_entry(head->next, struct nfs_access_entry, lru); list_del(&cache->lru); nfs_access_free_entry(cache); } } static unsigned long nfs_do_access_cache_scan(unsigned int nr_to_scan) { LIST_HEAD(head); struct nfs_inode *nfsi, *next; struct nfs_access_entry *cache; long freed = 0; spin_lock(&nfs_access_lru_lock); list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) { struct inode *inode; if (nr_to_scan-- == 0) break; inode = &nfsi->vfs_inode; spin_lock(&inode->i_lock); if (list_empty(&nfsi->access_cache_entry_lru)) goto remove_lru_entry; cache = list_entry(nfsi->access_cache_entry_lru.next, struct nfs_access_entry, lru); list_move(&cache->lru, &head); rb_erase(&cache->rb_node, &nfsi->access_cache); freed++; if (!list_empty(&nfsi->access_cache_entry_lru)) list_move_tail(&nfsi->access_cache_inode_lru, &nfs_access_lru_list); else { remove_lru_entry: list_del_init(&nfsi->access_cache_inode_lru); smp_mb__before_atomic(); clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); smp_mb__after_atomic(); } spin_unlock(&inode->i_lock); } spin_unlock(&nfs_access_lru_lock); nfs_access_free_list(&head); return freed; } unsigned long nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc) { int nr_to_scan = sc->nr_to_scan; gfp_t gfp_mask = sc->gfp_mask; if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) return SHRINK_STOP; return nfs_do_access_cache_scan(nr_to_scan); } unsigned long nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc) { return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries)); } static void nfs_access_cache_enforce_limit(void) { long nr_entries = atomic_long_read(&nfs_access_nr_entries); unsigned long diff; unsigned int nr_to_scan; if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize) return; nr_to_scan = 100; diff = nr_entries - nfs_access_max_cachesize; if (diff < nr_to_scan) nr_to_scan = diff; nfs_do_access_cache_scan(nr_to_scan); } static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) { struct rb_root *root_node = &nfsi->access_cache; struct rb_node *n; struct nfs_access_entry *entry; /* Unhook entries from the cache */ while ((n = rb_first(root_node)) != NULL) { entry = rb_entry(n, struct nfs_access_entry, rb_node); rb_erase(n, root_node); list_move(&entry->lru, head); } nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS; } void nfs_access_zap_cache(struct inode *inode) { LIST_HEAD(head); if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0) return; /* Remove from the global LRU list */ spin_lock(&nfs_access_lru_lock); if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) list_del_init(&NFS_I(inode)->access_cache_inode_lru); spin_lock(&inode->i_lock); __nfs_access_zap_cache(NFS_I(inode), &head); spin_unlock(&inode->i_lock);
spin_unlock(&nfs_access_lru_lock); nfs_access_free_list(&head); } EXPORT_SYMBOL_GPL(nfs_access_zap_cache); static int access_cmp(const struct cred *a, const struct nfs_access_entry *b) { struct group_info *ga, *gb; int g; if (uid_lt(a->fsuid, b->fsuid)) return -1; if (uid_gt(a->fsuid, b->fsuid)) return 1; if (gid_lt(a->fsgid, b->fsgid)) return -1; if (gid_gt(a->fsgid, b->fsgid)) return 1; ga = a->group_info; gb = b->group_info; if (ga == gb) return 0; if (ga == NULL) return -1; if (gb == NULL) return 1; if (ga->ngroups < gb->ngroups) return -1; if (ga->ngroups > gb->ngroups) return 1; for (g = 0; g < ga->ngroups; g++) { if (gid_lt(ga->gid[g], gb->gid[g])) return -1; if (gid_gt(ga->gid[g], gb->gid[g])) return 1; } return 0; } static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred) { struct rb_node *n = NFS_I(inode)->access_cache.rb_node; while (n != NULL) { struct nfs_access_entry *entry = rb_entry(n, struct nfs_access_entry, rb_node); int cmp = access_cmp(cred, entry); if (cmp < 0) n = n->rb_left; else if (cmp > 0) n = n->rb_right; else return entry; } return NULL; } static u64 nfs_access_login_time(const struct task_struct *task, const struct cred *cred) { const struct task_struct *parent; const struct cred *pcred; u64 ret; rcu_read_lock(); for (;;) { parent = rcu_dereference(task->real_parent); pcred = __task_cred(parent); if (parent == task || cred_fscmp(pcred, cred) != 0) break; task = parent; } ret = task->start_time; rcu_read_unlock(); return ret; } static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); u64 login_time = nfs_access_login_time(current, cred); struct nfs_access_entry *cache; bool retry = true; int err; spin_lock(&inode->i_lock); for(;;) { if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS) goto out_zap; cache = nfs_access_search_rbtree(inode, cred); err = -ENOENT; if (cache == NULL) goto out; /* Found an entry, is our attribute cache valid? */ if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) break; if (!retry) break; err = -ECHILD; if (!may_block) goto out; spin_unlock(&inode->i_lock); err = __nfs_revalidate_inode(NFS_SERVER(inode), inode); if (err) return err; spin_lock(&inode->i_lock); retry = false; } err = -ENOENT; if ((s64)(login_time - cache->timestamp) > 0) goto out; *mask = cache->mask; list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru); err = 0; out: spin_unlock(&inode->i_lock); return err; out_zap: spin_unlock(&inode->i_lock); nfs_access_zap_cache(inode); return -ENOENT; } static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask) { /* Only check the most recently returned cache entry, * but do it without locking. 
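 *
 * (Illustrative note: the most recently used entry sits at the tail of
 * access_cache_entry_lru because the locked lookup path above moves an
 * entry to the tail on every hit, so a single lockless peek at the tail
 * is a cheap best-effort check.)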
*/ struct nfs_inode *nfsi = NFS_I(inode); u64 login_time = nfs_access_login_time(current, cred); struct nfs_access_entry *cache; int err = -ECHILD; struct list_head *lh; rcu_read_lock(); if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS) goto out; lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru)); cache = list_entry(lh, struct nfs_access_entry, lru); if (lh == &nfsi->access_cache_entry_lru || access_cmp(cred, cache) != 0) cache = NULL; if (cache == NULL) goto out; if ((s64)(login_time - cache->timestamp) > 0) goto out; if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) goto out; *mask = cache->mask; err = 0; out: rcu_read_unlock(); return err; } int nfs_access_get_cached(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block) { int status; status = nfs_access_get_cached_rcu(inode, cred, mask); if (status != 0) status = nfs_access_get_cached_locked(inode, cred, mask, may_block); return status; } EXPORT_SYMBOL_GPL(nfs_access_get_cached); static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set, const struct cred *cred) { struct nfs_inode *nfsi = NFS_I(inode); struct rb_root *root_node = &nfsi->access_cache; struct rb_node **p = &root_node->rb_node; struct rb_node *parent = NULL; struct nfs_access_entry *entry; int cmp; spin_lock(&inode->i_lock); while (*p != NULL) { parent = *p; entry = rb_entry(parent, struct nfs_access_entry, rb_node); cmp = access_cmp(cred, entry); if (cmp < 0) p = &parent->rb_left; else if (cmp > 0) p = &parent->rb_right; else goto found; } rb_link_node(&set->rb_node, parent, p); rb_insert_color(&set->rb_node, root_node); list_add_tail(&set->lru, &nfsi->access_cache_entry_lru); spin_unlock(&inode->i_lock); return; found: rb_replace_node(parent, &set->rb_node, root_node); list_add_tail(&set->lru, &nfsi->access_cache_entry_lru); list_del(&entry->lru); spin_unlock(&inode->i_lock); nfs_access_free_entry(entry); } void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set, const struct cred *cred) { struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) return; RB_CLEAR_NODE(&cache->rb_node); cache->fsuid = cred->fsuid; cache->fsgid = cred->fsgid; cache->group_info = get_group_info(cred->group_info); cache->mask = set->mask; cache->timestamp = ktime_get_ns(); /* The above field assignments must be visible * before this item appears on the lru. We cannot easily * use rcu_assign_pointer, so just force the memory barrier. 
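 *
 * (Intended pairing, illustrative: the smp_wmb() below orders the
 * field stores above before the LRU/rbtree insertion, so a lockless
 * reader in nfs_access_get_cached_rcu() that observes the new entry
 * also observes its initialized fields.)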
*/ smp_wmb(); nfs_access_add_rbtree(inode, cache, cred); /* Update accounting */ smp_mb__before_atomic(); atomic_long_inc(&nfs_access_nr_entries); smp_mb__after_atomic(); /* Add inode to global LRU list */ if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) { spin_lock(&nfs_access_lru_lock); if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list); spin_unlock(&nfs_access_lru_lock); } nfs_access_cache_enforce_limit(); } EXPORT_SYMBOL_GPL(nfs_access_add_cache); #define NFS_MAY_READ (NFS_ACCESS_READ) #define NFS_MAY_WRITE (NFS_ACCESS_MODIFY | \ NFS_ACCESS_EXTEND | \ NFS_ACCESS_DELETE) #define NFS_FILE_MAY_WRITE (NFS_ACCESS_MODIFY | \ NFS_ACCESS_EXTEND) #define NFS_DIR_MAY_WRITE NFS_MAY_WRITE #define NFS_MAY_LOOKUP (NFS_ACCESS_LOOKUP) #define NFS_MAY_EXECUTE (NFS_ACCESS_EXECUTE) static int nfs_access_calc_mask(u32 access_result, umode_t umode) { int mask = 0; if (access_result & NFS_MAY_READ) mask |= MAY_READ; if (S_ISDIR(umode)) { if ((access_result & NFS_DIR_MAY_WRITE) == NFS_DIR_MAY_WRITE) mask |= MAY_WRITE; if ((access_result & NFS_MAY_LOOKUP) == NFS_MAY_LOOKUP) mask |= MAY_EXEC; } else if (S_ISREG(umode)) { if ((access_result & NFS_FILE_MAY_WRITE) == NFS_FILE_MAY_WRITE) mask |= MAY_WRITE; if ((access_result & NFS_MAY_EXECUTE) == NFS_MAY_EXECUTE) mask |= MAY_EXEC; } else if (access_result & NFS_MAY_WRITE) mask |= MAY_WRITE; return mask; } void nfs_access_set_mask(struct nfs_access_entry *entry, u32 access_result) { entry->mask = access_result; } EXPORT_SYMBOL_GPL(nfs_access_set_mask); static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) { struct nfs_access_entry cache; bool may_block = (mask & MAY_NOT_BLOCK) == 0; int cache_mask = -1; int status; trace_nfs_access_enter(inode); status = nfs_access_get_cached(inode, cred, &cache.mask, may_block); if (status == 0) goto out_cached; status = -ECHILD; if (!may_block) goto out; /* * Determine which access bits we want to ask for... 
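 *
 * (Worked example, illustrative: for a regular file where the server
 * grants NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND,
 * nfs_access_calc_mask() above yields MAY_READ | MAY_WRITE, so a
 * request that also includes MAY_EXEC fails with -EACCES.)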
*/ cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | nfs_access_xattr_mask(NFS_SERVER(inode)); if (S_ISDIR(inode->i_mode)) cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP; else cache.mask |= NFS_ACCESS_EXECUTE; status = NFS_PROTO(inode)->access(inode, &cache, cred); if (status != 0) { if (status == -ESTALE) { if (!S_ISDIR(inode->i_mode)) nfs_set_inode_stale(inode); else nfs_zap_caches(inode); } goto out; } nfs_access_add_cache(inode, &cache, cred); out_cached: cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode); if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0) status = -EACCES; out: trace_nfs_access_exit(inode, mask, cache_mask, status); return status; } static int nfs_open_permission_mask(int openflags) { int mask = 0; if (openflags & __FMODE_EXEC) { /* ONLY check exec rights */ mask = MAY_EXEC; } else { if ((openflags & O_ACCMODE) != O_WRONLY) mask |= MAY_READ; if ((openflags & O_ACCMODE) != O_RDONLY) mask |= MAY_WRITE; } return mask; } int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags) { return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags)); } EXPORT_SYMBOL_GPL(nfs_may_open); static int nfs_execute_ok(struct inode *inode, int mask) { struct nfs_server *server = NFS_SERVER(inode); int ret = 0; if (S_ISDIR(inode->i_mode)) return 0; if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_MODE)) { if (mask & MAY_NOT_BLOCK) return -ECHILD; ret = __nfs_revalidate_inode(server, inode); } if (ret == 0 && !execute_ok(inode)) ret = -EACCES; return ret; } int nfs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { const struct cred *cred = current_cred(); int res = 0; nfs_inc_stats(inode, NFSIOS_VFSACCESS); if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) goto out; /* Is this sys_access() ? */ if (mask & (MAY_ACCESS | MAY_CHDIR)) goto force_lookup; switch (inode->i_mode & S_IFMT) { case S_IFLNK: goto out; case S_IFREG: if ((mask & MAY_OPEN) && nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)) return 0; break; case S_IFDIR: /* * Optimize away all write operations, since the server * will check permissions when we perform the op. */ if ((mask & MAY_WRITE) && !(mask & MAY_READ)) goto out; } force_lookup: if (!NFS_PROTO(inode)->access) goto out_notsup; res = nfs_do_access(inode, cred, mask); out: if (!res && (mask & MAY_EXEC)) res = nfs_execute_ok(inode, mask); dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n", inode->i_sb->s_id, inode->i_ino, mask, res); return res; out_notsup: if (mask & MAY_NOT_BLOCK) return -ECHILD; res = nfs_revalidate_inode(inode, NFS_INO_INVALID_MODE | NFS_INO_INVALID_OTHER); if (res == 0) res = generic_permission(&nop_mnt_idmap, inode, mask); goto out; } EXPORT_SYMBOL_GPL(nfs_permission);
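/*
 * Editor's illustrative sketch (not part of the kernel sources above):
 * a minimal userspace rendering of the open-flag mapping implemented by
 * nfs_open_permission_mask(). The MAY_* and exec-flag values below are
 * stand-ins chosen for this demo, since linux/fs.h is not usable outside
 * the kernel. The point it demonstrates: O_RDWR is not O_RDONLY|O_WRONLY,
 * so the code tests O_ACCMODE with "!=" rather than with bitwise AND.
 */
#include <fcntl.h>
#include <stdio.h>

#define MAY_EXEC	0x01	/* stand-ins for the kernel's MAY_* bits */
#define MAY_WRITE	0x02
#define MAY_READ	0x04
#define FMODE_EXEC_STUB	0x20	/* stand-in for __FMODE_EXEC */

static int open_permission_mask(int openflags)
{
	int mask = 0;

	if (openflags & FMODE_EXEC_STUB)
		return MAY_EXEC;	/* exec-only open: check exec rights only */
	if ((openflags & O_ACCMODE) != O_WRONLY)
		mask |= MAY_READ;
	if ((openflags & O_ACCMODE) != O_RDONLY)
		mask |= MAY_WRITE;
	return mask;
}

int main(void)
{
	/* O_RDONLY -> READ, O_WRONLY -> WRITE, O_RDWR -> READ|WRITE */
	printf("O_RDONLY: %#x\n", open_permission_mask(O_RDONLY));
	printf("O_WRONLY: %#x\n", open_permission_mask(O_WRONLY));
	printf("O_RDWR:   %#x\n", open_permission_mask(O_RDWR));
	return 0;
}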
// SPDX-License-Identifier: GPL-2.0
/*
 * Block device concurrent positioning ranges.
 *
 * Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"

static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar, char *buf)
{
	return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
			     char *buf)
{
	return sprintf(buf, "%llu\n", iar->nr_sectors);
}

struct blk_ia_range_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};

static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
	.attr = { .name = "sector", .mode = 0444 },
	.show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
	.attr = { .name = "nr_sectors", .mode = 0444 },
	.show = blk_ia_range_nr_sectors_show,
};

static struct attribute *blk_ia_range_attrs[] = {
	&blk_ia_range_sector_entry.attr,
	&blk_ia_range_nr_sectors_entry.attr,
	NULL,
};
ATTRIBUTE_GROUPS(blk_ia_range);

static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct blk_ia_range_sysfs_entry *entry =
		container_of(attr, struct blk_ia_range_sysfs_entry, attr);
	struct blk_independent_access_range *iar =
		container_of(kobj, struct blk_independent_access_range, kobj);

	return entry->show(iar, buf);
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
	.show	= blk_ia_range_sysfs_show,
};

/*
 * Independent access range entries are not freed individually, but altogether
 * with struct blk_independent_access_ranges and its array of ranges. Since
 * kobject_add() takes a reference on the parent kobject contained in
 * struct blk_independent_access_ranges, the array of independent access range
 * entries cannot be freed until kobject_del() is called for all entries.
 * So we do not need to do anything here, but still need this no-op release
 * operation to avoid complaints from the kobject code.
 */
static void blk_ia_range_sysfs_nop_release(struct kobject *kobj)
{
}

static const struct kobj_type blk_ia_range_ktype = {
	.sysfs_ops	= &blk_ia_range_sysfs_ops,
	.default_groups	= blk_ia_range_groups,
	.release	= blk_ia_range_sysfs_nop_release,
};

/*
 * This will be executed only after all independent access range entries are
 * removed with kobject_del(), at which point, it is safe to free everything,
 * including the array of ranges.
 */
static void blk_ia_ranges_sysfs_release(struct kobject *kobj)
{
	struct blk_independent_access_ranges *iars =
		container_of(kobj, struct blk_independent_access_ranges, kobj);

	kfree(iars);
}

static const struct kobj_type blk_ia_ranges_ktype = {
	.release	= blk_ia_ranges_sysfs_release,
};

/**
 * disk_register_independent_access_ranges - register with sysfs a set of
 *		independent access ranges
 * @disk:	Target disk
 *
 * Register with sysfs a set of independent access ranges for @disk.
 */
int disk_register_independent_access_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	struct request_queue *q = disk->queue;
	int i, ret;

	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return 0;

	/*
	 * At this point, iars is the new set of sector access ranges that
	 * needs to be registered with sysfs.
	 */
	WARN_ON(iars->sysfs_registered);
	ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
				   &disk->queue_kobj, "%s",
				   "independent_access_ranges");
	if (ret) {
		disk->ia_ranges = NULL;
		kobject_put(&iars->kobj);
		return ret;
	}

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		ret = kobject_init_and_add(&iars->ia_range[i].kobj,
					   &blk_ia_range_ktype, &iars->kobj,
					   "%d", i);
		if (ret) {
			while (--i >= 0)
				kobject_del(&iars->ia_range[i].kobj);
			kobject_del(&iars->kobj);
			kobject_put(&iars->kobj);
			return ret;
		}
	}

	iars->sysfs_registered = true;

	return 0;
}

void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return;

	if (iars->sysfs_registered) {
		for (i = 0; i < iars->nr_ia_ranges; i++)
			kobject_del(&iars->ia_range[i].kobj);
		kobject_del(&iars->kobj);
		kobject_put(&iars->kobj);
	} else {
		kfree(iars);
	}

	disk->ia_ranges = NULL;
}

static struct blk_independent_access_range *
disk_find_ia_range(struct blk_independent_access_ranges *iars,
		   sector_t sector)
{
	struct blk_independent_access_range *iar;
	int i;

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		iar = &iars->ia_range[i];
		if (sector >= iar->sector &&
		    sector < iar->sector + iar->nr_sectors)
			return iar;
	}

	return NULL;
}

static bool disk_check_ia_ranges(struct gendisk *disk,
				 struct blk_independent_access_ranges *iars)
{
	struct blk_independent_access_range *iar, *tmp;
	sector_t capacity = get_capacity(disk);
	sector_t sector = 0;
	int i;

	if (WARN_ON_ONCE(!iars->nr_ia_ranges))
		return false;

	/*
	 * While sorting the ranges in increasing LBA order, check that the
	 * ranges do not overlap, that there are no sector holes and that all
	 * sectors belong to one range.
	 */
	for (i = 0; i < iars->nr_ia_ranges; i++) {
		tmp = disk_find_ia_range(iars, sector);
		if (!tmp || tmp->sector != sector) {
			pr_warn("Invalid non-contiguous independent access ranges\n");
			return false;
		}

		iar = &iars->ia_range[i];
		if (tmp != iar) {
			swap(iar->sector, tmp->sector);
			swap(iar->nr_sectors, tmp->nr_sectors);
		}

		sector += iar->nr_sectors;
	}

	if (sector != capacity) {
		pr_warn("Independent access ranges do not match disk capacity\n");
		return false;
	}

	return true;
}

static bool disk_ia_ranges_changed(struct gendisk *disk,
				   struct blk_independent_access_ranges *new)
{
	struct blk_independent_access_ranges *old = disk->ia_ranges;
	int i;

	if (!old)
		return true;

	if (old->nr_ia_ranges != new->nr_ia_ranges)
		return true;

	for (i = 0; i < old->nr_ia_ranges; i++) {
		if (new->ia_range[i].sector != old->ia_range[i].sector ||
		    new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
			return true;
	}

	return false;
}

/**
 * disk_alloc_independent_access_ranges - Allocate an independent access ranges
 *                                        data structure
 * @disk:		target disk
 * @nr_ia_ranges:	Number of independent access ranges
 *
 * Allocate a struct blk_independent_access_ranges structure with @nr_ia_ranges
 * access range descriptors.
 */
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges)
{
	struct blk_independent_access_ranges *iars;

	iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges),
			    GFP_KERNEL, disk->queue->node);
	if (iars)
		iars->nr_ia_ranges = nr_ia_ranges;
	return iars;
}
EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges);

/**
 * disk_set_independent_access_ranges - Set a disk independent access ranges
 * @disk:	target disk
 * @iars:	independent access ranges structure
 *
 * Set the independent access ranges information of the request queue
 * of @disk to @iars. If @iars is NULL, the independent access ranges
 * structure already set is cleared. If there are no differences between
 * @iars and the independent access ranges structure already set, @iars
 * is freed.
 */
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->sysfs_lock);
	if (iars && !disk_check_ia_ranges(disk, iars)) {
		kfree(iars);
		iars = NULL;
	}
	if (iars && !disk_ia_ranges_changed(disk, iars)) {
		kfree(iars);
		goto unlock;
	}

	/*
	 * This may be called for a registered queue. E.g. during a device
	 * revalidation. If that is the case, we need to unregister the old
	 * set of independent access ranges and register the new set. If the
	 * queue is not registered, registration of the device request queue
	 * will register the independent access ranges.
	 */
	disk_unregister_independent_access_ranges(disk);
	disk->ia_ranges = iars;
	if (blk_queue_registered(q))
		disk_register_independent_access_ranges(disk);
unlock:
	mutex_unlock(&q->sysfs_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
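/*
 * Editor's illustrative sketch (not part of the kernel sources above):
 * how a block driver might publish two independent access ranges, e.g.
 * for a dual-actuator disk, using the two exported helpers defined above.
 * The function name and the half-and-half capacity split are assumptions
 * made for the example; real callers derive the ranges from what the
 * device reports.
 */
static void example_set_two_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars;
	sector_t half = get_capacity(disk) / 2;

	iars = disk_alloc_independent_access_ranges(disk, 2);
	if (!iars)
		return;		/* publishing no ranges is a valid fallback */

	iars->ia_range[0].sector = 0;
	iars->ia_range[0].nr_sectors = half;
	iars->ia_range[1].sector = half;
	iars->ia_range[1].nr_sectors = get_capacity(disk) - half;

	/* Validates and sorts the ranges, and registers them with sysfs
	 * if the queue is already registered.
	 */
	disk_set_independent_access_ranges(disk, iars);
}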
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP virtual clock driver
 *
 * Copyright 2021 NXP
 */
#include <linux/slab.h>
#include <linux/hashtable.h>
#include "ptp_private.h"

#define PTP_VCLOCK_CC_SHIFT		31
#define PTP_VCLOCK_CC_MULT		(1 << PTP_VCLOCK_CC_SHIFT)
#define PTP_VCLOCK_FADJ_SHIFT		9
#define PTP_VCLOCK_FADJ_DENOMINATOR	15625ULL
#define PTP_VCLOCK_REFRESH_INTERVAL	(HZ * 2)

/* protects vclock_hash addition/deletion */
static DEFINE_SPINLOCK(vclock_hash_lock);

static DEFINE_READ_MOSTLY_HASHTABLE(vclock_hash, 8);

static void ptp_vclock_hash_add(struct ptp_vclock *vclock)
{
	spin_lock(&vclock_hash_lock);

	hlist_add_head_rcu(&vclock->vclock_hash_node,
			   &vclock_hash[vclock->clock->index % HASH_SIZE(vclock_hash)]);

	spin_unlock(&vclock_hash_lock);
}

static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
{
	spin_lock(&vclock_hash_lock);

	hlist_del_init_rcu(&vclock->vclock_hash_node);

	spin_unlock(&vclock_hash_lock);

	synchronize_rcu();
}

static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	s64 adj;

	adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
	adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	timecounter_read(&vclock->tc);
	vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
	mutex_unlock(&vclock->lock);

	return 0;
}

static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	timecounter_adjtime(&vclock->tc, delta);
	mutex_unlock(&vclock->lock);

	return 0;
}

static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
			      struct timespec64 *ts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	u64 ns;

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	ns = timecounter_read(&vclock->tc);
	mutex_unlock(&vclock->lock);
	*ts = ns_to_timespec64(ns);

	return 0;
}

static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
			       struct timespec64 *ts,
			       struct ptp_system_timestamp *sts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct ptp_clock *pptp = vclock->pclock;
	struct timespec64 pts;
	int err;
	u64 ns;

	err = pptp->info->getcyclesx64(pptp->info, &pts, sts);
	if (err)
		return err;

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
	mutex_unlock(&vclock->lock);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int ptp_vclock_settime(struct ptp_clock_info *ptp,
			      const struct timespec64 *ts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	u64 ns = timespec64_to_ns(ts);

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	timecounter_init(&vclock->tc, &vclock->cc, ns);
	mutex_unlock(&vclock->lock);

	return 0;
}

static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
				     struct system_device_crosststamp *xtstamp)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct ptp_clock *pptp = vclock->pclock;
	int err;
	u64 ns;

	err = pptp->info->getcrosscycles(pptp->info, xtstamp);
	if (err)
		return err;

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
	mutex_unlock(&vclock->lock);

	xtstamp->device = ns_to_ktime(ns);

	return 0;
}

static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct timespec64 ts;

	ptp_vclock_gettime(&vclock->info, &ts);

	return PTP_VCLOCK_REFRESH_INTERVAL;
}

static const struct ptp_clock_info ptp_vclock_info = {
	.owner		= THIS_MODULE,
	.name		= "ptp virtual clock",
	.max_adj	= 500000000,
	.adjfine	= ptp_vclock_adjfine,
	.adjtime	= ptp_vclock_adjtime,
	.settime64	= ptp_vclock_settime,
	.do_aux_work	= ptp_vclock_refresh,
};

static u64 ptp_vclock_read(const struct cyclecounter *cc)
{
	struct ptp_vclock *vclock = cc_to_vclock(cc);
	struct ptp_clock *ptp = vclock->pclock;
	struct timespec64 ts = {};

	ptp->info->getcycles64(ptp->info, &ts);

	return timespec64_to_ns(&ts);
}

static const struct cyclecounter ptp_vclock_cc = {
	.read	= ptp_vclock_read,
	.mask	= CYCLECOUNTER_MASK(32),
	.mult	= PTP_VCLOCK_CC_MULT,
	.shift	= PTP_VCLOCK_CC_SHIFT,
};

struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
{
	struct ptp_vclock *vclock;

	vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
	if (!vclock)
		return NULL;

	vclock->pclock = pclock;
	vclock->info = ptp_vclock_info;
	if (pclock->info->getcyclesx64)
		vclock->info.gettimex64 = ptp_vclock_gettimex;
	else
		vclock->info.gettime64 = ptp_vclock_gettime;
	if (pclock->info->getcrosscycles)
		vclock->info.getcrosststamp = ptp_vclock_getcrosststamp;
	vclock->cc = ptp_vclock_cc;

	snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
		 pclock->index);

	INIT_HLIST_NODE(&vclock->vclock_hash_node);

	mutex_init(&vclock->lock);

	vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
	if (IS_ERR_OR_NULL(vclock->clock)) {
		kfree(vclock);
		return NULL;
	}

	timecounter_init(&vclock->tc, &vclock->cc, 0);
	ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);

	ptp_vclock_hash_add(vclock);

	return vclock;
}

void ptp_vclock_unregister(struct ptp_vclock *vclock)
{
	ptp_vclock_hash_del(vclock);

	ptp_clock_unregister(vclock->clock);
	kfree(vclock);
}

#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{
	char name[PTP_CLOCK_NAME_LEN] = "";
	struct ptp_clock *ptp;
	struct device *dev;
	int num = 0;

	if (pclock_index < 0)
		return num;

	snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
	dev = class_find_device_by_name(&ptp_class, name);
	if (!dev)
		return num;

	ptp = dev_get_drvdata(dev);

	if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
		put_device(dev);
		return num;
	}

	*vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
	if (!(*vclock_index))
		goto out;

	memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
	num = ptp->n_vclocks;
out:
	mutex_unlock(&ptp->n_vclocks_mux);
	put_device(dev);
	return num;
}
EXPORT_SYMBOL(ptp_get_vclocks_index);

ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
{
	unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);
	struct ptp_vclock *vclock;
	u64 ns;
	u64 vclock_ns = 0;

	ns = ktime_to_ns(*hwtstamp);

	rcu_read_lock();

	hlist_for_each_entry_rcu(vclock, &vclock_hash[hash], vclock_hash_node) {
		if (vclock->clock->index != vclock_index)
			continue;

		if (mutex_lock_interruptible(&vclock->lock))
			break;
		vclock_ns = timecounter_cyc2time(&vclock->tc, ns);
		mutex_unlock(&vclock->lock);
		break;
	}

	rcu_read_unlock();

	return ns_to_ktime(vclock_ns);
}
EXPORT_SYMBOL(ptp_convert_timestamp);
#endif
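/*
 * Editor's note on the adjfine constants above: scaled_ppm is a rate
 * offset in parts per million with a 16-bit binary fractional part, so
 * the required change to cc.mult is
 *     mult * (scaled_ppm / 2^16) / 10^6
 *   = 2^31 * scaled_ppm / (2^16 * 10^6)
 *   = scaled_ppm * 2^9 / 15625,
 * since 10^6 = 2^6 * 15625 and 31 - 16 - 6 = 9. That is exactly
 * PTP_VCLOCK_FADJ_SHIFT and PTP_VCLOCK_FADJ_DENOMINATOR. A standalone
 * userspace check of the arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long scaled_ppm = 10L << 16;	/* +10 ppm in 16.16 fixed point */
	int64_t adj = ((int64_t)scaled_ppm << 9) / 15625;

	/* 2^31 * 10 / 10^6 = 21474.836..., truncated to 21474 */
	printf("mult adjustment for +10 ppm: %lld\n", (long long)adj);
	return 0;
}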
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_SWAB_H
#define _UAPI_LINUX_SWAB_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/bitsperlong.h>
#include <asm/swab.h>

/*
 * casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___constant_swab16(x) ((__u16)(				\
	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
	(((__u16)(x) & (__u16)0xff00U) >> 8)))

#define ___constant_swab32(x) ((__u32)(				\
	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
	(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |		\
	(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |		\
	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))

#define ___constant_swab64(x) ((__u64)(				\
	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
	(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |	\
	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |	\
	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))

#define ___constant_swahw32(x) ((__u32)(			\
	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |		\
	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))

#define ___constant_swahb32(x) ((__u32)(			\
	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |		\
	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))

/*
 * Implement the following as inlines, but define the interface using
 * macros to allow constant folding when possible:
 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
 */

static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#if defined (__arch_swab16)
	return __arch_swab16(val);
#else
	return ___constant_swab16(val);
#endif
}

static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#if defined(__arch_swab32)
	return __arch_swab32(val);
#else
	return ___constant_swab32(val);
#endif
}

static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#if defined (__arch_swab64)
	return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
	__u32 h = val >> 32;
	__u32 l = val & ((1ULL << 32) - 1);
	return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
	return ___constant_swab64(val);
#endif
}

static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
	return __arch_swahw32(val);
#else
	return ___constant_swahw32(val);
#endif
}

static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
	return __arch_swahb32(val);
#else
	return ___constant_swahb32(val);
#endif
}

/**
 * __swab16 - return a byteswapped 16-bit value
 * @x: value to byteswap
 */
#ifdef __HAVE_BUILTIN_BSWAP16__
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
#define __swab16(x)				\
	(__u16)(__builtin_constant_p(x) ?	\
	___constant_swab16(x) :			\
	__fswab16(x))
#endif

/**
 * __swab32 - return a byteswapped 32-bit value
 * @x: value to byteswap
 */
#ifdef __HAVE_BUILTIN_BSWAP32__
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
#define __swab32(x)				\
	(__u32)(__builtin_constant_p(x) ?	\
	___constant_swab32(x) :			\
	__fswab32(x))
#endif

/**
 * __swab64 - return a byteswapped 64-bit value
 * @x: value to byteswap
 */
#ifdef __HAVE_BUILTIN_BSWAP64__
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
#define __swab64(x)				\
	(__u64)(__builtin_constant_p(x) ?	\
	___constant_swab64(x) :			\
	__fswab64(x))
#endif

static __always_inline unsigned long __swab(const unsigned long y)
{
#if __BITS_PER_LONG == 64
	return __swab64(y);
#else /* __BITS_PER_LONG == 32 */
	return __swab32(y);
#endif
}

/**
 * __swahw32 - return a word-swapped 32-bit value
 * @x: value to wordswap
 *
 * __swahw32(0x12340000) is 0x00001234
 */
#define __swahw32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swahw32(x) :		\
	__fswahw32(x))

/**
 * __swahb32 - return a high and low byte-swapped 32-bit value
 * @x: value to byteswap
 *
 * __swahb32(0x12345678) is 0x34127856
 */
#define __swahb32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swahb32(x) :		\
	__fswahb32(x))

/**
 * __swab16p - return a byteswapped 16-bit value from a pointer
 * @p: pointer to a naturally-aligned 16-bit value
 */
static __always_inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
	return __arch_swab16p(p);
#else
	return __swab16(*p);
#endif
}

/**
 * __swab32p - return a byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 */
static __always_inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
	return __arch_swab32p(p);
#else
	return __swab32(*p);
#endif
}

/**
 * __swab64p - return a byteswapped 64-bit value from a pointer
 * @p: pointer to a naturally-aligned 64-bit value
 */
static __always_inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
	return __arch_swab64p(p);
#else
	return __swab64(*p);
#endif
}

/**
 * __swahw32p - return a wordswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping.
 */
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
	return __arch_swahw32p(p);
#else
	return __swahw32(*p);
#endif
}

/**
 * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high/low byteswapping.
 */
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
	return __arch_swahb32p(p);
#else
	return __swahb32(*p);
#endif
}

/**
 * __swab16s - byteswap a 16-bit value in-place
 * @p: pointer to a naturally-aligned 16-bit value
 */
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
	__arch_swab16s(p);
#else
	*p = __swab16p(p);
#endif
}

/**
 * __swab32s - byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 */
static __always_inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
	__arch_swab32s(p);
#else
	*p = __swab32p(p);
#endif
}

/**
 * __swab64s - byteswap a 64-bit value in-place
 * @p: pointer to a naturally-aligned 64-bit value
 */
static __always_inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
	__arch_swab64s(p);
#else
	*p = __swab64p(p);
#endif
}

/**
 * __swahw32s - wordswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping
 */
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
	__arch_swahw32s(p);
#else
	*p = __swahw32p(p);
#endif
}

/**
 * __swahb32s - high and low byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high and low byte swapping
 */
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
	__arch_swahb32s(p);
#else
	*p = __swahb32p(p);
#endif
}

#endif /* _UAPI_LINUX_SWAB_H */
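/*
 * Editor's illustrative sketch (not part of the kernel header above):
 * a quick userspace check of the three swap flavors the header defines,
 * reimplemented with plain uint32_t since the __uXX types and the
 * __arch_* overrides are kernel-internal. Expected output:
 *   swab32:  0x78563412
 *   swahw32: 0x56781234
 *   swahb32: 0x34127856
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)	/* full byte reversal */
{
	return (x << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8) | (x >> 24);
}

static uint32_t swahw32(uint32_t x)	/* swap the two 16-bit halves */
{
	return (x << 16) | (x >> 16);
}

static uint32_t swahb32(uint32_t x)	/* swap bytes within each half */
{
	return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
}

int main(void)
{
	uint32_t v = 0x12345678u;

	printf("swab32:  0x%08x\n", swab32(v));
	printf("swahw32: 0x%08x\n", swahw32(v));
	printf("swahb32: 0x%08x\n", swahb32(v));
	return 0;
}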