Total coverage: 272230 (18%) of 1575623
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * caiaq.c: ALSA driver for caiaq/NativeInstruments devices
 *
 *   Copyright (c) 2007 Daniel Mack <daniel@caiaq.de>
 *                      Karsten Wiese <fzu@wemgehoertderstaat.de>
 */

#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/usb.h>
#include <sound/initval.h>
#include <sound/core.h>
#include <sound/pcm.h>

#include "device.h"
#include "audio.h"
#include "midi.h"
#include "control.h"
#include "input.h"

MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("caiaq USB audio");
MODULE_LICENSE("GPL");

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;		/* Index 0-max */
static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;		/* Id for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;	/* Enable this card */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the caiaq sound device");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the caiaq soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable the caiaq soundcard.");

enum {
        SAMPLERATE_44100        = 0,
        SAMPLERATE_48000        = 1,
        SAMPLERATE_96000        = 2,
        SAMPLERATE_192000       = 3,
        SAMPLERATE_88200        = 4,
        SAMPLERATE_INVALID      = 0xff
};

enum {
        DEPTH_NONE      = 0,
        DEPTH_16        = 1,
        DEPTH_24        = 2,
        DEPTH_32        = 3
};

static const struct usb_device_id snd_usb_id_table[] = {
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_RIGKONTROL2
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_RIGKONTROL3
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_KORECONTROLLER
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_KORECONTROLLER2
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_AK1
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_AUDIO8DJ
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_SESSIONIO
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_GUITARRIGMOBILE
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_AUDIO4DJ
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_AUDIO2DJ
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_TRAKTORKONTROLX1
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_TRAKTORKONTROLS4
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_TRAKTORAUDIO2
        },
        {
                .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                .idVendor = USB_VID_NATIVEINSTRUMENTS,
                .idProduct = USB_PID_MASCHINECONTROLLER
        },
        { /* terminator */ }
};

static void usb_ep1_command_reply_dispatch (struct urb* urb)
{
        int ret;
        struct device *dev = &urb->dev->dev;
        struct snd_usb_caiaqdev *cdev = urb->context;
        unsigned char *buf = urb->transfer_buffer;

        if (urb->status || !cdev) {
                dev_warn(dev, "received EP1 urb->status = %i\n", urb->status);
                return;
        }

        switch(buf[0]) {
        case EP1_CMD_GET_DEVICE_INFO:
                memcpy(&cdev->spec, buf+1, sizeof(struct caiaq_device_spec));
                cdev->spec.fw_version = le16_to_cpu(cdev->spec.fw_version);
                dev_dbg(dev, "device spec (firmware %d): audio: %d in, %d out, "
                        "MIDI: %d in, %d out, data alignment %d\n",
                        cdev->spec.fw_version,
                        cdev->spec.num_analog_audio_in,
                        cdev->spec.num_analog_audio_out,
                        cdev->spec.num_midi_in,
                        cdev->spec.num_midi_out,
                        cdev->spec.data_alignment);

                cdev->spec_received++;
                wake_up(&cdev->ep1_wait_queue);
                break;
        case EP1_CMD_AUDIO_PARAMS:
                cdev->audio_parm_answer = buf[1];
                wake_up(&cdev->ep1_wait_queue);
                break;
        case EP1_CMD_MIDI_READ:
                snd_usb_caiaq_midi_handle_input(cdev, buf[1], buf + 3, buf[2]);
                break;
        case EP1_CMD_READ_IO:
                if (cdev->chip.usb_id ==
                        USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ)) {
                        if (urb->actual_length > sizeof(cdev->control_state))
                                urb->actual_length = sizeof(cdev->control_state);
                        memcpy(cdev->control_state, buf + 1, urb->actual_length);
                        wake_up(&cdev->ep1_wait_queue);
                        break;
                }
#ifdef CONFIG_SND_USB_CAIAQ_INPUT
                fallthrough;
        case EP1_CMD_READ_ERP:
        case EP1_CMD_READ_ANALOG:
                snd_usb_caiaq_input_dispatch(cdev, buf, urb->actual_length);
#endif
                break;
        }

        cdev->ep1_in_urb.actual_length = 0;
        ret = usb_submit_urb(&cdev->ep1_in_urb, GFP_ATOMIC);
        if (ret < 0)
                dev_err(dev, "unable to submit urb. OOM!?\n");
}

int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *cdev,
                               unsigned char command,
                               const unsigned char *buffer,
                               int len)
{
        int actual_len;
        struct usb_device *usb_dev = cdev->chip.dev;

        if (!usb_dev)
                return -EIO;

        if (len > EP1_BUFSIZE - 1)
                len = EP1_BUFSIZE - 1;

        if (buffer && len > 0)
                memcpy(cdev->ep1_out_buf+1, buffer, len);

        cdev->ep1_out_buf[0] = command;
        return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1),
                            cdev->ep1_out_buf, len+1, &actual_len, 200);
}

int snd_usb_caiaq_send_command_bank(struct snd_usb_caiaqdev *cdev,
                                    unsigned char command,
                                    unsigned char bank,
                                    const unsigned char *buffer,
                                    int len)
{
        int actual_len;
        struct usb_device *usb_dev = cdev->chip.dev;

        if (!usb_dev)
                return -EIO;

        if (len > EP1_BUFSIZE - 2)
                len = EP1_BUFSIZE - 2;

        if (buffer && len > 0)
                memcpy(cdev->ep1_out_buf+2, buffer, len);

        cdev->ep1_out_buf[0] = command;
        cdev->ep1_out_buf[1] = bank;

        return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1),
                            cdev->ep1_out_buf, len+2, &actual_len, 200);
}

int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *cdev,
                                    int rate, int depth, int bpp)
{
        int ret;
        char tmp[5];
        struct device *dev = caiaqdev_to_dev(cdev);

        switch (rate) {
        case 44100:     tmp[0] = SAMPLERATE_44100;   break;
        case 48000:     tmp[0] = SAMPLERATE_48000;   break;
        case 88200:     tmp[0] = SAMPLERATE_88200;   break;
        case 96000:     tmp[0] = SAMPLERATE_96000;   break;
        case 192000:    tmp[0] = SAMPLERATE_192000;  break;
        default:        return -EINVAL;
        }

        switch (depth) {
        case 16:        tmp[1] = DEPTH_16;   break;
        case 24:        tmp[1] = DEPTH_24;   break;
        default:        return -EINVAL;
        }

        tmp[2] = bpp & 0xff;
        tmp[3] = bpp >> 8;
        tmp[4] = 1; /* packets per microframe */

        dev_dbg(dev, "setting audio params: %d Hz, %d bits, %d bpp\n",
                rate, depth, bpp);

        cdev->audio_parm_answer = -1;
        ret = snd_usb_caiaq_send_command(cdev, EP1_CMD_AUDIO_PARAMS,
                                         tmp, sizeof(tmp));

        if (ret)
                return ret;

        if (!wait_event_timeout(cdev->ep1_wait_queue,
                                cdev->audio_parm_answer >= 0, HZ))
                return -EPIPE;

        if (cdev->audio_parm_answer != 1)
                dev_dbg(dev, "unable to set the device's audio params\n");
        else
                cdev->bpp = bpp;

        return cdev->audio_parm_answer == 1 ? 0 : -EINVAL;
}

int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *cdev,
                               int digital, int analog, int erp)
{
        char tmp[3] = { digital, analog, erp };
        return snd_usb_caiaq_send_command(cdev, EP1_CMD_AUTO_MSG,
                                          tmp, sizeof(tmp));
}

static void setup_card(struct snd_usb_caiaqdev *cdev)
{
        int ret;
        char val[4];
        struct device *dev = caiaqdev_to_dev(cdev);

        /* device-specific startup specials */
        switch (cdev->chip.usb_id) {
        case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2):
                /* RigKontrol2 - display centered dash ('-') */
                val[0] = 0x00;
                val[1] = 0x00;
                val[2] = 0x01;
                snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, val, 3);
                break;
        case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3):
                /* RigKontrol2 - display two centered dashes ('--') */
                val[0] = 0x00;
                val[1] = 0x40;
                val[2] = 0x40;
                val[3] = 0x00;
                snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, val, 4);
                break;
        case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1):
                /* Audio Kontrol 1 - make USB-LED stop blinking */
                val[0] = 0x00;
                snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, val, 1);
                break;
        case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ):
                /* Audio 8 DJ - trigger read of current settings */
                cdev->control_state[0] = 0xff;
                snd_usb_caiaq_set_auto_msg(cdev, 1, 0, 0);
                snd_usb_caiaq_send_command(cdev, EP1_CMD_READ_IO, NULL, 0);

                if (!wait_event_timeout(cdev->ep1_wait_queue,
                                        cdev->control_state[0] != 0xff, HZ))
                        return;

                /* fix up some defaults */
                if ((cdev->control_state[1] != 2) ||
                    (cdev->control_state[2] != 3) ||
                    (cdev->control_state[4] != 2)) {
                        cdev->control_state[1] = 2;
                        cdev->control_state[2] = 3;
                        cdev->control_state[4] = 2;
                        snd_usb_caiaq_send_command(cdev,
                                EP1_CMD_WRITE_IO, cdev->control_state, 6);
                }

                break;
        }

        if (cdev->spec.num_analog_audio_out +
            cdev->spec.num_analog_audio_in +
            cdev->spec.num_digital_audio_out +
            cdev->spec.num_digital_audio_in > 0) {
                ret = snd_usb_caiaq_audio_init(cdev);
                if (ret < 0)
                        dev_err(dev, "Unable to set up audio system (ret=%d)\n", ret);
        }

        if (cdev->spec.num_midi_in +
            cdev->spec.num_midi_out > 0) {
                ret = snd_usb_caiaq_midi_init(cdev);
                if (ret < 0)
                        dev_err(dev, "Unable to set up MIDI system (ret=%d)\n", ret);
        }

#ifdef CONFIG_SND_USB_CAIAQ_INPUT
        ret = snd_usb_caiaq_input_init(cdev);
        if (ret < 0)
                dev_err(dev, "Unable to set up input system (ret=%d)\n", ret);
#endif

        /* finally, register the card and all its sub-instances */
        ret = snd_card_register(cdev->chip.card);
        if (ret < 0) {
                dev_err(dev, "snd_card_register() returned %d\n", ret);
                snd_card_free(cdev->chip.card);
        }

        ret = snd_usb_caiaq_control_init(cdev);
        if (ret < 0)
                dev_err(dev, "Unable to set up control system (ret=%d)\n", ret);
}

static void card_free(struct snd_card *card)
{
        struct snd_usb_caiaqdev *cdev = caiaqdev(card);

#ifdef CONFIG_SND_USB_CAIAQ_INPUT
        snd_usb_caiaq_input_free(cdev);
#endif
        snd_usb_caiaq_audio_free(cdev);

        usb_reset_device(cdev->chip.dev);
}

static int create_card(struct usb_device *usb_dev,
                       struct usb_interface *intf,
                       struct snd_card **cardp)
{
        int devnum;
        int err;
        struct snd_card *card;
        struct snd_usb_caiaqdev *cdev;

        for (devnum = 0; devnum < SNDRV_CARDS; devnum++)
                if (enable[devnum])
                        break;

        if (devnum >= SNDRV_CARDS)
                return -ENODEV;

        err = snd_card_new(&intf->dev,
                           index[devnum], id[devnum], THIS_MODULE,
                           sizeof(struct snd_usb_caiaqdev), &card);
        if (err < 0)
                return err;

        cdev = caiaqdev(card);
        cdev->chip.dev = usb_dev;
        cdev->chip.card = card;
        cdev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor),
                                   le16_to_cpu(usb_dev->descriptor.idProduct));
        spin_lock_init(&cdev->spinlock);

        *cardp = card;
        return 0;
}

static int init_card(struct snd_usb_caiaqdev *cdev)
{
        char *c, usbpath[32];
        struct usb_device *usb_dev = cdev->chip.dev;
        struct snd_card *card = cdev->chip.card;
        struct device *dev = caiaqdev_to_dev(cdev);
        int err, len;

        if (usb_set_interface(usb_dev, 0, 1) != 0) {
                dev_err(dev, "can't set alt interface.\n");
                return -EIO;
        }

        usb_init_urb(&cdev->ep1_in_urb);
        usb_init_urb(&cdev->midi_out_urb);

        usb_fill_bulk_urb(&cdev->ep1_in_urb, usb_dev,
                          usb_rcvbulkpipe(usb_dev, 0x1),
                          cdev->ep1_in_buf, EP1_BUFSIZE,
                          usb_ep1_command_reply_dispatch, cdev);

        usb_fill_bulk_urb(&cdev->midi_out_urb, usb_dev,
                          usb_sndbulkpipe(usb_dev, 0x1),
                          cdev->midi_out_buf, EP1_BUFSIZE,
                          snd_usb_caiaq_midi_output_done, cdev);

        /* sanity checks of EPs before actually submitting */
        if (usb_urb_ep_type_check(&cdev->ep1_in_urb) ||
            usb_urb_ep_type_check(&cdev->midi_out_urb)) {
                dev_err(dev, "invalid EPs\n");
                return -EINVAL;
        }

        init_waitqueue_head(&cdev->ep1_wait_queue);
        init_waitqueue_head(&cdev->prepare_wait_queue);

        if (usb_submit_urb(&cdev->ep1_in_urb, GFP_KERNEL) != 0)
                return -EIO;

        err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
        if (err)
                goto err_kill_urb;

        if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received,
                                HZ)) {
                err = -ENODEV;
                goto err_kill_urb;
        }

        usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
                   cdev->vendor_name, CAIAQ_USB_STR_LEN);

        usb_string(usb_dev, usb_dev->descriptor.iProduct,
                   cdev->product_name, CAIAQ_USB_STR_LEN);

        strscpy(card->driver, MODNAME, sizeof(card->driver));
        strscpy(card->shortname, cdev->product_name, sizeof(card->shortname));
        strscpy(card->mixername, cdev->product_name, sizeof(card->mixername));

        /* if the id was not passed as module option, fill it with a shortened
         * version of the product string which does not contain any
         * whitespaces */

        if (*card->id == '\0') {
                char id[sizeof(card->id)];

                memset(id, 0, sizeof(id));

                for (c = card->shortname, len = 0;
                        *c && len < sizeof(card->id); c++)
                        if (*c != ' ')
                                id[len++] = *c;

                snd_card_set_id(card, id);
        }

        usb_make_path(usb_dev, usbpath, sizeof(usbpath));
        scnprintf(card->longname, sizeof(card->longname), "%s %s (%s)",
                  cdev->vendor_name, cdev->product_name, usbpath);

        setup_card(cdev);

        card->private_free = card_free;
        return 0;

 err_kill_urb:
        usb_kill_urb(&cdev->ep1_in_urb);
        return err;
}

static int snd_probe(struct usb_interface *intf,
                     const struct usb_device_id *id)
{
        int ret;
        struct snd_card *card = NULL;
        struct usb_device *usb_dev = interface_to_usbdev(intf);

        ret = create_card(usb_dev, intf, &card);

        if (ret < 0)
                return ret;

        usb_set_intfdata(intf, card);
        ret = init_card(caiaqdev(card));
        if (ret < 0) {
                dev_err(&usb_dev->dev, "unable to init card! (ret=%d)\n", ret);
                snd_card_free(card);
                return ret;
        }

        return 0;
}

static void snd_disconnect(struct usb_interface *intf)
{
        struct snd_card *card = usb_get_intfdata(intf);
        struct device *dev = intf->usb_dev;
        struct snd_usb_caiaqdev *cdev;

        if (!card)
                return;

        cdev = caiaqdev(card);
        dev_dbg(dev, "%s(%p)\n", __func__, intf);

        snd_card_disconnect(card);

#ifdef CONFIG_SND_USB_CAIAQ_INPUT
        snd_usb_caiaq_input_disconnect(cdev);
#endif
        snd_usb_caiaq_audio_disconnect(cdev);

        usb_kill_urb(&cdev->ep1_in_urb);
        usb_kill_urb(&cdev->midi_out_urb);

        snd_card_free_when_closed(card);
}


MODULE_DEVICE_TABLE(usb, snd_usb_id_table);
static struct usb_driver snd_usb_driver = {
        .name           = MODNAME,
        .probe          = snd_probe,
        .disconnect     = snd_disconnect,
        .id_table       = snd_usb_id_table,
};

module_usb_driver(snd_usb_driver);
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match connection tracking information. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_state.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module");
MODULE_ALIAS("ipt_state");
MODULE_ALIAS("ip6t_state");

static bool
state_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_state_info *sinfo = par->matchinfo;
        enum ip_conntrack_info ctinfo;
        unsigned int statebit;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

        if (ct)
                statebit = XT_STATE_BIT(ctinfo);
        else if (ctinfo == IP_CT_UNTRACKED)
                statebit = XT_STATE_UNTRACKED;
        else
                statebit = XT_STATE_INVALID;

        return (sinfo->statemask & statebit);
}

static int state_mt_check(const struct xt_mtchk_param *par)
{
        int ret;

        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
                pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
                                    par->family);
        return ret;
}

static void state_mt_destroy(const struct xt_mtdtor_param *par)
{
        nf_ct_netns_put(par->net, par->family);
}

static struct xt_match state_mt_reg __read_mostly = {
        .name       = "state",
        .family     = NFPROTO_UNSPEC,
        .checkentry = state_mt_check,
        .match      = state_mt,
        .destroy    = state_mt_destroy,
        .matchsize  = sizeof(struct xt_state_info),
        .me         = THIS_MODULE,
};

static int __init state_mt_init(void)
{
        return xt_register_match(&state_mt_reg);
}

static void __exit state_mt_exit(void)
{
        xt_unregister_match(&state_mt_reg);
}

module_init(state_mt_init);
module_exit(state_mt_exit);
8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 
9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * BSS client mode implementation
 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2024 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/fips.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <linux/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "led.h"
#include "fils_aead.h"

#include <kunit/static_stub.h>

#define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
#define IEEE80211_AUTH_TIMEOUT_LONG	(HZ / 2)
#define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
#define IEEE80211_AUTH_TIMEOUT_SAE	(HZ * 2)
#define IEEE80211_AUTH_MAX_TRIES	3
#define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
#define IEEE80211_AUTH_WAIT_SAE_RETRY	(HZ * 2)
#define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
#define IEEE80211_ASSOC_TIMEOUT_LONG	(HZ / 2)
#define IEEE80211_ASSOC_TIMEOUT_SHORT	(HZ / 10)
#define IEEE80211_ASSOC_MAX_TRIES	3

#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS	msecs_to_jiffies(100)
#define IEEE80211_ADV_TTLM_ST_UNDERFLOW		0xff00

#define IEEE80211_NEG_TTLM_REQ_TIMEOUT		(HZ / 5)

static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
		 "Maximum nullfunc tx tries before disconnecting (reason 4).");

static int max_probe_tries = 5;
module_param(max_probe_tries, int, 0644);
MODULE_PARM_DESC(max_probe_tries,
		 "Maximum probe tries before disconnecting (reason 4).");

/*
 * Beacon loss timeout is calculated as N frames times the
 * advertised beacon interval. This may need to be somewhat
 * higher than what hardware might detect to account for
 * delays in the host processing frames. But since we also
 * probe on beacon miss before declaring the connection lost
 * default to what we want.
*/ static int beacon_loss_count = 7; module_param(beacon_loss_count, int, 0644); MODULE_PARM_DESC(beacon_loss_count, "Number of beacon intervals before we decide beacon was lost."); /* * Time the connection can be idle before we probe * it to see if we can still talk to the AP. */ #define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ) /* * Time we wait for a probe response after sending * a probe request because of beacon loss or for * checking the connection still works. */ static int probe_wait_ms = 500; module_param(probe_wait_ms, int, 0644); MODULE_PARM_DESC(probe_wait_ms, "Maximum time(ms) to wait for probe response" " before disconnecting (reason 4)."); /* * How many Beacon frames need to have been used in average signal strength * before starting to indicate signal change events. */ #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was * asked for before, or if it's not pending right now. This * function ensures that. Note that it then is required to * run this function for all timeouts after the first one * has happened -- the work that runs from this timer will * do that. */ static void run_again(struct ieee80211_sub_if_data *sdata, unsigned long timeout) { lockdep_assert_wiphy(sdata->local->hw.wiphy); if (!timer_pending(&sdata->u.mgd.timer) || time_before(timeout, sdata->u.mgd.timer.expires)) mod_timer(&sdata->u.mgd.timer, timeout); } void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) return; if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) return; mod_timer(&sdata->u.mgd.bcn_mon_timer, round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout)); } void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (unlikely(!ifmgd->associated)) return; if (ifmgd->probe_send_count) ifmgd->probe_send_count = 0; if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) return; mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); } static int ecw2cw(int ecw) { return (1 << ecw) - 1; } static enum ieee80211_conn_mode ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, u32 vht_cap_info, const struct ieee802_11_elems *elems, bool ignore_ht_channel_mismatch, const struct ieee80211_conn_settings *conn, struct cfg80211_chan_def *chandef) { const struct ieee80211_ht_operation *ht_oper = elems->ht_operation; const struct ieee80211_vht_operation *vht_oper = elems->vht_operation; const struct ieee80211_he_operation *he_oper = elems->he_operation; const struct ieee80211_eht_operation *eht_oper = elems->eht_operation; struct ieee80211_supported_band *sband = sdata->local->hw.wiphy->bands[channel->band]; struct cfg80211_chan_def vht_chandef; bool no_vht = false; u32 ht_cfreq; *chandef = (struct cfg80211_chan_def) { .chan = channel, .width = NL80211_CHAN_WIDTH_20_NOHT, .center_freq1 = channel->center_freq, .freq1_offset = channel->freq_offset, }; /* get special S1G case out of the way */ if (sband->band == NL80211_BAND_S1GHZ) { if (!ieee80211_chandef_s1g_oper(elems->s1g_oper, chandef)) { sdata_info(sdata, "Missing S1G Operation Element? 
Trying operating == primary\n"); chandef->width = ieee80211_s1g_channel_width(channel); } return IEEE80211_CONN_MODE_S1G; } /* get special 6 GHz case out of the way */ if (sband->band == NL80211_BAND_6GHZ) { enum ieee80211_conn_mode mode = IEEE80211_CONN_MODE_EHT; /* this is an error */ if (conn->mode < IEEE80211_CONN_MODE_HE) return IEEE80211_CONN_MODE_LEGACY; if (!elems->he_6ghz_capa || !elems->he_cap) { sdata_info(sdata, "HE 6 GHz AP is missing HE/HE 6 GHz band capability\n"); return IEEE80211_CONN_MODE_LEGACY; } if (!eht_oper || !elems->eht_cap) { eht_oper = NULL; mode = IEEE80211_CONN_MODE_HE; } if (!ieee80211_chandef_he_6ghz_oper(sdata->local, he_oper, eht_oper, chandef)) { sdata_info(sdata, "bad HE/EHT 6 GHz operation\n"); return IEEE80211_CONN_MODE_LEGACY; } return mode; } /* now we have the progression HT, VHT, ... */ if (conn->mode < IEEE80211_CONN_MODE_HT) return IEEE80211_CONN_MODE_LEGACY; if (!ht_oper || !elems->ht_cap_elem) return IEEE80211_CONN_MODE_LEGACY; chandef->width = NL80211_CHAN_WIDTH_20; ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, channel->band); /* check that channel matches the right operating channel */ if (!ignore_ht_channel_mismatch && channel->center_freq != ht_cfreq) { /* * It's possible that some APs are confused here; * Netgear WNDR3700 sometimes reports 4 higher than * the actual channel in association responses, but * since we look at probe response/beacon data here * it should be OK. */ sdata_info(sdata, "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", channel->center_freq, ht_cfreq, ht_oper->primary_chan, channel->band); return IEEE80211_CONN_MODE_LEGACY; } ieee80211_chandef_ht_oper(ht_oper, chandef); if (conn->mode < IEEE80211_CONN_MODE_VHT) return IEEE80211_CONN_MODE_HT; vht_chandef = *chandef; /* * having he_cap/he_oper parsed out implies we're at * least operating as HE STA */ if (elems->he_cap && he_oper && he_oper->he_oper_params & cpu_to_le32(IEEE80211_HE_OPERATION_VHT_OPER_INFO)) { struct ieee80211_vht_operation he_oper_vht_cap; /* * Set only first 3 bytes (other 2 aren't used in * ieee80211_chandef_vht_oper() anyway) */ memcpy(&he_oper_vht_cap, he_oper->optional, 3); he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0); if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info, &he_oper_vht_cap, ht_oper, &vht_chandef)) { sdata_info(sdata, "HE AP VHT information is invalid, disabling HE\n"); /* this will cause us to re-parse as VHT STA */ return IEEE80211_CONN_MODE_VHT; } } else if (!vht_oper || !elems->vht_cap_elem) { if (sband->band == NL80211_BAND_5GHZ) { sdata_info(sdata, "VHT information is missing, disabling VHT\n"); return IEEE80211_CONN_MODE_HT; } no_vht = true; } else if (sband->band == NL80211_BAND_2GHZ) { no_vht = true; } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info, vht_oper, ht_oper, &vht_chandef)) { sdata_info(sdata, "AP VHT information is invalid, disabling VHT\n"); return IEEE80211_CONN_MODE_HT; } if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { sdata_info(sdata, "AP VHT information doesn't match HT, disabling VHT\n"); return IEEE80211_CONN_MODE_HT; } *chandef = vht_chandef; /* stick to current max mode if we or the AP don't have HE */ if (conn->mode < IEEE80211_CONN_MODE_HE || !elems->he_operation || !elems->he_cap) { if (no_vht) return IEEE80211_CONN_MODE_HT; return IEEE80211_CONN_MODE_VHT; } /* stick to HE if we or the AP don't have EHT */ if (conn->mode < IEEE80211_CONN_MODE_EHT || !eht_oper || !elems->eht_cap) return 
IEEE80211_CONN_MODE_HE; /* * handle the case that the EHT operation indicates that it holds EHT * operation information (in case that the channel width differs from * the channel width reported in HT/VHT/HE). */ if (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) { struct cfg80211_chan_def eht_chandef = *chandef; ieee80211_chandef_eht_oper((const void *)eht_oper->optional, &eht_chandef); eht_chandef.punctured = ieee80211_eht_oper_dis_subchan_bitmap(eht_oper); if (!cfg80211_chandef_valid(&eht_chandef)) { sdata_info(sdata, "AP EHT information is invalid, disabling EHT\n"); return IEEE80211_CONN_MODE_HE; } if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) { sdata_info(sdata, "AP EHT information doesn't match HT/VHT/HE, disabling EHT\n"); return IEEE80211_CONN_MODE_HE; } *chandef = eht_chandef; } return IEEE80211_CONN_MODE_EHT; } static bool ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata, int link_id, const struct ieee80211_he_cap_elem *he_cap, const struct ieee80211_he_operation *he_op) { struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp; u16 mcs_80_map_tx, mcs_80_map_rx; u16 ap_min_req_set; int nss; if (!he_cap) return false; /* mcs_nss is right after he_cap info */ he_mcs_nss_supp = (void *)(he_cap + 1); mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80); mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80); /* P802.11-REVme/D0.3 * 27.1.1 Introduction to the HE PHY * ... * An HE STA shall support the following features: * ... * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all * supported channel widths for HE SU PPDUs */ if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED || (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) { link_id_info(sdata, link_id, "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n", mcs_80_map_tx, mcs_80_map_rx); return false; } if (!he_op) return true; ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); /* * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all * zeroes, which is nonsense, and completely inconsistent with itself * (it doesn't have 8 streams). Accept the settings in this case anyway. 
*/ if (!ap_min_req_set) return true; /* make sure the AP is consistent with itself * * P802.11-REVme/D0.3 * 26.17.1 Basic HE BSS operation * * A STA that is operating in an HE BSS shall be able to receive and * transmit at each of the <HE-MCS, NSS> tuple values indicated by the * Basic HE-MCS And NSS Set field of the HE Operation parameter of the * MLME-START.request primitive and shall be able to receive at each of * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and * NSS Set field in the HE Capabilities parameter of the MLMESTART.request * primitive */ for (nss = 8; nss > 0; nss--) { u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; u8 ap_rx_val; u8 ap_tx_val; if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED) continue; ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3; ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3; if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) { link_id_info(sdata, link_id, "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n", nss, ap_rx_val, ap_tx_val, ap_op_val); return false; } } return true; } static bool ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_he_operation *he_op) { const struct ieee80211_sta_he_cap *sta_he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); u16 ap_min_req_set; int i; if (!sta_he_cap || !he_op) return false; ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); /* * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all * zeroes, which is nonsense, and completely inconsistent with itself * (it doesn't have 8 streams). Accept the settings in this case anyway. */ if (!ap_min_req_set) return true; /* Need to go over for 80MHz, 160MHz and for 80+80 */ for (i = 0; i < 3; i++) { const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp = &sta_he_cap->he_mcs_nss_supp; u16 sta_mcs_map_rx = le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]); u16 sta_mcs_map_tx = le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]); u8 nss; bool verified = true; /* * For each band there is a maximum of 8 spatial streams * possible. Each of the sta_mcs_map_* is a 16-bit struct built * of 2 bits per NSS (1-8), with the values defined in enum * ieee80211_he_mcs_support. Need to make sure STA TX and RX * capabilities aren't less than the AP's minimum requirements * for this HE BSS per SS. * It is enough to find one such band that meets the reqs. */ for (nss = 8; nss > 0; nss--) { u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3; u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3; u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED) continue; /* * Make sure the HE AP doesn't require MCSs that aren't * supported by the client as required by spec * * P802.11-REVme/D0.3 * 26.17.1 Basic HE BSS operation * * An HE STA shall not attempt to join * (MLME-JOIN.request primitive) * a BSS, unless it supports (i.e., is able to both transmit and * receive using) all of the <HE-MCS, NSS> tuples in the basic * HE-MCS and NSS set. 
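/*
 * Editor's illustrative sketch (not part of the original file): the
 * verification loops above and below treat an HE MCS map as a 16-bit
 * field with two bits per spatial stream, NSS 1 in the lowest bits.
 * The two-bit values are 0 (MCS 0-7), 1 (MCS 0-9), 2 (MCS 0-11) and
 * 3 (IEEE80211_HE_MCS_NOT_SUPPORTED).  The helper name below is
 * hypothetical.
 */
static inline u8 example_he_mcs_for_nss(u16 mcs_map, u8 nss)
{
	/* nss is 1-based, so NSS n lives in bits 2*(n-1) and 2*(n-1)+1 */
	return (mcs_map >> (2 * (nss - 1))) & 0x3;
}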
*/ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) { verified = false; break; } } if (verified) return true; } /* If here, STA doesn't meet AP's HE min requirements */ return false; } static u8 ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap, const struct ieee80211_sta_eht_cap *sta_eht_cap, unsigned int idx, int bw) { u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0]; u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0]; /* handle us being a 20 MHz-only EHT STA - with four values * for MCS 0-7, 8-9, 10-11, 12-13. */ if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx]; /* the others have MCS 0-9 together, rather than separately from 0-7 */ if (idx > 0) idx--; switch (bw) { case 0: return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx]; case 1: if (!(he_phy_cap0 & (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G))) return 0xff; /* pass check */ return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx]; case 2: if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)) return 0xff; /* pass check */ return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx]; } WARN_ON(1); return 0; } static bool ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_eht_operation *eht_op) { const struct ieee80211_sta_he_cap *sta_he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); const struct ieee80211_sta_eht_cap *sta_eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req; unsigned int i; if (!sta_he_cap || !sta_eht_cap || !eht_op) return false; req = &eht_op->basic_mcs_nss; for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) { u8 req_rx_nss, req_tx_nss; unsigned int bw; req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i], IEEE80211_EHT_MCS_NSS_RX); req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i], IEEE80211_EHT_MCS_NSS_TX); for (bw = 0; bw < 3; bw++) { u8 have, have_rx_nss, have_tx_nss; have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap, sta_eht_cap, i, bw); have_rx_nss = u8_get_bits(have, IEEE80211_EHT_MCS_NSS_RX); have_tx_nss = u8_get_bits(have, IEEE80211_EHT_MCS_NSS_TX); if (req_rx_nss > have_rx_nss || req_tx_nss > have_tx_nss) return false; } } return true; } static void ieee80211_get_rates(struct ieee80211_supported_band *sband, const u8 *supp_rates, unsigned int supp_rates_len, const u8 *ext_supp_rates, unsigned int ext_supp_rates_len, u32 *rates, u32 *basic_rates, unsigned long *unknown_rates_selectors, bool *have_higher_than_11mbit, int *min_rate, int *min_rate_index) { int i, j; for (i = 0; i < supp_rates_len + ext_supp_rates_len; i++) { u8 supp_rate = i < supp_rates_len ? supp_rates[i] : ext_supp_rates[i - supp_rates_len]; int rate = supp_rate & 0x7f; bool is_basic = !!(supp_rate & 0x80); if ((rate * 5) > 110 && have_higher_than_11mbit) *have_higher_than_11mbit = true; /* * Skip membership selectors since they're not rates. * * Note: Even though the membership selector and the basic * rate flag share the same bit, they are not exactly * the same. 
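/*
 * Editor's illustrative sketch (not part of the original file): each
 * octet of a (Extended) Supported Rates element, as parsed above,
 * carries a rate in units of 500 kbps in its low 7 bits, with bit 7
 * marking it as basic.  Octets with bit 7 set and a value at or above
 * BSS_MEMBERSHIP_SELECTOR_MIN are BSS membership selectors rather
 * than rates.  Names below are hypothetical.
 */
struct example_rate_entry {
	bool basic;		/* bit 7 was set */
	unsigned int kbps;	/* rate in kbit/s, 0 for a selector */
	u8 selector;		/* selector value, 0 for a plain rate */
};

static inline struct example_rate_entry example_parse_rate(u8 octet)
{
	struct example_rate_entry e = {
		.basic = octet & 0x80,
	};
	u8 val = octet & 0x7f;

	if (e.basic && val >= BSS_MEMBERSHIP_SELECTOR_MIN)
		e.selector = val;
	else
		e.kbps = val * 500;

	return e;
}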
*/ if (is_basic && rate >= BSS_MEMBERSHIP_SELECTOR_MIN) { if (unknown_rates_selectors) set_bit(rate, unknown_rates_selectors); continue; } for (j = 0; j < sband->n_bitrates; j++) { struct ieee80211_rate *br; int brate; br = &sband->bitrates[j]; brate = DIV_ROUND_UP(br->bitrate, 5); if (brate == rate) { if (rates) *rates |= BIT(j); if (is_basic && basic_rates) *basic_rates |= BIT(j); if (min_rate && (rate * 5) < *min_rate) { *min_rate = rate * 5; if (min_rate_index) *min_rate_index = j; } break; } } /* Handle an unknown entry as if it is an unknown selector */ if (is_basic && unknown_rates_selectors && j == sband->n_bitrates) set_bit(rate, unknown_rates_selectors); } } static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, u32 prohibited_flags) { if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, prohibited_flags)) return false; if (chandef->punctured && ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING)) return false; if (chandef->punctured && chandef->chan->band == NL80211_BAND_5GHZ && ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING_5GHZ)) return false; return true; } static int ieee80211_chandef_num_subchans(const struct cfg80211_chan_def *c) { if (c->width == NL80211_CHAN_WIDTH_80P80) return 4 + 4; return nl80211_chan_width_to_mhz(c->width) / 20; } static int ieee80211_chandef_num_widths(const struct cfg80211_chan_def *c) { switch (c->width) { case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: return 1; case NL80211_CHAN_WIDTH_40: return 2; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_80: return 3; case NL80211_CHAN_WIDTH_160: return 4; case NL80211_CHAN_WIDTH_320: return 5; default: WARN_ON(1); return 0; } } VISIBLE_IF_MAC80211_KUNIT int ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap, u8 n_partial_subchans) { int n = ieee80211_chandef_num_subchans(ap); struct cfg80211_chan_def tmp = *ap; int offset = 0; /* * Given a chandef (in this context, it's the AP's) and a number * of subchannels that we want to look at ('n_partial_subchans'), * calculate the offset in number of subchannels between the full * and the subset with the desired width. 
*/ /* same number of subchannels means no offset, obviously */ if (n == n_partial_subchans) return 0; /* don't WARN - misconfigured APs could cause this if their N > width */ if (n < n_partial_subchans) return 0; while (ieee80211_chandef_num_subchans(&tmp) > n_partial_subchans) { u32 prev = tmp.center_freq1; ieee80211_chandef_downgrade(&tmp, NULL); /* * if center_freq moved up, half the original channels * are gone now but were below, so increase offset */ if (prev < tmp.center_freq1) offset += ieee80211_chandef_num_subchans(&tmp); } /* * 80+80 with secondary 80 below primary - four subchannels for it * (we cannot downgrade *to* 80+80, so no need to consider 'tmp') */ if (ap->width == NL80211_CHAN_WIDTH_80P80 && ap->center_freq2 < ap->center_freq1) offset += 4; return offset; } EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_calc_chandef_subchan_offset); VISIBLE_IF_MAC80211_KUNIT void ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd, const struct cfg80211_chan_def *ap, const struct cfg80211_chan_def *used) { u8 needed = ieee80211_chandef_num_subchans(used); u8 have = ieee80211_chandef_num_subchans(ap); u8 tmp[IEEE80211_TPE_PSD_ENTRIES_320MHZ]; u8 offset; if (!psd->valid) return; /* if N is zero, all defaults were used, no point in rearranging */ if (!psd->n) goto out; BUILD_BUG_ON(sizeof(tmp) != sizeof(psd->power)); /* * This assumes that 'N' is consistent with the HE channel, as * it should be (otherwise the AP is broken). * * In psd->power we have values in the order 0..N, 0..K, where * N+K should cover the entire channel per 'ap', but even if it * doesn't then we've pre-filled 'unlimited' as defaults. * * But this is all the wrong order, we want to have them in the * order of the 'used' channel. * * So for example, we could have a 320 MHz EHT AP, which has the * HE channel as 80 MHz (e.g. due to puncturing, which doesn't * seem to be considered for the TPE), as follows: * * EHT 320: | | | | | | | | | | | | | | | | | * HE 80: | | | | | * used 160: | | | | | | | | | * * N entries: |--|--|--|--| * K entries: |--|--|--|--|--|--|--|--| |--|--|--|--| * power idx: 4 5 6 7 8 9 10 11 0 1 2 3 12 13 14 15 * full chan: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 * used chan: 0 1 2 3 4 5 6 7 * * The idx in the power array ('power idx') is like this since it * comes directly from the element's N and K entries in their * element order, and those are this way for HE compatibility. * * Rearrange them as desired here, first by putting them into the * 'full chan' order, and then selecting the necessary subset for * the 'used chan'. 
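/*
 * Editor's illustrative sketch (not part of the original file): the
 * reordering described above, reduced to plain arrays.  'n' PSD values
 * describe the AP's HE channel, which starts at subchannel 'offset' of
 * the full channel; the remaining values cover the rest of the channel
 * in order.  Names are hypothetical; the real code operates on
 * struct ieee80211_parsed_tpe_psd.
 */
static void example_reorder_psd(const u8 *in, u8 *out, int total,
				int n, int offset)
{
	for (int i = 0; i < total; i++) {
		if (i < offset)
			out[i] = in[i + n];	 /* K entries below the HE channel */
		else if (i < offset + n)
			out[i] = in[i - offset]; /* the N entries themselves */
		else
			out[i] = in[i];		 /* K entries above, already in place */
	}
}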
*/ /* first reorder according to AP channel */ offset = ieee80211_calc_chandef_subchan_offset(ap, psd->n); for (int i = 0; i < have; i++) { if (i < offset) tmp[i] = psd->power[i + psd->n]; else if (i < offset + psd->n) tmp[i] = psd->power[i - offset]; else tmp[i] = psd->power[i]; } /* * and then select the subset for the used channel * (set everything to defaults first in case a driver is confused) */ memset(psd->power, IEEE80211_TPE_PSD_NO_LIMIT, sizeof(psd->power)); offset = ieee80211_calc_chandef_subchan_offset(ap, needed); for (int i = 0; i < needed; i++) psd->power[i] = tmp[offset + i]; out: /* limit, but don't lie if there are defaults in the data */ if (needed < psd->count) psd->count = needed; } EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_rearrange_tpe_psd); static void ieee80211_rearrange_tpe(struct ieee80211_parsed_tpe *tpe, const struct cfg80211_chan_def *ap, const struct cfg80211_chan_def *used) { /* ignore this completely for narrow/invalid channels */ if (!ieee80211_chandef_num_subchans(ap) || !ieee80211_chandef_num_subchans(used)) { ieee80211_clear_tpe(tpe); return; } for (int i = 0; i < 2; i++) { int needed_pwr_count; ieee80211_rearrange_tpe_psd(&tpe->psd_local[i], ap, used); ieee80211_rearrange_tpe_psd(&tpe->psd_reg_client[i], ap, used); /* limit this to the widths we actually need */ needed_pwr_count = ieee80211_chandef_num_widths(used); if (needed_pwr_count < tpe->max_local[i].count) tpe->max_local[i].count = needed_pwr_count; if (needed_pwr_count < tpe->max_reg_client[i].count) tpe->max_reg_client[i].count = needed_pwr_count; } } /* * The AP part of the channel request is used to distinguish settings * to the device used for wider bandwidth OFDMA. This is used in the * channel context code to assign two channel contexts even if they're * both for the same channel, if the AP bandwidths are incompatible. * If not EHT (or driver override) then ap.chan == NULL indicates that * there's no wider BW OFDMA used. 
*/ static void ieee80211_set_chanreq_ap(struct ieee80211_sub_if_data *sdata, struct ieee80211_chan_req *chanreq, struct ieee80211_conn_settings *conn, struct cfg80211_chan_def *ap_chandef) { chanreq->ap.chan = NULL; if (conn->mode < IEEE80211_CONN_MODE_EHT) return; if (sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW) return; chanreq->ap = *ap_chandef; } static struct ieee802_11_elems * ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata, struct ieee80211_conn_settings *conn, struct cfg80211_bss *cbss, int link_id, struct ieee80211_chan_req *chanreq, struct cfg80211_chan_def *ap_chandef, unsigned long *userspace_selectors) { const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies); struct ieee80211_bss *bss = (void *)cbss->priv; struct ieee80211_channel *channel = cbss->channel; struct ieee80211_elems_parse_params parse_params = { .link_id = -1, .from_ap = true, .start = ies->data, .len = ies->len, }; struct ieee802_11_elems *elems; struct ieee80211_supported_band *sband; enum ieee80211_conn_mode ap_mode; unsigned long unknown_rates_selectors[BITS_TO_LONGS(128)] = {}; unsigned long sta_selectors[BITS_TO_LONGS(128)] = {}; int ret; again: parse_params.mode = conn->mode; elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return ERR_PTR(-ENOMEM); ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info, elems, false, conn, ap_chandef); /* this should be impossible since parsing depends on our mode */ if (WARN_ON(ap_mode > conn->mode)) { ret = -EINVAL; goto free; } if (conn->mode != ap_mode) { conn->mode = ap_mode; kfree(elems); goto again; } mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n", cbss->bssid, ieee80211_conn_mode_str(ap_mode)); sband = sdata->local->hw.wiphy->bands[channel->band]; ieee80211_get_rates(sband, elems->supp_rates, elems->supp_rates_len, elems->ext_supp_rates, elems->ext_supp_rates_len, NULL, NULL, unknown_rates_selectors, NULL, NULL, NULL); switch (channel->band) { case NL80211_BAND_S1GHZ: if (WARN_ON(ap_mode != IEEE80211_CONN_MODE_S1G)) { ret = -EINVAL; goto free; } return elems; case NL80211_BAND_6GHZ: if (ap_mode < IEEE80211_CONN_MODE_HE) { link_id_info(sdata, link_id, "Rejecting non-HE 6/7 GHz connection"); ret = -EINVAL; goto free; } break; default: if (WARN_ON(ap_mode == IEEE80211_CONN_MODE_S1G)) { ret = -EINVAL; goto free; } } switch (ap_mode) { case IEEE80211_CONN_MODE_S1G: WARN_ON(1); ret = -EINVAL; goto free; case IEEE80211_CONN_MODE_LEGACY: conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; break; case IEEE80211_CONN_MODE_HT: conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_40); break; case IEEE80211_CONN_MODE_VHT: case IEEE80211_CONN_MODE_HE: conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_160); break; case IEEE80211_CONN_MODE_EHT: conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_320); break; } chanreq->oper = *ap_chandef; bitmap_copy(sta_selectors, userspace_selectors, 128); if (conn->mode >= IEEE80211_CONN_MODE_HT) set_bit(BSS_MEMBERSHIP_SELECTOR_HT_PHY, sta_selectors); if (conn->mode >= IEEE80211_CONN_MODE_VHT) set_bit(BSS_MEMBERSHIP_SELECTOR_VHT_PHY, sta_selectors); if (conn->mode >= IEEE80211_CONN_MODE_HE) set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, sta_selectors); if (conn->mode >= IEEE80211_CONN_MODE_EHT) set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, sta_selectors); /* * We do not support EPD or GLK so never add them. 
* SAE_H2E is handled through userspace_selectors. */ /* Check if we support all required features */ if (!bitmap_subset(unknown_rates_selectors, sta_selectors, 128)) { link_id_info(sdata, link_id, "required basic rate or BSS membership selectors not supported or disabled, rejecting connection\n"); ret = -EINVAL; goto free; } ieee80211_set_chanreq_ap(sdata, chanreq, conn, ap_chandef); while (!ieee80211_chandef_usable(sdata, &chanreq->oper, IEEE80211_CHAN_DISABLED)) { if (WARN_ON(chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT)) { ret = -EINVAL; goto free; } ieee80211_chanreq_downgrade(chanreq, conn); } if (conn->mode >= IEEE80211_CONN_MODE_HE && !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper, IEEE80211_CHAN_NO_HE)) { conn->mode = IEEE80211_CONN_MODE_VHT; conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_160); } if (conn->mode >= IEEE80211_CONN_MODE_EHT && !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper, IEEE80211_CHAN_NO_EHT)) { conn->mode = IEEE80211_CONN_MODE_HE; conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_160); } if (chanreq->oper.width != ap_chandef->width || ap_mode != conn->mode) link_id_info(sdata, link_id, "regulatory prevented using AP config, downgraded\n"); if (conn->mode >= IEEE80211_CONN_MODE_HE && (!ieee80211_verify_peer_he_mcs_support(sdata, link_id, (void *)elems->he_cap, elems->he_operation) || !ieee80211_verify_sta_he_mcs_support(sdata, sband, elems->he_operation))) { conn->mode = IEEE80211_CONN_MODE_VHT; link_id_info(sdata, link_id, "required MCSes not supported, disabling HE\n"); } if (conn->mode >= IEEE80211_CONN_MODE_EHT && !ieee80211_verify_sta_eht_mcs_support(sdata, sband, elems->eht_operation)) { conn->mode = IEEE80211_CONN_MODE_HE; conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_160); link_id_info(sdata, link_id, "required MCSes not supported, disabling EHT\n"); } /* the mode can only decrease, so this must terminate */ if (ap_mode != conn->mode) { kfree(elems); goto again; } mlme_link_id_dbg(sdata, link_id, "connecting with %s mode, max bandwidth %d MHz\n", ieee80211_conn_mode_str(conn->mode), 20 * (1 << conn->bw_limit)); if (WARN_ON_ONCE(!cfg80211_chandef_valid(&chanreq->oper))) { ret = -EINVAL; goto free; } return elems; free: kfree(elems); return ERR_PTR(ret); } static int ieee80211_config_bw(struct ieee80211_link_data *link, struct ieee802_11_elems *elems, bool update, u64 *changed, const char *frame) { struct ieee80211_channel *channel = link->conf->chanreq.oper.chan; struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_chan_req chanreq = {}; struct cfg80211_chan_def ap_chandef; enum ieee80211_conn_mode ap_mode; u32 vht_cap_info = 0; u16 ht_opmode; int ret; /* don't track any bandwidth changes in legacy/S1G modes */ if (link->u.mgd.conn.mode == IEEE80211_CONN_MODE_LEGACY || link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G) return 0; if (elems->vht_cap_elem) vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info); ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info, elems, true, &link->u.mgd.conn, &ap_chandef); if (ap_mode != link->u.mgd.conn.mode) { link_info(link, "AP %pM appears to change mode (expected %s, found %s) in %s, disconnect\n", link->u.mgd.bssid, ieee80211_conn_mode_str(link->u.mgd.conn.mode), ieee80211_conn_mode_str(ap_mode), frame); return -EINVAL; } chanreq.oper = ap_chandef; ieee80211_set_chanreq_ap(sdata, &chanreq, &link->u.mgd.conn, &ap_chandef); 
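/*
 * Editor's illustrative sketch (not part of the original file): the
 * membership-selector check in ieee80211_determine_chan_mode() above,
 * reduced to its core.  The selectors required by the AP and the ones
 * we (plus userspace) support each live in a 128-bit bitmap, and the
 * connection is rejected unless the former is a subset of the latter.
 * The function name is hypothetical.
 */
static bool example_selectors_ok(const unsigned long *required,
				 const unsigned long *supported)
{
	/* every bit set in 'required' must also be set in 'supported' */
	return bitmap_subset(required, supported, 128);
}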
/* * if HT operation mode changed store the new one - * this may be applicable even if channel is identical */ if (elems->ht_operation) { ht_opmode = le16_to_cpu(elems->ht_operation->operation_mode); if (link->conf->ht_operation_mode != ht_opmode) { *changed |= BSS_CHANGED_HT; link->conf->ht_operation_mode = ht_opmode; } } /* * Downgrade the new channel if we associated with restricted * bandwidth capabilities. For example, if we associated as a * 20 MHz STA to a 40 MHz AP (due to regulatory, capabilities * or config reasons) then switching to a 40 MHz channel now * won't do us any good -- we couldn't use it with the AP. */ while (link->u.mgd.conn.bw_limit < ieee80211_min_bw_limit_from_chandef(&chanreq.oper)) ieee80211_chandef_downgrade(&chanreq.oper, NULL); if (ap_chandef.chan->band == NL80211_BAND_6GHZ && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) { ieee80211_rearrange_tpe(&elems->tpe, &ap_chandef, &chanreq.oper); if (memcmp(&link->conf->tpe, &elems->tpe, sizeof(elems->tpe))) { link->conf->tpe = elems->tpe; *changed |= BSS_CHANGED_TPE; } } if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq)) return 0; link_info(link, "AP %pM changed bandwidth in %s, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n", link->u.mgd.bssid, frame, chanreq.oper.chan->center_freq, chanreq.oper.chan->freq_offset, chanreq.oper.width, chanreq.oper.center_freq1, chanreq.oper.freq1_offset, chanreq.oper.center_freq2); if (!cfg80211_chandef_valid(&chanreq.oper)) { sdata_info(sdata, "AP %pM changed caps/bw in %s in a way we can't support - disconnect\n", link->u.mgd.bssid, frame); return -EINVAL; } if (!update) { link->conf->chanreq = chanreq; return 0; } /* * We're tracking the current AP here, so don't do any further checks * here. This keeps us from playing ping-pong with regulatory, without * it the following can happen (for example): * - connect to an AP with 80 MHz, world regdom allows 80 MHz * - AP advertises regdom US * - CRDA loads regdom US with 80 MHz prohibited (old database) * - we detect an unsupported channel and disconnect * - disconnect causes CRDA to reload world regdomain and the game * starts anew. * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) * * It seems possible that there are still scenarios with CSA or real * bandwidth changes where a this could happen, but those cases are * less common and wouldn't completely prevent using the AP. 
*/ ret = ieee80211_link_change_chanreq(link, &chanreq, changed); if (ret) { sdata_info(sdata, "AP %pM changed bandwidth in %s to incompatible one - disconnect\n", link->u.mgd.bssid, frame); return ret; } cfg80211_schedule_channels_check(&sdata->wdev); return 0; } /* frame sending functions */ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u8 ap_ht_param, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps, const struct ieee80211_conn_settings *conn) { u8 *pos; u32 flags = channel->flags; u16 cap; struct ieee80211_sta_ht_cap ht_cap; BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); /* determine capability flags */ cap = ht_cap.cap; switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: if (flags & IEEE80211_CHAN_NO_HT40PLUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: if (flags & IEEE80211_CHAN_NO_HT40MINUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } break; } /* * If 40 MHz was disabled associate as though we weren't * capable of 40 MHz -- some broken APs will never fall * back to trying to transmit in 20 MHz. */ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_20) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } /* set SM PS mode properly */ cap &= ~IEEE80211_HT_CAP_SM_PS; switch (smps) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); fallthrough; case IEEE80211_SMPS_OFF: cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; break; case IEEE80211_SMPS_STATIC: cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; break; case IEEE80211_SMPS_DYNAMIC: cap |= WLAN_HT_CAP_SM_PS_DYNAMIC << IEEE80211_HT_CAP_SM_PS_SHIFT; break; } /* reserve and fill IE */ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); } /* This function determines vht capability flags for the association * and builds the IE. * Note - the function returns true to own the MU-MIMO capability */ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_supported_band *sband, struct ieee80211_vht_cap *ap_vht_cap, const struct ieee80211_conn_settings *conn) { struct ieee80211_local *local = sdata->local; u8 *pos; u32 cap; struct ieee80211_sta_vht_cap vht_cap; u32 mask, ap_bf_sts, our_bf_sts; bool mu_mimo_owner = false; BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap)); memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); /* determine capability flags */ cap = vht_cap.cap; if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_80) { cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; } /* * Some APs apparently get confused if our capabilities are better * than theirs, so restrict what we advertise in the assoc request. 
*/ if (!(ap_vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); else if (!(ap_vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; /* * If some other vif is using the MU-MIMO capability we cannot associate * using MU-MIMO - this will lead to contradictions in the group-id * mechanism. * Ownership is defined since association request, in order to avoid * simultaneous associations with MU-MIMO. */ if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) { bool disable_mu_mimo = false; struct ieee80211_sub_if_data *other; list_for_each_entry(other, &local->interfaces, list) { if (other->vif.bss_conf.mu_mimo_owner) { disable_mu_mimo = true; break; } } if (disable_mu_mimo) cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; else mu_mimo_owner = true; } mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask; our_bf_sts = cap & mask; if (ap_bf_sts < our_bf_sts) { cap &= ~mask; cap |= ap_bf_sts; } /* reserve and fill IE */ pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); return mu_mimo_owner; } static void ieee80211_assoc_add_rates(struct sk_buff *skb, enum nl80211_chan_width width, struct ieee80211_supported_band *sband, struct ieee80211_mgd_assoc_data *assoc_data) { u32 rates; if (assoc_data->supp_rates_len) { /* * Get all rates supported by the device and the AP as * some APs don't like getting a superset of their rates * in the association request (e.g. D-Link DAP 1353 in * b-only mode)... */ ieee80211_parse_bitrates(width, sband, assoc_data->supp_rates, assoc_data->supp_rates_len, &rates); } else { /* * In case AP not provide any supported rates information * before association, we send information element(s) with * all rates that we support. 
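/*
 * Editor's illustrative sketch (not part of the original file): the
 * "don't send a superset of the AP's rates" rule described above,
 * with plain arrays instead of ieee80211_parse_bitrates().  Both rate
 * lists are assumed to be normalised to the same unit; the result is
 * a bitmap over the device's rate table.  Names are hypothetical.
 */
static u32 example_rate_intersection(const u16 *dev_rates, int n_dev,
				     const u16 *ap_rates, int n_ap)
{
	u32 mask = 0;

	for (int i = 0; i < n_dev; i++)
		for (int j = 0; j < n_ap; j++)
			if (dev_rates[i] == ap_rates[j])
				mask |= BIT(i);

	return mask;
}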
*/ rates = ~0; } ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates, WLAN_EID_SUPP_RATES); ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates, WLAN_EID_EXT_SUPP_RATES); } static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb, const u8 *elems, size_t elems_len, size_t offset) { size_t noffset; static const u8 before_ht[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_PWR_CAPABILITY, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_RRM_ENABLED_CAPABILITIES, WLAN_EID_MOBILITY_DOMAIN, WLAN_EID_FAST_BSS_TRANSITION, /* reassoc only */ WLAN_EID_RIC_DATA, /* reassoc only */ WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; static const u8 after_ric[] = { WLAN_EID_SUPPORTED_REGULATORY_CLASSES, WLAN_EID_HT_CAPABILITY, WLAN_EID_BSS_COEX_2040, /* luckily this is almost always there */ WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_TRAFFIC_CAPA, WLAN_EID_TIM_BCAST_REQ, WLAN_EID_INTERWORKING, /* 60 GHz (Multi-band, DMG, MMS) can't happen */ WLAN_EID_VHT_CAPABILITY, WLAN_EID_OPMODE_NOTIF, }; if (!elems_len) return offset; noffset = ieee80211_ie_split_ric(elems, elems_len, before_ht, ARRAY_SIZE(before_ht), after_ric, ARRAY_SIZE(after_ric), offset); skb_put_data(skb, elems + offset, noffset - offset); return noffset; } static size_t ieee80211_add_before_vht_elems(struct sk_buff *skb, const u8 *elems, size_t elems_len, size_t offset) { static const u8 before_vht[] = { /* * no need to list the ones split off before HT * or generated here */ WLAN_EID_BSS_COEX_2040, WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_TRAFFIC_CAPA, WLAN_EID_TIM_BCAST_REQ, WLAN_EID_INTERWORKING, /* 60 GHz (Multi-band, DMG, MMS) can't happen */ }; size_t noffset; if (!elems_len) return offset; /* RIC already taken care of in ieee80211_add_before_ht_elems() */ noffset = ieee80211_ie_split(elems, elems_len, before_vht, ARRAY_SIZE(before_vht), offset); skb_put_data(skb, elems + offset, noffset - offset); return noffset; } static size_t ieee80211_add_before_he_elems(struct sk_buff *skb, const u8 *elems, size_t elems_len, size_t offset) { static const u8 before_he[] = { /* * no need to list the ones split off before VHT * or generated here */ WLAN_EID_OPMODE_NOTIF, WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE, /* 11ai elements */ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER, WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN, /* TODO: add 11ah/11aj/11ak elements */ }; size_t noffset; if (!elems_len) return offset; /* RIC already taken care of in ieee80211_add_before_ht_elems() */ noffset = ieee80211_ie_split(elems, elems_len, before_he, ARRAY_SIZE(before_he), offset); skb_put_data(skb, elems + offset, noffset - offset); return noffset; } #define PRESENT_ELEMS_MAX 8 #define PRESENT_ELEM_EXT_OFFS 0x100 static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u16 capab, const struct element *ext_capa, const u16 *present_elems, struct ieee80211_mgd_assoc_data *assoc_data); static size_t ieee80211_add_link_elems(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u16 *capab, const struct element *ext_capa, const u8 *extra_elems, size_t extra_elems_len, unsigned int link_id, struct ieee80211_link_data *link, u16 *present_elems, struct ieee80211_mgd_assoc_data *assoc_data) { enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; struct 
ieee80211_channel *chan = cbss->channel; const struct ieee80211_sband_iftype_data *iftd; struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20; struct ieee80211_chanctx_conf *chanctx_conf; enum ieee80211_smps_mode smps_mode; u16 orig_capab = *capab; size_t offset = 0; int present_elems_len = 0; u8 *pos; int i; #define ADD_PRESENT_ELEM(id) do { \ /* need a last for termination - we use 0 == SSID */ \ if (!WARN_ON(present_elems_len >= PRESENT_ELEMS_MAX - 1)) \ present_elems[present_elems_len++] = (id); \ } while (0) #define ADD_PRESENT_EXT_ELEM(id) ADD_PRESENT_ELEM(PRESENT_ELEM_EXT_OFFS | (id)) if (link) smps_mode = link->smps_mode; else if (sdata->u.mgd.powersave) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_OFF; if (link) { /* * 5/10 MHz scenarios are only viable without MLO, in which * case this pointer should be used ... All of this is a bit * unclear though, not sure this even works at all. */ rcu_read_lock(); chanctx_conf = rcu_dereference(link->conf->chanctx_conf); if (chanctx_conf) width = chanctx_conf->def.width; rcu_read_unlock(); } sband = local->hw.wiphy->bands[chan->band]; iftd = ieee80211_get_sband_iftype_data(sband, iftype); if (sband->band == NL80211_BAND_2GHZ) { *capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; *capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; } if ((cbss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && ieee80211_hw_check(&local->hw, SPECTRUM_MGMT)) *capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; if (sband->band != NL80211_BAND_S1GHZ) ieee80211_assoc_add_rates(skb, width, sband, assoc_data); if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT || *capab & WLAN_CAPABILITY_RADIO_MEASURE) { struct cfg80211_chan_def chandef = { .width = width, .chan = chan, }; pos = skb_put(skb, 4); *pos++ = WLAN_EID_PWR_CAPABILITY; *pos++ = 2; *pos++ = 0; /* min tx power */ /* max tx power */ *pos++ = ieee80211_chandef_max_power(&chandef); ADD_PRESENT_ELEM(WLAN_EID_PWR_CAPABILITY); } /* * Per spec, we shouldn't include the list of channels if we advertise * support for extended channel switching, but we've always done that; * (for now?) apply this restriction only on the (new) 6 GHz band. 
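/*
 * Editor's illustrative sketch (not part of the original file): the
 * Supported Channels element built below stores channel numbers, not
 * frequencies.  ieee80211_frequency_to_channel() does the real
 * conversion; roughly, the common 2.4/5/6 GHz mappings look like this
 * (hypothetical helper, S1G and 60 GHz omitted).
 */
static int example_freq_to_chan(int freq_mhz)
{
	if (freq_mhz == 2484)				/* 2.4 GHz channel 14 is special */
		return 14;
	if (freq_mhz >= 2412 && freq_mhz <= 2472)
		return (freq_mhz - 2407) / 5;		/* 2.4 GHz: channels 1..13 */
	if (freq_mhz >= 5955 && freq_mhz <= 7115)
		return (freq_mhz - 5950) / 5;		/* 6 GHz */
	if (freq_mhz >= 5160 && freq_mhz <= 5885)
		return (freq_mhz - 5000) / 5;		/* 5 GHz */
	return 0;					/* unknown/unsupported here */
}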
*/ if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT && (sband->band != NL80211_BAND_6GHZ || !ext_capa || ext_capa->datalen < 1 || !(ext_capa->data[0] & WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING))) { /* TODO: get this in reg domain format */ pos = skb_put(skb, 2 * sband->n_channels + 2); *pos++ = WLAN_EID_SUPPORTED_CHANNELS; *pos++ = 2 * sband->n_channels; for (i = 0; i < sband->n_channels; i++) { int cf = sband->channels[i].center_freq; *pos++ = ieee80211_frequency_to_channel(cf); *pos++ = 1; /* one channel in the subband*/ } ADD_PRESENT_ELEM(WLAN_EID_SUPPORTED_CHANNELS); } /* if present, add any custom IEs that go before HT */ offset = ieee80211_add_before_ht_elems(skb, extra_elems, extra_elems_len, offset); if (sband->band != NL80211_BAND_6GHZ && assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HT) { ieee80211_add_ht_ie(sdata, skb, assoc_data->link[link_id].ap_ht_param, sband, chan, smps_mode, &assoc_data->link[link_id].conn); ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY); } /* if present, add any custom IEs that go before VHT */ offset = ieee80211_add_before_vht_elems(skb, extra_elems, extra_elems_len, offset); if (sband->band != NL80211_BAND_6GHZ && assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_VHT && sband->vht_cap.vht_supported) { bool mu_mimo_owner = ieee80211_add_vht_ie(sdata, skb, sband, &assoc_data->link[link_id].ap_vht_cap, &assoc_data->link[link_id].conn); if (link) link->conf->mu_mimo_owner = mu_mimo_owner; ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY); } /* if present, add any custom IEs that go before HE */ offset = ieee80211_add_before_he_elems(skb, extra_elems, extra_elems_len, offset); if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HE) { ieee80211_put_he_cap(skb, sdata, sband, &assoc_data->link[link_id].conn); ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY); ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode); } /* * careful - need to know about all the present elems before * calling ieee80211_assoc_add_ml_elem(), so add this one if * we're going to put it after the ML element */ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT) ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY); if (link_id == assoc_data->assoc_link_id) ieee80211_assoc_add_ml_elem(sdata, skb, orig_capab, ext_capa, present_elems, assoc_data); /* crash if somebody gets it wrong */ present_elems = NULL; if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT) ieee80211_put_eht_cap(skb, sdata, sband, &assoc_data->link[link_id].conn); if (sband->band == NL80211_BAND_S1GHZ) { ieee80211_add_aid_request_ie(sdata, skb); ieee80211_add_s1g_capab_ie(sdata, &sband->s1g_cap, skb); } if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len) skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len); return offset; } static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb, const u16 *outer, const u16 *inner) { unsigned int skb_len = skb->len; bool at_extension = false; bool added = false; int i, j; u8 *len, *list_len = NULL; skb_put_u8(skb, WLAN_EID_EXTENSION); len = skb_put(skb, 1); skb_put_u8(skb, WLAN_EID_EXT_NON_INHERITANCE); for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) { u16 elem = outer[i]; bool have_inner = false; /* should at least be sorted in the sense of normal -> ext */ WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS); /* switch to extension list */ if (!at_extension && elem >= PRESENT_ELEM_EXT_OFFS) { at_extension = true; if (!list_len) skb_put_u8(skb, 0); list_len = NULL; } for (j = 0; j < PRESENT_ELEMS_MAX && inner[j]; j++) { 
if (elem == inner[j]) { have_inner = true; break; } } if (have_inner) continue; if (!list_len) { list_len = skb_put(skb, 1); *list_len = 0; } *list_len += 1; skb_put_u8(skb, (u8)elem); added = true; } /* if we added a list but no extension list, make a zero-len one */ if (added && (!at_extension || !list_len)) skb_put_u8(skb, 0); /* if nothing added remove extension element completely */ if (!added) skb_trim(skb, skb_len); else *len = skb->len - skb_len - 2; } static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u16 capab, const struct element *ext_capa, const u16 *outer_present_elems, struct ieee80211_mgd_assoc_data *assoc_data) { struct ieee80211_local *local = sdata->local; struct ieee80211_multi_link_elem *ml_elem; struct ieee80211_mle_basic_common_info *common; const struct wiphy_iftype_ext_capab *ift_ext_capa; __le16 eml_capa = 0, mld_capa_ops = 0; unsigned int link_id; u8 *ml_elem_len; void *capab_pos; if (!ieee80211_vif_is_mld(&sdata->vif)) return; ift_ext_capa = cfg80211_get_iftype_ext_capa(local->hw.wiphy, ieee80211_vif_type_p2p(&sdata->vif)); if (ift_ext_capa) { eml_capa = cpu_to_le16(ift_ext_capa->eml_capabilities); mld_capa_ops = cpu_to_le16(ift_ext_capa->mld_capa_and_ops); } skb_put_u8(skb, WLAN_EID_EXTENSION); ml_elem_len = skb_put(skb, 1); skb_put_u8(skb, WLAN_EID_EXT_EHT_MULTI_LINK); ml_elem = skb_put(skb, sizeof(*ml_elem)); ml_elem->control = cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC | IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP); common = skb_put(skb, sizeof(*common)); common->len = sizeof(*common) + 2; /* MLD capa/ops */ memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN); /* add EML_CAPA only if needed, see Draft P802.11be_D2.1, 35.3.17 */ if (eml_capa & cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP | IEEE80211_EML_CAP_EMLMR_SUPPORT))) { common->len += 2; /* EML capabilities */ ml_elem->control |= cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA); skb_put_data(skb, &eml_capa, sizeof(eml_capa)); } skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { u16 link_present_elems[PRESENT_ELEMS_MAX] = {}; const u8 *extra_elems; size_t extra_elems_len; size_t extra_used; u8 *subelem_len = NULL; __le16 ctrl; if (!assoc_data->link[link_id].bss || link_id == assoc_data->assoc_link_id) continue; extra_elems = assoc_data->link[link_id].elems; extra_elems_len = assoc_data->link[link_id].elems_len; skb_put_u8(skb, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE); subelem_len = skb_put(skb, 1); ctrl = cpu_to_le16(link_id | IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE | IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT); skb_put_data(skb, &ctrl, sizeof(ctrl)); skb_put_u8(skb, 1 + ETH_ALEN); /* STA Info Length */ skb_put_data(skb, assoc_data->link[link_id].addr, ETH_ALEN); /* * Now add the contents of the (re)association request, * but the "listen interval" and "current AP address" * (if applicable) are skipped. So we only have * the capability field (remember the position and fill * later), followed by the elements added below by * calling ieee80211_add_link_elems(). 
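		 *
		 * The per-STA profile subelement built here thus ends up roughly
		 * as: subelem ID | len | STA control | STA info (len + MAC) |
		 * capab | link elements, with the length fixed up and the
		 * subelement fragmented afterwards by ieee80211_fragment_element().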
*/ capab_pos = skb_put(skb, 2); extra_used = ieee80211_add_link_elems(sdata, skb, &capab, ext_capa, extra_elems, extra_elems_len, link_id, NULL, link_present_elems, assoc_data); if (extra_elems) skb_put_data(skb, extra_elems + extra_used, extra_elems_len - extra_used); put_unaligned_le16(capab, capab_pos); ieee80211_add_non_inheritance_elem(skb, outer_present_elems, link_present_elems); ieee80211_fragment_element(skb, subelem_len, IEEE80211_MLE_SUBELEM_FRAGMENT); } ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT); } static int ieee80211_link_common_elems_size(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype iftype, struct cfg80211_bss *cbss, size_t elems_len) { struct ieee80211_local *local = sdata->local; const struct ieee80211_sband_iftype_data *iftd; struct ieee80211_supported_band *sband; size_t size = 0; if (!cbss) return size; sband = local->hw.wiphy->bands[cbss->channel->band]; /* add STA profile elements length */ size += elems_len; /* and supported rates length */ size += 4 + sband->n_bitrates; /* supported channels */ size += 2 + 2 * sband->n_channels; iftd = ieee80211_get_sband_iftype_data(sband, iftype); if (iftd) size += iftd->vendor_elems.len; /* power capability */ size += 4; /* HT, VHT, HE, EHT */ size += 2 + sizeof(struct ieee80211_ht_cap); size += 2 + sizeof(struct ieee80211_vht_cap); size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + IEEE80211_HE_PPE_THRES_MAX_LEN; if (sband->band == NL80211_BAND_6GHZ) size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa); size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) + sizeof(struct ieee80211_eht_mcs_nss_supp) + IEEE80211_EHT_PPE_THRES_MAX_LEN; return size; } static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; struct ieee80211_link_data *link; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u8 *pos, qos_info, *ie_start; size_t offset, noffset; u16 capab = 0, link_capab; __le16 listen_int; struct element *ext_capa = NULL; enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); struct ieee80211_prep_tx_info info = {}; unsigned int link_id, n_links = 0; u16 present_elems[PRESENT_ELEMS_MAX] = {}; void *capab_pos; size_t size; int ret; /* we know it's writable, cast away the const */ if (assoc_data->ie_len) ext_capa = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, assoc_data->ie, assoc_data->ie_len); lockdep_assert_wiphy(sdata->local->hw.wiphy); size = local->hw.extra_tx_headroom + sizeof(*mgmt) + /* bit too much but doesn't matter */ 2 + assoc_data->ssid_len + /* SSID */ assoc_data->ie_len + /* extra IEs */ (assoc_data->fils_kek_len ? 
16 /* AES-SIV */ : 0) + 9; /* WMM */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; size_t elems_len = assoc_data->link[link_id].elems_len; if (!cbss) continue; n_links++; size += ieee80211_link_common_elems_size(sdata, iftype, cbss, elems_len); /* non-inheritance element */ size += 2 + 2 + PRESENT_ELEMS_MAX; /* should be the same across all BSSes */ if (cbss->capability & WLAN_CAPABILITY_PRIVACY) capab |= WLAN_CAPABILITY_PRIVACY; } if (ieee80211_vif_is_mld(&sdata->vif)) { /* consider the multi-link element with STA profile */ size += sizeof(struct ieee80211_multi_link_elem); /* max common info field in basic multi-link element */ size += sizeof(struct ieee80211_mle_basic_common_info) + 2 + /* capa & op */ 2; /* EML capa */ /* * The capability elements were already considered above; * note this over-estimates a bit because there's no * STA profile for the assoc link. */ size += (n_links - 1) * (1 + 1 + /* subelement ID/length */ 2 + /* STA control */ 1 + ETH_ALEN + 2 /* STA Info field */); } link = sdata_dereference(sdata->link[assoc_data->assoc_link_id], sdata); if (WARN_ON(!link)) return -EINVAL; if (WARN_ON(!assoc_data->link[assoc_data->assoc_link_id].bss)) return -EINVAL; skb = alloc_skb(size, GFP_KERNEL); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM) capab |= WLAN_CAPABILITY_RADIO_MEASURE; /* Set MBSSID support for HE AP if needed */ if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE && ext_capa && ext_capa->datalen >= 3) ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); listen_int = cpu_to_le16(assoc_data->s1g ? ieee80211_encode_usf(local->hw.conf.listen_interval) : local->hw.conf.listen_interval); if (!is_zero_ether_addr(assoc_data->prev_ap_addr)) { skb_put(skb, 10); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); capab_pos = &mgmt->u.reassoc_req.capab_info; mgmt->u.reassoc_req.listen_interval = listen_int; memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_ap_addr, ETH_ALEN); info.subtype = IEEE80211_STYPE_REASSOC_REQ; } else { skb_put(skb, 4); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); capab_pos = &mgmt->u.assoc_req.capab_info; mgmt->u.assoc_req.listen_interval = listen_int; info.subtype = IEEE80211_STYPE_ASSOC_REQ; } /* SSID */ pos = skb_put(skb, 2 + assoc_data->ssid_len); ie_start = pos; *pos++ = WLAN_EID_SSID; *pos++ = assoc_data->ssid_len; memcpy(pos, assoc_data->ssid, assoc_data->ssid_len); /* * This bit is technically reserved, so it shouldn't matter for either * the AP or us, but it also means we shouldn't set it. However, we've * always set it in the past, and apparently some EHT APs check that * we don't set it. To avoid interoperability issues with old APs that * for some reason check it and want it to be set, set the bit for all * pre-EHT connections as we used to do. 
*/ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT) capab |= WLAN_CAPABILITY_ESS; /* add the elements for the assoc (main) link */ link_capab = capab; offset = ieee80211_add_link_elems(sdata, skb, &link_capab, ext_capa, assoc_data->ie, assoc_data->ie_len, assoc_data->assoc_link_id, link, present_elems, assoc_data); put_unaligned_le16(link_capab, capab_pos); /* if present, add any custom non-vendor IEs */ if (assoc_data->ie_len) { noffset = ieee80211_ie_split_vendor(assoc_data->ie, assoc_data->ie_len, offset); skb_put_data(skb, assoc_data->ie + offset, noffset - offset); offset = noffset; } if (assoc_data->wmm) { if (assoc_data->uapsd) { qos_info = ifmgd->uapsd_queues; qos_info |= (ifmgd->uapsd_max_sp_len << IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT); } else { qos_info = 0; } pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info); } /* add any remaining custom (i.e. vendor specific here) IEs */ if (assoc_data->ie_len) { noffset = assoc_data->ie_len; skb_put_data(skb, assoc_data->ie + offset, noffset - offset); } if (assoc_data->fils_kek_len) { ret = fils_encrypt_assoc_req(skb, assoc_data); if (ret < 0) { dev_kfree_skb(skb); return ret; } } pos = skb_tail_pointer(skb); kfree(ifmgd->assoc_req_ies); ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC); if (!ifmgd->assoc_req_ies) { dev_kfree_skb(skb); return -ENOMEM; } ifmgd->assoc_req_ies_len = pos - ie_start; info.link_id = assoc_data->assoc_link_id; drv_mgd_prepare_tx(local, sdata, &info); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_tx_skb(sdata, skb); return 0; } void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_pspoll *pspoll; struct sk_buff *skb; skb = ieee80211_pspoll_get(&local->hw, &sdata->vif); if (!skb) return; pspoll = (struct ieee80211_pspoll *) skb->data; pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, bool powersave) { struct sk_buff *skb; struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, -1, !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP)); if (!skb) return; nullfunc = (struct ieee80211_hdr_3addr *) skb->data; if (powersave) nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; ieee80211_tx_skb(sdata, skb); } void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct sk_buff *skb; struct ieee80211_hdr *nullfunc; __le16 fc; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put_zero(skb, 30); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); nullfunc->frame_control = fc; 
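	/*
	 * With both ToDS and FromDS set this is a 4-address frame:
	 * addr1/addr3 carry the BSSID and addr2/addr4 our own address,
	 * as filled in below.
	 */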
memcpy(nullfunc->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN); memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; ieee80211_tx_skb(sdata, skb); } /* spectrum management related things */ static void ieee80211_csa_switch_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_link_data *link = container_of(work, struct ieee80211_link_data, u.mgd.csa.switch_work.work); struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ret; if (!ieee80211_sdata_running(sdata)) return; lockdep_assert_wiphy(local->hw.wiphy); if (!ifmgd->associated) return; if (!link->conf->csa_active) return; /* * If the link isn't active (now), we cannot wait for beacons, won't * have a reserved chanctx, etc. Just switch over the chandef and * update cfg80211 directly. */ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) { link->conf->chanreq = link->csa.chanreq; cfg80211_ch_switch_notify(sdata->dev, &link->csa.chanreq.oper, link->link_id); return; } /* * using reservation isn't immediate as it may be deferred until later * with multi-vif. once reservation is complete it will re-schedule the * work with no reserved_chanctx so verify chandef to check if it * completed successfully */ if (link->reserved_chanctx) { /* * with multi-vif csa driver may call ieee80211_csa_finish() * many times while waiting for other interfaces to use their * reservations */ if (link->reserved_ready) return; ret = ieee80211_link_use_reserved_context(link); if (ret) { link_info(link, "failed to use reserved channel context, disconnecting (err=%d)\n", ret); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); } return; } if (!ieee80211_chanreq_identical(&link->conf->chanreq, &link->csa.chanreq)) { link_info(link, "failed to finalize channel switch, disconnecting\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); return; } link->u.mgd.csa.waiting_bcn = true; /* apply new TPE restrictions immediately on the new channel */ if (link->u.mgd.csa.ap_chandef.chan->band == NL80211_BAND_6GHZ && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) { ieee80211_rearrange_tpe(&link->u.mgd.csa.tpe, &link->u.mgd.csa.ap_chandef, &link->conf->chanreq.oper); if (memcmp(&link->conf->tpe, &link->u.mgd.csa.tpe, sizeof(link->u.mgd.csa.tpe))) { link->conf->tpe = link->u.mgd.csa.tpe; ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_TPE); } } ieee80211_sta_reset_beacon_monitor(sdata); ieee80211_sta_reset_conn_monitor(sdata); } static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ret; lockdep_assert_wiphy(sdata->local->hw.wiphy); WARN_ON(!link->conf->csa_active); ieee80211_vif_unblock_queues_csa(sdata); link->conf->csa_active = false; link->u.mgd.csa.blocked_tx = false; link->u.mgd.csa.waiting_bcn = false; ret = drv_post_channel_switch(link); if (ret) { link_info(link, "driver post channel switch failed, disconnecting\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); return; } cfg80211_ch_switch_notify(sdata->dev, &link->conf->chanreq.oper, link->link_id); } void ieee80211_chswitch_done(struct 
ieee80211_vif *vif, bool success, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_chswitch_done(sdata, success, link_id); rcu_read_lock(); if (!success) { sdata_info(sdata, "driver channel switch failed (link %d), disconnecting\n", link_id); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.csa_connection_drop_work); } else { struct ieee80211_link_data *link = rcu_dereference(sdata->link[link_id]); if (WARN_ON(!link)) { rcu_read_unlock(); return; } wiphy_delayed_work_queue(sdata->local->hw.wiphy, &link->u.mgd.csa.switch_work, 0); } rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_chswitch_done); static void ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; lockdep_assert_wiphy(local->hw.wiphy); if (!local->ops->abort_channel_switch) return; ieee80211_link_unreserve_chanctx(link); ieee80211_vif_unblock_queues_csa(sdata); link->conf->csa_active = false; link->u.mgd.csa.blocked_tx = false; drv_abort_channel_switch(link); } struct sta_csa_rnr_iter_data { struct ieee80211_link_data *link; struct ieee80211_channel *chan; u8 mld_id; }; static enum cfg80211_rnr_iter_ret ieee80211_sta_csa_rnr_iter(void *_data, u8 type, const struct ieee80211_neighbor_ap_info *info, const u8 *tbtt_info, u8 tbtt_info_len) { struct sta_csa_rnr_iter_data *data = _data; struct ieee80211_link_data *link = data->link; struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const struct ieee80211_tbtt_info_ge_11 *ti; enum nl80211_band band; unsigned int center_freq; int link_id; if (type != IEEE80211_TBTT_INFO_TYPE_TBTT) return RNR_ITER_CONTINUE; if (tbtt_info_len < sizeof(*ti)) return RNR_ITER_CONTINUE; ti = (const void *)tbtt_info; if (ti->mld_params.mld_id != data->mld_id) return RNR_ITER_CONTINUE; link_id = le16_get_bits(ti->mld_params.params, IEEE80211_RNR_MLD_PARAMS_LINK_ID); if (link_id != data->link->link_id) return RNR_ITER_CONTINUE; /* we found the entry for our link! */ /* this AP is confused, it had this right before ... just disconnect */ if (!ieee80211_operating_class_to_band(info->op_class, &band)) { link_info(link, "AP now has invalid operating class in RNR, disconnect\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); return RNR_ITER_BREAK; } center_freq = ieee80211_channel_to_frequency(info->channel, band); data->chan = ieee80211_get_channel(sdata->local->hw.wiphy, center_freq); return RNR_ITER_BREAK; } static void ieee80211_sta_other_link_csa_disappeared(struct ieee80211_link_data *link, struct ieee802_11_elems *elems) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct sta_csa_rnr_iter_data data = { .link = link, }; /* * If we get here, we see a beacon from another link without * CSA still being reported for it, so now we have to check * if the CSA was aborted or completed. This may not even be * perfectly possible if the CSA was only done for changing * the puncturing, but in that case if the link in inactive * we don't really care, and if it's an active link (or when * it's activated later) we'll get a beacon and adjust. */ if (WARN_ON(!elems->ml_basic)) return; data.mld_id = ieee80211_mle_get_mld_id((const void *)elems->ml_basic); /* * So in order to do this, iterate the RNR element(s) and see * what channel is reported now. 
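	 * If no matching TBTT entry is found, data.chan stays NULL and the
	 * check below treats that as a reason to disconnect.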
*/ cfg80211_iter_rnr(elems->ie_start, elems->total_len, ieee80211_sta_csa_rnr_iter, &data); if (!data.chan) { link_info(link, "couldn't find (valid) channel in RNR for CSA, disconnect\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); return; } /* * If it doesn't match the CSA, then assume it aborted. This * may erroneously detect that it was _not_ aborted when it * was in fact aborted, but only changed the bandwidth or the * puncturing configuration, but we don't have enough data to * detect that. */ if (data.chan != link->csa.chanreq.oper.chan) ieee80211_sta_abort_chanswitch(link); } enum ieee80211_csa_source { IEEE80211_CSA_SOURCE_BEACON, IEEE80211_CSA_SOURCE_OTHER_LINK, IEEE80211_CSA_SOURCE_PROT_ACTION, IEEE80211_CSA_SOURCE_UNPROT_ACTION, }; static void ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, u64 timestamp, u32 device_timestamp, struct ieee802_11_elems *full_elems, struct ieee802_11_elems *csa_elems, enum ieee80211_csa_source source) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_chanctx *chanctx = NULL; struct ieee80211_chanctx_conf *conf; struct ieee80211_csa_ie csa_ie = {}; struct ieee80211_channel_switch ch_switch = { .link_id = link->link_id, .timestamp = timestamp, .device_timestamp = device_timestamp, }; unsigned long now; int res; lockdep_assert_wiphy(local->hw.wiphy); if (csa_elems) { struct cfg80211_bss *cbss = link->conf->bss; enum nl80211_band current_band; struct ieee80211_bss *bss; if (WARN_ON(!cbss)) return; current_band = cbss->channel->band; bss = (void *)cbss->priv; res = ieee80211_parse_ch_switch_ie(sdata, csa_elems, current_band, bss->vht_cap_info, &link->u.mgd.conn, link->u.mgd.bssid, source == IEEE80211_CSA_SOURCE_UNPROT_ACTION, &csa_ie); if (res == 0) { ch_switch.block_tx = csa_ie.mode; ch_switch.chandef = csa_ie.chanreq.oper; ch_switch.count = csa_ie.count; ch_switch.delay = csa_ie.max_switch_time; } link->u.mgd.csa.tpe = csa_elems->csa_tpe; } else { /* * If there was no per-STA profile for this link, we * get called with csa_elems == NULL. This of course means * there are no CSA elements, so set res=1 indicating * no more CSA. */ res = 1; } if (res < 0) { /* ignore this case, not a protected frame */ if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION) return; goto drop_connection; } if (link->conf->csa_active) { switch (source) { case IEEE80211_CSA_SOURCE_PROT_ACTION: case IEEE80211_CSA_SOURCE_UNPROT_ACTION: /* already processing - disregard action frames */ return; case IEEE80211_CSA_SOURCE_BEACON: if (link->u.mgd.csa.waiting_bcn) { ieee80211_chswitch_post_beacon(link); /* * If the CSA is still present after the switch * we need to consider it as a new CSA (possibly * to self). This happens by not returning here * so we'll get to the check below. */ } else if (res) { ieee80211_sta_abort_chanswitch(link); return; } else { drv_channel_switch_rx_beacon(sdata, &ch_switch); return; } break; case IEEE80211_CSA_SOURCE_OTHER_LINK: /* active link: we want to see the beacon to continue */ if (ieee80211_vif_link_active(&sdata->vif, link->link_id)) return; /* switch work ran, so just complete the process */ if (link->u.mgd.csa.waiting_bcn) { ieee80211_chswitch_post_beacon(link); /* * If the CSA is still present after the switch * we need to consider it as a new CSA (possibly * to self). This happens by not returning here * so we'll get to the check below. 
*/ break; } /* link still has CSA but we already know, do nothing */ if (!res) return; /* check in the RNR if the CSA aborted */ ieee80211_sta_other_link_csa_disappeared(link, full_elems); return; } } /* no active CSA nor a new one */ if (res) { /* * However, we may have stopped queues when receiving a public * action frame that couldn't be protected, if it had the quiet * bit set. This is a trade-off, we want to be quiet as soon as * possible, but also don't trust the public action frame much, * as it can't be protected. */ if (unlikely(link->u.mgd.csa.blocked_tx)) { link->u.mgd.csa.blocked_tx = false; ieee80211_vif_unblock_queues_csa(sdata); } return; } /* * We don't really trust public action frames, but block queues (go to * quiet mode) for them anyway, we should get a beacon soon to either * know what the CSA really is, or figure out the public action frame * was actually an attack. */ if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION) { if (csa_ie.mode) { link->u.mgd.csa.blocked_tx = true; ieee80211_vif_block_queues_csa(sdata); } return; } if (link->conf->chanreq.oper.chan->band != csa_ie.chanreq.oper.chan->band) { link_info(link, "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", link->u.mgd.bssid, csa_ie.chanreq.oper.chan->center_freq, csa_ie.chanreq.oper.width, csa_ie.chanreq.oper.center_freq1, csa_ie.chanreq.oper.center_freq2); goto drop_connection; } if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper, IEEE80211_CHAN_DISABLED)) { link_info(link, "AP %pM switches to unsupported channel (%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), disconnecting\n", link->u.mgd.bssid, csa_ie.chanreq.oper.chan->center_freq, csa_ie.chanreq.oper.chan->freq_offset, csa_ie.chanreq.oper.width, csa_ie.chanreq.oper.center_freq1, csa_ie.chanreq.oper.freq1_offset, csa_ie.chanreq.oper.center_freq2); goto drop_connection; } if (cfg80211_chandef_identical(&csa_ie.chanreq.oper, &link->conf->chanreq.oper) && (!csa_ie.mode || source != IEEE80211_CSA_SOURCE_BEACON)) { if (link->u.mgd.csa.ignored_same_chan) return; link_info(link, "AP %pM tries to chanswitch to same channel, ignore\n", link->u.mgd.bssid); link->u.mgd.csa.ignored_same_chan = true; return; } /* * Drop all TDLS peers on the affected link - either we disconnect or * move to a different channel from this point on. There's no telling * what our peer will do. * The TDLS WIDER_BW scenario is also problematic, as peers might now * have an incompatible wider chandef. 
*/ ieee80211_teardown_tdls_peers(link); conf = rcu_dereference_protected(link->conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && !conf) { link_info(link, "no channel context assigned to vif?, disconnecting\n"); goto drop_connection; } if (conf) chanctx = container_of(conf, struct ieee80211_chanctx, conf); if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) { link_info(link, "driver doesn't support chan-switch with channel contexts\n"); goto drop_connection; } if (drv_pre_channel_switch(sdata, &ch_switch)) { link_info(link, "preparing for channel switch failed, disconnecting\n"); goto drop_connection; } link->u.mgd.csa.ap_chandef = csa_ie.chanreq.ap; link->csa.chanreq.oper = csa_ie.chanreq.oper; ieee80211_set_chanreq_ap(sdata, &link->csa.chanreq, &link->u.mgd.conn, &csa_ie.chanreq.ap); if (chanctx) { res = ieee80211_link_reserve_chanctx(link, &link->csa.chanreq, chanctx->mode, false); if (res) { link_info(link, "failed to reserve channel context for channel switch, disconnecting (err=%d)\n", res); goto drop_connection; } } link->conf->csa_active = true; link->u.mgd.csa.ignored_same_chan = false; link->u.mgd.beacon_crc_valid = false; link->u.mgd.csa.blocked_tx = csa_ie.mode; if (csa_ie.mode) ieee80211_vif_block_queues_csa(sdata); cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper, link->link_id, csa_ie.count, csa_ie.mode); /* we may have to handle timeout for deactivated link in software */ now = jiffies; link->u.mgd.csa.time = now + TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) * link->conf->beacon_int); if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && local->ops->channel_switch) { /* * Use driver's channel switch callback, the driver will * later call ieee80211_chswitch_done(). It may deactivate * the link as well, we handle that elsewhere and queue * the csa.switch_work for the calculated time then. */ drv_channel_switch(local, sdata, &ch_switch); return; } /* channel switch handled in software */ wiphy_delayed_work_queue(local->hw.wiphy, &link->u.mgd.csa.switch_work, link->u.mgd.csa.time - now); return; drop_connection: /* * This is just so that the disconnect flow will know that * we were trying to switch channel and failed. In case the * mode is 1 (we are not allowed to Tx), we will know not to * send a deauthentication frame. Those two fields will be * reset when the disconnection worker runs. 
*/ link->conf->csa_active = true; link->u.mgd.csa.blocked_tx = csa_ie.mode; wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); } struct sta_bss_param_ch_cnt_data { struct ieee80211_sub_if_data *sdata; u8 reporting_link_id; u8 mld_id; }; static enum cfg80211_rnr_iter_ret ieee80211_sta_bss_param_ch_cnt_iter(void *_data, u8 type, const struct ieee80211_neighbor_ap_info *info, const u8 *tbtt_info, u8 tbtt_info_len) { struct sta_bss_param_ch_cnt_data *data = _data; struct ieee80211_sub_if_data *sdata = data->sdata; const struct ieee80211_tbtt_info_ge_11 *ti; u8 bss_param_ch_cnt; int link_id; if (type != IEEE80211_TBTT_INFO_TYPE_TBTT) return RNR_ITER_CONTINUE; if (tbtt_info_len < sizeof(*ti)) return RNR_ITER_CONTINUE; ti = (const void *)tbtt_info; if (ti->mld_params.mld_id != data->mld_id) return RNR_ITER_CONTINUE; link_id = le16_get_bits(ti->mld_params.params, IEEE80211_RNR_MLD_PARAMS_LINK_ID); bss_param_ch_cnt = le16_get_bits(ti->mld_params.params, IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT); if (bss_param_ch_cnt != 255 && link_id < ARRAY_SIZE(sdata->link)) { struct ieee80211_link_data *link = sdata_dereference(sdata->link[link_id], sdata); if (link && link->conf->bss_param_ch_cnt != bss_param_ch_cnt) { link->conf->bss_param_ch_cnt = bss_param_ch_cnt; link->conf->bss_param_ch_cnt_link_id = data->reporting_link_id; } } return RNR_ITER_CONTINUE; } static void ieee80211_mgd_update_bss_param_ch_cnt(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *bss_conf, struct ieee802_11_elems *elems) { struct sta_bss_param_ch_cnt_data data = { .reporting_link_id = bss_conf->link_id, .sdata = sdata, }; int bss_param_ch_cnt; if (!elems->ml_basic) return; data.mld_id = ieee80211_mle_get_mld_id((const void *)elems->ml_basic); cfg80211_iter_rnr(elems->ie_start, elems->total_len, ieee80211_sta_bss_param_ch_cnt_iter, &data); bss_param_ch_cnt = ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic); /* * Update bss_param_ch_cnt_link_id even if bss_param_ch_cnt * didn't change to indicate that we got a beacon on our own * link. */ if (bss_param_ch_cnt >= 0 && bss_param_ch_cnt != 255) { bss_conf->bss_param_ch_cnt = bss_param_ch_cnt; bss_conf->bss_param_ch_cnt_link_id = bss_conf->link_id; } } static bool ieee80211_find_80211h_pwr_constr(struct ieee80211_channel *channel, const u8 *country_ie, u8 country_ie_len, const u8 *pwr_constr_elem, int *chan_pwr, int *pwr_reduction) { struct ieee80211_country_ie_triplet *triplet; int chan = ieee80211_frequency_to_channel(channel->center_freq); int i, chan_increment; bool have_chan_pwr = false; /* Invalid IE */ if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) return false; triplet = (void *)(country_ie + 3); country_ie_len -= 3; switch (channel->band) { default: WARN_ON_ONCE(1); fallthrough; case NL80211_BAND_2GHZ: case NL80211_BAND_60GHZ: case NL80211_BAND_LC: chan_increment = 1; break; case NL80211_BAND_5GHZ: chan_increment = 4; break; case NL80211_BAND_6GHZ: /* * In the 6 GHz band, the "maximum transmit power level" * field in the triplets is reserved, and thus will be * zero and we shouldn't use it to control TX power. * The actual TX power will be given in the transmit * power envelope element instead. 
*/ return false; } /* find channel */ while (country_ie_len >= 3) { u8 first_channel = triplet->chans.first_channel; if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID) goto next; for (i = 0; i < triplet->chans.num_channels; i++) { if (first_channel + i * chan_increment == chan) { have_chan_pwr = true; *chan_pwr = triplet->chans.max_power; break; } } if (have_chan_pwr) break; next: triplet++; country_ie_len -= 3; } if (have_chan_pwr && pwr_constr_elem) *pwr_reduction = *pwr_constr_elem; else *pwr_reduction = 0; return have_chan_pwr; } static void ieee80211_find_cisco_dtpc(struct ieee80211_channel *channel, const u8 *cisco_dtpc_ie, int *pwr_level) { /* From practical testing, the first data byte of the DTPC element * seems to contain the requested dBm level, and the CLI on Cisco * APs clearly state the range is -127 to 127 dBm, which indicates * a signed byte, although it seemingly never actually goes negative. * The other byte seems to always be zero. */ *pwr_level = (__s8)cisco_dtpc_ie[4]; } static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link, struct ieee80211_channel *channel, struct ieee80211_mgmt *mgmt, const u8 *country_ie, u8 country_ie_len, const u8 *pwr_constr_ie, const u8 *cisco_dtpc_ie) { struct ieee80211_sub_if_data *sdata = link->sdata; bool has_80211h_pwr = false, has_cisco_pwr = false; int chan_pwr = 0, pwr_reduction_80211h = 0; int pwr_level_cisco, pwr_level_80211h; int new_ap_level; __le16 capab = mgmt->u.probe_resp.capab_info; if (ieee80211_is_s1g_beacon(mgmt->frame_control)) return 0; /* TODO */ if (country_ie && (capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) || capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) { has_80211h_pwr = ieee80211_find_80211h_pwr_constr( channel, country_ie, country_ie_len, pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h); pwr_level_80211h = max_t(int, 0, chan_pwr - pwr_reduction_80211h); } if (cisco_dtpc_ie) { ieee80211_find_cisco_dtpc( channel, cisco_dtpc_ie, &pwr_level_cisco); has_cisco_pwr = true; } if (!has_80211h_pwr && !has_cisco_pwr) return 0; /* If we have both 802.11h and Cisco DTPC, apply both limits * by picking the smallest of the two power levels advertised. */ if (has_80211h_pwr && (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { new_ap_level = pwr_level_80211h; if (link->ap_power_level == new_ap_level) return 0; sdata_dbg(sdata, "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", pwr_level_80211h, chan_pwr, pwr_reduction_80211h, link->u.mgd.bssid); } else { /* has_cisco_pwr is always true here. */ new_ap_level = pwr_level_cisco; if (link->ap_power_level == new_ap_level) return 0; sdata_dbg(sdata, "Limiting TX power to %d dBm as advertised by %pM\n", pwr_level_cisco, link->u.mgd.bssid); } link->ap_power_level = new_ap_level; if (__ieee80211_recalc_txpower(link)) return BSS_CHANGED_TXPOWER; return 0; } /* powersave */ static void ieee80211_enable_ps(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_conf *conf = &local->hw.conf; /* * If we are scanning right now then the parameters will * take effect when scan finishes. 
*/ if (local->scanning) return; if (conf->dynamic_ps_timeout > 0 && !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(conf->dynamic_ps_timeout)); } else { if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) ieee80211_send_nullfunc(local, sdata, true); if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) return; conf->flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } } static void ieee80211_change_ps(struct ieee80211_local *local) { struct ieee80211_conf *conf = &local->hw.conf; if (local->ps_sdata) { ieee80211_enable_ps(local, local->ps_sdata); } else if (conf->flags & IEEE80211_CONF_PS) { conf->flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); del_timer_sync(&local->dynamic_ps_timer); wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); } } static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *mgd = &sdata->u.mgd; struct sta_info *sta = NULL; bool authorized = false; if (!mgd->powersave) return false; if (mgd->broken_ap) return false; if (!mgd->associated) return false; if (mgd->flags & IEEE80211_STA_CONNECTION_POLL) return false; if (!(local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) && !sdata->deflink.u.mgd.have_beacon) return false; rcu_read_lock(); sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); if (sta) authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); rcu_read_unlock(); return authorized; } /* need to hold RTNL or interface lock */ void ieee80211_recalc_ps(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *found = NULL; int count = 0; int timeout; if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS) || ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) { local->ps_sdata = NULL; return; } list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP) { /* If an AP vif is found, then disable PS * by setting the count to zero thereby setting * ps_sdata to NULL. 
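			 * (Power save is only used while a single station
			 * interface is running; see the count == 1 check below.)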
*/ count = 0; break; } if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; found = sdata; count++; } if (count == 1 && ieee80211_powersave_allowed(found)) { u8 dtimper = found->deflink.u.mgd.dtim_period; timeout = local->dynamic_ps_forced_timeout; if (timeout < 0) timeout = 100; local->hw.conf.dynamic_ps_timeout = timeout; /* If the TIM IE is invalid, pretend the value is 1 */ if (!dtimper) dtimper = 1; local->hw.conf.ps_dtim_period = dtimper; local->ps_sdata = found; } else { local->ps_sdata = NULL; } ieee80211_change_ps(local); } void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata) { bool ps_allowed = ieee80211_powersave_allowed(sdata); if (sdata->vif.cfg.ps != ps_allowed) { sdata->vif.cfg.ps = ps_allowed; ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_PS); } } void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_disable_work); if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_PS, false); } void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_enable_work); struct ieee80211_sub_if_data *sdata = local->ps_sdata; struct ieee80211_if_managed *ifmgd; unsigned long flags; int q; /* can only happen when PS was just disabled anyway */ if (!sdata) return; ifmgd = &sdata->u.mgd; if (local->hw.conf.flags & IEEE80211_CONF_PS) return; if (local->hw.conf.dynamic_ps_timeout > 0) { /* don't enter PS if TX frames are pending */ if (drv_tx_frames_pending(local)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); return; } /* * transmission can be stopped by others which leads to * dynamic_ps_timer expiry. Postpone the ps timer if it * is not the actual idle state. 
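		 * "Idle" here means no queue has a stop reason set; the loop
		 * below re-arms the timer if any queue is still stopped.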
*/ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (q = 0; q < local->hw.queues; q++) { if (local->queue_stop_reasons[q]) { spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); return; } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { if (drv_tx_frames_pending(local)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); } else { ieee80211_send_nullfunc(local, sdata, true); /* Flush to get the tx status of nullfunc frame */ ieee80211_flush_queues(local, sdata, false); } } if (!(ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) || (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; local->hw.conf.flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } } void ieee80211_dynamic_ps_timer(struct timer_list *t) { struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer); wiphy_work_queue(local->hw.wiphy, &local->dynamic_ps_enable_work); } void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_link_data *link = container_of(work, struct ieee80211_link_data, dfs_cac_timer_work.work); struct cfg80211_chan_def chandef = link->conf->chanreq.oper; struct ieee80211_sub_if_data *sdata = link->sdata; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (sdata->wdev.links[link->link_id].cac_started) { ieee80211_link_release_channel(link); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, link->link_id); } } static bool __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool ret = false; int ac; if (local->hw.queues < IEEE80211_NUM_ACS) return false; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; int non_acm_ac; unsigned long now = jiffies; if (tx_tspec->action == TX_TSPEC_ACTION_NONE && tx_tspec->admitted_time && time_after(now, tx_tspec->time_slice_start + HZ)) { tx_tspec->consumed_tx_time = 0; tx_tspec->time_slice_start = now; if (tx_tspec->downgraded) tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; } switch (tx_tspec->action) { case TX_TSPEC_ACTION_STOP_DOWNGRADE: /* take the original parameters */ if (drv_conf_tx(local, &sdata->deflink, ac, &sdata->deflink.tx_conf[ac])) link_err(&sdata->deflink, "failed to set TX queue parameters for queue %d\n", ac); tx_tspec->action = TX_TSPEC_ACTION_NONE; tx_tspec->downgraded = false; ret = true; break; case TX_TSPEC_ACTION_DOWNGRADE: if (time_after(now, tx_tspec->time_slice_start + HZ)) { tx_tspec->action = TX_TSPEC_ACTION_NONE; ret = true; break; } /* downgrade next lower non-ACM AC */ for (non_acm_ac = ac + 1; non_acm_ac < IEEE80211_NUM_ACS; non_acm_ac++) if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac))) break; /* Usually the loop will result in using BK even if it * requires admission control, but such a configuration * makes no sense and we have to transmit somehow - the * AC selection does the same thing. * If we started out trying to downgrade from BK, then * the extra condition here might be needed. 
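			 * (sdata->wmm_acm is indexed by user priority as set in
			 * ieee80211_sta_wmm_params(): bits 1/2 cover BK, 0/3 BE,
			 * 4/5 VI and 6/7 VO, so BIT(7 - 2 * non_acm_ac) tests one
			 * representative UP per AC: 7 for VO, 5 for VI, 3 for BE
			 * and 1 for BK.)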
*/ if (non_acm_ac >= IEEE80211_NUM_ACS) non_acm_ac = IEEE80211_AC_BK; if (drv_conf_tx(local, &sdata->deflink, ac, &sdata->deflink.tx_conf[non_acm_ac])) link_err(&sdata->deflink, "failed to set TX queue parameters for queue %d\n", ac); tx_tspec->action = TX_TSPEC_ACTION_NONE; ret = true; wiphy_delayed_work_queue(local->hw.wiphy, &ifmgd->tx_tspec_wk, tx_tspec->time_slice_start + HZ - now + 1); break; case TX_TSPEC_ACTION_NONE: /* nothing now */ break; } } return ret; } void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) { if (__ieee80211_sta_handle_tspec_ac_params(sdata)) ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_QOS); } static void ieee80211_sta_handle_tspec_ac_params_wk(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata; sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.tx_tspec_wk.work); ieee80211_sta_handle_tspec_ac_params(sdata); } void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_tx_queue_params *params = link->tx_conf; u8 ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { mlme_dbg(sdata, "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", ac, params[ac].acm, params[ac].aifs, params[ac].cw_min, params[ac].cw_max, params[ac].txop, params[ac].uapsd, ifmgd->tx_tspec[ac].downgraded); if (!ifmgd->tx_tspec[ac].downgraded && drv_conf_tx(local, link, ac, &params[ac])) link_err(link, "failed to set TX queue parameters for AC %d\n", ac); } } /* MLME */ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, struct ieee80211_link_data *link, const u8 *wmm_param, size_t wmm_param_len, const struct ieee80211_mu_edca_param_set *mu_edca) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS]; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t left; int count, mu_edca_count, ac; const u8 *pos; u8 uapsd_queues = 0; if (!local->ops->conf_tx) return false; if (local->hw.queues < IEEE80211_NUM_ACS) return false; if (!wmm_param) return false; if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) return false; if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) uapsd_queues = ifmgd->uapsd_queues; count = wmm_param[6] & 0x0f; /* -1 is the initial value of ifmgd->mu_edca_last_param_set. * if mu_edca was preset before and now it disappeared tell * the driver about it. */ mu_edca_count = mu_edca ? 
mu_edca->mu_qos_info & 0x0f : -1; if (count == link->u.mgd.wmm_last_param_set && mu_edca_count == link->u.mgd.mu_edca_last_param_set) return false; link->u.mgd.wmm_last_param_set = count; link->u.mgd.mu_edca_last_param_set = mu_edca_count; pos = wmm_param + 8; left = wmm_param_len - 8; memset(&params, 0, sizeof(params)); sdata->wmm_acm = 0; for (; left >= 4; left -= 4, pos += 4) { int aci = (pos[0] >> 5) & 0x03; int acm = (pos[0] >> 4) & 0x01; bool uapsd = false; switch (aci) { case 1: /* AC_BK */ ac = IEEE80211_AC_BK; if (acm) sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) uapsd = true; params[ac].mu_edca = !!mu_edca; if (mu_edca) params[ac].mu_edca_param_rec = mu_edca->ac_bk; break; case 2: /* AC_VI */ ac = IEEE80211_AC_VI; if (acm) sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) uapsd = true; params[ac].mu_edca = !!mu_edca; if (mu_edca) params[ac].mu_edca_param_rec = mu_edca->ac_vi; break; case 3: /* AC_VO */ ac = IEEE80211_AC_VO; if (acm) sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd = true; params[ac].mu_edca = !!mu_edca; if (mu_edca) params[ac].mu_edca_param_rec = mu_edca->ac_vo; break; case 0: /* AC_BE */ default: ac = IEEE80211_AC_BE; if (acm) sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) uapsd = true; params[ac].mu_edca = !!mu_edca; if (mu_edca) params[ac].mu_edca_param_rec = mu_edca->ac_be; break; } params[ac].aifs = pos[0] & 0x0f; if (params[ac].aifs < 2) { link_info(link, "AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n", params[ac].aifs, aci); params[ac].aifs = 2; } params[ac].cw_max = ecw2cw((pos[1] & 0xf0) >> 4); params[ac].cw_min = ecw2cw(pos[1] & 0x0f); params[ac].txop = get_unaligned_le16(pos + 2); params[ac].acm = acm; params[ac].uapsd = uapsd; if (params[ac].cw_min == 0 || params[ac].cw_min > params[ac].cw_max) { link_info(link, "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", params[ac].cw_min, params[ac].cw_max, aci); return false; } ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac); } /* WMM specification requires all 4 ACIs. 
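	 * Since params was zeroed above, an AC the AP never described still
	 * has cw_min == 0; the loop below rejects such a parameter set and
	 * keeps the default configuration.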
*/ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { if (params[ac].cw_min == 0) { link_info(link, "AP has invalid WMM params (missing AC %d), using defaults\n", ac); return false; } } for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) link->tx_conf[ac] = params[ac]; ieee80211_mgd_set_link_qos_params(link); /* enable WMM or activate new settings */ link->conf->qos = true; return true; } static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) { lockdep_assert_wiphy(sdata->local->hw.wiphy); sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL; ieee80211_run_deferred_scan(sdata->local); } static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) { lockdep_assert_wiphy(sdata->local->hw.wiphy); __ieee80211_stop_poll(sdata); } static u64 ieee80211_handle_bss_capability(struct ieee80211_link_data *link, u16 capab, bool erp_valid, u8 erp) { struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_supported_band *sband; u64 changed = 0; bool use_protection; bool use_short_preamble; bool use_short_slot; sband = ieee80211_get_link_sband(link); if (!sband) return changed; if (erp_valid) { use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0; use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0; } else { use_protection = false; use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE); } use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); if (sband->band == NL80211_BAND_5GHZ || sband->band == NL80211_BAND_6GHZ) use_short_slot = true; if (use_protection != bss_conf->use_cts_prot) { bss_conf->use_cts_prot = use_protection; changed |= BSS_CHANGED_ERP_CTS_PROT; } if (use_short_preamble != bss_conf->use_short_preamble) { bss_conf->use_short_preamble = use_short_preamble; changed |= BSS_CHANGED_ERP_PREAMBLE; } if (use_short_slot != bss_conf->use_short_slot) { bss_conf->use_short_slot = use_short_slot; changed |= BSS_CHANGED_ERP_SLOT; } return changed; } static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link, struct cfg80211_bss *cbss) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_bss *bss = (void *)cbss->priv; u64 changed = BSS_CHANGED_QOS; /* not really used in MLO */ sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(beacon_loss_count * bss_conf->beacon_int)); changed |= ieee80211_handle_bss_capability(link, bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value); ieee80211_check_rate_mask(link); link->conf->bss = cbss; memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN); if (sdata->vif.p2p || sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) { const struct cfg80211_bss_ies *ies; rcu_read_lock(); ies = rcu_dereference(cbss->ies); if (ies) { int ret; ret = cfg80211_get_p2p_attr( ies->data, ies->len, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *) &bss_conf->p2p_noa_attr, sizeof(bss_conf->p2p_noa_attr)); if (ret >= 2) { link->u.mgd.p2p_noa_index = bss_conf->p2p_noa_attr.index; changed |= BSS_CHANGED_P2P_PS; } } rcu_read_unlock(); } if (link->u.mgd.have_beacon) { bss_conf->beacon_rate = bss->beacon_rate; changed |= BSS_CHANGED_BEACON_INFO; } else { bss_conf->beacon_rate = NULL; } /* Tell the driver to monitor connection quality (if supported) */ if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI && bss_conf->cqm_rssi_thold) changed |= BSS_CHANGED_CQM; return changed; } static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *assoc_data, u64 changed[IEEE80211_MLD_MAX_NUM_LINKS]) { struct 
ieee80211_local *local = sdata->local; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; u64 vif_changed = BSS_CHANGED_ASSOC; unsigned int link_id; lockdep_assert_wiphy(local->hw.wiphy); sdata->u.mgd.associated = true; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; struct ieee80211_link_data *link; if (!cbss || assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) continue; if (ieee80211_vif_is_mld(&sdata->vif) && !(ieee80211_vif_usable_links(&sdata->vif) & BIT(link_id))) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) return; changed[link_id] |= ieee80211_link_set_associated(link, cbss); } /* just to be sure */ ieee80211_stop_poll(sdata); ieee80211_led_assoc(local, 1); vif_cfg->assoc = 1; /* Enable ARP filtering */ if (vif_cfg->arp_addr_cnt) vif_changed |= BSS_CHANGED_ARP_FILTER; if (ieee80211_vif_is_mld(&sdata->vif)) { for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_link_data *link; struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; if (!cbss || !(BIT(link_id) & ieee80211_vif_usable_links(&sdata->vif)) || assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) return; ieee80211_link_info_change_notify(sdata, link, changed[link_id]); ieee80211_recalc_smps(sdata, link); } ieee80211_vif_cfg_change_notify(sdata, vif_changed); } else { ieee80211_bss_info_change_notify(sdata, vif_changed | changed[0]); } ieee80211_recalc_ps(local); /* leave this here to not change ordering in non-MLO cases */ if (!ieee80211_vif_is_mld(&sdata->vif)) ieee80211_recalc_smps(sdata, &sdata->deflink); ieee80211_recalc_ps_vif(sdata); netif_carrier_on(sdata->dev); } static void ieee80211_ml_reconf_reset(struct ieee80211_sub_if_data *sdata) { struct ieee80211_mgd_assoc_data *add_links_data = sdata->u.mgd.reconf.add_links_data; if (!ieee80211_vif_is_mld(&sdata->vif) || !(sdata->u.mgd.reconf.added_links | sdata->u.mgd.reconf.removed_links)) return; wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &sdata->u.mgd.reconf.wk); sdata->u.mgd.reconf.added_links = 0; sdata->u.mgd.reconf.removed_links = 0; sdata->u.mgd.reconf.dialog_token = 0; if (add_links_data) { struct cfg80211_mlo_reconf_done_data done_data = {}; u8 link_id; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) done_data.links[link_id].bss = add_links_data->link[link_id].bss; cfg80211_mlo_reconf_add_done(sdata->dev, &done_data); kfree(sdata->u.mgd.reconf.add_links_data); sdata->u.mgd.reconf.add_links_data = NULL; } } static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, u16 stype, u16 reason, bool tx, u8 *frame_buf) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; struct sta_info *ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); unsigned int link_id; u64 changed = 0; struct ieee80211_prep_tx_info info = { .subtype = stype, .was_assoc = true, .link_id = ffs(sdata->vif.active_links) - 1, }; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(!ap_sta)) return; if (WARN_ON_ONCE(tx && !frame_buf)) return; if (WARN_ON(!ifmgd->associated)) return; ieee80211_stop_poll(sdata); ifmgd->associated = false; /* other links will be destroyed */ sdata->deflink.conf->bss = NULL; sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; netif_carrier_off(sdata->dev); /* * if we want to get out of ps before disassoc (why?) 
we have
	 * to do it before sending disassoc, as otherwise the null-packet
	 * won't be valid.
	 */
	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	}
	local->ps_sdata = NULL;

	/* disable per-vif ps */
	ieee80211_recalc_ps_vif(sdata);

	/* make sure ongoing transmission finishes */
	synchronize_net();

	/*
	 * drop any frame before deauth/disassoc, this can be data or
	 * management frame. Since we are disconnecting, we should not
	 * insist on sending these frames which can take time and delay
	 * the disconnection and possibly the roaming.
	 */
	if (tx)
		ieee80211_flush_queues(local, sdata, true);

	/* deauthenticate/disassociate now */
	if (tx || frame_buf) {
		drv_mgd_prepare_tx(sdata->local, sdata, &info);

		ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr,
					       sdata->vif.cfg.ap_addr, stype,
					       reason, tx, frame_buf);
	}

	/* flush out frame - make sure the deauth was actually sent */
	if (tx)
		ieee80211_flush_queues(local, sdata, false);

	drv_mgd_complete_tx(sdata->local, sdata, &info);

	/* clear AP addr only after building the needed mgmt frames */
	eth_zero_addr(sdata->deflink.u.mgd.bssid);
	eth_zero_addr(sdata->vif.cfg.ap_addr);
	sdata->vif.cfg.ssid_len = 0;

	/* Remove TDLS peers */
	__sta_info_flush(sdata, false, -1, ap_sta);

	if (sdata->vif.driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC) {
		/* Only move the AP state */
		sta_info_move_state(ap_sta, IEEE80211_STA_NONE);
	} else {
		/* Remove AP peer */
		sta_info_flush(sdata, -1);
	}

	/* finally reset all BSS / config parameters */
	if (!ieee80211_vif_is_mld(&sdata->vif))
		changed |= ieee80211_reset_erp_info(sdata);

	ieee80211_led_assoc(local, 0);
	changed |= BSS_CHANGED_ASSOC;
	sdata->vif.cfg.assoc = false;

	sdata->deflink.u.mgd.p2p_noa_index = -1;
	memset(&sdata->vif.bss_conf.p2p_noa_attr, 0,
	       sizeof(sdata->vif.bss_conf.p2p_noa_attr));

	/* on the next assoc, re-program HT/VHT parameters */
	memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
	memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
	memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));

	/*
	 * reset MU-MIMO ownership and group data in default link,
	 * if used, other links are destroyed
	 */
	memset(sdata->vif.bss_conf.mu_group.membership, 0,
	       sizeof(sdata->vif.bss_conf.mu_group.membership));
	memset(sdata->vif.bss_conf.mu_group.position, 0,
	       sizeof(sdata->vif.bss_conf.mu_group.position));
	if (!ieee80211_vif_is_mld(&sdata->vif))
		changed |= BSS_CHANGED_MU_GROUPS;
	sdata->vif.bss_conf.mu_mimo_owner = false;

	sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;

	del_timer_sync(&local->dynamic_ps_timer);
	wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);

	/* Disable ARP filtering */
	if (sdata->vif.cfg.arp_addr_cnt)
		changed |= BSS_CHANGED_ARP_FILTER;

	sdata->vif.bss_conf.qos = false;
	if (!ieee80211_vif_is_mld(&sdata->vif)) {
		changed |= BSS_CHANGED_QOS;
		/* The BSSID (not really interesting) and HT changed */
		changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
		ieee80211_bss_info_change_notify(sdata, changed);
	} else {
		ieee80211_vif_cfg_change_notify(sdata, changed);
	}

	if (sdata->vif.driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC) {
		/*
		 * After notifying the driver about the disassoc,
		 * remove the ap sta.
*/ sta_info_flush(sdata, -1); } /* disassociated - set to defaults now */ ieee80211_set_wmm_default(&sdata->deflink, false, false); del_timer_sync(&sdata->u.mgd.conn_mon_timer); del_timer_sync(&sdata->u.mgd.bcn_mon_timer); del_timer_sync(&sdata->u.mgd.timer); sdata->vif.bss_conf.dtim_period = 0; sdata->vif.bss_conf.beacon_rate = NULL; sdata->deflink.u.mgd.have_beacon = false; sdata->deflink.u.mgd.tracking_signal_avg = false; sdata->deflink.u.mgd.disable_wmm_tracking = false; ifmgd->flags = 0; for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; ieee80211_link_release_channel(link); } sdata->vif.bss_conf.csa_active = false; sdata->deflink.u.mgd.csa.blocked_tx = false; sdata->deflink.u.mgd.csa.waiting_bcn = false; sdata->deflink.u.mgd.csa.ignored_same_chan = false; ieee80211_vif_unblock_queues_csa(sdata); /* existing TX TSPEC sessions no longer exist */ memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec)); wiphy_delayed_work_cancel(local->hw.wiphy, &ifmgd->tx_tspec_wk); sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP; sdata->vif.bss_conf.pwr_reduction = 0; ieee80211_clear_tpe(&sdata->vif.bss_conf.tpe); sdata->vif.cfg.eml_cap = 0; sdata->vif.cfg.eml_med_sync_delay = 0; sdata->vif.cfg.mld_capa_op = 0; memset(&sdata->u.mgd.ttlm_info, 0, sizeof(sdata->u.mgd.ttlm_info)); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm)); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->neg_ttlm_timeout_work); sdata->u.mgd.removed_links = 0; wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &sdata->u.mgd.ml_reconf_work); wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->teardown_ttlm_work); ieee80211_vif_set_links(sdata, 0, 0); ifmgd->mcast_seq_last = IEEE80211_SN_MODULO; /* if disconnection happens in the middle of the ML reconfiguration * flow, cfg80211 must called to release the BSS references obtained * when the flow started. */ ieee80211_ml_reconf_reset(sdata); } static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; lockdep_assert_wiphy(local->hw.wiphy); if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)) return; __ieee80211_stop_poll(sdata); ieee80211_recalc_ps(local); if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) return; /* * We've received a probe response, but are not sure whether * we have or will be receiving any beacons or data, so let's * schedule the timers again, just in case. 
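	 * (the beacon monitor is re-armed via ieee80211_sta_reset_beacon_monitor()
	 * below, and the connection monitor gets a fresh
	 * IEEE80211_CONNECTION_IDLE_TIME period)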
*/ ieee80211_sta_reset_beacon_monitor(sdata); mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); } static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr, u16 tx_time) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 tid; int ac; struct ieee80211_sta_tx_tspec *tx_tspec; unsigned long now = jiffies; if (!ieee80211_is_data_qos(hdr->frame_control)) return; tid = ieee80211_get_tid(hdr); ac = ieee80211_ac_from_tid(tid); tx_tspec = &ifmgd->tx_tspec[ac]; if (likely(!tx_tspec->admitted_time)) return; if (time_after(now, tx_tspec->time_slice_start + HZ)) { tx_tspec->consumed_tx_time = 0; tx_tspec->time_slice_start = now; if (tx_tspec->downgraded) { tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &ifmgd->tx_tspec_wk, 0); } } if (tx_tspec->downgraded) return; tx_tspec->consumed_tx_time += tx_time; if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) { tx_tspec->downgraded = true; tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &ifmgd->tx_tspec_wk, 0); } } void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr, bool ack, u16 tx_time) { ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time); if (!ieee80211_is_any_nullfunc(hdr->frame_control) || !sdata->u.mgd.probe_send_count) return; if (ack) sdata->u.mgd.probe_send_count = 0; else sdata->u.mgd.nullfunc_failed = true; wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, struct ieee80211_channel *channel) { struct sk_buff *skb; skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel, ssid, ssid_len, NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); if (skb) ieee80211_tx_skb(sdata, skb); } static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 *dst = sdata->vif.cfg.ap_addr; u8 unicast_limit = max(1, max_probe_tries - 3); struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; /* * Try sending broadcast probe requests for the last three * probe requests after the first ones failed since some * buggy APs only support broadcast probe requests. */ if (ifmgd->probe_send_count >= unicast_limit) dst = NULL; /* * When the hardware reports an accurate Tx ACK status, it's * better to send a nullfunc frame instead of a probe request, * as it will kick us off the AP quickly if we aren't associated * anymore. The timeout will be reset if the frame is ACKed by * the AP. 
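	 * (ieee80211_sta_tx_notify() above resets probe_send_count when the
	 * nullfunc frame is ACKed, sets nullfunc_failed otherwise, and queues
	 * the MLME work in either case)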
*/ ifmgd->probe_send_count++; if (dst) { sta = sta_info_get(sdata, dst); if (!WARN_ON(!sta)) ieee80211_check_fast_rx(sta); } if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { ifmgd->nullfunc_failed = false; ieee80211_send_nullfunc(sdata->local, sdata, false); } else { ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst, sdata->vif.cfg.ssid, sdata->vif.cfg.ssid_len, sdata->deflink.conf->bss->channel); } ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); run_again(sdata, ifmgd->probe_timeout); } static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, bool beacon) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool already = false; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif))) return; if (!ieee80211_sdata_running(sdata)) return; if (!ifmgd->associated) return; if (sdata->local->tmp_channel || sdata->local->scanning) return; if (sdata->local->suspending) { /* reschedule after resume */ ieee80211_reset_ap_probe(sdata); return; } if (beacon) { mlme_dbg_ratelimited(sdata, "detected beacon loss from AP (missed %d beacons) - probing\n", beacon_loss_count); ieee80211_cqm_beacon_loss_notify(&sdata->vif, GFP_KERNEL); } /* * The driver/our work has already reported this event or the * connection monitoring has kicked in and we have already sent * a probe request. Or maybe the AP died and the driver keeps * reporting until we disassociate... * * In either case we have to ignore the current call to this * function (except for setting the correct probe reason bit) * because otherwise we would reset the timer every time and * never check whether we received a probe response! */ if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) already = true; ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; if (already) return; ieee80211_recalc_ps(sdata->local); ifmgd->probe_send_count = 0; ieee80211_mgd_probe_ap_send(sdata); } struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_bss *cbss; struct sk_buff *skb; const struct element *ssid; int ssid_len; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || ieee80211_vif_is_mld(&sdata->vif))) return NULL; if (ifmgd->associated) cbss = sdata->deflink.conf->bss; else if (ifmgd->auth_data) cbss = ifmgd->auth_data->bss; else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss) cbss = ifmgd->assoc_data->link[0].bss; else return NULL; rcu_read_lock(); ssid = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID); if (WARN_ONCE(!ssid || ssid->datalen > IEEE80211_MAX_SSID_LEN, "invalid SSID element (len=%d)", ssid ? ssid->datalen : -1)) ssid_len = 0; else ssid_len = ssid->datalen; skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, (u32) -1, cbss->channel, ssid->data, ssid_len, NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_ap_probereq_get); static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata, const u8 *buf, size_t len, bool tx, u16 reason, bool reconnect) { struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = tx ? 
DEAUTH_TX_EVENT : DEAUTH_RX_EVENT, .u.mlme.reason = reason, }; if (tx) cfg80211_tx_mlme_mgmt(sdata->dev, buf, len, reconnect); else cfg80211_rx_mlme_mgmt(sdata->dev, buf, len); drv_event_callback(sdata->local, sdata, &event); } static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; bool tx = false; lockdep_assert_wiphy(local->hw.wiphy); if (!ifmgd->associated) return; /* only transmit if we have a link that makes that worthwhile */ for (unsigned int link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link; if (!ieee80211_vif_link_active(&sdata->vif, link_id)) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON_ONCE(!link)) continue; if (link->u.mgd.csa.blocked_tx) continue; tx = true; break; } if (!ifmgd->driver_disconnect) { unsigned int link_id; /* * AP is probably out of range (or not reachable for another * reason) so remove the bss structs for that AP. In the case * of multi-link, it's not clear that all of them really are * out of range, but if they weren't the driver likely would * have switched to just have a single link active? */ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss); link->conf->bss = NULL; } } ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, ifmgd->driver_disconnect ? WLAN_REASON_DEAUTH_LEAVING : WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, tx, frame_buf); /* the other links will be destroyed */ sdata->vif.bss_conf.csa_active = false; sdata->deflink.u.mgd.csa.waiting_bcn = false; sdata->deflink.u.mgd.csa.blocked_tx = false; ieee80211_vif_unblock_queues_csa(sdata); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, ifmgd->reconnect); ifmgd->reconnect = false; } static void ieee80211_beacon_connection_loss_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.beacon_connection_loss_work); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (ifmgd->connection_loss) { sdata_info(sdata, "Connection to AP %pM lost\n", sdata->vif.cfg.ap_addr); __ieee80211_disconnect(sdata); ifmgd->connection_loss = false; } else if (ifmgd->driver_disconnect) { sdata_info(sdata, "Driver requested disconnection from AP %pM\n", sdata->vif.cfg.ap_addr); __ieee80211_disconnect(sdata); ifmgd->driver_disconnect = false; } else { if (ifmgd->associated) sdata->deflink.u.mgd.beacon_loss_count++; ieee80211_mgd_probe_ap(sdata, true); } } static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.csa_connection_drop_work); __ieee80211_disconnect(sdata); } void ieee80211_beacon_loss(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_hw *hw = &sdata->local->hw; trace_api_beacon_loss(sdata); sdata->u.mgd.connection_loss = false; wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); } EXPORT_SYMBOL(ieee80211_beacon_loss); void ieee80211_connection_loss(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; struct ieee80211_hw *hw; 
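	/* when built with KUnit support, allow tests to redirect this call to a stub */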
	KUNIT_STATIC_STUB_REDIRECT(ieee80211_connection_loss, vif);

	sdata = vif_to_sdata(vif);
	hw = &sdata->local->hw;

	trace_api_connection_loss(sdata);

	sdata->u.mgd.connection_loss = true;
	wiphy_work_queue(hw->wiphy,
			 &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_connection_loss);

void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_hw *hw = &sdata->local->hw;

	trace_api_disconnect(sdata, reconnect);

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
		return;

	sdata->u.mgd.driver_disconnect = true;
	sdata->u.mgd.reconnect = reconnect;
	wiphy_work_queue(hw->wiphy,
			 &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_disconnect);

static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
					bool assoc)
{
	struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	sdata->u.mgd.auth_data = NULL;

	if (!assoc) {
		/*
		 * we are not authenticated yet, the only timer that could be
		 * running is the timeout for the authentication response
		 * which is not relevant anymore.
		 */
		del_timer_sync(&sdata->u.mgd.timer);
		sta_info_destroy_addr(sdata, auth_data->ap_addr);

		/* other links are destroyed */
		eth_zero_addr(sdata->deflink.u.mgd.bssid);
		ieee80211_link_info_change_notify(sdata, &sdata->deflink,
						  BSS_CHANGED_BSSID);
		sdata->u.mgd.flags = 0;
		ieee80211_link_release_channel(&sdata->deflink);
		ieee80211_vif_set_links(sdata, 0, 0);
	}

	cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
	kfree(auth_data);
}

enum assoc_status {
	ASSOC_SUCCESS,
	ASSOC_REJECTED,
	ASSOC_TIMEOUT,
	ASSOC_ABANDON,
};

static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
					 enum assoc_status status)
{
	struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	sdata->u.mgd.assoc_data = NULL;

	if (status != ASSOC_SUCCESS) {
		/*
		 * we are not associated yet, the only timer that could be
		 * running is the timeout for the association response
		 * which is not relevant anymore.
*/ del_timer_sync(&sdata->u.mgd.timer); sta_info_destroy_addr(sdata, assoc_data->ap_addr); eth_zero_addr(sdata->deflink.u.mgd.bssid); ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_BSSID); sdata->u.mgd.flags = 0; sdata->vif.bss_conf.mu_mimo_owner = false; if (status != ASSOC_REJECTED) { struct cfg80211_assoc_failure data = { .timeout = status == ASSOC_TIMEOUT, }; int i; BUILD_BUG_ON(ARRAY_SIZE(data.bss) != ARRAY_SIZE(assoc_data->link)); for (i = 0; i < ARRAY_SIZE(data.bss); i++) data.bss[i] = assoc_data->link[i].bss; if (ieee80211_vif_is_mld(&sdata->vif)) data.ap_mld_addr = assoc_data->ap_addr; cfg80211_assoc_failure(sdata->dev, &data); } ieee80211_link_release_channel(&sdata->deflink); ieee80211_vif_set_links(sdata, 0, 0); } kfree(assoc_data); } static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; const struct element *challenge; u8 *pos; u32 tx_flags = 0; struct ieee80211_prep_tx_info info = { .subtype = IEEE80211_STYPE_AUTH, .link_id = auth_data->link_id, }; pos = mgmt->u.auth.variable; challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos, len - (pos - (u8 *)mgmt)); if (!challenge) return; auth_data->expected_transaction = 4; drv_mgd_prepare_tx(sdata->local, sdata, &info); if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, (void *)challenge, challenge->datalen + sizeof(*challenge), auth_data->ap_addr, auth_data->ap_addr, auth_data->key, auth_data->key_len, auth_data->key_idx, tx_flags); } static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *ap_addr = ifmgd->auth_data->ap_addr; struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); sdata_info(sdata, "authenticated\n"); ifmgd->auth_data->done = true; ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; ifmgd->auth_data->timeout_started = true; run_again(sdata, ifmgd->auth_data->timeout); /* move station state to auth */ sta = sta_info_get(sdata, ap_addr); if (!sta) { WARN_ONCE(1, "%s: STA %pM not found", sdata->name, ap_addr); return false; } if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { sdata_info(sdata, "failed moving %pM to auth\n", ap_addr); return false; } return true; } static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 auth_alg, auth_transaction, status_code; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = AUTH_EVENT, }; struct ieee80211_prep_tx_info info = { .subtype = IEEE80211_STYPE_AUTH, }; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (len < 24 + 6) return; if (!ifmgd->auth_data || ifmgd->auth_data->done) return; if (!ether_addr_equal(ifmgd->auth_data->ap_addr, mgmt->bssid)) return; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); status_code = le16_to_cpu(mgmt->u.auth.status_code); if (auth_alg != ifmgd->auth_data->algorithm || (auth_alg != WLAN_AUTH_SAE && auth_transaction != ifmgd->auth_data->expected_transaction) || (auth_alg == WLAN_AUTH_SAE && (auth_transaction < ifmgd->auth_data->expected_transaction || auth_transaction > 2))) { sdata_info(sdata, "%pM unexpected 
authentication state: alg %d (expected %d) transact %d (expected %d)\n", mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, ifmgd->auth_data->expected_transaction); goto notify_driver; } if (status_code != WLAN_STATUS_SUCCESS) { cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); if (auth_alg == WLAN_AUTH_SAE && (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED || (auth_transaction == 1 && (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT || status_code == WLAN_STATUS_SAE_PK)))) { /* waiting for userspace now */ ifmgd->auth_data->waiting = true; ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY; ifmgd->auth_data->timeout_started = true; run_again(sdata, ifmgd->auth_data->timeout); goto notify_driver; } sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); event.u.mlme.status = MLME_DENIED; event.u.mlme.reason = status_code; drv_event_callback(sdata->local, sdata, &event); goto notify_driver; } switch (ifmgd->auth_data->algorithm) { case WLAN_AUTH_OPEN: case WLAN_AUTH_LEAP: case WLAN_AUTH_FT: case WLAN_AUTH_SAE: case WLAN_AUTH_FILS_SK: case WLAN_AUTH_FILS_SK_PFS: case WLAN_AUTH_FILS_PK: break; case WLAN_AUTH_SHARED_KEY: if (ifmgd->auth_data->expected_transaction != 4) { ieee80211_auth_challenge(sdata, mgmt, len); /* need another frame */ return; } break; default: WARN_ONCE(1, "invalid auth alg %d", ifmgd->auth_data->algorithm); goto notify_driver; } event.u.mlme.status = MLME_SUCCESS; info.success = 1; drv_event_callback(sdata->local, sdata, &event); if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE || (auth_transaction == 2 && ifmgd->auth_data->expected_transaction == 2)) { if (!ieee80211_mark_sta_auth(sdata)) return; /* ignore frame -- wait for timeout */ } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && auth_transaction == 2) { sdata_info(sdata, "SAE peer confirmed\n"); ifmgd->auth_data->peer_confirmed = true; } cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); notify_driver: drv_mgd_complete_tx(sdata->local, sdata, &info); } #define case_WLAN(type) \ case WLAN_REASON_##type: return #type const char *ieee80211_get_reason_code_string(u16 reason_code) { switch (reason_code) { case_WLAN(UNSPECIFIED); case_WLAN(PREV_AUTH_NOT_VALID); case_WLAN(DEAUTH_LEAVING); case_WLAN(DISASSOC_DUE_TO_INACTIVITY); case_WLAN(DISASSOC_AP_BUSY); case_WLAN(CLASS2_FRAME_FROM_NONAUTH_STA); case_WLAN(CLASS3_FRAME_FROM_NONASSOC_STA); case_WLAN(DISASSOC_STA_HAS_LEFT); case_WLAN(STA_REQ_ASSOC_WITHOUT_AUTH); case_WLAN(DISASSOC_BAD_POWER); case_WLAN(DISASSOC_BAD_SUPP_CHAN); case_WLAN(INVALID_IE); case_WLAN(MIC_FAILURE); case_WLAN(4WAY_HANDSHAKE_TIMEOUT); case_WLAN(GROUP_KEY_HANDSHAKE_TIMEOUT); case_WLAN(IE_DIFFERENT); case_WLAN(INVALID_GROUP_CIPHER); case_WLAN(INVALID_PAIRWISE_CIPHER); case_WLAN(INVALID_AKMP); case_WLAN(UNSUPP_RSN_VERSION); case_WLAN(INVALID_RSN_IE_CAP); case_WLAN(IEEE8021X_FAILED); case_WLAN(CIPHER_SUITE_REJECTED); case_WLAN(DISASSOC_UNSPECIFIED_QOS); case_WLAN(DISASSOC_QAP_NO_BANDWIDTH); case_WLAN(DISASSOC_LOW_ACK); case_WLAN(DISASSOC_QAP_EXCEED_TXOP); case_WLAN(QSTA_LEAVE_QBSS); case_WLAN(QSTA_NOT_USE); case_WLAN(QSTA_REQUIRE_SETUP); case_WLAN(QSTA_TIMEOUT); case_WLAN(QSTA_CIPHER_NOT_SUPP); case_WLAN(MESH_PEER_CANCELED); case_WLAN(MESH_MAX_PEERS); case_WLAN(MESH_CONFIG); case_WLAN(MESH_CLOSE); case_WLAN(MESH_MAX_RETRIES); case_WLAN(MESH_CONFIRM_TIMEOUT); case_WLAN(MESH_INVALID_GTK); case_WLAN(MESH_INCONSISTENT_PARAM); case_WLAN(MESH_INVALID_SECURITY); case_WLAN(MESH_PATH_ERROR); 
case_WLAN(MESH_PATH_NOFORWARD); case_WLAN(MESH_PATH_DEST_UNREACHABLE); case_WLAN(MAC_EXISTS_IN_MBSS); case_WLAN(MESH_CHAN_REGULATORY); case_WLAN(MESH_CHAN); default: return "<unknown>"; } } static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); lockdep_assert_wiphy(sdata->local->hw.wiphy); if (len < 24 + 2) return; if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); return; } if (ifmgd->associated && ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n", sdata->vif.cfg.ap_addr, reason_code, ieee80211_get_reason_code_string(reason_code)); ieee80211_set_disassoc(sdata, 0, 0, false, NULL); ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code, false); return; } if (ifmgd->assoc_data && ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->ap_addr)) { sdata_info(sdata, "deauthenticated from %pM while associating (Reason: %u=%s)\n", ifmgd->assoc_data->ap_addr, reason_code, ieee80211_get_reason_code_string(reason_code)); ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; } } static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (len < 24 + 2) return; if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) return; reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); return; } sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", sdata->vif.cfg.ap_addr, reason_code, ieee80211_get_reason_code_string(reason_code)); ieee80211_set_disassoc(sdata, 0, 0, false, NULL); ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code, false); } static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct link_sta_info *link_sta, const struct ieee802_11_elems *elems) { const struct ieee80211_sta_he_cap *own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (elems->ext_capab_len < 10) return false; if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT)) return false; return link_sta->pub->he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES && own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ); } static u64 ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_link_data *link, struct link_sta_info *link_sta, struct ieee802_11_elems *elems) { bool twt = ieee80211_twt_req_supported(sdata, sband, link_sta, elems); if (link->conf->twt_requester != twt) { link->conf->twt_requester = twt; return BSS_CHANGED_TWT; } return 0; } static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *bss_conf, struct ieee80211_supported_band *sband, struct link_sta_info *link_sta) { const struct ieee80211_sta_he_cap *own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); return bss_conf->he_support && (link_sta->pub->he_cap.he_cap_elem.mac_cap_info[2] & 
IEEE80211_HE_MAC_CAP2_BCAST_TWT) && own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BCAST_TWT); } static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, struct link_sta_info *link_sta, struct cfg80211_bss *cbss, struct ieee80211_mgmt *mgmt, const u8 *elem_start, unsigned int elem_len, u64 *changed) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data ?: sdata->u.mgd.reconf.add_links_data; struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_local *local = sdata->local; unsigned int link_id = link->link_id; struct ieee80211_elems_parse_params parse_params = { .mode = link->u.mgd.conn.mode, .start = elem_start, .len = elem_len, .link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id, .from_ap = true, }; bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ; const struct cfg80211_bss_ies *bss_ies = NULL; struct ieee80211_supported_band *sband; struct ieee802_11_elems *elems; const __le16 prof_bss_param_ch_present = cpu_to_le16(IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT); u16 capab_info; bool ret; elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return false; if (link_id == assoc_data->assoc_link_id) { capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); /* * we should not get to this flow unless the association was * successful, so set the status directly to success */ assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS; if (elems->ml_basic) { int bss_param_ch_cnt = ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic); if (bss_param_ch_cnt < 0) { ret = false; goto out; } bss_conf->bss_param_ch_cnt = bss_param_ch_cnt; bss_conf->bss_param_ch_cnt_link_id = link_id; } } else if (elems->parse_error & IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC || !elems->prof || !(elems->prof->control & prof_bss_param_ch_present)) { ret = false; goto out; } else { const u8 *ptr = elems->prof->variable + elems->prof->sta_info_len - 1; int bss_param_ch_cnt; /* * During parsing, we validated that these fields exist, * otherwise elems->prof would have been set to NULL. */ capab_info = get_unaligned_le16(ptr); assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2); bss_param_ch_cnt = ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(elems->prof); bss_conf->bss_param_ch_cnt = bss_param_ch_cnt; bss_conf->bss_param_ch_cnt_link_id = link_id; if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) { link_info(link, "association response status code=%u\n", assoc_data->link[link_id].status); ret = true; goto out; } } if (!is_s1g && !elems->supp_rates) { sdata_info(sdata, "no SuppRates element in AssocResp\n"); ret = false; goto out; } link->u.mgd.tdls_chan_switch_prohibited = elems->ext_capab && elems->ext_capab_len >= 5 && (elems->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED); /* * Some APs are erroneously not including some information in their * (re)association response frames. Try to recover by using the data * from the beacon or probe response. This seems to afflict mobile * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. 
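	 * The missing information is recovered below from the BSS entry's
	 * beacon/probe response IEs kept by cfg80211 (cbss->ies).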
*/ if (!is_6ghz && ((assoc_data->wmm && !elems->wmm_param) || (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT && (!elems->ht_cap_elem || !elems->ht_operation)) || (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT && (!elems->vht_cap_elem || !elems->vht_operation)))) { const struct cfg80211_bss_ies *ies; struct ieee802_11_elems *bss_elems; rcu_read_lock(); ies = rcu_dereference(cbss->ies); if (ies) bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC); rcu_read_unlock(); if (!bss_ies) { ret = false; goto out; } parse_params.start = bss_ies->data; parse_params.len = bss_ies->len; parse_params.bss = cbss; bss_elems = ieee802_11_parse_elems_full(&parse_params); if (!bss_elems) { ret = false; goto out; } if (assoc_data->wmm && !elems->wmm_param && bss_elems->wmm_param) { elems->wmm_param = bss_elems->wmm_param; sdata_info(sdata, "AP bug: WMM param missing from AssocResp\n"); } /* * Also check if we requested HT/VHT, otherwise the AP doesn't * have to include the IEs in the (re)association response. */ if (!elems->ht_cap_elem && bss_elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) { elems->ht_cap_elem = bss_elems->ht_cap_elem; sdata_info(sdata, "AP bug: HT capability missing from AssocResp\n"); } if (!elems->ht_operation && bss_elems->ht_operation && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) { elems->ht_operation = bss_elems->ht_operation; sdata_info(sdata, "AP bug: HT operation missing from AssocResp\n"); } if (is_5ghz) { if (!elems->vht_cap_elem && bss_elems->vht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { elems->vht_cap_elem = bss_elems->vht_cap_elem; sdata_info(sdata, "AP bug: VHT capa missing from AssocResp\n"); } if (!elems->vht_operation && bss_elems->vht_operation && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { elems->vht_operation = bss_elems->vht_operation; sdata_info(sdata, "AP bug: VHT operation missing from AssocResp\n"); } } kfree(bss_elems); } /* * We previously checked these in the beacon/probe response, so * they should be present here. This is just a safety net. * Note that the ieee80211_config_bw() below would also check * for this (and more), but this has better error reporting. */ if (!is_6ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT && (!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) { sdata_info(sdata, "HT AP is missing WMM params or HT capability/operation\n"); ret = false; goto out; } if (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT && (!elems->vht_cap_elem || !elems->vht_operation)) { sdata_info(sdata, "VHT AP is missing VHT capability/operation\n"); ret = false; goto out; } /* check/update if AP changed anything in assoc response vs. scan */ if (ieee80211_config_bw(link, elems, link_id == assoc_data->assoc_link_id, changed, "assoc response")) { ret = false; goto out; } if (WARN_ON(!link->conf->chanreq.oper.chan)) { ret = false; goto out; } sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band]; /* Set up internal HT/VHT capabilities */ if (elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, link_sta); if (elems->vht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { const struct ieee80211_vht_cap *bss_vht_cap = NULL; const struct cfg80211_bss_ies *ies; /* * Cisco AP module 9115 with FW 17.3 has a bug and sends a * too large maximum MPDU length in the association response * (indicating 12k) that it cannot actually process ... 
* Work around that. */ rcu_read_lock(); ies = rcu_dereference(cbss->ies); if (ies) { const struct element *elem; elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY, ies->data, ies->len); if (elem && elem->datalen >= sizeof(*bss_vht_cap)) bss_vht_cap = (const void *)elem->data; } ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems->vht_cap_elem, bss_vht_cap, link_sta); rcu_read_unlock(); } if (elems->he_operation && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE && elems->he_cap) { ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap, elems->he_cap_len, elems->he_6ghz_capa, link_sta); bss_conf->he_support = link_sta->pub->he_cap.has_he; if (elems->rsnx && elems->rsnx_len && (elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) && wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT)) bss_conf->twt_protected = true; else bss_conf->twt_protected = false; *changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems); if (elems->eht_operation && elems->eht_cap && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_EHT) { ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband, elems->he_cap, elems->he_cap_len, elems->eht_cap, elems->eht_cap_len, link_sta); bss_conf->eht_support = link_sta->pub->eht_cap.has_eht; } else { bss_conf->eht_support = false; } } else { bss_conf->he_support = false; bss_conf->twt_requester = false; bss_conf->twt_protected = false; bss_conf->eht_support = false; } bss_conf->twt_broadcast = ieee80211_twt_bcast_support(sdata, bss_conf, sband, link_sta); if (bss_conf->he_support) { bss_conf->he_bss_color.color = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_BSS_COLOR_MASK); bss_conf->he_bss_color.partial = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR); bss_conf->he_bss_color.enabled = !le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); if (bss_conf->he_bss_color.enabled) *changed |= BSS_CHANGED_HE_BSS_COLOR; bss_conf->htc_trig_based_pkt_ext = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK); bss_conf->frame_time_rts_th = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); bss_conf->uora_exists = !!elems->uora_element; if (elems->uora_element) bss_conf->uora_ocw_range = elems->uora_element[0]; ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems->he_operation); ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems->he_spr); /* TODO: OPEN: what happens if BSS color disable is set? */ } if (cbss->transmitted_bss) { bss_conf->nontransmitted = true; ether_addr_copy(bss_conf->transmitter_bssid, cbss->transmitted_bss->bssid); bss_conf->bssid_indicator = cbss->max_bssid_indicator; bss_conf->bssid_index = cbss->bssid_index; } /* * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data * in their association response, so ignore that data for our own * configuration. If it changed since the last beacon, we'll get the * next beacon and update then. */ /* * If an operating mode notification IE is present, override the * NSS calculation (that would be done in rate_control_rate_init()) * and use the # of streams from that element. 
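	 * The Rx NSS subfield encodes the stream count minus one (hence the
	 * nss += 1 below); values with the Rx NSS Type bit set describe
	 * beamforming-related NSS rather than an operating NSS and are
	 * therefore ignored here.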
*/ if (elems->opmode_notif && !(*elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) { u8 nss; nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; nss += 1; link_sta->pub->rx_nss = nss; } /* * Always handle WMM once after association regardless * of the first value the AP uses. Setting -1 here has * that effect because the AP values is an unsigned * 4-bit value. */ link->u.mgd.wmm_last_param_set = -1; link->u.mgd.mu_edca_last_param_set = -1; if (link->u.mgd.disable_wmm_tracking) { ieee80211_set_wmm_default(link, false, false); } else if (!ieee80211_sta_wmm_params(local, link, elems->wmm_param, elems->wmm_param_len, elems->mu_edca_param_set)) { /* still enable QoS since we might have HT/VHT */ ieee80211_set_wmm_default(link, false, true); /* disable WMM tracking in this case to disable * tracking WMM parameter changes in the beacon if * the parameters weren't actually valid. Doing so * avoids changing parameters very strangely when * the AP is going back and forth between valid and * invalid parameters. */ link->u.mgd.disable_wmm_tracking = true; } if (elems->max_idle_period_ie) { bss_conf->max_idle_period = le16_to_cpu(elems->max_idle_period_ie->max_idle_period); bss_conf->protected_keep_alive = !!(elems->max_idle_period_ie->idle_options & WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE); *changed |= BSS_CHANGED_KEEP_ALIVE; } else { bss_conf->max_idle_period = 0; bss_conf->protected_keep_alive = false; } /* set assoc capability (AID was already set earlier), * ieee80211_set_associated() will tell the driver */ bss_conf->assoc_capability = capab_info; ret = true; out: kfree(elems); kfree(bss_ies); return ret; } static int ieee80211_mgd_setup_link_sta(struct ieee80211_link_data *link, struct sta_info *sta, struct link_sta_info *link_sta, struct cfg80211_bss *cbss) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_bss *bss = (void *)cbss->priv; u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit = false; int min_rate = INT_MAX, min_rate_index = -1; struct ieee80211_supported_band *sband; memcpy(link_sta->addr, cbss->bssid, ETH_ALEN); memcpy(link_sta->pub->addr, cbss->bssid, ETH_ALEN); /* TODO: S1G Basic Rate Set is expressed elsewhere */ if (cbss->channel->band == NL80211_BAND_S1GHZ) { ieee80211_s1g_sta_rate_init(sta); return 0; } sband = local->hw.wiphy->bands[cbss->channel->band]; ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, NULL, 0, &rates, &basic_rates, NULL, &have_higher_than_11mbit, &min_rate, &min_rate_index); /* * This used to be a workaround for basic rates missing * in the association response frame. Now that we no * longer use the basic rates from there, it probably * doesn't happen any more, but keep the workaround so * in case some *other* APs are buggy in different ways * we can connect -- with a warning. * Allow this workaround only in case the AP provided at least * one rate. */ if (min_rate_index < 0) { link_info(link, "No legacy rates in association response\n"); return -EINVAL; } else if (!basic_rates) { link_info(link, "No basic rates, using min rate instead\n"); basic_rates = BIT(min_rate_index); } if (rates) link_sta->pub->supp_rates[cbss->channel->band] = rates; else link_info(link, "No rates found, keeping mandatory only\n"); link->conf->basic_rates = basic_rates; /* cf. 
IEEE 802.11 9.2.12 */ link->operating_11g_mode = sband->band == NL80211_BAND_2GHZ && have_higher_than_11mbit; return 0; } static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link, struct cfg80211_bss *cbss) { struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp; const struct element *ht_cap_elem, *vht_cap_elem; const struct cfg80211_bss_ies *ies; const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; const struct element *he_cap_elem; u16 mcs_80_map, mcs_160_map; int i, mcs_nss_size; bool support_160; u8 chains = 1; if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HT) return chains; ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY); if (ht_cap_elem && ht_cap_elem->datalen >= sizeof(*ht_cap)) { ht_cap = (void *)ht_cap_elem->data; chains = ieee80211_mcs_to_chains(&ht_cap->mcs); /* * TODO: use "Tx Maximum Number Spatial Streams Supported" and * "Tx Unequal Modulation Supported" fields. */ } if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_VHT) return chains; vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY); if (vht_cap_elem && vht_cap_elem->datalen >= sizeof(*vht_cap)) { u8 nss; u16 tx_mcs_map; vht_cap = (void *)vht_cap_elem->data; tx_mcs_map = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map); for (nss = 8; nss > 0; nss--) { if (((tx_mcs_map >> (2 * (nss - 1))) & 3) != IEEE80211_VHT_MCS_NOT_SUPPORTED) break; } /* TODO: use "Tx Highest Supported Long GI Data Rate" field? */ chains = max(chains, nss); } if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HE) return chains; ies = rcu_dereference(cbss->ies); he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ies->data, ies->len); if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap)) return chains; /* skip one byte ext_tag_id */ he_cap = (void *)(he_cap_elem->data + 1); mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap); /* invalid HE IE */ if (he_cap_elem->datalen < 1 + mcs_nss_size + sizeof(*he_cap)) return chains; /* mcs_nss is right after he_cap info */ he_mcs_nss_supp = (void *)(he_cap + 1); mcs_80_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80); for (i = 7; i >= 0; i--) { u8 mcs_80 = mcs_80_map >> (2 * i) & 3; if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { chains = max_t(u8, chains, i + 1); break; } } support_160 = he_cap->phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; if (!support_160) return chains; mcs_160_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_160); for (i = 7; i >= 0; i--) { u8 mcs_160 = mcs_160_map >> (2 * i) & 3; if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { chains = max_t(u8, chains, i + 1); break; } } return chains; } static void ieee80211_determine_our_sta_mode(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct cfg80211_assoc_request *req, bool wmm_used, int link_id, struct ieee80211_conn_settings *conn) { struct ieee80211_sta_ht_cap sta_ht_cap = sband->ht_cap; bool is_5ghz = sband->band == NL80211_BAND_5GHZ; bool is_6ghz = sband->band == NL80211_BAND_6GHZ; const struct ieee80211_sta_he_cap *he_cap; const struct ieee80211_sta_eht_cap *eht_cap; struct ieee80211_sta_vht_cap vht_cap; if (sband->band == NL80211_BAND_S1GHZ) { conn->mode = IEEE80211_CONN_MODE_S1G; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; mlme_dbg(sdata, "operating as S1G STA\n"); return; } conn->mode = IEEE80211_CONN_MODE_LEGACY; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); if (req && req->flags & ASSOC_REQ_DISABLE_HT) { 
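		/* HT explicitly disabled for this association by userspace
		 * (typically via NL80211_ATTR_DISABLE_HT)
		 */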
mlme_link_id_dbg(sdata, link_id, "HT disabled by flag, limiting to legacy\n"); goto out; } if (!wmm_used) { mlme_link_id_dbg(sdata, link_id, "WMM/QoS not supported, limiting to legacy\n"); goto out; } if (req) { unsigned int i; for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) { if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { netdev_info(sdata->dev, "WEP/TKIP use, limiting to legacy\n"); goto out; } } } if (!sta_ht_cap.ht_supported && !is_6ghz) { mlme_link_id_dbg(sdata, link_id, "HT not supported (and not on 6 GHz), limiting to legacy\n"); goto out; } /* HT is fine */ conn->mode = IEEE80211_CONN_MODE_HT; conn->bw_limit = sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_CONN_BW_LIMIT_40 : IEEE80211_CONN_BW_LIMIT_20; memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); if (req && req->flags & ASSOC_REQ_DISABLE_VHT) { mlme_link_id_dbg(sdata, link_id, "VHT disabled by flag, limiting to HT\n"); goto out; } if (vht_cap.vht_supported && is_5ghz) { bool have_80mhz = false; unsigned int i; if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20) { mlme_link_id_dbg(sdata, link_id, "no 40 MHz support on 5 GHz, limiting to HT\n"); goto out; } /* Allow VHT if at least one channel on the sband supports 80 MHz */ for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_NO_80MHZ)) continue; have_80mhz = true; break; } if (!have_80mhz) { mlme_link_id_dbg(sdata, link_id, "no 80 MHz channel support on 5 GHz, limiting to HT\n"); goto out; } } else if (is_5ghz) { /* !vht_supported but on 5 GHz */ mlme_link_id_dbg(sdata, link_id, "no VHT support on 5 GHz, limiting to HT\n"); goto out; } /* VHT - if we have - is fine, including 80 MHz, check 160 below again */ if (sband->band != NL80211_BAND_2GHZ) { conn->mode = IEEE80211_CONN_MODE_VHT; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160; } if (is_5ghz && !(vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))) { conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80; mlme_link_id_dbg(sdata, link_id, "no VHT 160 MHz capability on 5 GHz, limiting to 80 MHz"); } if (req && req->flags & ASSOC_REQ_DISABLE_HE) { mlme_link_id_dbg(sdata, link_id, "HE disabled by flag, limiting to HT/VHT\n"); goto out; } he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (!he_cap) { WARN_ON(is_6ghz); mlme_link_id_dbg(sdata, link_id, "no HE support, limiting to HT/VHT\n"); goto out; } /* so we have HE */ conn->mode = IEEE80211_CONN_MODE_HE; /* check bandwidth */ switch (sband->band) { default: case NL80211_BAND_2GHZ: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G) break; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; mlme_link_id_dbg(sdata, link_id, "no 40 MHz HE cap in 2.4 GHz, limiting to 20 MHz\n"); break; case NL80211_BAND_5GHZ: if (!(he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) { conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; mlme_link_id_dbg(sdata, link_id, "no 40/80 MHz HE cap in 5 GHz, limiting to 20 MHz\n"); break; } if (!(he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)) { conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_80); mlme_link_id_dbg(sdata, link_id, "no 160 MHz HE cap in 5 GHz, 
limiting to 80 MHz\n"); } break; case NL80211_BAND_6GHZ: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) break; conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, IEEE80211_CONN_BW_LIMIT_80); mlme_link_id_dbg(sdata, link_id, "no 160 MHz HE cap in 6 GHz, limiting to 80 MHz\n"); break; } if (req && req->flags & ASSOC_REQ_DISABLE_EHT) { mlme_link_id_dbg(sdata, link_id, "EHT disabled by flag, limiting to HE\n"); goto out; } eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); if (!eht_cap) { mlme_link_id_dbg(sdata, link_id, "no EHT support, limiting to HE\n"); goto out; } /* we have EHT */ conn->mode = IEEE80211_CONN_MODE_EHT; /* check bandwidth */ if (is_6ghz && eht_cap->eht_cap_elem.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) conn->bw_limit = IEEE80211_CONN_BW_LIMIT_320; else if (is_6ghz) mlme_link_id_dbg(sdata, link_id, "no EHT 320 MHz cap in 6 GHz, limiting to 160 MHz\n"); out: mlme_link_id_dbg(sdata, link_id, "determined local STA to be %s, BW limited to %d MHz\n", ieee80211_conn_mode_str(conn->mode), 20 * (1 << conn->bw_limit)); } static void ieee80211_determine_our_sta_mode_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct cfg80211_auth_request *req, bool wmm_used, struct ieee80211_conn_settings *conn) { ieee80211_determine_our_sta_mode(sdata, sband, NULL, wmm_used, req->link_id > 0 ? req->link_id : 0, conn); } static void ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct cfg80211_assoc_request *req, bool wmm_used, int link_id, struct ieee80211_conn_settings *conn) { struct ieee80211_conn_settings tmp; WARN_ON(!req); ieee80211_determine_our_sta_mode(sdata, sband, req, wmm_used, link_id, &tmp); conn->mode = min_t(enum ieee80211_conn_mode, conn->mode, tmp.mode); conn->bw_limit = min_t(enum ieee80211_conn_bw_limit, conn->bw_limit, tmp.bw_limit); } static enum ieee80211_ap_reg_power ieee80211_ap_power_type(u8 control) { switch (u8_get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { case IEEE80211_6GHZ_CTRL_REG_LPI_AP: case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: return IEEE80211_REG_LPI_AP; case IEEE80211_6GHZ_CTRL_REG_SP_AP: case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: return IEEE80211_REG_SP_AP; case IEEE80211_6GHZ_CTRL_REG_VLP_AP: return IEEE80211_REG_VLP_AP; default: return IEEE80211_REG_UNSET_AP; } } static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, int link_id, struct cfg80211_bss *cbss, bool mlo, struct ieee80211_conn_settings *conn, unsigned long *userspace_selectors) { struct ieee80211_local *local = sdata->local; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; struct ieee80211_chan_req chanreq = {}; struct cfg80211_chan_def ap_chandef; struct ieee802_11_elems *elems; int ret; lockdep_assert_wiphy(local->hw.wiphy); rcu_read_lock(); elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id, &chanreq, &ap_chandef, userspace_selectors); if (IS_ERR(elems)) { rcu_read_unlock(); return PTR_ERR(elems); } if (mlo && !elems->ml_basic) { sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n"); rcu_read_unlock(); kfree(elems); return -EINVAL; } if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) { const struct ieee80211_he_6ghz_oper *he_6ghz_oper; if (elems->pwr_constr_elem) link->conf->pwr_reduction = *elems->pwr_constr_elem; he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation); if 
(he_6ghz_oper) link->conf->power_type = ieee80211_ap_power_type(he_6ghz_oper->control); else link_info(link, "HE 6 GHz operation missing (on %d MHz), expect issues\n", cbss->channel->center_freq); link->conf->tpe = elems->tpe; ieee80211_rearrange_tpe(&link->conf->tpe, &ap_chandef, &chanreq.oper); } rcu_read_unlock(); /* the element data was RCU protected so no longer valid anyway */ kfree(elems); elems = NULL; if (!link) return 0; rcu_read_lock(); link->needed_rx_chains = min(ieee80211_max_rx_chains(link, cbss), local->rx_chains); rcu_read_unlock(); /* * If this fails (possibly due to channel context sharing * on incompatible channels, e.g. 80+80 and 160 sharing the * same control channel) try to use a smaller bandwidth. */ ret = ieee80211_link_use_channel(link, &chanreq, IEEE80211_CHANCTX_SHARED); /* don't downgrade for 5 and 10 MHz channels, though. */ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 || chanreq.oper.width == NL80211_CHAN_WIDTH_10) return ret; while (ret && chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) { ieee80211_chanreq_downgrade(&chanreq, conn); ret = ieee80211_link_use_channel(link, &chanreq, IEEE80211_CHANCTX_SHARED); } return ret; } static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies, u8 *dtim_count, u8 *dtim_period) { const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len); const u8 *idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, ies->data, ies->len); const struct ieee80211_tim_ie *tim = NULL; const struct ieee80211_bssid_index *idx; bool valid = tim_ie && tim_ie[1] >= 2; if (valid) tim = (void *)(tim_ie + 2); if (dtim_count) *dtim_count = valid ? tim->dtim_count : 0; if (dtim_period) *dtim_period = valid ? tim->dtim_period : 0; /* Check if value is overridden by non-transmitted profile */ if (!idx_ie || idx_ie[1] < 3) return valid; idx = (void *)(idx_ie + 2); if (dtim_count) *dtim_count = idx->dtim_count; if (dtim_period) *dtim_period = idx->dtim_period; return true; } static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, struct ieee802_11_elems *elems, const u8 *elem_start, unsigned int elem_len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; struct ieee80211_local *local = sdata->local; unsigned int link_id; struct sta_info *sta; u64 changed[IEEE80211_MLD_MAX_NUM_LINKS] = {}; u16 valid_links = 0, dormant_links = 0; int err; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* * station info was already allocated and inserted before * the association and should be available to us */ sta = sta_info_get(sdata, assoc_data->ap_addr); if (WARN_ON(!sta)) goto out_err; sta->sta.spp_amsdu = assoc_data->spp_amsdu; if (ieee80211_vif_is_mld(&sdata->vif)) { for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!assoc_data->link[link_id].bss) continue; valid_links |= BIT(link_id); if (assoc_data->link[link_id].disabled) dormant_links |= BIT(link_id); if (link_id != assoc_data->assoc_link_id) { err = ieee80211_sta_allocate_link(sta, link_id); if (err) goto out_err; } } ieee80211_vif_set_links(sdata, valid_links, dormant_links); } for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; struct ieee80211_link_data *link; struct link_sta_info *link_sta; if (!cbss) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) goto out_err; if (ieee80211_vif_is_mld(&sdata->vif)) link_info(link, "local address %pM, AP 
link address %pM%s\n", link->conf->addr, assoc_data->link[link_id].bss->bssid, link_id == assoc_data->assoc_link_id ? " (assoc)" : ""); link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&local->hw.wiphy->mtx)); if (WARN_ON(!link_sta)) goto out_err; if (!link->u.mgd.have_beacon) { const struct cfg80211_bss_ies *ies; rcu_read_lock(); ies = rcu_dereference(cbss->beacon_ies); if (ies) link->u.mgd.have_beacon = true; else ies = rcu_dereference(cbss->ies); ieee80211_get_dtim(ies, &link->conf->sync_dtim_count, &link->u.mgd.dtim_period); link->conf->beacon_int = cbss->beacon_interval; rcu_read_unlock(); } link->conf->dtim_period = link->u.mgd.dtim_period ?: 1; if (link_id != assoc_data->assoc_link_id) { link->u.mgd.conn = assoc_data->link[link_id].conn; err = ieee80211_prep_channel(sdata, link, link_id, cbss, true, &link->u.mgd.conn, assoc_data->userspace_selectors); if (err) { link_info(link, "prep_channel failed\n"); goto out_err; } } err = ieee80211_mgd_setup_link_sta(link, sta, link_sta, assoc_data->link[link_id].bss); if (err) goto out_err; if (!ieee80211_assoc_config_link(link, link_sta, assoc_data->link[link_id].bss, mgmt, elem_start, elem_len, &changed[link_id])) goto out_err; if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) { valid_links &= ~BIT(link_id); ieee80211_sta_remove_link(sta, link_id); continue; } if (link_id != assoc_data->assoc_link_id) { err = ieee80211_sta_activate_link(sta, link_id); if (err) goto out_err; } } /* links might have changed due to rejected ones, set them again */ ieee80211_vif_set_links(sdata, valid_links, dormant_links); rate_control_rate_init_all_links(sta); if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) { set_sta_flag(sta, WLAN_STA_MFP); sta->sta.mfp = true; } else { sta->sta.mfp = false; } ieee80211_sta_set_max_amsdu_subframes(sta, elems->ext_capab, elems->ext_capab_len); sta->sta.wme = (elems->wmm_param || elems->s1g_capab) && local->hw.queues >= IEEE80211_NUM_ACS; err = sta_info_move_state(sta, IEEE80211_STA_ASSOC); if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); if (err) { sdata_info(sdata, "failed to move station %pM to desired state\n", sta->sta.addr); WARN_ON(__sta_info_destroy(sta)); goto out_err; } if (sdata->wdev.use_4addr) drv_sta_set_4addr(local, sdata, &sta->sta, true); ieee80211_set_associated(sdata, assoc_data, changed); /* * If we're using 4-addr mode, let the AP know that we're * doing so, so that it can create the STA VLAN on its side */ if (ifmgd->use_4addr) ieee80211_send_4addr_nullfunc(local, sdata); /* * Start timer to probe the connection to the AP now. * Also start the timer that will detect beacon loss. 
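	 * (ieee80211_sta_reset_beacon_monitor() arms the beacon monitor timer,
	 * ieee80211_sta_reset_conn_monitor() the connection monitor timer)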
*/ ieee80211_sta_reset_beacon_monitor(sdata); ieee80211_sta_reset_conn_monitor(sdata); return true; out_err: eth_zero_addr(sdata->vif.cfg.ap_addr); return false; } static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u16 capab_info, status_code, aid; struct ieee80211_elems_parse_params parse_params = { .bss = NULL, .link_id = -1, .from_ap = true, }; struct ieee802_11_elems *elems; int ac; const u8 *elem_start; unsigned int elem_len; bool reassoc; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = ASSOC_EVENT, }; struct ieee80211_prep_tx_info info = {}; struct cfg80211_rx_assoc_resp_data resp = { .uapsd_queues = -1, }; u8 ap_mld_addr[ETH_ALEN] __aligned(2); unsigned int link_id; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (!assoc_data) return; info.link_id = assoc_data->assoc_link_id; parse_params.mode = assoc_data->link[assoc_data->assoc_link_id].conn.mode; if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) || !ether_addr_equal(assoc_data->ap_addr, mgmt->sa)) return; /* * AssocResp and ReassocResp have identical structure, so process both * of them in this function. */ if (len < 24 + 6) return; reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); if (assoc_data->s1g) elem_start = mgmt->u.s1g_assoc_resp.variable; else elem_start = mgmt->u.assoc_resp.variable; /* * Note: this may not be perfect, AP might misbehave - if * anyone needs to rely on perfect complete notification * with the exact right subtype, then we need to track what * we actually transmitted. */ info.subtype = reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ; if (assoc_data->fils_kek_len && fils_decrypt_assoc_resp(sdata, (u8 *)mgmt, &len, assoc_data) < 0) return; elem_len = len - (elem_start - (u8 *)mgmt); parse_params.start = elem_start; parse_params.len = elem_len; elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) goto notify_driver; if (elems->aid_resp) aid = le16_to_cpu(elems->aid_resp->aid); else if (assoc_data->s1g) aid = 0; /* TODO */ else aid = le16_to_cpu(mgmt->u.assoc_resp.aid); /* * The 5 MSB of the AID field are reserved * (802.11-2016 9.4.1.8 AID field) */ aid &= 0x7ff; sdata_info(sdata, "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n", reassoc ? 
"Rea" : "A", assoc_data->ap_addr, capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); ifmgd->broken_ap = false; if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && elems->timeout_int && elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; cfg80211_assoc_comeback(sdata->dev, assoc_data->ap_addr, le32_to_cpu(elems->timeout_int->value)); tu = le32_to_cpu(elems->timeout_int->value); ms = tu * 1024 / 1000; sdata_info(sdata, "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", assoc_data->ap_addr, tu, ms); assoc_data->timeout = jiffies + msecs_to_jiffies(ms); assoc_data->timeout_started = true; assoc_data->comeback = true; if (ms > IEEE80211_ASSOC_TIMEOUT) run_again(sdata, assoc_data->timeout); goto notify_driver; } if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied association (code=%d)\n", assoc_data->ap_addr, status_code); event.u.mlme.status = MLME_DENIED; event.u.mlme.reason = status_code; drv_event_callback(sdata->local, sdata, &event); } else { if (aid == 0 || aid > IEEE80211_MAX_AID) { sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n", aid); aid = 0; ifmgd->broken_ap = true; } if (ieee80211_vif_is_mld(&sdata->vif)) { struct ieee80211_mle_basic_common_info *common; if (!elems->ml_basic) { sdata_info(sdata, "MLO association with %pM but no (basic) multi-link element in response!\n", assoc_data->ap_addr); goto abandon_assoc; } common = (void *)elems->ml_basic->variable; if (memcmp(assoc_data->ap_addr, common->mld_mac_addr, ETH_ALEN)) { sdata_info(sdata, "AP MLD MAC address mismatch: got %pM expected %pM\n", common->mld_mac_addr, assoc_data->ap_addr); goto abandon_assoc; } sdata->vif.cfg.eml_cap = ieee80211_mle_get_eml_cap((const void *)elems->ml_basic); sdata->vif.cfg.eml_med_sync_delay = ieee80211_mle_get_eml_med_sync_delay((const void *)elems->ml_basic); sdata->vif.cfg.mld_capa_op = ieee80211_mle_get_mld_capa_op((const void *)elems->ml_basic); } sdata->vif.cfg.aid = aid; if (!ieee80211_assoc_success(sdata, mgmt, elems, elem_start, elem_len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); goto notify_driver; } event.u.mlme.status = MLME_SUCCESS; drv_event_callback(sdata->local, sdata, &event); sdata_info(sdata, "associated\n"); info.success = 1; } for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_link_data *link; if (!assoc_data->link[link_id].bss) continue; resp.links[link_id].bss = assoc_data->link[link_id].bss; ether_addr_copy(resp.links[link_id].addr, assoc_data->link[link_id].addr); resp.links[link_id].status = assoc_data->link[link_id].status; link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; /* get uapsd queues configuration - same for all links */ resp.uapsd_queues = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) if (link->tx_conf[ac].uapsd) resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac]; } if (ieee80211_vif_is_mld(&sdata->vif)) { ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr); resp.ap_mld_addr = ap_mld_addr; } ieee80211_destroy_assoc_data(sdata, status_code == WLAN_STATUS_SUCCESS ? 
ASSOC_SUCCESS : ASSOC_REJECTED); resp.buf = (u8 *)mgmt; resp.len = len; resp.req_ies = ifmgd->assoc_req_ies; resp.req_ies_len = ifmgd->assoc_req_ies_len; cfg80211_rx_assoc_resp(sdata->dev, &resp); notify_driver: drv_mgd_complete_tx(sdata->local, sdata, &info); kfree(elems); return; abandon_assoc: ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); goto notify_driver; } static void ieee80211_rx_bss_info(struct ieee80211_link_data *link, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_bss *bss; struct ieee80211_channel *channel; lockdep_assert_wiphy(sdata->local->hw.wiphy); channel = ieee80211_get_channel_khz(local->hw.wiphy, ieee80211_rx_status_to_khz(rx_status)); if (!channel) return; bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel); if (bss) { link->conf->beacon_rate = bss->beacon_rate; ieee80211_rx_bss_put(local, bss); } } static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_link_data *link, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ieee80211_if_managed *ifmgd; struct ieee80211_rx_status *rx_status = (void *) skb->cb; struct ieee80211_channel *channel; size_t baselen, len = skb->len; ifmgd = &sdata->u.mgd; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* * According to Draft P802.11ax D6.0 clause 26.17.2.3.2: * "If a 6 GHz AP receives a Probe Request frame and responds with * a Probe Response frame [..], the Address 1 field of the Probe * Response frame shall be set to the broadcast address [..]" * So, on 6GHz band we should also accept broadcast responses. */ channel = ieee80211_get_channel(sdata->local->hw.wiphy, rx_status->freq); if (!channel) return; if (!ether_addr_equal(mgmt->da, sdata->vif.addr) && (channel->band != NL80211_BAND_6GHZ || !is_broadcast_ether_addr(mgmt->da))) return; /* ignore ProbeResp to foreign address */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return; ieee80211_rx_bss_info(link, mgmt, len, rx_status); if (ifmgd->associated && ether_addr_equal(mgmt->bssid, link->u.mgd.bssid)) ieee80211_reset_ap_probe(sdata); } /* * This is the canonical list of information elements we care about, * the filter code also gives us all changes to the Microsoft OUI * (00:50:F2) vendor IE which is used for WMM which we need to track, * as well as the DTPC IE (part of the Cisco OUI) used for signaling * changes to requested client power. * * We implement beacon filtering in software since that means we can * avoid processing the frame here and in cfg80211, and userspace * will not be able to tell whether the hardware supports it or not. * * XXX: This list needs to be dynamic -- userspace needs to be able to * add items it requires. It also needs to be able to tell us to * look out for other vendor IEs. 
*/ static const u64 care_about_ies = (1ULL << WLAN_EID_COUNTRY) | (1ULL << WLAN_EID_ERP_INFO) | (1ULL << WLAN_EID_CHANNEL_SWITCH) | (1ULL << WLAN_EID_PWR_CONSTRAINT) | (1ULL << WLAN_EID_HT_CAPABILITY) | (1ULL << WLAN_EID_HT_OPERATION) | (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); static void ieee80211_handle_beacon_sig(struct ieee80211_link_data *link, struct ieee80211_if_managed *ifmgd, struct ieee80211_bss_conf *bss_conf, struct ieee80211_local *local, struct ieee80211_rx_status *rx_status) { struct ieee80211_sub_if_data *sdata = link->sdata; /* Track average RSSI from the Beacon frames of the current AP */ if (!link->u.mgd.tracking_signal_avg) { link->u.mgd.tracking_signal_avg = true; ewma_beacon_signal_init(&link->u.mgd.ave_beacon_signal); link->u.mgd.last_cqm_event_signal = 0; link->u.mgd.count_beacon_signal = 1; link->u.mgd.last_ave_beacon_signal = 0; } else { link->u.mgd.count_beacon_signal++; } ewma_beacon_signal_add(&link->u.mgd.ave_beacon_signal, -rx_status->signal); if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold && link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal); int last_sig = link->u.mgd.last_ave_beacon_signal; struct ieee80211_event event = { .type = RSSI_EVENT, }; /* * if signal crosses either of the boundaries, invoke callback * with appropriate parameters */ if (sig > ifmgd->rssi_max_thold && (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) { link->u.mgd.last_ave_beacon_signal = sig; event.u.rssi.data = RSSI_EVENT_HIGH; drv_event_callback(local, sdata, &event); } else if (sig < ifmgd->rssi_min_thold && (last_sig >= ifmgd->rssi_max_thold || last_sig == 0)) { link->u.mgd.last_ave_beacon_signal = sig; event.u.rssi.data = RSSI_EVENT_LOW; drv_event_callback(local, sdata, &event); } } if (bss_conf->cqm_rssi_thold && link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) { int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal); int last_event = link->u.mgd.last_cqm_event_signal; int thold = bss_conf->cqm_rssi_thold; int hyst = bss_conf->cqm_rssi_hyst; if (sig < thold && (last_event == 0 || sig < last_event - hyst)) { link->u.mgd.last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { link->u.mgd.last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL); } } if (bss_conf->cqm_rssi_low && link->u.mgd.count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { int sig = -ewma_beacon_signal_read(&link->u.mgd.ave_beacon_signal); int last_event = link->u.mgd.last_cqm_event_signal; int low = bss_conf->cqm_rssi_low; int high = bss_conf->cqm_rssi_high; if (sig < low && (last_event == 0 || last_event >= low)) { link->u.mgd.last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL); } else if (sig > high && (last_event == 0 || last_event <= high)) { link->u.mgd.last_cqm_event_signal = sig; ieee80211_cqm_rssi_notify( &sdata->vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL); } } } static bool ieee80211_rx_our_beacon(const u8 *tx_bssid, struct cfg80211_bss *bss) { if (ether_addr_equal(tx_bssid, bss->bssid)) return true; if (!bss->transmitted_bss) return false; return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid); } static void 
ieee80211_ml_reconf_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.ml_reconf_work.work); u16 new_valid_links, new_active_links, new_dormant_links; int ret; if (!sdata->u.mgd.removed_links) return; sdata_info(sdata, "MLO Reconfiguration: work: valid=0x%x, removed=0x%x\n", sdata->vif.valid_links, sdata->u.mgd.removed_links); new_valid_links = sdata->vif.valid_links & ~sdata->u.mgd.removed_links; if (new_valid_links == sdata->vif.valid_links) return; if (!new_valid_links || !(new_valid_links & ~sdata->vif.dormant_links)) { sdata_info(sdata, "No valid links after reconfiguration\n"); ret = -EINVAL; goto out; } new_active_links = sdata->vif.active_links & ~sdata->u.mgd.removed_links; if (new_active_links != sdata->vif.active_links) { if (!new_active_links) new_active_links = BIT(ffs(new_valid_links & ~sdata->vif.dormant_links) - 1); ret = ieee80211_set_active_links(&sdata->vif, new_active_links); if (ret) { sdata_info(sdata, "Failed setting active links\n"); goto out; } } new_dormant_links = sdata->vif.dormant_links & ~sdata->u.mgd.removed_links; ret = ieee80211_vif_set_links(sdata, new_valid_links, new_dormant_links); if (ret) sdata_info(sdata, "Failed setting valid links\n"); ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS); out: if (!ret) cfg80211_links_removed(sdata->dev, sdata->u.mgd.removed_links); else __ieee80211_disconnect(sdata); sdata->u.mgd.removed_links = 0; } static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems) { const struct element *sub; unsigned long removed_links = 0; u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {}; u8 link_id; u32 delay; if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf) return; /* Directly parse the sub elements as the common information doesn't * hold any useful information. */ for_each_mle_subelement(sub, (const u8 *)elems->ml_reconf, elems->ml_reconf_len) { struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; u8 *pos = prof->variable; u16 control; if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) continue; if (!ieee80211_mle_reconf_sta_prof_size_ok(sub->data, sub->datalen)) return; control = le16_to_cpu(prof->control); link_id = control & IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID; removed_links |= BIT(link_id); /* the MAC address should not be included, but handle it */ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT) pos += 6; /* According to Draft P802.11be_D3.0, the control should * include the AP Removal Timer present. If the AP Removal Timer * is not present assume immediate removal. 
*/ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT) link_removal_timeout[link_id] = get_unaligned_le16(pos); } removed_links &= sdata->vif.valid_links; if (!removed_links) { /* In case the removal was cancelled, abort it */ if (sdata->u.mgd.removed_links) { sdata->u.mgd.removed_links = 0; wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &sdata->u.mgd.ml_reconf_work); } return; } delay = 0; for_each_set_bit(link_id, &removed_links, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_bss_conf *link_conf = sdata_dereference(sdata->vif.link_conf[link_id], sdata); u32 link_delay; if (!link_conf) { removed_links &= ~BIT(link_id); continue; } if (link_removal_timeout[link_id] < 1) link_delay = 0; else link_delay = link_conf->beacon_int * (link_removal_timeout[link_id] - 1); if (!delay) delay = link_delay; else delay = min(delay, link_delay); } sdata->u.mgd.removed_links = removed_links; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.ml_reconf_work, TU_TO_JIFFIES(delay)); } static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata, u16 active_links, u16 dormant_links, u16 suspended_links) { u64 changed = 0; int ret; if (!active_links) { ret = -EINVAL; goto out; } /* If there is an active negotiated TTLM, it should be discarded by * the new negotiated/advertised TTLM. */ if (sdata->vif.neg_ttlm.valid) { memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm)); sdata->vif.suspended_links = 0; changed = BSS_CHANGED_MLD_TTLM; } if (sdata->vif.active_links != active_links) { /* usable links are affected when active_links are changed, * so notify the driver about the status change */ changed |= BSS_CHANGED_MLD_VALID_LINKS; active_links &= sdata->vif.active_links; if (!active_links) active_links = BIT(__ffs(sdata->vif.valid_links & ~dormant_links)); ret = ieee80211_set_active_links(&sdata->vif, active_links); if (ret) { sdata_info(sdata, "Failed to set TTLM active links\n"); goto out; } } ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links, dormant_links); if (ret) { sdata_info(sdata, "Failed to set TTLM dormant links\n"); goto out; } sdata->vif.suspended_links = suspended_links; if (sdata->vif.suspended_links) changed |= BSS_CHANGED_MLD_TTLM; ieee80211_vif_cfg_change_notify(sdata, changed); out: if (ret) ieee80211_disconnect(&sdata->vif, false); return ret; } static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy, struct wiphy_work *work) { u16 new_active_links, new_dormant_links; struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.ttlm_work.work); new_active_links = sdata->u.mgd.ttlm_info.map & sdata->vif.valid_links; new_dormant_links = ~sdata->u.mgd.ttlm_info.map & sdata->vif.valid_links; ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0); if (ieee80211_ttlm_set_links(sdata, new_active_links, new_dormant_links, 0)) return; sdata->u.mgd.ttlm_info.active = true; sdata->u.mgd.ttlm_info.switch_time = 0; } static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data) { if (bm_size == 1) return *data; else return get_unaligned_le16(data); } static int ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ttlm_elem *ttlm, struct ieee80211_adv_ttlm_info *ttlm_info) { /* The element size was already validated in * ieee80211_tid_to_link_map_size_ok() */ u8 control, link_map_presence, map_size, tid; u8 *pos; memset(ttlm_info, 0, sizeof(*ttlm_info)); pos = (void *)ttlm->optional; control = ttlm->control; if ((control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) || !(control & 
IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)) return 0; if ((control & IEEE80211_TTLM_CONTROL_DIRECTION) != IEEE80211_TTLM_DIRECTION_BOTH) { sdata_info(sdata, "Invalid advertised T2L map direction\n"); return -EINVAL; } link_map_presence = *pos; pos++; ttlm_info->switch_time = get_unaligned_le16(pos); /* Since ttlm_info->switch_time == 0 means no switch time, bump it * by 1. */ if (!ttlm_info->switch_time) ttlm_info->switch_time = 1; pos += 2; if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) { ttlm_info->duration = pos[0] | pos[1] << 8 | pos[2] << 16; pos += 3; } if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE) map_size = 1; else map_size = 2; /* According to Draft P802.11be_D3.0 clause 35.3.7.1.7, an AP MLD shall * not advertise a TID-to-link mapping that does not map all TIDs to the * same link set, reject frame if not all links have mapping */ if (link_map_presence != 0xff) { sdata_info(sdata, "Invalid advertised T2L mapping presence indicator\n"); return -EINVAL; } ttlm_info->map = ieee80211_get_ttlm(map_size, pos); if (!ttlm_info->map) { sdata_info(sdata, "Invalid advertised T2L map for TID 0\n"); return -EINVAL; } pos += map_size; for (tid = 1; tid < 8; tid++) { u16 map = ieee80211_get_ttlm(map_size, pos); if (map != ttlm_info->map) { sdata_info(sdata, "Invalid advertised T2L map for tid %d\n", tid); return -EINVAL; } pos += map_size; } return 0; } static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, u64 beacon_ts) { u8 i; int ret; if (!ieee80211_vif_is_mld(&sdata->vif)) return; if (!elems->ttlm_num) { if (sdata->u.mgd.ttlm_info.switch_time) { /* if a planned TID-to-link mapping was cancelled - * abort it */ wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &sdata->u.mgd.ttlm_work); } else if (sdata->u.mgd.ttlm_info.active) { /* if no TID-to-link element, set to default mapping in * which all TIDs are mapped to all setup links */ ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0); if (ret) { sdata_info(sdata, "Failed setting valid/dormant links\n"); return; } ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS); } memset(&sdata->u.mgd.ttlm_info, 0, sizeof(sdata->u.mgd.ttlm_info)); return; } for (i = 0; i < elems->ttlm_num; i++) { struct ieee80211_adv_ttlm_info ttlm_info; u32 res; res = ieee80211_parse_adv_t2l(sdata, elems->ttlm[i], &ttlm_info); if (res) { __ieee80211_disconnect(sdata); return; } if (ttlm_info.switch_time) { u16 beacon_ts_tu, st_tu, delay; u32 delay_jiffies; u64 mask; /* The t2l map switch time is indicated with a partial * TSF value (bits 10 to 25), get the partial beacon TS * as well, and calc the delay to the start time. */ mask = GENMASK_ULL(25, 10); beacon_ts_tu = (beacon_ts & mask) >> 10; st_tu = ttlm_info.switch_time; delay = st_tu - beacon_ts_tu; /* * If the switch time is far in the future, then it * could also be the previous switch still being * announced. * We can simply ignore it for now, if it is a future * switch the AP will continue to announce it anyway. 
*/ if (delay > IEEE80211_ADV_TTLM_ST_UNDERFLOW) return; delay_jiffies = TU_TO_JIFFIES(delay); /* Link switching can take time, so schedule it * 100ms before to be ready on time */ if (delay_jiffies > IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS) delay_jiffies -= IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS; else delay_jiffies = 0; sdata->u.mgd.ttlm_info = ttlm_info; wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &sdata->u.mgd.ttlm_work); wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.ttlm_work, delay_jiffies); return; } } } static void ieee80211_mgd_check_cross_link_csa(struct ieee80211_sub_if_data *sdata, int reporting_link_id, struct ieee802_11_elems *elems) { const struct element *sta_profiles[IEEE80211_MLD_MAX_NUM_LINKS] = {}; ssize_t sta_profiles_len[IEEE80211_MLD_MAX_NUM_LINKS] = {}; const struct element *sub; const u8 *subelems; size_t subelems_len; u8 common_size; int link_id; if (!ieee80211_mle_size_ok((u8 *)elems->ml_basic, elems->ml_basic_len)) return; common_size = ieee80211_mle_common_size((u8 *)elems->ml_basic); subelems = (u8 *)elems->ml_basic + common_size; subelems_len = elems->ml_basic_len - common_size; for_each_element_id(sub, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE, subelems, subelems_len) { struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; struct ieee80211_link_data *link; ssize_t len; if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data, sub->datalen)) continue; link_id = le16_get_bits(prof->control, IEEE80211_MLE_STA_CONTROL_LINK_ID); /* need a valid link ID, but also not our own, both AP bugs */ if (link_id == reporting_link_id || link_id >= IEEE80211_MLD_MAX_NUM_LINKS) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; len = cfg80211_defragment_element(sub, subelems, subelems_len, NULL, 0, IEEE80211_MLE_SUBELEM_FRAGMENT); if (WARN_ON(len < 0)) continue; sta_profiles[link_id] = sub; sta_profiles_len[link_id] = len; } for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_mle_per_sta_profile *prof; struct ieee802_11_elems *prof_elems; struct ieee80211_link_data *link; ssize_t len; if (link_id == reporting_link_id) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; if (!sta_profiles[link_id]) { prof_elems = NULL; goto handle; } /* we can defragment in-place, won't use the buffer again */ len = cfg80211_defragment_element(sta_profiles[link_id], subelems, subelems_len, (void *)sta_profiles[link_id], sta_profiles_len[link_id], IEEE80211_MLE_SUBELEM_FRAGMENT); if (WARN_ON(len != sta_profiles_len[link_id])) continue; prof = (void *)sta_profiles[link_id]; prof_elems = ieee802_11_parse_elems(prof->variable + (prof->sta_info_len - 1), len - (prof->sta_info_len - 1), false, NULL); /* memory allocation failed - let's hope that's transient */ if (!prof_elems) continue; handle: /* * FIXME: the timings here are obviously incorrect, * but only older Intel drivers seem to care, and * those don't have MLO. If you really need this, * the problem is having to calculate it with the * TSF offset etc. The device_timestamp is still * correct, of course. 
*/ ieee80211_sta_process_chanswitch(link, 0, 0, elems, prof_elems, IEEE80211_CSA_SOURCE_OTHER_LINK); kfree(prof_elems); } } static bool ieee80211_mgd_ssid_mismatch(struct ieee80211_sub_if_data *sdata, const struct ieee802_11_elems *elems) { struct ieee80211_vif_cfg *cfg = &sdata->vif.cfg; static u8 zero_ssid[IEEE80211_MAX_SSID_LEN]; if (!elems->ssid) return false; /* hidden SSID: zero length */ if (elems->ssid_len == 0) return false; if (elems->ssid_len != cfg->ssid_len) return true; /* hidden SSID: zeroed out */ if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len)) return false; return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len); } static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, struct ieee80211_hdr *hdr, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; struct ieee80211_mgmt *mgmt = (void *) hdr; size_t baselen; struct ieee802_11_elems *elems; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; struct link_sta_info *link_sta; struct sta_info *sta; u64 changed = 0; bool erp_valid; u8 erp_value = 0; u32 ncrc = 0; u8 *bssid, *variable = mgmt->u.beacon.variable; u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; struct ieee80211_elems_parse_params parse_params = { .mode = link->u.mgd.conn.mode, .link_id = -1, .from_ap = true, }; lockdep_assert_wiphy(local->hw.wiphy); /* Process beacon from the current BSS */ bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type); if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { struct ieee80211_ext *ext = (void *) mgmt; if (ieee80211_is_s1g_short_beacon(ext->frame_control)) variable = ext->u.s1g_short_beacon.variable; else variable = ext->u.s1g_beacon.variable; } baselen = (u8 *) variable - (u8 *) mgmt; if (baselen > len) return; parse_params.start = variable; parse_params.len = len - baselen; rcu_read_lock(); chanctx_conf = rcu_dereference(bss_conf->chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return; } if (ieee80211_rx_status_to_khz(rx_status) != ieee80211_channel_to_khz(chanctx_conf->def.chan)) { rcu_read_unlock(); return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && !WARN_ON(ieee80211_vif_is_mld(&sdata->vif)) && ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) { parse_params.bss = ifmgd->assoc_data->link[0].bss; elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; ieee80211_rx_bss_info(link, mgmt, len, rx_status); if (elems->dtim_period) link->u.mgd.dtim_period = elems->dtim_period; link->u.mgd.have_beacon = true; ifmgd->assoc_data->need_beacon = false; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && !ieee80211_is_s1g_beacon(hdr->frame_control)) { bss_conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); bss_conf->sync_device_ts = rx_status->device_timestamp; bss_conf->sync_dtim_count = elems->dtim_count; } if (elems->mbssid_config_ie) bss_conf->profile_periodicity = elems->mbssid_config_ie->profile_periodicity; else bss_conf->profile_periodicity = 0; if (elems->ext_capab_len >= 11 && (elems->ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) bss_conf->ema_ap = true; else bss_conf->ema_ap = false; /* continue assoc process */ ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; 
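/*
 * Note (added annotation): the timeout was just set to expire now, and
 * run_again() below re-arms the MLME timer so that ieee80211_sta_work()
 * runs immediately and transmits the association request, now that the
 * beacon/DTIM information it was waiting for has been received.
 */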
run_again(sdata, ifmgd->assoc_data->timeout); kfree(elems); return; } if (!ifmgd->associated || !ieee80211_rx_our_beacon(bssid, bss_conf->bss)) return; bssid = link->u.mgd.bssid; if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)) ieee80211_handle_beacon_sig(link, ifmgd, bss_conf, local, rx_status); if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) { mlme_dbg_ratelimited(sdata, "cancelling AP probe due to a received beacon\n"); ieee80211_reset_ap_probe(sdata); } /* * Push the beacon loss detection into the future since * we are processing a beacon from the AP just now. */ ieee80211_sta_reset_beacon_monitor(sdata); /* TODO: CRC currently not calculated on S1G Beacon Compatibility * element (which carries the beacon interval). Don't forget to add a * bit to care_about_ies[] above if mac80211 is interested in a * changing S1G element. */ if (!ieee80211_is_s1g_beacon(hdr->frame_control)) ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); parse_params.bss = bss_conf->bss; parse_params.filter = care_about_ies; parse_params.crc = ncrc; elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; if (rx_status->flag & RX_FLAG_DECRYPTED && ieee80211_mgd_ssid_mismatch(sdata, elems)) { sdata_info(sdata, "SSID mismatch for AP %pM, disconnect\n", sdata->vif.cfg.ap_addr); __ieee80211_disconnect(sdata); return; } ncrc = elems->crc; if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid)) { if (local->hw.conf.dynamic_ps_timeout > 0) { if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } ieee80211_send_nullfunc(local, sdata, false); } else if (!local->pspolling && sdata->u.mgd.powersave) { local->pspolling = true; /* * It is assumed here that the driver will be * able to send a ps-poll frame and receive a * response even though power save mode is * enabled, but some drivers might require * power save to be disabled here. This needs * to be investigated. */ ieee80211_send_pspoll(local, sdata); } } if (sdata->vif.p2p || sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) { struct ieee80211_p2p_noa_attr noa = {}; int ret; ret = cfg80211_get_p2p_attr(variable, len - baselen, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *) &noa, sizeof(noa)); if (ret >= 2) { if (link->u.mgd.p2p_noa_index != noa.index) { /* valid noa_attr and index changed */ link->u.mgd.p2p_noa_index = noa.index; memcpy(&bss_conf->p2p_noa_attr, &noa, sizeof(noa)); changed |= BSS_CHANGED_P2P_PS; /* * make sure we update all information, the CRC * mechanism doesn't look at P2P attributes. */ link->u.mgd.beacon_crc_valid = false; } } else if (link->u.mgd.p2p_noa_index != -1) { /* noa_attr not found and we had valid noa_attr before */ link->u.mgd.p2p_noa_index = -1; memset(&bss_conf->p2p_noa_attr, 0, sizeof(bss_conf->p2p_noa_attr)); changed |= BSS_CHANGED_P2P_PS; link->u.mgd.beacon_crc_valid = false; } } /* * Update beacon timing and dtim count on every beacon appearance. This * will allow the driver to use the most updated values. Do it before * comparing this one with the last received beacon. * IMPORTANT: These parameters would possibly be out of sync by the time * the driver will use them. The synchronized view is currently * guaranteed only in certain callbacks.
*/ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && !ieee80211_is_s1g_beacon(hdr->frame_control)) { bss_conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); bss_conf->sync_device_ts = rx_status->device_timestamp; bss_conf->sync_dtim_count = elems->dtim_count; } if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) || ieee80211_is_s1g_short_beacon(mgmt->frame_control)) goto free; link->u.mgd.beacon_crc = ncrc; link->u.mgd.beacon_crc_valid = true; ieee80211_rx_bss_info(link, mgmt, len, rx_status); ieee80211_sta_process_chanswitch(link, rx_status->mactime, rx_status->device_timestamp, elems, elems, IEEE80211_CSA_SOURCE_BEACON); /* note that after this elems->ml_basic can no longer be used fully */ ieee80211_mgd_check_cross_link_csa(sdata, rx_status->link_id, elems); ieee80211_mgd_update_bss_param_ch_cnt(sdata, bss_conf, elems); if (!link->u.mgd.disable_wmm_tracking && ieee80211_sta_wmm_params(local, link, elems->wmm_param, elems->wmm_param_len, elems->mu_edca_param_set)) changed |= BSS_CHANGED_QOS; /* * If we haven't had a beacon before, tell the driver about the * DTIM period (and beacon timing if desired) now. */ if (!link->u.mgd.have_beacon) { /* a few bogus AP send dtim_period = 0 or no TIM IE */ bss_conf->dtim_period = elems->dtim_period ?: 1; changed |= BSS_CHANGED_BEACON_INFO; link->u.mgd.have_beacon = true; ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); } if (elems->erp_info) { erp_valid = true; erp_value = elems->erp_info[0]; } else { erp_valid = false; } if (!ieee80211_is_s1g_beacon(hdr->frame_control)) changed |= ieee80211_handle_bss_capability(link, le16_to_cpu(mgmt->u.beacon.capab_info), erp_valid, erp_value); sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); if (WARN_ON(!sta)) { goto free; } link_sta = rcu_dereference_protected(sta->link[link->link_id], lockdep_is_held(&local->hw.wiphy->mtx)); if (WARN_ON(!link_sta)) { goto free; } if (WARN_ON(!bss_conf->chanreq.oper.chan)) goto free; sband = local->hw.wiphy->bands[bss_conf->chanreq.oper.chan->band]; changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems); if (ieee80211_config_bw(link, elems, true, &changed, "beacon")) { ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); ieee80211_report_disconnect(sdata, deauth_buf, sizeof(deauth_buf), true, WLAN_REASON_DEAUTH_LEAVING, false); goto free; } if (elems->opmode_notif) ieee80211_vht_handle_opmode(sdata, link_sta, *elems->opmode_notif, rx_status->band); changed |= ieee80211_handle_pwr_constr(link, chan, mgmt, elems->country_elem, elems->country_elem_len, elems->pwr_constr_elem, elems->cisco_dtpc_elem); ieee80211_ml_reconfiguration(sdata, elems); ieee80211_process_adv_ttlm(sdata, elems, le64_to_cpu(mgmt->u.beacon.timestamp)); ieee80211_link_info_change_notify(sdata, link, changed); free: kfree(elems); } static void ieee80211_apply_neg_ttlm(struct ieee80211_sub_if_data *sdata, struct ieee80211_neg_ttlm neg_ttlm) { u16 new_active_links, new_dormant_links, new_suspended_links, map = 0; u8 i; for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) map |= neg_ttlm.downlink[i] | neg_ttlm.uplink[i]; /* If there is an active TTLM, unset previously suspended links */ if (sdata->vif.neg_ttlm.valid) sdata->vif.dormant_links &= ~sdata->vif.suspended_links; /* exclude links that are already disabled by advertised TTLM */ new_active_links = map & sdata->vif.valid_links & ~sdata->vif.dormant_links; new_suspended_links = (~map & sdata->vif.valid_links) & ~sdata->vif.dormant_links; new_dormant_links = 
sdata->vif.dormant_links | new_suspended_links; if (ieee80211_ttlm_set_links(sdata, new_active_links, new_dormant_links, new_suspended_links)) return; sdata->vif.neg_ttlm = neg_ttlm; sdata->vif.neg_ttlm.valid = true; } static void ieee80211_neg_ttlm_timeout_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.neg_ttlm_timeout_work.work); sdata_info(sdata, "No negotiated TTLM response from AP, disconnecting.\n"); __ieee80211_disconnect(sdata); } static void ieee80211_neg_ttlm_add_suggested_map(struct sk_buff *skb, struct ieee80211_neg_ttlm *neg_ttlm) { u8 i, direction[IEEE80211_TTLM_MAX_CNT]; if (memcmp(neg_ttlm->downlink, neg_ttlm->uplink, sizeof(neg_ttlm->downlink))) { direction[0] = IEEE80211_TTLM_DIRECTION_DOWN; direction[1] = IEEE80211_TTLM_DIRECTION_UP; } else { direction[0] = IEEE80211_TTLM_DIRECTION_BOTH; } for (i = 0; i < ARRAY_SIZE(direction); i++) { u8 tid, len, map_ind = 0, *len_pos, *map_ind_pos, *pos; __le16 map; len = sizeof(struct ieee80211_ttlm_elem) + 1 + 1; pos = skb_put(skb, len + 2); *pos++ = WLAN_EID_EXTENSION; len_pos = pos++; *pos++ = WLAN_EID_EXT_TID_TO_LINK_MAPPING; *pos++ = direction[i]; map_ind_pos = pos++; for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) { map = direction[i] == IEEE80211_TTLM_DIRECTION_UP ? cpu_to_le16(neg_ttlm->uplink[tid]) : cpu_to_le16(neg_ttlm->downlink[tid]); if (!map) continue; len += 2; map_ind |= BIT(tid); skb_put_data(skb, &map, sizeof(map)); } *map_ind_pos = map_ind; *len_pos = len; if (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH) break; } } static void ieee80211_send_neg_ttlm_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_neg_ttlm *neg_ttlm, u8 dialog_token) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct sk_buff *skb; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_req); int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 + 2 * 2 * IEEE80211_TTLM_NUM_TIDS; skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len); if (!skb) return; skb_reserve(skb, local->tx_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT; mgmt->u.action.u.ttlm_req.action_code = WLAN_PROTECTED_EHT_ACTION_TTLM_REQ; mgmt->u.action.u.ttlm_req.dialog_token = dialog_token; ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm); ieee80211_tx_skb(sdata, skb); } int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata, struct cfg80211_ttlm_params *params) { struct ieee80211_neg_ttlm neg_ttlm = {}; u8 i; if (!ieee80211_vif_is_mld(&sdata->vif) || !(sdata->vif.cfg.mld_capa_op & IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP)) return -EINVAL; for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) { if ((params->dlink[i] & ~sdata->vif.valid_links) || (params->ulink[i] & ~sdata->vif.valid_links)) return -EINVAL; neg_ttlm.downlink[i] = params->dlink[i]; neg_ttlm.uplink[i] = params->ulink[i]; } if (drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm) != NEG_TTLM_RES_ACCEPT) return -EINVAL; ieee80211_apply_neg_ttlm(sdata, neg_ttlm); sdata->u.mgd.dialog_token_alloc++; ieee80211_send_neg_ttlm_req(sdata, &sdata->vif.neg_ttlm, sdata->u.mgd.dialog_token_alloc); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, 
&sdata->u.mgd.neg_ttlm_timeout_work); wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.neg_ttlm_timeout_work, IEEE80211_NEG_TTLM_REQ_TIMEOUT); return 0; } static void ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, enum ieee80211_neg_ttlm_res ttlm_res, u8 dialog_token, struct ieee80211_neg_ttlm *neg_ttlm) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct sk_buff *skb; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res); int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 + 2 * 2 * IEEE80211_TTLM_NUM_TIDS; skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len); if (!skb) return; skb_reserve(skb, local->tx_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT; mgmt->u.action.u.ttlm_res.action_code = WLAN_PROTECTED_EHT_ACTION_TTLM_RES; mgmt->u.action.u.ttlm_res.dialog_token = dialog_token; switch (ttlm_res) { default: WARN_ON(1); fallthrough; case NEG_TTLM_RES_REJECT: mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING; break; case NEG_TTLM_RES_ACCEPT: mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS; break; case NEG_TTLM_RES_SUGGEST_PREFERRED: mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED; ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm); break; } ieee80211_tx_skb(sdata, skb); } static int ieee80211_parse_neg_ttlm(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ttlm_elem *ttlm, struct ieee80211_neg_ttlm *neg_ttlm, u8 *direction) { u8 control, link_map_presence, map_size, tid; u8 *pos; /* The element size was already validated in * ieee80211_tid_to_link_map_size_ok() */ pos = (void *)ttlm->optional; control = ttlm->control; /* mapping switch time and expected duration fields are not expected * in case of negotiated TTLM */ if (control & (IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT | IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)) { mlme_dbg(sdata, "Invalid TTLM element in negotiated TTLM request\n"); return -EINVAL; } if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) { for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) { neg_ttlm->downlink[tid] = sdata->vif.valid_links; neg_ttlm->uplink[tid] = sdata->vif.valid_links; } *direction = IEEE80211_TTLM_DIRECTION_BOTH; return 0; } *direction = u8_get_bits(control, IEEE80211_TTLM_CONTROL_DIRECTION); if (*direction != IEEE80211_TTLM_DIRECTION_DOWN && *direction != IEEE80211_TTLM_DIRECTION_UP && *direction != IEEE80211_TTLM_DIRECTION_BOTH) return -EINVAL; link_map_presence = *pos; pos++; if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE) map_size = 1; else map_size = 2; for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) { u16 map; if (link_map_presence & BIT(tid)) { map = ieee80211_get_ttlm(map_size, pos); if (!map) { mlme_dbg(sdata, "No active links for TID %d", tid); return -EINVAL; } } else { map = 0; } switch (*direction) { case IEEE80211_TTLM_DIRECTION_BOTH: neg_ttlm->downlink[tid] = map; neg_ttlm->uplink[tid] = map; break; case IEEE80211_TTLM_DIRECTION_DOWN: neg_ttlm->downlink[tid] = map; break; case IEEE80211_TTLM_DIRECTION_UP: neg_ttlm->uplink[tid] = map; break; default: return -EINVAL; } pos += map_size; } return 0; } void ieee80211_process_neg_ttlm_req(struct 
ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u8 dialog_token, direction[IEEE80211_TTLM_MAX_CNT] = {}, i; size_t ies_len; enum ieee80211_neg_ttlm_res ttlm_res = NEG_TTLM_RES_ACCEPT; struct ieee802_11_elems *elems = NULL; struct ieee80211_neg_ttlm neg_ttlm = {}; BUILD_BUG_ON(ARRAY_SIZE(direction) != ARRAY_SIZE(elems->ttlm)); if (!ieee80211_vif_is_mld(&sdata->vif)) return; dialog_token = mgmt->u.action.u.ttlm_req.dialog_token; ies_len = len - offsetof(struct ieee80211_mgmt, u.action.u.ttlm_req.variable); elems = ieee802_11_parse_elems(mgmt->u.action.u.ttlm_req.variable, ies_len, true, NULL); if (!elems) { ttlm_res = NEG_TTLM_RES_REJECT; goto out; } for (i = 0; i < elems->ttlm_num; i++) { if (ieee80211_parse_neg_ttlm(sdata, elems->ttlm[i], &neg_ttlm, &direction[i]) || (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH && elems->ttlm_num != 1)) { ttlm_res = NEG_TTLM_RES_REJECT; goto out; } } if (!elems->ttlm_num || (elems->ttlm_num == 2 && direction[0] == direction[1])) { ttlm_res = NEG_TTLM_RES_REJECT; goto out; } for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) { if ((neg_ttlm.downlink[i] && (neg_ttlm.downlink[i] & ~sdata->vif.valid_links)) || (neg_ttlm.uplink[i] && (neg_ttlm.uplink[i] & ~sdata->vif.valid_links))) { ttlm_res = NEG_TTLM_RES_REJECT; goto out; } } ttlm_res = drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm); if (ttlm_res != NEG_TTLM_RES_ACCEPT) goto out; ieee80211_apply_neg_ttlm(sdata, neg_ttlm); out: kfree(elems); ieee80211_send_neg_ttlm_res(sdata, ttlm_res, dialog_token, &neg_ttlm); } void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { if (!ieee80211_vif_is_mld(&sdata->vif) || mgmt->u.action.u.ttlm_req.dialog_token != sdata->u.mgd.dialog_token_alloc) return; wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &sdata->u.mgd.neg_ttlm_timeout_work); /* MLD station sends a TID to link mapping request, mainly to handle * BTM (BSS transition management) request, in which case it needs to * restrict the active links set. * In this case it's not expected that the MLD AP will reject the * negotiated TTLM request. * This can be better implemented in the future, to handle request * rejections. 
*/ if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS) __ieee80211_disconnect(sdata); } static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, struct wiphy_work *work) { u16 new_dormant_links; struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.teardown_ttlm_work); if (!sdata->vif.neg_ttlm.valid) return; memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm)); new_dormant_links = sdata->vif.dormant_links & ~sdata->vif.suspended_links; sdata->vif.suspended_links = 0; ieee80211_vif_set_links(sdata, sdata->vif.valid_links, new_dormant_links); ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_TTLM | BSS_CHANGED_MLD_VALID_LINKS); } void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct sk_buff *skb; int frame_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_tear_down); struct ieee80211_tx_info *info; skb = dev_alloc_skb(local->hw.extra_tx_headroom + frame_len); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = skb_put_zero(skb, frame_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT; mgmt->u.action.u.ttlm_tear_down.action_code = WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; info->status_data = IEEE80211_STATUS_TYPE_NEG_TTLM; ieee80211_tx_skb(sdata, skb); } EXPORT_SYMBOL(ieee80211_send_teardown_neg_ttlm); void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_link_data *link = &sdata->deflink; struct ieee80211_rx_status *rx_status; struct ieee80211_hdr *hdr; u16 fc; lockdep_assert_wiphy(sdata->local->hw.wiphy); rx_status = (struct ieee80211_rx_status *) skb->cb; hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_S1G_BEACON: ieee80211_rx_mgmt_beacon(link, hdr, skb->len, rx_status); break; } } void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_link_data *link = &sdata->deflink; struct ieee80211_rx_status *rx_status; struct ieee802_11_elems *elems; struct ieee80211_mgmt *mgmt; u16 fc; int ies_len; lockdep_assert_wiphy(sdata->local->hw.wiphy); rx_status = (struct ieee80211_rx_status *) skb->cb; mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); if (rx_status->link_valid) { link = sdata_dereference(sdata->link[rx_status->link_id], sdata); if (!link) return; } switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_beacon(link, (void *)mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_RESP: ieee80211_rx_mgmt_probe_resp(link, skb); break; case IEEE80211_STYPE_AUTH: ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DISASSOC: ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: if (!sdata->u.mgd.associated || 
!ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) break; switch (mgmt->u.action.category) { case WLAN_CATEGORY_SPECTRUM_MGMT: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); if (ies_len < 0) break; /* CSA IE cannot be overridden, no need for BSSID */ elems = ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, ies_len, true, NULL); if (elems && !elems->parse_error) { enum ieee80211_csa_source src = IEEE80211_CSA_SOURCE_PROT_ACTION; ieee80211_sta_process_chanswitch(link, rx_status->mactime, rx_status->device_timestamp, elems, elems, src); } kfree(elems); break; case WLAN_CATEGORY_PUBLIC: case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.ext_chan_switch.variable); if (ies_len < 0) break; /* * extended CSA IE can't be overridden, no need for * BSSID */ elems = ieee802_11_parse_elems( mgmt->u.action.u.ext_chan_switch.variable, ies_len, true, NULL); if (elems && !elems->parse_error) { enum ieee80211_csa_source src; if (mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) src = IEEE80211_CSA_SOURCE_PROT_ACTION; else src = IEEE80211_CSA_SOURCE_UNPROT_ACTION; /* for the handling code pretend it was an IE */ elems->ext_chansw_ie = &mgmt->u.action.u.ext_chan_switch.data; ieee80211_sta_process_chanswitch(link, rx_status->mactime, rx_status->device_timestamp, elems, elems, src); } kfree(elems); break; } break; } } static void ieee80211_sta_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.timer); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 reason, bool tx) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, reason, false); } static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0; u16 trans = 1; u16 status = 0; struct ieee80211_prep_tx_info info = { .subtype = IEEE80211_STYPE_AUTH, }; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (WARN_ON_ONCE(!auth_data)) return -EINVAL; auth_data->tries++; if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { sdata_info(sdata, "authentication with %pM timed out\n", auth_data->ap_addr); /* * Most likely AP is not in the range so remove the * bss struct for that AP. 
*/ cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss); return -ETIMEDOUT; } if (auth_data->algorithm == WLAN_AUTH_SAE) info.duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE); info.link_id = auth_data->link_id; drv_mgd_prepare_tx(local, sdata, &info); sdata_info(sdata, "send auth to %pM (try %d/%d)\n", auth_data->ap_addr, auth_data->tries, IEEE80211_AUTH_MAX_TRIES); auth_data->expected_transaction = 2; if (auth_data->algorithm == WLAN_AUTH_SAE) { trans = auth_data->sae_trans; status = auth_data->sae_status; auth_data->expected_transaction = trans; } if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, auth_data->data, auth_data->data_len, auth_data->ap_addr, auth_data->ap_addr, NULL, 0, 0, tx_flags); if (tx_flags == 0) { if (auth_data->algorithm == WLAN_AUTH_SAE) auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SAE; else auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; } else { auth_data->timeout = round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); } auth_data->timeout_started = true; run_again(sdata, auth_data->timeout); return 0; } static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; struct ieee80211_local *local = sdata->local; int ret; lockdep_assert_wiphy(sdata->local->hw.wiphy); assoc_data->tries++; assoc_data->comeback = false; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { sdata_info(sdata, "association with %pM timed out\n", assoc_data->ap_addr); /* * Most likely AP is not in the range so remove the * bss struct for that AP. */ cfg80211_unlink_bss(local->hw.wiphy, assoc_data->link[assoc_data->assoc_link_id].bss); return -ETIMEDOUT; } sdata_info(sdata, "associate with %pM (try %d/%d)\n", assoc_data->ap_addr, assoc_data->tries, IEEE80211_ASSOC_MAX_TRIES); ret = ieee80211_send_assoc(sdata); if (ret) return ret; if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; assoc_data->timeout_started = true; run_again(sdata, assoc_data->timeout); } else { assoc_data->timeout = round_jiffies_up(jiffies + IEEE80211_ASSOC_TIMEOUT_LONG); assoc_data->timeout_started = true; run_again(sdata, assoc_data->timeout); } return 0; } void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, __le16 fc, bool acked) { struct ieee80211_local *local = sdata->local; sdata->u.mgd.status_fc = fc; sdata->u.mgd.status_acked = acked; sdata->u.mgd.status_received = true; wiphy_work_queue(local->hw.wiphy, &sdata->work); } void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifmgd->status_received) { __le16 fc = ifmgd->status_fc; bool status_acked = ifmgd->status_acked; ifmgd->status_received = false; if (ifmgd->auth_data && ieee80211_is_auth(fc)) { if (status_acked) { if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE) ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SAE; else ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; } ifmgd->auth_data->timeout_started = true; } else if (ifmgd->assoc_data && !ifmgd->assoc_data->comeback && (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))) { /* * Update 
association timeout based on the TX status * for the (Re)Association Request frame. Skip this if * we have already processed a (Re)Association Response * frame that indicated need for association comeback * at a specific time in the future. This could happen * if the TX status information is delayed enough for * the response to be received and processed first. */ if (status_acked) { ifmgd->assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; run_again(sdata, ifmgd->assoc_data->timeout); } else { ifmgd->assoc_data->timeout = jiffies - 1; } ifmgd->assoc_data->timeout_started = true; } } if (ifmgd->auth_data && ifmgd->auth_data->timeout_started && time_after(jiffies, ifmgd->auth_data->timeout)) { if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) { /* * ok ... we waited for assoc or continuation but * userspace didn't do it, so kill the auth data */ ieee80211_destroy_auth_data(sdata, false); } else if (ieee80211_auth(sdata)) { u8 ap_addr[ETH_ALEN]; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = AUTH_EVENT, .u.mlme.status = MLME_TIMEOUT, }; memcpy(ap_addr, ifmgd->auth_data->ap_addr, ETH_ALEN); ieee80211_destroy_auth_data(sdata, false); cfg80211_auth_timeout(sdata->dev, ap_addr); drv_event_callback(sdata->local, sdata, &event); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) run_again(sdata, ifmgd->auth_data->timeout); if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { if ((ifmgd->assoc_data->need_beacon && !sdata->deflink.u.mgd.have_beacon) || ieee80211_do_assoc(sdata)) { struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = ASSOC_EVENT, .u.mlme.status = MLME_TIMEOUT, }; ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); drv_event_callback(sdata->local, sdata, &event); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) run_again(sdata, ifmgd->assoc_data->timeout); if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL && ifmgd->associated) { u8 *bssid = sdata->deflink.u.mgd.bssid; int max_tries; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) max_tries = max_nullfunc_tries; else max_tries = max_probe_tries; /* ACK received for nullfunc probing frame */ if (!ifmgd->probe_send_count) ieee80211_reset_ap_probe(sdata); else if (ifmgd->nullfunc_failed) { if (ifmgd->probe_send_count < max_tries) { mlme_dbg(sdata, "No ack for nullfunc frame to AP %pM, try %d/%i\n", bssid, ifmgd->probe_send_count, max_tries); ieee80211_mgd_probe_ap_send(sdata); } else { mlme_dbg(sdata, "No ack for nullfunc frame to AP %pM, disconnecting.\n", bssid); ieee80211_sta_connection_lost(sdata, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); } } else if (time_is_after_jiffies(ifmgd->probe_timeout)) run_again(sdata, ifmgd->probe_timeout); else if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { mlme_dbg(sdata, "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", bssid, probe_wait_ms); ieee80211_sta_connection_lost(sdata, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); } else if (ifmgd->probe_send_count < max_tries) { mlme_dbg(sdata, "No probe response from AP %pM after %dms, try %d/%i\n", bssid, probe_wait_ms, ifmgd->probe_send_count, max_tries); ieee80211_mgd_probe_ap_send(sdata); } else { /* * We actually lost the connection ... or did we? * Let's make sure! 
*/ mlme_dbg(sdata, "No probe response from AP %pM after %dms, disconnecting.\n", bssid, probe_wait_ms); ieee80211_sta_connection_lost(sdata, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false); } } } static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.bcn_mon_timer); if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; if (sdata->vif.bss_conf.csa_active && !sdata->deflink.u.mgd.csa.waiting_bcn) return; if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) return; sdata->u.mgd.connection_loss = false; wiphy_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.beacon_connection_loss_work); } static void ieee80211_sta_conn_mon_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.conn_mon_timer); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; struct sta_info *sta; unsigned long timeout; if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; if (sdata->vif.bss_conf.csa_active && !sdata->deflink.u.mgd.csa.waiting_bcn) return; sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); if (!sta) return; timeout = sta->deflink.status_stats.last_ack; if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx)) timeout = sta->deflink.rx_stats.last_rx; timeout += IEEE80211_CONNECTION_IDLE_TIME; /* If timeout is after now, then update timer to fire at * the later date, but do not actually probe at this time. */ if (time_is_after_jiffies(timeout)) { mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout)); return; } wiphy_work_queue(local->hw.wiphy, &sdata->u.mgd.monitor_work); } static void ieee80211_sta_monitor_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.monitor_work); ieee80211_mgd_probe_ap(sdata, false); } static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_STATION) { __ieee80211_stop_poll(sdata); /* let's probe the connection once */ if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) wiphy_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.monitor_work); } } #ifdef CONFIG_PM void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifmgd->auth_data || ifmgd->assoc_data) { const u8 *ap_addr = ifmgd->auth_data ? ifmgd->auth_data->ap_addr : ifmgd->assoc_data->ap_addr; /* * If we are trying to authenticate / associate while suspending, * cfg80211 won't know and won't actually abort those attempts, * thus we need to do that ourselves. */ ieee80211_send_deauth_disassoc(sdata, ap_addr, ap_addr, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, false, frame_buf); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN, false); } /* This is a bit of a hack - we should find a better and more generic * solution to this. Normally when suspending, cfg80211 will in fact * deauthenticate. However, it doesn't (and cannot) stop an ongoing * auth (not so important) or assoc (this is the problem) process. 
* * As a consequence, it can happen that we are in the process of both * associating and suspending, and receive an association response * after cfg80211 has checked if it needs to disconnect, but before * we actually set the flag to drop incoming frames. This will then * cause the workqueue flush to process the association response in * the suspend, resulting in a successful association just before it * tries to remove the interface from the driver, which now though * has a channel context assigned ... this results in issues. * * To work around this (for now) simply deauth here again if we're * now connected. */ if (ifmgd->associated && !sdata->local->wowlan) { u8 bssid[ETH_ALEN]; struct cfg80211_deauth_request req = { .reason_code = WLAN_REASON_DEAUTH_LEAVING, .bssid = bssid, }; memcpy(bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); ieee80211_mgd_deauth(sdata, &req); } } #endif void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (!ifmgd->associated) return; if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; mlme_dbg(sdata, "driver requested disconnect after resume\n"); ieee80211_sta_connection_lost(sdata, WLAN_REASON_UNSPECIFIED, true); return; } if (sdata->flags & IEEE80211_SDATA_DISCONNECT_HW_RESTART) { sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_HW_RESTART; mlme_dbg(sdata, "driver requested disconnect after hardware restart\n"); ieee80211_sta_connection_lost(sdata, WLAN_REASON_UNSPECIFIED, true); return; } } static void ieee80211_request_smps_mgd_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_link_data *link = container_of(work, struct ieee80211_link_data, u.mgd.request_smps_work); __ieee80211_request_smps_mgd(link->sdata, link, link->u.mgd.driver_smps_mode); } static void ieee80211_ml_sta_reconf_timeout(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.reconf.wk.work); if (!sdata->u.mgd.reconf.added_links && !sdata->u.mgd.reconf.removed_links) return; sdata_info(sdata, "mlo: reconf: timeout: added=0x%x, removed=0x%x\n", sdata->u.mgd.reconf.added_links, sdata->u.mgd.reconf.removed_links); __ieee80211_disconnect(sdata); } /* interface setup */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; wiphy_work_init(&ifmgd->monitor_work, ieee80211_sta_monitor_work); wiphy_work_init(&ifmgd->beacon_connection_loss_work, ieee80211_beacon_connection_loss_work); wiphy_work_init(&ifmgd->csa_connection_drop_work, ieee80211_csa_connection_drop_work); wiphy_delayed_work_init(&ifmgd->tdls_peer_del_work, ieee80211_tdls_peer_del_work); wiphy_delayed_work_init(&ifmgd->ml_reconf_work, ieee80211_ml_reconf_work); wiphy_delayed_work_init(&ifmgd->reconf.wk, ieee80211_ml_sta_reconf_timeout); timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0); timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0); timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0); wiphy_delayed_work_init(&ifmgd->tx_tspec_wk, ieee80211_sta_handle_tspec_ac_params_wk); wiphy_delayed_work_init(&ifmgd->ttlm_work, ieee80211_tid_to_link_map_work); wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work, ieee80211_neg_ttlm_timeout_work); wiphy_work_init(&ifmgd->teardown_ttlm_work, ieee80211_teardown_ttlm_work); ifmgd->flags = 0; ifmgd->powersave = sdata->wdev.ps; 
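/*
 * Illustrative note, not driver code: the timers initialised just above do
 * not handle MLME events themselves - they only bounce into wiphy work
 * items, as the timer callbacks earlier in this listing show, e.g.
 *
 *	wiphy_work_queue(sdata->local->hw.wiphy,
 *			 &sdata->u.mgd.beacon_connection_loss_work);
 *
 * so the real processing runs with the wiphy mutex held instead of in the
 * atomic timer context.
 */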
ifmgd->uapsd_queues = sdata->local->hw.uapsd_queues; ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; /* Setup TDLS data */ spin_lock_init(&ifmgd->teardown_lock); ifmgd->teardown_skb = NULL; ifmgd->orig_teardown_skb = NULL; ifmgd->mcast_seq_last = IEEE80211_SN_MODULO; } static void ieee80211_recalc_smps_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_link_data *link = container_of(work, struct ieee80211_link_data, u.mgd.recalc_smps); ieee80211_recalc_smps(link->sdata, link); } void ieee80211_mgd_setup_link(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; unsigned int link_id = link->link_id; link->u.mgd.p2p_noa_index = -1; link->conf->bssid = link->u.mgd.bssid; link->smps_mode = IEEE80211_SMPS_OFF; wiphy_work_init(&link->u.mgd.request_smps_work, ieee80211_request_smps_mgd_work); wiphy_work_init(&link->u.mgd.recalc_smps, ieee80211_recalc_smps_work); if (local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) link->u.mgd.req_smps = IEEE80211_SMPS_AUTOMATIC; else link->u.mgd.req_smps = IEEE80211_SMPS_OFF; wiphy_delayed_work_init(&link->u.mgd.csa.switch_work, ieee80211_csa_switch_work); ieee80211_clear_tpe(&link->conf->tpe); if (sdata->u.mgd.assoc_data) ether_addr_copy(link->conf->addr, sdata->u.mgd.assoc_data->link[link_id].addr); else if (sdata->u.mgd.reconf.add_links_data) ether_addr_copy(link->conf->addr, sdata->u.mgd.reconf.add_links_data->link[link_id].addr); else if (!is_valid_ether_addr(link->conf->addr)) eth_random_addr(link->conf->addr); } /* scan finished notification */ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; /* Restart STA timers */ rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (ieee80211_sdata_running(sdata)) ieee80211_restart_sta_timer(sdata); } rcu_read_unlock(); } static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss, s8 link_id, const u8 *ap_mld_addr, bool assoc, struct ieee80211_conn_settings *conn, bool override, unsigned long *userspace_selectors) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss *bss = (void *)cbss->priv; struct sta_info *new_sta = NULL; struct ieee80211_link_data *link; bool have_sta = false; bool mlo; int err; if (link_id >= 0) { mlo = true; if (WARN_ON(!ap_mld_addr)) return -EINVAL; err = ieee80211_vif_set_links(sdata, BIT(link_id), 0); } else { if (WARN_ON(ap_mld_addr)) return -EINVAL; ap_mld_addr = cbss->bssid; err = ieee80211_vif_set_links(sdata, 0, 0); link_id = 0; mlo = false; } if (err) return err; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) { err = -ENOLINK; goto out_err; } if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) { err = -EINVAL; goto out_err; } /* If a reconfig is happening, bail out */ if (local->in_reconfig) { err = -EBUSY; goto out_err; } if (assoc) { rcu_read_lock(); have_sta = sta_info_get(sdata, ap_mld_addr); rcu_read_unlock(); } if (!have_sta) { if (mlo) new_sta = sta_info_alloc_with_link(sdata, ap_mld_addr, link_id, cbss->bssid, GFP_KERNEL); else new_sta = sta_info_alloc(sdata, ap_mld_addr, GFP_KERNEL); if (!new_sta) { err = -ENOMEM; goto out_err; } new_sta->sta.mlo = mlo; } /* * Set up the information for the new channel before setting the * new channel. 
We can't - completely race-free - change the basic * rates bitmap and the channel (sband) that it refers to, but if * we set it up before we at least avoid calling into the driver's * bss_info_changed() method with invalid information (since we do * call that from changing the channel - only for IDLE and perhaps * some others, but ...). * * So to avoid that, just set up all the new information before the * channel, but tell the driver to apply it only afterwards, since * it might need the new channel for that. */ if (new_sta) { const struct cfg80211_bss_ies *ies; struct link_sta_info *link_sta; rcu_read_lock(); link_sta = rcu_dereference(new_sta->link[link_id]); if (WARN_ON(!link_sta)) { rcu_read_unlock(); sta_info_free(local, new_sta); err = -EINVAL; goto out_err; } err = ieee80211_mgd_setup_link_sta(link, new_sta, link_sta, cbss); if (err) { rcu_read_unlock(); sta_info_free(local, new_sta); goto out_err; } memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN); /* set timing information */ link->conf->beacon_int = cbss->beacon_interval; ies = rcu_dereference(cbss->beacon_ies); if (ies) { link->conf->sync_tsf = ies->tsf; link->conf->sync_device_ts = bss->device_ts_beacon; ieee80211_get_dtim(ies, &link->conf->sync_dtim_count, NULL); } else if (!ieee80211_hw_check(&sdata->local->hw, TIMING_BEACON_ONLY)) { ies = rcu_dereference(cbss->proberesp_ies); /* must be non-NULL since beacon IEs were NULL */ link->conf->sync_tsf = ies->tsf; link->conf->sync_device_ts = bss->device_ts_presp; link->conf->sync_dtim_count = 0; } else { link->conf->sync_tsf = 0; link->conf->sync_device_ts = 0; link->conf->sync_dtim_count = 0; } rcu_read_unlock(); } if (new_sta || override) { /* * Only set this if we're also going to calculate the AP * settings etc., otherwise this was set before in a * previous call. Note override is set to %true in assoc * if the settings were changed. 
*/ link->u.mgd.conn = *conn; err = ieee80211_prep_channel(sdata, link, link->link_id, cbss, mlo, &link->u.mgd.conn, userspace_selectors); if (err) { if (new_sta) sta_info_free(local, new_sta); goto out_err; } /* pass out for use in assoc */ *conn = link->u.mgd.conn; } if (new_sta) { /* * tell driver about BSSID, basic rates and timing * this was set up above, before setting the channel */ ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); if (assoc) sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH); err = sta_info_insert(new_sta); new_sta = NULL; if (err) { sdata_info(sdata, "failed to insert STA entry for the AP (error %d)\n", err); goto out_release_chan; } } else WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid)); /* Cancel scan to ensure that nothing interferes with connection */ if (local->scanning) ieee80211_scan_cancel(local); return 0; out_release_chan: ieee80211_link_release_channel(link); out_err: ieee80211_vif_set_links(sdata, 0, 0); return err; } static bool ieee80211_mgd_csa_present(struct ieee80211_sub_if_data *sdata, const struct cfg80211_bss_ies *ies, u8 cur_channel, bool ignore_ecsa) { const struct element *csa_elem, *ecsa_elem; struct ieee80211_channel_sw_ie *csa = NULL; struct ieee80211_ext_chansw_ie *ecsa = NULL; if (!ies) return false; csa_elem = cfg80211_find_elem(WLAN_EID_CHANNEL_SWITCH, ies->data, ies->len); if (csa_elem && csa_elem->datalen == sizeof(*csa)) csa = (void *)csa_elem->data; ecsa_elem = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN, ies->data, ies->len); if (ecsa_elem && ecsa_elem->datalen == sizeof(*ecsa)) ecsa = (void *)ecsa_elem->data; if (csa && csa->count == 0) csa = NULL; if (csa && !csa->mode && csa->new_ch_num == cur_channel) csa = NULL; if (ecsa && ecsa->count == 0) ecsa = NULL; if (ecsa && !ecsa->mode && ecsa->new_ch_num == cur_channel) ecsa = NULL; if (ignore_ecsa && ecsa) { sdata_info(sdata, "Ignoring ECSA in probe response - was considered stuck!\n"); return csa; } return csa || ecsa; } static bool ieee80211_mgd_csa_in_process(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *bss) { u8 cur_channel; bool ret; cur_channel = ieee80211_frequency_to_channel(bss->channel->center_freq); rcu_read_lock(); if (ieee80211_mgd_csa_present(sdata, rcu_dereference(bss->beacon_ies), cur_channel, false)) { ret = true; goto out; } if (ieee80211_mgd_csa_present(sdata, rcu_dereference(bss->proberesp_ies), cur_channel, bss->proberesp_ecsa_stuck)) { ret = true; goto out; } ret = false; out: rcu_read_unlock(); return ret; } static void ieee80211_parse_cfg_selectors(unsigned long *userspace_selectors, const u8 *supported_selectors, u8 supported_selectors_len) { if (supported_selectors) { for (int i = 0; i < supported_selectors_len; i++) { set_bit(supported_selectors[i], userspace_selectors); } } else { /* Assume SAE_H2E support for backward compatibility. 
*/ set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, userspace_selectors); } } /* config hooks */ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, struct cfg80211_auth_request *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_conn_settings conn; struct ieee80211_link_data *link; struct ieee80211_supported_band *sband; struct ieee80211_bss *bss; u16 auth_alg; int err; bool cont_auth, wmm_used; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* prepare auth data structure */ switch (req->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: auth_alg = WLAN_AUTH_OPEN; break; case NL80211_AUTHTYPE_SHARED_KEY: if (fips_enabled) return -EOPNOTSUPP; auth_alg = WLAN_AUTH_SHARED_KEY; break; case NL80211_AUTHTYPE_FT: auth_alg = WLAN_AUTH_FT; break; case NL80211_AUTHTYPE_NETWORK_EAP: auth_alg = WLAN_AUTH_LEAP; break; case NL80211_AUTHTYPE_SAE: auth_alg = WLAN_AUTH_SAE; break; case NL80211_AUTHTYPE_FILS_SK: auth_alg = WLAN_AUTH_FILS_SK; break; case NL80211_AUTHTYPE_FILS_SK_PFS: auth_alg = WLAN_AUTH_FILS_SK_PFS; break; case NL80211_AUTHTYPE_FILS_PK: auth_alg = WLAN_AUTH_FILS_PK; break; default: return -EOPNOTSUPP; } if (ifmgd->assoc_data) return -EBUSY; if (ieee80211_mgd_csa_in_process(sdata, req->bss)) { sdata_info(sdata, "AP is in CSA process, reject auth\n"); return -EINVAL; } auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len + req->ie_len, GFP_KERNEL); if (!auth_data) return -ENOMEM; memcpy(auth_data->ap_addr, req->ap_mld_addr ?: req->bss->bssid, ETH_ALEN); auth_data->bss = req->bss; auth_data->link_id = req->link_id; if (req->auth_data_len >= 4) { if (req->auth_type == NL80211_AUTHTYPE_SAE) { __le16 *pos = (__le16 *) req->auth_data; auth_data->sae_trans = le16_to_cpu(pos[0]); auth_data->sae_status = le16_to_cpu(pos[1]); } memcpy(auth_data->data, req->auth_data + 4, req->auth_data_len - 4); auth_data->data_len += req->auth_data_len - 4; } /* Check if continuing authentication or trying to authenticate with the * same BSS that we were in the process of authenticating with and avoid * removal and re-addition of the STA entry in * ieee80211_prep_connection(). */ cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss && ifmgd->auth_data->link_id == req->link_id; if (req->ie && req->ie_len) { memcpy(&auth_data->data[auth_data->data_len], req->ie, req->ie_len); auth_data->data_len += req->ie_len; } if (req->key && req->key_len) { auth_data->key_len = req->key_len; auth_data->key_idx = req->key_idx; memcpy(auth_data->key, req->key, req->key_len); } ieee80211_parse_cfg_selectors(auth_data->userspace_selectors, req->supported_selectors, req->supported_selectors_len); auth_data->algorithm = auth_alg; /* try to authenticate/probe */ if (ifmgd->auth_data) { if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) { auth_data->peer_confirmed = ifmgd->auth_data->peer_confirmed; } ieee80211_destroy_auth_data(sdata, cont_auth); } /* prep auth_data so we don't go into idle on disassoc */ ifmgd->auth_data = auth_data; /* If this is continuation of an ongoing SAE authentication exchange * (i.e., request to send SAE Confirm) and the peer has already * confirmed, mark authentication completed since we are about to send * out SAE Confirm. 
*/ if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE && auth_data->peer_confirmed && auth_data->sae_trans == 2) ieee80211_mark_sta_auth(sdata); if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; sdata_info(sdata, "disconnect from AP %pM for new auth to %pM\n", sdata->vif.cfg.ap_addr, auth_data->ap_addr); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_UNSPECIFIED, false, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, WLAN_REASON_UNSPECIFIED, false); } /* needed for transmitting the auth frame(s) properly */ memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN); bss = (void *)req->bss->priv; wmm_used = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS); sband = local->hw.wiphy->bands[req->bss->channel->band]; ieee80211_determine_our_sta_mode_auth(sdata, sband, req, wmm_used, &conn); err = ieee80211_prep_connection(sdata, req->bss, req->link_id, req->ap_mld_addr, cont_auth, &conn, false, auth_data->userspace_selectors); if (err) goto err_clear; if (req->link_id >= 0) link = sdata_dereference(sdata->link[req->link_id], sdata); else link = &sdata->deflink; if (WARN_ON(!link)) { err = -ENOLINK; goto err_clear; } sdata_info(sdata, "authenticate with %pM (local address=%pM)\n", auth_data->ap_addr, link->conf->addr); err = ieee80211_auth(sdata); if (err) { sta_info_destroy_addr(sdata, auth_data->ap_addr); goto err_clear; } /* hold our own reference */ cfg80211_ref_bss(local->hw.wiphy, auth_data->bss); return 0; err_clear: if (!ieee80211_vif_is_mld(&sdata->vif)) { eth_zero_addr(sdata->deflink.u.mgd.bssid); ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_BSSID); ieee80211_link_release_channel(&sdata->deflink); } ifmgd->auth_data = NULL; kfree(auth_data); return err; } static void ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *assoc_data, struct cfg80211_assoc_request *req, struct ieee80211_conn_settings *conn, unsigned int link_id) { struct ieee80211_local *local = sdata->local; const struct cfg80211_bss_ies *bss_ies; struct ieee80211_supported_band *sband; struct ieee80211_link_data *link; struct cfg80211_bss *cbss; struct ieee80211_bss *bss; cbss = assoc_data->link[link_id].bss; if (WARN_ON(!cbss)) return; bss = (void *)cbss->priv; sband = local->hw.wiphy->bands[cbss->channel->band]; if (WARN_ON(!sband)) return; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) return; /* for MLO connections assume advertising all rates is OK */ if (!req->ap_mld_addr) { assoc_data->supp_rates = bss->supp_rates; assoc_data->supp_rates_len = bss->supp_rates_len; } /* copy and link elems for the STA profile */ if (req->links[link_id].elems_len) { memcpy(assoc_data->ie_pos, req->links[link_id].elems, req->links[link_id].elems_len); assoc_data->link[link_id].elems = assoc_data->ie_pos; assoc_data->link[link_id].elems_len = req->links[link_id].elems_len; assoc_data->ie_pos += req->links[link_id].elems_len; } link->u.mgd.beacon_crc_valid = false; link->u.mgd.dtim_period = 0; link->u.mgd.have_beacon = false; /* override HT configuration only if the AP and we support it */ if (conn->mode >= IEEE80211_CONN_MODE_HT) { struct ieee80211_sta_ht_cap sta_ht_cap; memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap)); ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); } rcu_read_lock(); bss_ies = rcu_dereference(cbss->beacon_ies); if (bss_ies) { u8 dtim_count = 0; ieee80211_get_dtim(bss_ies, &dtim_count, &link->u.mgd.dtim_period); 
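/*
 * Illustrative note, not driver code: ieee80211_get_dtim() presumably pulls
 * the DTIM count/period out of the TIM element carried in the beacon IEs.
 * The TIM element body is roughly:
 *
 *	u8 dtim_count;		// beacons until the next DTIM (0 = this one)
 *	u8 dtim_period;		// beacon intervals between DTIMs
 *	u8 bitmap_ctrl;
 *	u8 virtual_map[];	// partial virtual bitmap
 *
 * Only the first two octets matter here: the period is stored for the link
 * and the count is used below to seed the timing-sync fields.
 */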
sdata->deflink.u.mgd.have_beacon = true; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { link->conf->sync_tsf = bss_ies->tsf; link->conf->sync_device_ts = bss->device_ts_beacon; link->conf->sync_dtim_count = dtim_count; } } else { bss_ies = rcu_dereference(cbss->ies); } if (bss_ies) { const struct element *elem; elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION, bss_ies->data, bss_ies->len); if (elem && elem->datalen >= 3) link->conf->profile_periodicity = elem->data[2]; else link->conf->profile_periodicity = 0; elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss_ies->data, bss_ies->len); if (elem && elem->datalen >= 11 && (elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) link->conf->ema_ap = true; else link->conf->ema_ap = false; } rcu_read_unlock(); if (bss->corrupt_data) { char *corrupt_type = "data"; if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) { if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) corrupt_type = "beacon and probe response"; else corrupt_type = "beacon"; } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) { corrupt_type = "probe response"; } sdata_info(sdata, "associating to AP %pM with corrupt %s\n", cbss->bssid, corrupt_type); } if (link->u.mgd.req_smps == IEEE80211_SMPS_AUTOMATIC) { if (sdata->u.mgd.powersave) link->smps_mode = IEEE80211_SMPS_DYNAMIC; else link->smps_mode = IEEE80211_SMPS_OFF; } else { link->smps_mode = link->u.mgd.req_smps; } } static int ieee80211_mgd_get_ap_ht_vht_capa(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *assoc_data, int link_id) { struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; enum nl80211_band band = cbss->channel->band; struct ieee80211_supported_band *sband; const struct element *elem; int err; /* neither HT nor VHT elements used on 6 GHz */ if (band == NL80211_BAND_6GHZ) return 0; if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_HT) return 0; rcu_read_lock(); elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION); if (!elem || elem->datalen < sizeof(struct ieee80211_ht_operation)) { mlme_link_id_dbg(sdata, link_id, "no HT operation on BSS %pM\n", cbss->bssid); err = -EINVAL; goto out_rcu; } assoc_data->link[link_id].ap_ht_param = ((struct ieee80211_ht_operation *)(elem->data))->ht_param; rcu_read_unlock(); if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_VHT) return 0; /* some drivers want to support VHT on 2.4 GHz even */ sband = sdata->local->hw.wiphy->bands[band]; if (!sband->vht_cap.vht_supported) return 0; rcu_read_lock(); elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY); /* but even then accept it not being present on the AP */ if (!elem && band == NL80211_BAND_2GHZ) { err = 0; goto out_rcu; } if (!elem || elem->datalen < sizeof(struct ieee80211_vht_cap)) { mlme_link_id_dbg(sdata, link_id, "no VHT capa on BSS %pM\n", cbss->bssid); err = -EINVAL; goto out_rcu; } memcpy(&assoc_data->link[link_id].ap_vht_cap, elem->data, sizeof(struct ieee80211_vht_cap)); rcu_read_unlock(); return 0; out_rcu: rcu_read_unlock(); return err; } int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_request *req) { unsigned int assoc_link_id = req->link_id < 0 ? 
0 : req->link_id; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data; const struct element *ssid_elem; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; struct ieee80211_link_data *link; struct cfg80211_bss *cbss; bool override, uapsd_supported; bool match_auth; int i, err; size_t size = sizeof(*assoc_data) + req->ie_len; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) size += req->links[i].elems_len; /* FIXME: no support for 4-addr MLO yet */ if (sdata->u.mgd.use_4addr && req->link_id >= 0) return -EOPNOTSUPP; assoc_data = kzalloc(size, GFP_KERNEL); if (!assoc_data) return -ENOMEM; cbss = req->link_id < 0 ? req->bss : req->links[req->link_id].bss; if (ieee80211_mgd_csa_in_process(sdata, cbss)) { sdata_info(sdata, "AP is in CSA process, reject assoc\n"); err = -EINVAL; goto err_free; } rcu_read_lock(); ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID); if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) { rcu_read_unlock(); err = -EINVAL; goto err_free; } memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen); assoc_data->ssid_len = ssid_elem->datalen; rcu_read_unlock(); if (req->ap_mld_addr) memcpy(assoc_data->ap_addr, req->ap_mld_addr, ETH_ALEN); else memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN); if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; sdata_info(sdata, "disconnect from AP %pM for new assoc to %pM\n", sdata->vif.cfg.ap_addr, assoc_data->ap_addr); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_UNSPECIFIED, false, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, WLAN_REASON_UNSPECIFIED, false); } ieee80211_parse_cfg_selectors(assoc_data->userspace_selectors, req->supported_selectors, req->supported_selectors_len); memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, sizeof(ifmgd->ht_capa_mask)); memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa)); memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask, sizeof(ifmgd->vht_capa_mask)); memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa)); memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask, sizeof(ifmgd->s1g_capa_mask)); /* keep some setup (AP STA, channel, ...) 
if matching */ match_auth = ifmgd->auth_data && ether_addr_equal(ifmgd->auth_data->ap_addr, assoc_data->ap_addr) && ifmgd->auth_data->link_id == req->link_id; if (req->ap_mld_addr) { uapsd_supported = true; if (req->flags & (ASSOC_REQ_DISABLE_HT | ASSOC_REQ_DISABLE_VHT | ASSOC_REQ_DISABLE_HE | ASSOC_REQ_DISABLE_EHT)) { err = -EINVAL; goto err_free; } for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { struct ieee80211_supported_band *sband; struct cfg80211_bss *link_cbss = req->links[i].bss; struct ieee80211_bss *bss; if (!link_cbss) continue; bss = (void *)link_cbss->priv; if (!bss->wmm_used) { err = -EINVAL; req->links[i].error = err; goto err_free; } if (link_cbss->channel->band == NL80211_BAND_S1GHZ) { err = -EINVAL; req->links[i].error = err; goto err_free; } link = sdata_dereference(sdata->link[i], sdata); if (link) ether_addr_copy(assoc_data->link[i].addr, link->conf->addr); else eth_random_addr(assoc_data->link[i].addr); sband = local->hw.wiphy->bands[link_cbss->channel->band]; if (match_auth && i == assoc_link_id && link) assoc_data->link[i].conn = link->u.mgd.conn; else assoc_data->link[i].conn = ieee80211_conn_settings_unlimited; ieee80211_determine_our_sta_mode_assoc(sdata, sband, req, true, i, &assoc_data->link[i].conn); assoc_data->link[i].bss = link_cbss; assoc_data->link[i].disabled = req->links[i].disabled; if (!bss->uapsd_supported) uapsd_supported = false; if (assoc_data->link[i].conn.mode < IEEE80211_CONN_MODE_EHT) { err = -EINVAL; req->links[i].error = err; goto err_free; } err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, assoc_data, i); if (err) { err = -EINVAL; req->links[i].error = err; goto err_free; } } assoc_data->wmm = true; } else { struct ieee80211_supported_band *sband; struct ieee80211_bss *bss = (void *)cbss->priv; memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN); assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ; assoc_data->wmm = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS); if (cbss->channel->band == NL80211_BAND_6GHZ && req->flags & (ASSOC_REQ_DISABLE_HT | ASSOC_REQ_DISABLE_VHT | ASSOC_REQ_DISABLE_HE)) { err = -EINVAL; goto err_free; } sband = local->hw.wiphy->bands[cbss->channel->band]; assoc_data->link[0].bss = cbss; if (match_auth) assoc_data->link[0].conn = sdata->deflink.u.mgd.conn; else assoc_data->link[0].conn = ieee80211_conn_settings_unlimited; ieee80211_determine_our_sta_mode_assoc(sdata, sband, req, assoc_data->wmm, 0, &assoc_data->link[0].conn); uapsd_supported = bss->uapsd_supported; err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, assoc_data, 0); if (err) goto err_free; } assoc_data->spp_amsdu = req->flags & ASSOC_REQ_SPP_AMSDU; if (ifmgd->auth_data && !ifmgd->auth_data->done) { err = -EBUSY; goto err_free; } if (ifmgd->assoc_data) { err = -EBUSY; goto err_free; } /* Cleanup is delayed if auth_data matches */ if (ifmgd->auth_data && !match_auth) ieee80211_destroy_auth_data(sdata, false); if (req->ie && req->ie_len) { memcpy(assoc_data->ie, req->ie, req->ie_len); assoc_data->ie_len = req->ie_len; assoc_data->ie_pos = assoc_data->ie + assoc_data->ie_len; } else { assoc_data->ie_pos = assoc_data->ie; } if (req->fils_kek) { /* should already be checked in cfg80211 - so warn */ if (WARN_ON(req->fils_kek_len > FILS_MAX_KEK_LEN)) { err = -EINVAL; goto err_free; } memcpy(assoc_data->fils_kek, req->fils_kek, req->fils_kek_len); assoc_data->fils_kek_len = req->fils_kek_len; } if (req->fils_nonces) memcpy(assoc_data->fils_nonces, req->fils_nonces, 2 * FILS_NONCE_LEN); /* default timeout */ assoc_data->timeout = jiffies; 
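/*
 * Illustrative note (assumption based on the surrounding code): setting the
 * timeout to the current jiffies value makes the association attempt run as
 * soon as run_again() arms the MLME timer further down, e.g.
 *
 *	assoc_data->timeout = jiffies;			// fire immediately
 *	assoc_data->timeout = TU_TO_EXP_TIME(100);	// ~one beacon later
 *
 * The NEED_DTIM_BEFORE_ASSOC path below replaces this default with one
 * beacon interval (TU_TO_EXP_TIME(req->bss->beacon_interval)) when no
 * beacon has been received from the AP yet.
 */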
assoc_data->timeout_started = true; assoc_data->assoc_link_id = assoc_link_id; if (req->ap_mld_addr) { /* if there was no authentication, set up the link */ err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0); if (err) goto err_clear; } link = sdata_dereference(sdata->link[assoc_link_id], sdata); if (WARN_ON(!link)) { err = -EINVAL; goto err_clear; } override = link->u.mgd.conn.mode != assoc_data->link[assoc_link_id].conn.mode || link->u.mgd.conn.bw_limit != assoc_data->link[assoc_link_id].conn.bw_limit; link->u.mgd.conn = assoc_data->link[assoc_link_id].conn; ieee80211_setup_assoc_link(sdata, assoc_data, req, &link->u.mgd.conn, assoc_link_id); if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) && ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK), "U-APSD not supported with HW_PS_NULLFUNC_STACK\n")) sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; if (assoc_data->wmm && uapsd_supported && (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) { assoc_data->uapsd = true; ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; } else { assoc_data->uapsd = false; ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; } if (req->prev_bssid) memcpy(assoc_data->prev_ap_addr, req->prev_bssid, ETH_ALEN); if (req->use_mfp) { ifmgd->mfp = IEEE80211_MFP_REQUIRED; ifmgd->flags |= IEEE80211_STA_MFP_ENABLED; } else { ifmgd->mfp = IEEE80211_MFP_DISABLED; ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; } if (req->flags & ASSOC_REQ_USE_RRM) ifmgd->flags |= IEEE80211_STA_ENABLE_RRM; else ifmgd->flags &= ~IEEE80211_STA_ENABLE_RRM; if (req->crypto.control_port) ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; else ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; sdata->control_port_protocol = req->crypto.control_port_ethertype; sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt; sdata->control_port_over_nl80211 = req->crypto.control_port_over_nl80211; sdata->control_port_no_preauth = req->crypto.control_port_no_preauth; /* kick off associate process */ ifmgd->assoc_data = assoc_data; for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) { if (!assoc_data->link[i].bss) continue; if (i == assoc_data->assoc_link_id) continue; /* only calculate the mode, hence link == NULL */ err = ieee80211_prep_channel(sdata, NULL, i, assoc_data->link[i].bss, true, &assoc_data->link[i].conn, assoc_data->userspace_selectors); if (err) { req->links[i].error = err; goto err_clear; } } memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len); vif_cfg->ssid_len = assoc_data->ssid_len; /* needed for transmitting the assoc frames properly */ memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN); err = ieee80211_prep_connection(sdata, cbss, req->link_id, req->ap_mld_addr, true, &assoc_data->link[assoc_link_id].conn, override, assoc_data->userspace_selectors); if (err) goto err_clear; if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) { const struct cfg80211_bss_ies *beacon_ies; rcu_read_lock(); beacon_ies = rcu_dereference(req->bss->beacon_ies); if (!beacon_ies) { /* * Wait up to one beacon interval ... * should this be more if we miss one? 
*/ sdata_info(sdata, "waiting for beacon from %pM\n", link->u.mgd.bssid); assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); assoc_data->timeout_started = true; assoc_data->need_beacon = true; } rcu_read_unlock(); } run_again(sdata, assoc_data->timeout); /* We are associating, clean up auth_data */ if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, true); return 0; err_clear: if (!ifmgd->auth_data) { eth_zero_addr(sdata->deflink.u.mgd.bssid); ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_BSSID); } ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); return err; } int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct cfg80211_deauth_request *req) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; bool tx = !req->local_state_change; struct ieee80211_prep_tx_info info = { .subtype = IEEE80211_STYPE_DEAUTH, }; if (ifmgd->auth_data && ether_addr_equal(ifmgd->auth_data->ap_addr, req->bssid)) { sdata_info(sdata, "aborting authentication with %pM by local choice (Reason: %u=%s)\n", req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); info.link_id = ifmgd->auth_data->link_id; drv_mgd_prepare_tx(sdata->local, sdata, &info); ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); ieee80211_destroy_auth_data(sdata, false); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; } if (ifmgd->assoc_data && ether_addr_equal(ifmgd->assoc_data->ap_addr, req->bssid)) { sdata_info(sdata, "aborting association with %pM by local choice (Reason: %u=%s)\n", req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); info.link_id = ifmgd->assoc_data->assoc_link_id; drv_mgd_prepare_tx(sdata->local, sdata, &info); ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); ieee80211_destroy_assoc_data(sdata, ASSOC_ABANDON); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; } if (ifmgd->associated && ether_addr_equal(sdata->vif.cfg.ap_addr, req->bssid)) { sdata_info(sdata, "deauthenticating from %pM by local choice (Reason: %u=%s)\n", req->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; } return -ENOTCONN; } int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; if (!sdata->u.mgd.associated || memcmp(sdata->vif.cfg.ap_addr, req->ap_addr, ETH_ALEN)) return -ENOTCONN; sdata_info(sdata, "disassociating from %pM by local choice (Reason: %u=%s)\n", req->ap_addr, req->reason_code, ieee80211_get_reason_code_string(req->reason_code)); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, req->reason_code, !req->local_state_change, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); return 0; } void ieee80211_mgd_stop_link(struct ieee80211_link_data *link) { wiphy_work_cancel(link->sdata->local->hw.wiphy, 
&link->u.mgd.request_smps_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->u.mgd.recalc_smps); wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, &link->u.mgd.csa.switch_work); } void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* * Make sure some work items will not run after this, * they will not do anything but might not have been * cancelled when disconnecting. */ wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->monitor_work); wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->beacon_connection_loss_work); wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->tdls_peer_del_work); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); spin_lock_bh(&ifmgd->teardown_lock); if (ifmgd->teardown_skb) { kfree_skb(ifmgd->teardown_skb); ifmgd->teardown_skb = NULL; ifmgd->orig_teardown_skb = NULL; } kfree(ifmgd->assoc_req_ies); ifmgd->assoc_req_ies = NULL; ifmgd->assoc_req_ies_len = 0; spin_unlock_bh(&ifmgd->teardown_lock); del_timer_sync(&ifmgd->timer); } void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level); cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp); } EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_cqm_beacon_loss_notify(sdata->local, sdata); cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp); } EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify); static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata, int rssi_min_thold, int rssi_max_thold) { trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; /* * Scale up threshold values before storing it, as the RSSI averaging * algorithm uses a scaled up value as well. Change this scaling * factor if the RSSI averaging algorithm changes. 
*/ sdata->u.mgd.rssi_min_thold = rssi_min_thold*16; sdata->u.mgd.rssi_max_thold = rssi_max_thold*16; } void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, int rssi_min_thold, int rssi_max_thold) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); WARN_ON(rssi_min_thold == rssi_max_thold || rssi_min_thold > rssi_max_thold); _ieee80211_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); } EXPORT_SYMBOL(ieee80211_enable_rssi_reports); void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); _ieee80211_enable_rssi_reports(sdata, 0, 0); } EXPORT_SYMBOL(ieee80211_disable_rssi_reports); static void ieee80211_ml_reconf_selectors(unsigned long *userspace_selectors) { *userspace_selectors = 0; /* these selectors are mandatory for ML reconfiguration */ set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, userspace_selectors); set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, userspace_selectors); set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, userspace_selectors); } void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *add_links_data = ifmgd->reconf.add_links_data; struct sta_info *sta; struct cfg80211_mlo_reconf_done_data done_data = {}; u16 sta_changed_links = sdata->u.mgd.reconf.added_links | sdata->u.mgd.reconf.removed_links; u16 link_mask, valid_links; unsigned int link_id; unsigned long userspace_selectors; size_t orig_len = len; u8 i, group_key_data_len; u8 *pos; if (!ieee80211_vif_is_mld(&sdata->vif) || len < offsetofend(typeof(*mgmt), u.action.u.ml_reconf_resp) || mgmt->u.action.u.ml_reconf_resp.dialog_token != sdata->u.mgd.reconf.dialog_token || !sta_changed_links) return; pos = mgmt->u.action.u.ml_reconf_resp.variable; len -= offsetofend(typeof(*mgmt), u.action.u.ml_reconf_resp); /* each status duple is 3 octets */ if (len < mgmt->u.action.u.ml_reconf_resp.count * 3) { sdata_info(sdata, "mlo: reconf: unexpected len=%zu, count=%u\n", len, mgmt->u.action.u.ml_reconf_resp.count); goto disconnect; } link_mask = sta_changed_links; for (i = 0; i < mgmt->u.action.u.ml_reconf_resp.count; i++) { u16 status = get_unaligned_le16(pos + 1); link_id = *pos; if (!(link_mask & BIT(link_id))) { sdata_info(sdata, "mlo: reconf: unexpected link: %u, changed=0x%x\n", link_id, sta_changed_links); goto disconnect; } /* clear the corresponding link, to detect the case that * the same link was included more than one time */ link_mask &= ~BIT(link_id); /* Handle failure to remove links here. Failure to remove added * links will be done later in the flow. */ if (status != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "mlo: reconf: failed on link=%u, status=%u\n", link_id, status); /* The AP MLD failed to remove a link that was already * removed locally. As this is not expected behavior, * disconnect */ if (sdata->u.mgd.reconf.removed_links & BIT(link_id)) goto disconnect; /* The AP MLD failed to add a link. Remove it from the * added links. */ sdata->u.mgd.reconf.added_links &= ~BIT(link_id); } pos += 3; len -= 3; } if (link_mask) { sdata_info(sdata, "mlo: reconf: no response for links=0x%x\n", link_mask); goto disconnect; } if (!sdata->u.mgd.reconf.added_links) goto out; if (len < 1 || len < 1 + *pos) { sdata_info(sdata, "mlo: reconf: invalid group key data length"); goto disconnect; } /* The Group Key Data field must be present when links are added. 
This * field should be processed by userland. */ group_key_data_len = *pos++; pos += group_key_data_len; len -= group_key_data_len + 1; /* Process the information for the added links */ sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); if (WARN_ON(!sta)) goto disconnect; valid_links = sdata->vif.valid_links; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!add_links_data->link[link_id].bss || !(sdata->u.mgd.reconf.added_links & BIT(link_id))) continue; valid_links |= BIT(link_id); if (ieee80211_sta_allocate_link(sta, link_id)) goto disconnect; } ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links); ieee80211_ml_reconf_selectors(&userspace_selectors); link_mask = 0; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct cfg80211_bss *cbss = add_links_data->link[link_id].bss; struct ieee80211_link_data *link; struct link_sta_info *link_sta; u64 changed = 0; if (!cbss) continue; link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) goto disconnect; link_info(link, "mlo: reconf: local address %pM, AP link address %pM\n", add_links_data->link[link_id].addr, add_links_data->link[link_id].bss->bssid); link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&local->hw.wiphy->mtx)); if (WARN_ON(!link_sta)) goto disconnect; if (!link->u.mgd.have_beacon) { const struct cfg80211_bss_ies *ies; rcu_read_lock(); ies = rcu_dereference(cbss->beacon_ies); if (ies) link->u.mgd.have_beacon = true; else ies = rcu_dereference(cbss->ies); ieee80211_get_dtim(ies, &link->conf->sync_dtim_count, &link->u.mgd.dtim_period); link->conf->beacon_int = cbss->beacon_interval; rcu_read_unlock(); } link->conf->dtim_period = link->u.mgd.dtim_period ?: 1; link->u.mgd.conn = add_links_data->link[link_id].conn; if (ieee80211_prep_channel(sdata, link, link_id, cbss, true, &link->u.mgd.conn, &userspace_selectors)) { link_info(link, "mlo: reconf: prep_channel failed\n"); goto disconnect; } if (ieee80211_mgd_setup_link_sta(link, sta, link_sta, add_links_data->link[link_id].bss)) goto disconnect; if (!ieee80211_assoc_config_link(link, link_sta, add_links_data->link[link_id].bss, mgmt, pos, len, &changed)) goto disconnect; /* The AP MLD indicated success for this link, but the station * profile status indicated otherwise. 
Since there is an * inconsistency in the ML reconfiguration response, disconnect */ if (add_links_data->link[link_id].status != WLAN_STATUS_SUCCESS) goto disconnect; ieee80211_sta_init_nss(link_sta); if (ieee80211_sta_activate_link(sta, link_id)) goto disconnect; changed |= ieee80211_link_set_associated(link, cbss); ieee80211_link_info_change_notify(sdata, link, changed); ieee80211_recalc_smps(sdata, link); link_mask |= BIT(link_id); } sdata_info(sdata, "mlo: reconf: current valid_links=0x%x, added=0x%x\n", valid_links, link_mask); /* links might have changed due to rejected ones, set them again */ ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links); ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS); ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); done_data.buf = (const u8 *)mgmt; done_data.len = orig_len; done_data.added_links = link_mask; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) done_data.links[link_id].bss = add_links_data->link[link_id].bss; cfg80211_mlo_reconf_add_done(sdata->dev, &done_data); kfree(sdata->u.mgd.reconf.add_links_data); sdata->u.mgd.reconf.add_links_data = NULL; out: ieee80211_ml_reconf_reset(sdata); return; disconnect: __ieee80211_disconnect(sdata); } static struct sk_buff * ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *add_links_data, u16 removed_links) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct ieee80211_multi_link_elem *ml_elem; struct ieee80211_mle_basic_common_info *common; enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); struct sk_buff *skb; size_t size; unsigned int link_id; __le16 eml_capa = 0, mld_capa_ops = 0; struct ieee80211_tx_info *info; u8 common_size, var_common_size; u8 *ml_elem_len; u16 capab = 0; size = local->hw.extra_tx_headroom + sizeof(*mgmt); /* Consider the maximal length of the reconfiguration ML element */ size += sizeof(struct ieee80211_multi_link_elem); /* The Basic ML element and the Reconfiguration ML element have the same * fixed common information fields in the context of ML reconfiguration * action frame. 
The AP MLD MAC address must always be present */ common_size = sizeof(*common); /* when adding links, the MLD capabilities must be present */ var_common_size = 0; if (add_links_data) { const struct wiphy_iftype_ext_capab *ift_ext_capa = cfg80211_get_iftype_ext_capa(local->hw.wiphy, ieee80211_vif_type_p2p(&sdata->vif)); if (ift_ext_capa) { eml_capa = cpu_to_le16(ift_ext_capa->eml_capabilities); mld_capa_ops = cpu_to_le16(ift_ext_capa->mld_capa_and_ops); } /* MLD capabilities and operation */ var_common_size += 2; /* EML capabilities */ if (eml_capa & cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP | IEEE80211_EML_CAP_EMLMR_SUPPORT))) var_common_size += 2; } /* Add the common information length */ size += common_size + var_common_size; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct cfg80211_bss *cbss; size_t elems_len; if (removed_links & BIT(link_id)) { size += sizeof(struct ieee80211_mle_per_sta_profile) + ETH_ALEN; continue; } if (!add_links_data || !add_links_data->link[link_id].bss) continue; elems_len = add_links_data->link[link_id].elems_len; cbss = add_links_data->link[link_id].bss; /* should be the same across all BSSes */ if (cbss->capability & WLAN_CAPABILITY_PRIVACY) capab |= WLAN_CAPABILITY_PRIVACY; size += 2 + sizeof(struct ieee80211_mle_per_sta_profile) + ETH_ALEN; /* SSID element + WMM */ size += 2 + sdata->vif.cfg.ssid_len + 9; size += ieee80211_link_common_elems_size(sdata, iftype, cbss, elems_len); } skb = alloc_skb(size, GFP_KERNEL); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = skb_put_zero(skb, offsetofend(struct ieee80211_mgmt, u.action.u.ml_reconf_req)); /* Add the MAC header */ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); /* Add the action frame fixed fields */ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT; mgmt->u.action.u.ml_reconf_req.action_code = WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ; /* allocate a dialog token and store it */ sdata->u.mgd.reconf.dialog_token = ++sdata->u.mgd.dialog_token_alloc; mgmt->u.action.u.ml_reconf_req.dialog_token = sdata->u.mgd.reconf.dialog_token; /* Add the ML reconfiguration element and the common information */ skb_put_u8(skb, WLAN_EID_EXTENSION); ml_elem_len = skb_put(skb, 1); skb_put_u8(skb, WLAN_EID_EXT_EHT_MULTI_LINK); ml_elem = skb_put(skb, sizeof(*ml_elem)); ml_elem->control = cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_RECONF | IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR); common = skb_put(skb, common_size); common->len = common_size + var_common_size; memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN); if (add_links_data) { if (eml_capa & cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP | IEEE80211_EML_CAP_EMLMR_SUPPORT))) { ml_elem->control |= cpu_to_le16(IEEE80211_MLC_RECONF_PRES_EML_CAPA); skb_put_data(skb, &eml_capa, sizeof(eml_capa)); } ml_elem->control |= cpu_to_le16(IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP); skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); } if (sdata->u.mgd.flags & IEEE80211_STA_ENABLE_RRM) capab |= WLAN_CAPABILITY_RADIO_MEASURE; /* Add the per station profile */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { u8 *subelem_len = NULL; u16 ctrl; const u8 *addr; /* Skip links that are not changing */ if (!(removed_links & BIT(link_id)) && (!add_links_data || !add_links_data->link[link_id].bss)) continue; ctrl = link_id | 
IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT; if (removed_links & BIT(link_id)) { struct ieee80211_bss_conf *conf = sdata_dereference(sdata->vif.link_conf[link_id], sdata); if (!conf) continue; addr = conf->addr; ctrl |= u16_encode_bits(IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK, IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE); } else { addr = add_links_data->link[link_id].addr; ctrl |= IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE | u16_encode_bits(IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK, IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE); } skb_put_u8(skb, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE); subelem_len = skb_put(skb, 1); put_unaligned_le16(ctrl, skb_put(skb, sizeof(ctrl))); skb_put_u8(skb, 1 + ETH_ALEN); skb_put_data(skb, addr, ETH_ALEN); if (!(removed_links & BIT(link_id))) { u16 link_present_elems[PRESENT_ELEMS_MAX] = {}; size_t extra_used; void *capab_pos; u8 qos_info; capab_pos = skb_put(skb, 2); skb_put_u8(skb, WLAN_EID_SSID); skb_put_u8(skb, sdata->vif.cfg.ssid_len); skb_put_data(skb, sdata->vif.cfg.ssid, sdata->vif.cfg.ssid_len); extra_used = ieee80211_add_link_elems(sdata, skb, &capab, NULL, add_links_data->link[link_id].elems, add_links_data->link[link_id].elems_len, link_id, NULL, link_present_elems, add_links_data); if (add_links_data->link[link_id].elems) skb_put_data(skb, add_links_data->link[link_id].elems + extra_used, add_links_data->link[link_id].elems_len - extra_used); if (sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED) { qos_info = sdata->u.mgd.uapsd_queues; qos_info |= (sdata->u.mgd.uapsd_max_sp_len << IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT); } else { qos_info = 0; } ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info); put_unaligned_le16(capab, capab_pos); } ieee80211_fragment_element(skb, subelem_len, IEEE80211_MLE_SUBELEM_FRAGMENT); } ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT); info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; return skb; } int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_link *add_links, u16 rem_links) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgd_assoc_data *data = NULL; struct sta_info *sta; struct sk_buff *skb; u16 added_links, new_valid_links; int link_id, err; if (!ieee80211_vif_is_mld(&sdata->vif) || !(sdata->vif.cfg.mld_capa_op & IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)) return -EINVAL; /* No support for concurrent ML reconfiguration operation */ if (sdata->u.mgd.reconf.added_links || sdata->u.mgd.reconf.removed_links) return -EBUSY; added_links = 0; for (link_id = 0; add_links && link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!add_links[link_id].bss) continue; added_links |= BIT(link_id); } sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); if (WARN_ON(!sta)) return -ENOLINK; if (rem_links & BIT(sta->sta.deflink.link_id)) return -EINVAL; /* Adding links to the set of valid link is done only after a successful * ML reconfiguration frame exchange. 
Here prepare the data for the ML * reconfiguration frame construction and allocate the required * resources */ if (added_links) { bool uapsd_supported; unsigned long userspace_selectors; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; uapsd_supported = true; ieee80211_ml_reconf_selectors(&userspace_selectors); for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_supported_band *sband; struct cfg80211_bss *link_cbss = add_links[link_id].bss; struct ieee80211_bss *bss; if (!link_cbss) continue; bss = (void *)link_cbss->priv; if (!bss->wmm_used) { err = -EINVAL; goto err_free; } if (link_cbss->channel->band == NL80211_BAND_S1GHZ) { err = -EINVAL; goto err_free; } eth_random_addr(data->link[link_id].addr); data->link[link_id].conn = ieee80211_conn_settings_unlimited; sband = local->hw.wiphy->bands[link_cbss->channel->band]; ieee80211_determine_our_sta_mode(sdata, sband, NULL, true, link_id, &data->link[link_id].conn); data->link[link_id].bss = link_cbss; data->link[link_id].disabled = add_links[link_id].disabled; data->link[link_id].elems = (u8 *)add_links[link_id].elems; data->link[link_id].elems_len = add_links[link_id].elems_len; if (!bss->uapsd_supported) uapsd_supported = false; if (data->link[link_id].conn.mode < IEEE80211_CONN_MODE_EHT) { err = -EINVAL; goto err_free; } err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, data, link_id); if (err) { err = -EINVAL; goto err_free; } } /* Require U-APSD support to be similar to the current valid * links */ if (uapsd_supported != !!(sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED)) { err = -EINVAL; goto err_free; } for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!data->link[link_id].bss) continue; /* only used to verify the mode, nothing is allocated */ err = ieee80211_prep_channel(sdata, NULL, link_id, data->link[link_id].bss, true, &data->link[link_id].conn, &userspace_selectors); if (err) goto err_free; } } /* link removal is done before the ML reconfiguration frame exchange so * that these links will not be used between their removal by the AP MLD * and before the station got the ML reconfiguration response. Based on * Section 35.3.6.4 in Draft P802.11be_D7.0 the AP MLD should accept the * link removal request. */ if (rem_links) { u16 new_active_links = sdata->vif.active_links & ~rem_links; new_valid_links = sdata->vif.valid_links & ~rem_links; /* Should not be left with no valid links to perform the * ML reconfiguration */ if (!new_valid_links || !(new_valid_links & ~sdata->vif.dormant_links)) { sdata_info(sdata, "mlo: reconf: no valid links\n"); err = -EINVAL; goto err_free; } if (new_active_links != sdata->vif.active_links) { if (!new_active_links) new_active_links = BIT(__ffs(new_valid_links & ~sdata->vif.dormant_links)); err = ieee80211_set_active_links(&sdata->vif, new_active_links); if (err) { sdata_info(sdata, "mlo: reconf: failed set active links\n"); goto err_free; } } } /* Build the SKB before the link removal as the construction of the * station info for removed links requires the local address. * Invalidate the removed links, so that the transmission of the ML * reconfiguration request frame would not be done using them, as the AP * is expected to send the ML reconfiguration response frame on the link * on which the request was received. 
*/ skb = ieee80211_build_ml_reconf_req(sdata, data, rem_links); if (!skb) { err = -ENOMEM; goto err_free; } if (rem_links) { u16 new_dormant_links = sdata->vif.dormant_links & ~rem_links; err = ieee80211_vif_set_links(sdata, new_valid_links, new_dormant_links); if (err) { sdata_info(sdata, "mlo: reconf: failed set valid links\n"); kfree_skb(skb); goto err_free; } for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!(rem_links & BIT(link_id))) continue; ieee80211_sta_remove_link(sta, link_id); } /* notify the driver and upper layers */ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS); cfg80211_links_removed(sdata->dev, rem_links); } sdata_info(sdata, "mlo: reconf: adding=0x%x, removed=0x%x\n", added_links, rem_links); ieee80211_tx_skb(sdata, skb); sdata->u.mgd.reconf.added_links = added_links; sdata->u.mgd.reconf.add_links_data = data; sdata->u.mgd.reconf.removed_links = rem_links; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.reconf.wk, IEEE80211_ASSOC_TIMEOUT_SHORT); return 0; err_free: kfree(data); return err; }
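/*
 * Illustrative sketch, not part of the driver: the link-mask bookkeeping
 * that ieee80211_mgd_assoc_ml_reconf() performs when links are removed.
 * The helper name pick_new_active_links() is hypothetical; the logic simply
 * restates the code above - drop the removed links from the valid/active
 * masks and, if no active link survives, fall back to the lowest-numbered
 * valid link that is not dormant.
 */
#if 0	/* example only, never compiled */
static u16 pick_new_active_links(u16 valid_links, u16 active_links,
				 u16 dormant_links, u16 rem_links)
{
	u16 new_valid = valid_links & ~rem_links;
	u16 new_active = active_links & ~rem_links;

	/* no usable link left: caller must reject the reconfiguration */
	if (!new_valid || !(new_valid & ~dormant_links))
		return 0;

	/* keep at least one active link among the remaining valid ones */
	if (!new_active)
		new_active = BIT(__ffs(new_valid & ~dormant_links));

	return new_active;
}
#endif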
/* BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/coredump.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI maximum id value */
#define HCI_MAX_ID 10000

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t bdaddr;
	__u8 pscan_rep_mode;
	__u8 pscan_period_mode;
	__u8 pscan_mode;
	__u8 dev_class[3];
	__le16 clock_offset;
	__s8 rssi;
	__u8 ssp_mode;
};

struct inquiry_entry {
	struct list_head all;		/* inq_cache.all */
	struct list_head list;		/* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32 timestamp;
	struct inquiry_data data;
};

struct discovery_state {
	int type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head all;		/* All devices found during inquiry */
	struct list_head unknown;	/* Name state not known */
	struct list_head resolve;	/* Name needs to be resolved */
	__u32 timestamp;
	bdaddr_t last_adv_addr;
	u8 last_adv_addr_type;
	s8 last_adv_rssi;
	u32 last_adv_flags;
	u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH];
	u8 last_adv_data_len;
	bool report_invalid_rssi;
	bool result_filtering;
	bool limited;
	s8 rssi;
	u16 uuid_count;
	u8 (*uuids)[16];
	unsigned long name_resolve_timeout;
};

#define SUSPEND_NOTIFIER_TIMEOUT	msecs_to_jiffies(2000) /* 2 seconds */

enum suspend_tasks {
	SUSPEND_PAUSE_DISCOVERY,
	SUSPEND_UNPAUSE_DISCOVERY,
	SUSPEND_PAUSE_ADVERTISING,
	SUSPEND_UNPAUSE_ADVERTISING,
	SUSPEND_SCAN_DISABLE,
	SUSPEND_SCAN_ENABLE,
	SUSPEND_DISCONNECTING,
	SUSPEND_POWERING_DOWN,
	SUSPEND_PREPARE_NOTIFIER,
	SUSPEND_SET_ADV_FILTER,
	__SUSPEND_NUM_TASKS
};

enum suspended_state {
	BT_RUNNING = 0,
	BT_SUSPEND_DISCONNECT,
	BT_SUSPEND_CONFIGURE_WAKE,
};
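/*
 * Illustrative sketch (not part of hci_core.h): the discovery cache above
 * keeps every result on discovery.all and additionally threads each entry
 * onto discovery.unknown or discovery.resolve via inquiry_entry.list, with
 * name_state recording why. The helper below is a hypothetical example of
 * that bookkeeping using the standard <linux/list.h> primitives; it is not
 * the BlueZ implementation (see hci_inquiry_cache_update_resolve() declared
 * further below).
 */
static inline void demo_mark_name_needed(struct discovery_state *d,
					 struct inquiry_entry *e)
{
	/* Only entries still parked on d->unknown are queued for resolving. */
	if (e->name_state != NAME_NOT_KNOWN)
		return;

	e->name_state = NAME_NEEDED;
	list_move_tail(&e->list, &d->resolve);
}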
struct hci_conn_hash { struct list_head list; unsigned int acl_num; unsigned int sco_num; unsigned int iso_num; unsigned int le_num; unsigned int le_num_peripheral; }; struct bdaddr_list { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; }; struct codec_list { struct list_head list; u8 id; __u16 cid; __u16 vid; u8 transport; u8 num_caps; u32 len; struct hci_codec_caps caps[]; }; struct bdaddr_list_with_irk { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; u8 peer_irk[16]; u8 local_irk[16]; }; /* Bitmask of connection flags */ enum hci_conn_flags { HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0), HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1), HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2), }; typedef u8 hci_conn_flags_t; struct bdaddr_list_with_flags { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; hci_conn_flags_t flags; }; struct bt_uuid { struct list_head list; u8 uuid[16]; u8 size; u8 svc_hint; }; struct blocked_key { struct list_head list; struct rcu_head rcu; u8 type; u8 val[16]; }; struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; u8 type; u8 val[16]; }; struct smp_ltk { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; u8 authenticated; u8 type; u8 enc_size; __le16 ediv; __le64 rand; u8 val[16]; }; struct smp_irk { struct list_head list; struct rcu_head rcu; bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; u8 val[16]; }; struct link_key { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; u8 type; u8 val[HCI_LINK_KEY_SIZE]; u8 pin_len; }; struct oob_data { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; u8 present; u8 hash192[16]; u8 rand192[16]; u8 hash256[16]; u8 rand256[16]; }; struct adv_info { struct list_head list; bool enabled; bool pending; bool periodic; __u8 mesh; __u8 instance; __u8 handle; __u32 flags; __u16 timeout; __u16 remaining_time; __u16 duration; __u16 adv_data_len; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; bool adv_data_changed; __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; bool scan_rsp_changed; __u16 per_adv_data_len; __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __s8 tx_power; __u32 min_interval; __u32 max_interval; bdaddr_t random_addr; bool rpa_expired; struct delayed_work rpa_expired_cb; }; #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F #define DATA_CMP(_d1, _l1, _d2, _l2) \ (_l1 == _l2 ? 
memcmp(_d1, _d2, _l1) : _l1 - _l2) #define ADV_DATA_CMP(_adv, _data, _len) \ DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len) #define SCAN_RSP_CMP(_adv, _data, _len) \ DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len) struct monitored_device { struct list_head list; bdaddr_t bdaddr; __u8 addr_type; __u16 handle; bool notified; }; struct adv_pattern { struct list_head list; __u8 ad_type; __u8 offset; __u8 length; __u8 value[HCI_MAX_EXT_AD_LENGTH]; }; struct adv_rssi_thresholds { __s8 low_threshold; __s8 high_threshold; __u16 low_threshold_timeout; __u16 high_threshold_timeout; __u8 sampling_period; }; struct adv_monitor { struct list_head patterns; struct adv_rssi_thresholds rssi; __u16 handle; enum { ADV_MONITOR_STATE_NOT_REGISTERED, ADV_MONITOR_STATE_REGISTERED, ADV_MONITOR_STATE_OFFLOADED } state; }; #define HCI_MIN_ADV_MONITOR_HANDLE 1 #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 #define HCI_ADV_MONITOR_EXT_NONE 1 #define HCI_ADV_MONITOR_EXT_MSFT 2 #define HCI_MAX_SHORT_NAME_LENGTH 10 #define HCI_CONN_HANDLE_MAX 0x0eff #define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX) /* Min encryption key size to match with SMP */ #define HCI_MIN_ENC_KEY_SIZE 7 /* Default LE RPA expiry time, 15 minutes */ #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) /* Default min/max age of connection information (1s/3s) */ #define DEFAULT_CONN_INFO_MIN_AGE 1000 #define DEFAULT_CONN_INFO_MAX_AGE 3000 /* Default authenticated payload timeout 30s */ #define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8 #define HCI_MAX_PAGES 3 struct hci_dev { struct list_head list; struct mutex lock; struct ida unset_handle_ida; const char *name; unsigned long flags; __u16 id; __u8 bus; bdaddr_t bdaddr; bdaddr_t setup_addr; bdaddr_t public_addr; bdaddr_t random_addr; bdaddr_t static_addr; __u8 adv_addr_type; __u8 dev_name[HCI_MAX_NAME_LENGTH]; __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; __u8 eir[HCI_MAX_EIR_LENGTH]; __u16 appearance; __u8 dev_class[3]; __u8 major_class; __u8 minor_class; __u8 max_page; __u8 features[HCI_MAX_PAGES][8]; __u8 le_features[8]; __u8 le_accept_list_size; __u8 le_resolv_list_size; __u8 le_num_of_adv_sets; __u8 le_states[8]; __u8 mesh_ad_types[16]; __u8 mesh_send_ref; __u8 commands[64]; __u8 hci_ver; __u16 hci_rev; __u8 lmp_ver; __u16 manufacturer; __u16 lmp_subver; __u16 voice_setting; __u8 num_iac; __u16 stored_max_keys; __u16 stored_num_keys; __u8 io_capability; __s8 inq_tx_power; __u8 err_data_reporting; __u16 page_scan_interval; __u16 page_scan_window; __u8 page_scan_type; __u8 le_adv_channel_map; __u16 le_adv_min_interval; __u16 le_adv_max_interval; __u8 le_scan_type; __u16 le_scan_interval; __u16 le_scan_window; __u16 le_scan_int_suspend; __u16 le_scan_window_suspend; __u16 le_scan_int_discovery; __u16 le_scan_window_discovery; __u16 le_scan_int_adv_monitor; __u16 le_scan_window_adv_monitor; __u16 le_scan_int_connect; __u16 le_scan_window_connect; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_latency; __u16 le_supv_timeout; __u16 le_def_tx_len; __u16 le_def_tx_time; __u16 le_max_tx_len; __u16 le_max_tx_time; __u16 le_max_rx_len; __u16 le_max_rx_time; __u8 le_max_key_size; __u8 le_min_key_size; __u16 discov_interleaved_timeout; __u16 conn_info_min_age; __u16 conn_info_max_age; __u16 auth_payload_timeout; __u8 min_enc_key_size; __u8 max_enc_key_size; __u8 pairing_opts; __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; __u16 advmon_allowlist_duration; __u16 advmon_no_filter_duration; __u8 
enable_advmon_interleave_scan; __u16 devid_source; __u16 devid_vendor; __u16 devid_product; __u16 devid_version; __u8 def_page_scan_type; __u16 def_page_scan_int; __u16 def_page_scan_window; __u8 def_inq_scan_type; __u16 def_inq_scan_int; __u16 def_inq_scan_window; __u16 def_br_lsto; __u16 def_page_timeout; __u16 def_multi_adv_rotation_duration; __u16 def_le_autoconnect_timeout; __s8 min_le_tx_power; __s8 max_le_tx_power; __u16 pkt_type; __u16 esco_type; __u16 link_policy; __u16 link_mode; __u32 idle_timeout; __u16 sniff_min_interval; __u16 sniff_max_interval; unsigned int auto_accept_delay; unsigned long quirks; atomic_t cmd_cnt; unsigned int acl_cnt; unsigned int sco_cnt; unsigned int le_cnt; unsigned int iso_cnt; unsigned int acl_mtu; unsigned int sco_mtu; unsigned int le_mtu; unsigned int iso_mtu; unsigned int acl_pkts; unsigned int sco_pkts; unsigned int le_pkts; unsigned int iso_pkts; unsigned long acl_last_tx; unsigned long le_last_tx; __u8 le_tx_def_phys; __u8 le_rx_def_phys; struct workqueue_struct *workqueue; struct workqueue_struct *req_workqueue; struct work_struct power_on; struct delayed_work power_off; struct work_struct error_reset; struct work_struct cmd_sync_work; struct list_head cmd_sync_work_list; struct mutex cmd_sync_work_lock; struct mutex unregister_lock; struct work_struct cmd_sync_cancel_work; struct work_struct reenable_adv_work; __u16 discov_timeout; struct delayed_work discov_off; struct delayed_work service_cache; struct delayed_work cmd_timer; struct delayed_work ncmd_timer; struct work_struct rx_work; struct work_struct cmd_work; struct work_struct tx_work; struct delayed_work le_scan_disable; struct sk_buff_head rx_q; struct sk_buff_head raw_q; struct sk_buff_head cmd_q; struct sk_buff *sent_cmd; struct sk_buff *recv_event; struct mutex req_lock; wait_queue_head_t req_wait_q; __u32 req_status; __u32 req_result; struct sk_buff *req_skb; struct sk_buff *req_rsp; void *smp_data; void *smp_bredr_data; struct discovery_state discovery; bool discovery_paused; int advertising_old_state; bool advertising_paused; struct notifier_block suspend_notifier; enum suspended_state suspend_state_next; enum suspended_state suspend_state; bool scanning_paused; bool suspended; u8 wake_reason; bdaddr_t wake_addr; u8 wake_addr_type; struct hci_conn_hash conn_hash; struct list_head mesh_pending; struct list_head mgmt_pending; struct list_head reject_list; struct list_head accept_list; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; struct list_head identity_resolving_keys; struct list_head remote_oob_data; struct list_head le_accept_list; struct list_head le_resolv_list; struct list_head le_conn_params; struct list_head pend_le_conns; struct list_head pend_le_reports; struct list_head blocked_keys; struct list_head local_codecs; struct hci_dev_stats stat; atomic_t promisc; const char *hw_info; const char *fw_info; struct dentry *debugfs; struct hci_devcoredump dump; struct device dev; struct rfkill *rfkill; DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); hci_conn_flags_t conn_flags; __s8 adv_tx_power; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 adv_data_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __u8 scan_rsp_data_len; __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __u8 per_adv_data_len; struct list_head adv_instances; unsigned int adv_instance_cnt; __u8 cur_adv_instance; __u16 adv_instance_timeout; struct delayed_work adv_instance_expire; struct idr adv_monitors_idr; unsigned int adv_monitors_cnt; __u8 irk[16]; __u32 rpa_timeout; struct 
delayed_work rpa_expired; bdaddr_t rpa; struct delayed_work mesh_send_done; enum { INTERLEAVE_SCAN_NONE, INTERLEAVE_SCAN_NO_FILTER, INTERLEAVE_SCAN_ALLOWLIST } interleave_scan_state; struct delayed_work interleave_scan; struct list_head monitored_devices; bool advmon_pend_notify; #if IS_ENABLED(CONFIG_BT_LEDS) struct led_trigger *power_led; #endif #if IS_ENABLED(CONFIG_BT_MSFTEXT) __u16 msft_opcode; void *msft_data; bool msft_curve_validity; #endif #if IS_ENABLED(CONFIG_BT_AOSPEXT) bool aosp_capable; bool aosp_quality_report; #endif int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); int (*setup)(struct hci_dev *hdev); int (*shutdown)(struct hci_dev *hdev); int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); void (*hw_error)(struct hci_dev *hdev, u8 code); int (*post_init)(struct hci_dev *hdev); int (*set_diag)(struct hci_dev *hdev, bool enable); int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); void (*reset)(struct hci_dev *hdev); bool (*wakeup)(struct hci_dev *hdev); int (*set_quality_report)(struct hci_dev *hdev, bool enable); int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data); u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); }; #define HCI_PHY_HANDLE(handle) (handle & 0xff) enum conn_reasons { CONN_REASON_PAIR_DEVICE, CONN_REASON_L2CAP_CHAN, CONN_REASON_SCO_CONNECT, CONN_REASON_ISO_CONNECT, }; struct hci_conn { struct list_head list; atomic_t refcnt; bdaddr_t dst; __u8 dst_type; bdaddr_t src; __u8 src_type; bdaddr_t init_addr; __u8 init_addr_type; bdaddr_t resp_addr; __u8 resp_addr_type; __u8 adv_instance; __u16 handle; __u16 sync_handle; __u8 sid; __u16 state; __u16 mtu; __u8 mode; __u8 type; __u8 role; bool out; __u8 attempt; __u8 dev_class[3]; __u8 features[HCI_MAX_PAGES][8]; __u16 pkt_type; __u16 link_policy; __u8 key_type; __u8 auth_type; __u8 sec_level; __u8 pending_sec_level; __u8 pin_length; __u8 enc_key_size; __u8 io_capability; __u32 passkey_notify; __u8 passkey_entered; __u16 disc_timeout; __u16 conn_timeout; __u16 setting; __u16 auth_payload_timeout; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 le_adv_data_len; __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN]; __u16 le_per_adv_data_len; __u16 le_per_adv_data_offset; __u8 le_adv_phy; __u8 le_adv_sec_phy; __u8 le_tx_phy; __u8 le_rx_phy; __s8 rssi; __s8 tx_power; __s8 max_tx_power; struct bt_iso_qos iso_qos; __u8 num_bis; __u8 bis[HCI_MAX_ISO_BIS]; unsigned long flags; enum conn_reasons conn_reason; __u8 abort_reason; __u32 clock; __u16 clock_accuracy; unsigned long conn_info_timestamp; __u8 remote_cap; __u8 remote_auth; __u8 remote_id; unsigned int sent; struct sk_buff_head data_q; struct list_head chan_list; struct delayed_work disc_work; struct delayed_work auto_accept_work; struct delayed_work idle_work; struct delayed_work le_conn_timeout; struct device dev; struct dentry *debugfs; struct hci_dev *hdev; void *l2cap_data; void *sco_data; void *iso_data; struct list_head link_list; struct hci_conn *parent; struct hci_link *link; struct bt_codec codec; void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); void (*security_cfm_cb) (struct hci_conn *conn, u8 status); void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); 
void (*cleanup)(struct hci_conn *conn); }; struct hci_link { struct list_head list; struct hci_conn *conn; }; struct hci_chan { struct list_head list; __u16 handle; struct hci_conn *conn; struct sk_buff_head data_q; unsigned int sent; __u8 state; }; struct hci_conn_params { struct list_head list; struct list_head action; bdaddr_t addr; u8 addr_type; u16 conn_min_interval; u16 conn_max_interval; u16 conn_latency; u16 supervision_timeout; enum { HCI_AUTO_CONN_DISABLED, HCI_AUTO_CONN_REPORT, HCI_AUTO_CONN_DIRECT, HCI_AUTO_CONN_ALWAYS, HCI_AUTO_CONN_LINK_LOSS, HCI_AUTO_CONN_EXPLICIT, } auto_connect; struct hci_conn *conn; bool explicit_connect; /* Accessed without hdev->lock: */ hci_conn_flags_t flags; u8 privacy_mode; }; extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) #define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) #define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_volatile_flags(hdev) \ do { \ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ hci_dev_clear_flag(hdev, HCI_LE_ADV); \ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ } while (0) #define hci_dev_le_state_simultaneous(hdev) \ (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \ (hdev->le_states[4] & 0x08) && /* Central */ \ (hdev->le_states[4] & 0x40) && /* Peripheral */ \ (hdev->le_states[3] & 0x10)) /* Simultaneous */ /* ----- HCI interface to upper protocols ----- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); int l2cap_disconn_ind(struct hci_conn *hcon); void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); #if IS_ENABLED(CONFIG_BT_BREDR) int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); #else static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { return 0; } static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { } #endif #if IS_ENABLED(CONFIG_BT_LE) int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); #else static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { return 0; } static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { } #endif /* ----- Inquiry cache ----- */ #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ static inline void discovery_init(struct hci_dev *hdev) { hdev->discovery.state = DISCOVERY_STOPPED; INIT_LIST_HEAD(&hdev->discovery.all); INIT_LIST_HEAD(&hdev->discovery.unknown); INIT_LIST_HEAD(&hdev->discovery.resolve); hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = HCI_RSSI_INVALID; } static inline void hci_discovery_filter_clear(struct hci_dev *hdev) { hdev->discovery.result_filtering = false; hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = 
HCI_RSSI_INVALID; hdev->discovery.uuid_count = 0; kfree(hdev->discovery.uuids); hdev->discovery.uuids = NULL; } bool hci_discovery_active(struct hci_dev *hdev); void hci_discovery_set_state(struct hci_dev *hdev, int state); static inline int inquiry_cache_empty(struct hci_dev *hdev) { return list_empty(&hdev->discovery.all); } static inline long inquiry_cache_age(struct hci_dev *hdev) { struct discovery_state *c = &hdev->discovery; return jiffies - c->timestamp; } static inline long inquiry_entry_age(struct inquiry_entry *e) { return jiffies - e->timestamp; } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, bdaddr_t *bdaddr); struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, bdaddr_t *bdaddr, int state); void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie); u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known); void hci_inquiry_cache_flush(struct hci_dev *hdev); /* ----- HCI Connections ----- */ enum { HCI_CONN_AUTH_PEND, HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, HCI_CONN_SCO_SETUP_PEND, HCI_CONN_MGMT_CONNECTED, HCI_CONN_SSP_ENABLED, HCI_CONN_SC_ENABLED, HCI_CONN_AES_CCM, HCI_CONN_POWER_SAVE, HCI_CONN_FLUSH_KEY, HCI_CONN_ENCRYPT, HCI_CONN_AUTH, HCI_CONN_SECURE, HCI_CONN_FIPS, HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, HCI_CONN_DROP, HCI_CONN_CANCEL, HCI_CONN_PARAM_REMOVAL_PEND, HCI_CONN_NEW_LINK_KEY, HCI_CONN_SCANNING, HCI_CONN_AUTH_FAILURE, HCI_CONN_PER_ADV, HCI_CONN_BIG_CREATED, HCI_CONN_CREATE_CIS, HCI_CONN_CREATE_BIG_SYNC, HCI_CONN_BIG_SYNC, HCI_CONN_BIG_SYNC_FAILED, HCI_CONN_CREATE_PA_SYNC, HCI_CONN_PA_SYNC, HCI_CONN_PA_SYNC_FAILED, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } static inline bool hci_conn_sc_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && test_bit(HCI_CONN_SC_ENABLED, &conn->flags); } static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; list_add_tail_rcu(&c->list, &h->list); switch (c->type) { case ACL_LINK: h->acl_num++; break; case LE_LINK: h->le_num++; if (c->role == HCI_ROLE_SLAVE) h->le_num_peripheral++; break; case SCO_LINK: case ESCO_LINK: h->sco_num++; break; case ISO_LINK: h->iso_num++; break; } } static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; list_del_rcu(&c->list); synchronize_rcu(); switch (c->type) { case ACL_LINK: h->acl_num--; break; case LE_LINK: h->le_num--; if (c->role == HCI_ROLE_SLAVE) h->le_num_peripheral--; break; case SCO_LINK: case ESCO_LINK: h->sco_num--; break; case ISO_LINK: h->iso_num--; break; } } static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; switch (type) { case ACL_LINK: return h->acl_num; case LE_LINK: return h->le_num; case SCO_LINK: case ESCO_LINK: return h->sco_num; case ISO_LINK: return h->iso_num; default: return 0; } } static inline unsigned int hci_conn_count(struct hci_dev *hdev) { struct hci_conn_hash *c = &hdev->conn_hash; return c->acl_num + c->sco_num + c->le_num + c->iso_num; } static inline bool hci_conn_valid(struct hci_dev *hdev, 
struct hci_conn *conn) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c == conn) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; __u8 type = INVALID_LINK; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->handle == handle) { type = c->type; break; } } rcu_read_unlock(); return type; } static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, bdaddr_t *ba, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, ba) || c->type != ISO_LINK) continue; if (c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev, __u8 sid, bdaddr_t *dst, __u8 dst_type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || bacmp(&c->dst, dst) || c->dst_type != dst_type || c->sid != sid) continue; rcu_read_unlock(); return c; } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, bdaddr_t *ba, __u8 big, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, ba) || c->type != ISO_LINK || !test_bit(HCI_CONN_PER_ADV, &c->flags)) continue; if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->handle == handle) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, __u8 type, bdaddr_t *ba) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != LE_LINK) continue; if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type, __u8 cig, __u8 id) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; /* Match CIG ID if set */ if (cig != c->iso_qos.ucast.cig) continue; /* Match CIS ID if set */ if (id != c->iso_qos.ucast.cis) continue; /* Match destination address if set */ if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn 
*hci_conn_hash_lookup_cig(struct hci_dev *hdev, __u8 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; if (handle == c->iso_qos.ucast.cig) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, __u8 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; /* An ISO_LINK hcon with BDADDR_ANY as destination * address is a Broadcast connection. A Broadcast * slave connection is associated with a PA train, * so the sync_handle can be used to differentiate * from unicast. */ if (bacmp(&c->dst, BDADDR_ANY) && c->sync_handle == HCI_SYNC_HANDLE_INVALID) continue; if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev, __u8 handle, __u8 num_bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK || c->state != state) continue; if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !test_bit(HCI_CONN_PA_SYNC, &c->flags)) continue; if (c->iso_qos.bcast.big == big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; /* Ignore the listen hcon, we are looking * for the child hcon that was created as * a result of the PA sync established event. 
*/ if (c->state == BT_LISTEN) continue; if (c->sync_handle == sync_handle) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->state == state) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data); static inline void hci_conn_hash_list_state(struct hci_dev *hdev, hci_conn_func_t func, __u8 type, __u16 state, void *data) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; if (!func) return; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->state == state) func(c, data); } rcu_read_unlock(); } static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, hci_conn_func_t func, __u8 type, __u8 flag, void *data) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; if (!func) return; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && test_bit(flag, &c->flags)) func(c, data); } rcu_read_unlock(); } static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && !test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } /* Returns true if an le connection is in the scanning state */ static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); bool hci_iso_setup_path(struct hci_conn *conn); int hci_le_create_cis_pending(struct hci_dev *hdev); int hci_pa_create_sync_pending(struct hci_dev *hdev); int hci_le_big_create_sync_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role, u16 handle); struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role); void hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); struct hci_chan *hci_chan_create(struct hci_conn *conn); void hci_chan_del(struct hci_chan *chan); void hci_chan_list_flush(struct hci_conn *conn); struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, bool dst_resolved, u8 sec_level, u16 conn_timeout, u8 role, u8 phy, u8 sec_phy); void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type, enum conn_reasons conn_reason, u16 timeout); struct hci_conn 
*hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec, u16 timeout); struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, struct bt_iso_qos *qos, __u8 base_len, __u8 *base); struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos, __u8 data_len, __u8 *data); struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, bool initiator); int hci_conn_switch_role(struct hci_conn *conn, __u8 role); void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); void hci_conn_failed(struct hci_conn *conn, u8 status); u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); /* * hci_conn_get() and hci_conn_put() are used to control the life-time of an * "hci_conn" object. They do not guarantee that the hci_conn object is running, * working or anything else. They just guarantee that the object is available * and can be dereferenced. So you can use its locks, local variables and any * other constant data. * Before accessing runtime data, you _must_ lock the object and then check that * it is still running. As soon as you release the locks, the connection might * get dropped, though. * * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control * how long the underlying connection is held. So every channel that runs on the * hci_conn object calls this to prevent the connection from disappearing. As * long as you hold a device, you must also guarantee that you have a valid * reference to the device via hci_conn_get() (or the initial reference from * hci_conn_add()). * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't * break because nobody cares for that. But this means, we cannot use * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 
*/ static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) { get_device(&conn->dev); return conn; } static inline void hci_conn_put(struct hci_conn *conn) { put_device(&conn->dev); } static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) { BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); atomic_inc(&conn->refcnt); cancel_delayed_work(&conn->disc_work); return conn; } static inline void hci_conn_drop(struct hci_conn *conn) { BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); if (atomic_dec_and_test(&conn->refcnt)) { unsigned long timeo; switch (conn->type) { case ACL_LINK: case LE_LINK: cancel_delayed_work(&conn->idle_work); if (conn->state == BT_CONNECTED) { timeo = conn->disc_timeout; if (!conn->out) timeo *= 2; } else { timeo = 0; } break; default: timeo = 0; break; } cancel_delayed_work(&conn->disc_work); queue_delayed_work(conn->hdev->workqueue, &conn->disc_work, timeo); } } /* ----- HCI Devices ----- */ static inline void hci_dev_put(struct hci_dev *d) { BT_DBG("%s orig refcnt %d", d->name, kref_read(&d->dev.kobj.kref)); put_device(&d->dev); } static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) { BT_DBG("%s orig refcnt %d", d->name, kref_read(&d->dev.kobj.kref)); get_device(&d->dev); return d; } #define hci_dev_lock(d) mutex_lock(&d->lock) #define hci_dev_unlock(d) mutex_unlock(&d->lock) #define to_hci_dev(d) container_of(d, struct hci_dev, dev) #define to_hci_conn(c) container_of(c, struct hci_conn, dev) static inline void *hci_get_drvdata(struct hci_dev *hdev) { return dev_get_drvdata(&hdev->dev); } static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) { dev_set_drvdata(&hdev->dev, data); } static inline void *hci_get_priv(struct hci_dev *hdev) { return (char *)hdev + sizeof(*hdev); } struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); struct hci_dev *hci_alloc_dev_priv(int sizeof_priv); static inline struct hci_dev *hci_alloc_dev(void) { return hci_alloc_dev_priv(0); } void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); void hci_release_dev(struct hci_dev *hdev); int hci_register_suspend_notifier(struct hci_dev *hdev); int hci_unregister_suspend_notifier(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) { #if IS_ENABLED(CONFIG_BT_MSFTEXT) hdev->msft_opcode = opcode; #endif } static inline void hci_set_aosp_capable(struct hci_dev *hdev) { #if IS_ENABLED(CONFIG_BT_AOSPEXT) hdev->aosp_capable = true; #endif } static inline void hci_devcd_setup(struct hci_dev *hdev) { #ifdef CONFIG_DEV_COREDUMP INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx); INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout); skb_queue_head_init(&hdev->dump.dump_q); #endif } int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_do_close(struct hci_dev *hdev); int hci_dev_reset(__u16 dev); int hci_dev_reset_stat(__u16 dev); int hci_dev_cmd(unsigned int cmd, void __user *arg); int hci_get_dev_list(void __user *arg); int 
hci_get_dev_info(void __user *arg); int hci_get_conn_list(void __user *arg); int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, bdaddr_t *bdaddr, u8 type); struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *list, bdaddr_t *bdaddr, u8 type); struct bdaddr_list_with_flags * hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type, u8 *peer_irk, u8 *local_irk); int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type, u32 flags); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_clear_disabled(struct hci_dev *hdev); void hci_conn_params_free(struct hci_conn_params *param); void hci_pend_le_list_del_init(struct hci_conn_params *param); void hci_pend_le_list_add(struct hci_conn_params *param, struct list_head *list); struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type); void hci_uuids_clear(struct hci_dev *hdev); void hci_link_keys_clear(struct hci_dev *hdev); struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent); struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 role); int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_smp_ltks_clear(struct hci_dev *hdev); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa); struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 val[16], bdaddr_t *rpa); void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]); void hci_blocked_keys_clear(struct hci_dev *hdev); void hci_smp_irks_clear(struct hci_dev *hdev); bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256); int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_adv_instances_clear(struct hci_dev *hdev); struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 
instance); struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration, s8 tx_power, u32 min_interval, u32 max_interval, u8 mesh_handle); struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval); int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle); int hci_remove_all_adv_monitor(struct hci_dev *hdev); bool hci_is_adv_monitoring(struct hci_dev *hdev); int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); void hci_conn_add_sysfs(struct hci_conn *conn); void hci_conn_del_sysfs(struct hci_conn *conn); #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev)) #define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent) /* ----- LMP capabilities ----- */ #define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT) #define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH) #define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) #define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) #define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) #define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) #define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) #define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) #define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M) #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) #define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH) #define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO) #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) #define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M) #define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M) #define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT) #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) /* ----- Extended LMP capabilities ----- */ #define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & 
LMP_CPB_CENTRAL) #define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL) #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) #define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) #define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) #define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING) /* ----- Host capabilities ----- */ #define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP) #define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC) #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) #define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \ !hci_dev_test_flag(dev, HCI_AUTO_OFF)) #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ hci_dev_test_flag(dev, HCI_SC_ENABLED)) #define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED)) #define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \ !adv->rpa_expired) #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) #define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M)) #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) #define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ &(dev)->quirks)) #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) #define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \ (hdev->commands[39] & 0x04)) #define read_key_size_capable(dev) \ ((dev)->commands[20] & 0x10 && \ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) /* Use enhanced synchronous connection if command is supported and its quirk * has not been set. */ #define enhanced_sync_conn_capable(dev) \ (((dev)->commands[29] & 0x08) && \ !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks)) /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks)) /* Use ext create connection if command is supported */ #define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks)) /* Extended advertising support */ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) /* Maximum advertising length */ #define max_adv_len(dev) \ (ext_adv_capable(dev) ? 
HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH) /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789: * * C24: Mandatory if the LE Controller supports Connection State and either * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported */ #define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \ ext_adv_capable(dev)) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \ &(dev)->quirks)) /* Periodic advertising support */ #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV)) /* CIS Master/Slave and BIS support */ #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev)) #define cis_capable(dev) \ (cis_central_capable(dev) || cis_peripheral_capable(dev)) #define cis_central_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL) #define cis_peripheral_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) #define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks))) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type, __u8 *flags) { switch (type) { case ACL_LINK: return l2cap_connect_ind(hdev, bdaddr); case SCO_LINK: case ESCO_LINK: return sco_connect_ind(hdev, bdaddr, flags); case ISO_LINK: return iso_connect_ind(hdev, bdaddr, flags); default: BT_ERR("unknown link type %d", type); return -EINVAL; } } static inline int hci_proto_disconn_ind(struct hci_conn *conn) { if (conn->type != ACL_LINK && conn->type != LE_LINK) return HCI_ERROR_REMOTE_USER_TERM; return l2cap_disconn_ind(conn); } /* ----- HCI callbacks ----- */ struct hci_cb { struct list_head list; char *name; bool (*match) (struct hci_conn *conn); void (*connect_cfm) (struct hci_conn *conn, __u8 status); void (*disconn_cfm) (struct hci_conn *conn, __u8 status); void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list) { struct hci_cb *cb, *cpy; rcu_read_lock(); list_for_each_entry_rcu(cb, &hci_cb_list, list) { if (cb->match && cb->match(conn)) { cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC); if (!cpy) break; *cpy = *cb; INIT_LIST_HEAD(&cpy->list); list_add_rcu(&cpy->list, list); } } rcu_read_unlock(); } static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->connect_cfm) cb->connect_cfm(conn, status); kfree(cb); } if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); } static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->disconn_cfm) cb->disconn_cfm(conn, reason); kfree(cb); } if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); } static inline void hci_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, 
tmp, &list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); kfree(cb); } if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { __u8 encrypt; if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) return; encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; hci_security_cfm(conn, status, encrypt); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { __u8 encrypt; if (conn->state == BT_CONFIG) { if (!status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); hci_conn_drop(conn); return; } if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) encrypt = 0x00; else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) encrypt = 0x02; else encrypt = 0x01; if (!status) { if (conn->sec_level == BT_SECURITY_SDP) conn->sec_level = BT_SECURITY_LOW; if (conn->pending_sec_level > conn->sec_level) conn->sec_level = conn->pending_sec_level; } hci_security_cfm(conn, status, encrypt); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); kfree(cb); } } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); kfree(cb); } } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) { if (addr_type != ADDR_LE_DEV_RANDOM) return false; if ((bdaddr->b[5] & 0xc0) == 0x40) return true; return false; } static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type) { if (addr_type == ADDR_LE_DEV_PUBLIC) return true; /* Check for Random Static address type */ if ((addr->b[5] & 0xc0) == 0xc0) return true; return false; } static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { if (!hci_bdaddr_is_rpa(bdaddr, addr_type)) return NULL; return hci_find_irk_by_rpa(hdev, bdaddr); } static inline int hci_check_conn_params(u16 min, u16 max, u16 latency, u16 to_multiplier) { u16 max_latency; if (min > max) { BT_WARN("min %d > max %d", min, max); return -EINVAL; } if (min < 6) { BT_WARN("min %d < 6", min); return -EINVAL; } if (max > 3200) { BT_WARN("max %d > 3200", max); return -EINVAL; } if (to_multiplier < 10) { BT_WARN("to_multiplier %d < 10", to_multiplier); return -EINVAL; } if (to_multiplier > 3200) { BT_WARN("to_multiplier %d > 3200", to_multiplier); return -EINVAL; } if (max >= to_multiplier * 8) { BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier); return -EINVAL; } max_latency = (to_multiplier * 4 / max) - 1; if (latency > 499) { BT_WARN("latency %d > 499", latency); return -EINVAL; } if (latency > max_latency) { BT_WARN("latency %d > max_latency %d", latency, max_latency); return -EINVAL; } return 0; } int hci_register_cb(struct hci_cb *hcb); int hci_unregister_cb(struct hci_cb *hcb); int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param); int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param); void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb); void 
*hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); void *hci_recv_event_data(struct hci_dev *hdev, __u8 event); u32 hci_conn_get_phy(struct hci_conn *conn); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk); void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, void *data, u16 data_len, ktime_t tstamp, int flag, struct sock *skip_sk); void hci_sock_dev_event(struct hci_dev *hdev, int event); #define HCI_MGMT_VAR_LEN BIT(0) #define HCI_MGMT_NO_HDEV BIT(1) #define HCI_MGMT_UNTRUSTED BIT(2) #define HCI_MGMT_UNCONFIGURED BIT(3) #define HCI_MGMT_HDEV_OPTIONAL BIT(4) struct hci_mgmt_handler { int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len); size_t data_len; unsigned long flags; }; struct hci_mgmt_chan { struct list_head list; unsigned short channel; size_t handler_count; const struct hci_mgmt_handler *handlers; void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); }; int hci_mgmt_chan_register(struct hci_mgmt_chan *c); void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); /* Management interface */ #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) #define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \ BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) /* These LE scan and inquiry parameters were chosen according to LE General * Discovery Procedure specification. */ #define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */ #define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */ #define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */ #define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */ #define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */ #define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */ #define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */ #define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */ #define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */ #define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */ #define DISCOV_LE_TIMEOUT 10240 /* msec */ #define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */ #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 #define DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ #define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */ #define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */ #define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */ #define INTERVAL_TO_MS(x) (((x) * 10) / 0x10) #define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */ void mgmt_fill_version_info(void *ver); int mgmt_new_settings(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_removed(struct hci_dev *hdev); void mgmt_set_powered_failed(struct hci_dev *hdev, int err); void mgmt_power_on(struct hci_dev *hdev, int err); void __mgmt_power_off(struct hci_dev *hdev); void 
mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status); void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 value, u8 confirm_hint); int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type); int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered); void mgmt_auth_failed(struct hci_conn *conn, u8 status); void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u64 instant); void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); void mgmt_discovering(struct hci_dev *hdev, u8 discovering); void mgmt_suspending(struct hci_dev *hdev, u8 state); void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr, u8 addr_type); bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bool persistent); void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 store_hint, u16 min_interval, u16 max_interval, u16 latency, u16 timeout); void mgmt_smp_complete(struct hci_conn *conn, bool complete); bool mgmt_get_connectable(struct hci_dev *hdev); u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev); void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, bdaddr_t *bdaddr, u8 addr_type); int 
hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size);
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type);

#define SCO_AIRMODE_MASK	0x0003
#define SCO_AIRMODE_CVSD	0x0000
#define SCO_AIRMODE_TRANSP	0x0003

#define LOCAL_CODEC_ACL_MASK	BIT(0)
#define LOCAL_CODEC_SCO_MASK	BIT(1)

#define TRANSPORT_TYPE_MAX	0x04

#endif /* __HCI_CORE_H */
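A note on the arithmetic in hci_check_conn_params() above: connection intervals are counted in 1.25 ms units (6..3200, i.e. 7.5 ms to 4 s), the supervision timeout in 10 ms units (10..3200, i.e. 100 ms to 32 s), and the timeout has to cover (1 + latency) * interval * 2, which is where max_latency = (to_multiplier * 4 / max) - 1 comes from. The following user-space sketch mirrors the same checks with the unit conversions written out; the helper name and the sample values are illustrative, not kernel API.

/*
 * Standalone sketch of the LE connection parameter checks; it mirrors the
 * arithmetic of hci_check_conn_params() above in user space.
 */
#include <stdio.h>

static int check_le_conn_params(unsigned int min, unsigned int max,
				unsigned int latency, unsigned int to_multiplier)
{
	unsigned int max_latency;

	if (min > max || min < 6 || max > 3200)
		return -1;	/* interval: 1.25 ms units, 7.5 ms .. 4 s */

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -1;	/* timeout: 10 ms units, 100 ms .. 32 s */

	if (max >= to_multiplier * 8)
		return -1;	/* i.e. interval (max * 1.25 ms) >= timeout (to * 10 ms) */

	/*
	 * The timeout must cover (1 + latency) * interval * 2; with the units
	 * above that reduces to latency <= to_multiplier * 4 / max - 1.
	 */
	max_latency = (to_multiplier * 4 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -1;

	return 0;
}

int main(void)
{
	/* 30..50 ms interval, peripheral latency 4, 5 s supervision timeout */
	unsigned int min = 24, max = 40, latency = 4, to = 500;

	printf("interval %.2f..%.2f ms, timeout %u ms: %s\n",
	       min * 1.25, max * 1.25, to * 10,
	       check_le_conn_params(min, max, latency, to) ? "rejected" : "ok");
	return 0;
}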
17 13 16 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 /* * linux/fs/nls/nls_ascii.c * * Charset ascii translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ }; static const unsigned char *const page_uni2charset[256] = { page00, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 
0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "ascii", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_ascii(void) { return register_nls(&table); } static void __exit exit_nls_ascii(void) { unregister_nls(&table); } module_init(init_nls_ascii) module_exit(exit_nls_ascii) MODULE_DESCRIPTION("NLS ASCII (United States)"); MODULE_LICENSE("Dual BSD/GPL");
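The conversion helpers above are pure table lookups: char2uni() indexes charset2uni[] directly, while uni2char() splits the code point into a page (high byte) and an offset (low byte) and goes through page_uni2charset[]. Below is a small user-space sketch of the same two-level lookup with the ASCII tables built at runtime instead of spelled out; the names mirror the kernel ones, but none of this is the kernel NLS API.

/*
 * User-space sketch of the NLS-style table lookups shown above.
 */
#include <stdio.h>

typedef unsigned short wchar16;	/* stands in for the kernel's wchar_t */

static wchar16 charset2uni[256];			/* byte -> Unicode */
static unsigned char page00[256];			/* Unicode page 0x00 -> byte */
static const unsigned char *page_uni2charset[256] = { page00 };

static void build_tables(void)
{
	/* ASCII is an identity map; index 0 stays 0, which the lookups below
	 * treat as "no mapping", just like the kernel tables. */
	for (int i = 1; i < 0x80; i++) {
		charset2uni[i] = (wchar16)i;
		page00[i] = (unsigned char)i;
	}
}

static int uni2char(wchar16 uni, unsigned char *out)
{
	const unsigned char *page = page_uni2charset[uni >> 8];
	unsigned char cl = uni & 0xff;

	if (page && page[cl]) {
		*out = page[cl];
		return 1;
	}
	return -1;		/* no mapping for this code point */
}

static int char2uni(unsigned char c, wchar16 *uni)
{
	*uni = charset2uni[c];
	return *uni ? 1 : -1;
}

int main(void)
{
	unsigned char c;
	wchar16 u;

	build_tables();
	if (char2uni('A', &u) > 0 && uni2char(u, &c) > 0)
		printf("round trip: 'A' -> U+%04X -> '%c'\n", u, c);
	return 0;
}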
9 6 14 83 1 1 1 9 10 10 10 10 55 55 5 30 31 15 21 5 3 15 17 24 18 1 22 19 3 21 21 24 50 51 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 // SPDX-License-Identifier: GPL-2.0-only /* * irqchip.c: Common API for in kernel interrupt controllers * Copyright (c) 2007, Intel Corporation. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * Copyright (c) 2013, Alexander Graf <agraf@suse.de> * * This file is derived from virt/kvm/irq_comm.c. * * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> * Alexander Graf <agraf@suse.de> */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/srcu.h> #include <linux/export.h> #include <trace/events/kvm.h> int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi) { struct kvm_irq_routing_table *irq_rt; struct kvm_kernel_irq_routing_entry *e; int n = 0; irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, lockdep_is_held(&kvm->irq_lock)); if (irq_rt && gsi < irq_rt->nr_rt_entries) { hlist_for_each_entry(e, &irq_rt->map[gsi], link) { entries[n] = *e; ++n; } } return n; } int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_routing_table *irq_rt; irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); return irq_rt->chip[irqchip][pin]; } int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi) { struct kvm_kernel_irq_routing_entry route; if (!kvm_arch_irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID)) return -EINVAL; route.msi.address_lo = msi->address_lo; route.msi.address_hi = msi->address_hi; route.msi.data = msi->data; route.msi.flags = msi->flags; route.msi.devid = msi->devid; return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false); } /* * Return value: * < 0 Interrupt was ignored (masked or not delivered for other reasons) * = 0 Interrupt was coalesced (previous irq is still pending) * > 0 Number of CPUs interrupt was delivered to */ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS]; int ret = -1, i, idx; trace_kvm_set_irq(irq, level, irq_source_id); /* Not possible to detect if the guest uses the PIC or the * IOAPIC. So set the bit in both. The guest will ignore * writes to the unused one. */ idx = srcu_read_lock(&kvm->irq_srcu); i = kvm_irq_map_gsi(kvm, irq_set, irq); srcu_read_unlock(&kvm->irq_srcu, idx); while (i--) { int r; r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, line_status); if (r < 0) continue; ret = r + ((ret < 0) ? 
0 : ret); } return ret; } static void free_irq_routing_table(struct kvm_irq_routing_table *rt) { int i; if (!rt) return; for (i = 0; i < rt->nr_rt_entries; ++i) { struct kvm_kernel_irq_routing_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &rt->map[i], link) { hlist_del(&e->link); kfree(e); } } kfree(rt); } void kvm_free_irq_routing(struct kvm *kvm) { /* Called only during vm destruction. Nobody can use the pointer at this stage */ struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing); free_irq_routing_table(rt); } static int setup_routing_entry(struct kvm *kvm, struct kvm_irq_routing_table *rt, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue) { struct kvm_kernel_irq_routing_entry *ei; int r; u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES); /* * Do not allow GSI to be mapped to the same irqchip more than once. * Allow only one to one mapping between GSI and non-irqchip routing. */ hlist_for_each_entry(ei, &rt->map[gsi], link) if (ei->type != KVM_IRQ_ROUTING_IRQCHIP || ue->type != KVM_IRQ_ROUTING_IRQCHIP || ue->u.irqchip.irqchip == ei->irqchip.irqchip) return -EINVAL; e->gsi = gsi; e->type = ue->type; r = kvm_set_routing_entry(kvm, e, ue); if (r) return r; if (e->type == KVM_IRQ_ROUTING_IRQCHIP) rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi; hlist_add_head(&e->link, &rt->map[e->gsi]); return 0; } void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm) { } bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm) { return true; } int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *ue, unsigned nr, unsigned flags) { struct kvm_irq_routing_table *new, *old; struct kvm_kernel_irq_routing_entry *e; u32 i, j, nr_rt_entries = 0; int r; for (i = 0; i < nr; ++i) { if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES) return -EINVAL; nr_rt_entries = max(nr_rt_entries, ue[i].gsi); } nr_rt_entries += 1; new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT); if (!new) return -ENOMEM; new->nr_rt_entries = nr_rt_entries; for (i = 0; i < KVM_NR_IRQCHIPS; i++) for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++) new->chip[i][j] = -1; for (i = 0; i < nr; ++i) { r = -ENOMEM; e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT); if (!e) goto out; r = -EINVAL; switch (ue->type) { case KVM_IRQ_ROUTING_MSI: if (ue->flags & ~KVM_MSI_VALID_DEVID) goto free_entry; break; default: if (ue->flags) goto free_entry; break; } r = setup_routing_entry(kvm, new, e, ue); if (r) goto free_entry; ++ue; } mutex_lock(&kvm->irq_lock); old = rcu_dereference_protected(kvm->irq_routing, 1); rcu_assign_pointer(kvm->irq_routing, new); kvm_irq_routing_update(kvm); kvm_arch_irq_routing_update(kvm); mutex_unlock(&kvm->irq_lock); kvm_arch_post_irq_routing_update(kvm); synchronize_srcu_expedited(&kvm->irq_srcu); new = old; r = 0; goto out; free_entry: kfree(e); out: free_irq_routing_table(new); return r; } /* * Allocate empty IRQ routing by default so that additional setup isn't needed * when userspace-driven IRQ routing is activated, and so that kvm->irq_routing * is guaranteed to be non-NULL. */ int kvm_init_irq_routing(struct kvm *kvm) { struct kvm_irq_routing_table *new; int chip_size; new = kzalloc(struct_size(new, map, 1), GFP_KERNEL_ACCOUNT); if (!new) return -ENOMEM; new->nr_rt_entries = 1; chip_size = sizeof(int) * KVM_NR_IRQCHIPS * KVM_IRQCHIP_NUM_PINS; memset(new->chip, -1, chip_size); RCU_INIT_POINTER(kvm->irq_routing, new); return 0; }
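The routing table built by kvm_set_irq_routing() above is normally fed from user space through the KVM_SET_GSI_ROUTING ioctl. The sketch below shows that call flow; it assumes an x86 host with /dev/kvm available, creates the in-kernel irqchip first, uses placeholder MSI address/data values, and keeps error handling minimal, so treat it as an illustration rather than a working device model.

/*
 * User-space sketch: install a GSI -> MSI route via KVM_SET_GSI_ROUTING,
 * which lands in kvm_set_irq_routing() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_irq_routing *table;
	int kvm, vm;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}

	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/* x86 wants the in-kernel irqchip before routes can be set */
	if (ioctl(vm, KVM_CREATE_IRQCHIP, 0) < 0) {
		perror("KVM_CREATE_IRQCHIP");
		return 1;
	}

	/* one routing entry: GSI 5 delivered as an MSI */
	table = calloc(1, sizeof(*table) + sizeof(table->entries[0]));
	table->nr = 1;
	table->entries[0].gsi = 5;
	table->entries[0].type = KVM_IRQ_ROUTING_MSI;
	table->entries[0].u.msi.address_lo = 0xfee00000;	/* placeholder */
	table->entries[0].u.msi.data = 0x41;			/* placeholder */

	if (ioctl(vm, KVM_SET_GSI_ROUTING, table) < 0)
		perror("KVM_SET_GSI_ROUTING");
	else
		printf("GSI 5 -> MSI route installed\n");

	free(table);
	return 0;
}

Once such a route is installed, raising GSI 5 (for example through an irqfd bound to that GSI) goes through kvm_set_irq() and the per-GSI entry list built by setup_routing_entry().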
12 105 3 3 21 102 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM nilfs2 #if !defined(_TRACE_NILFS2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_NILFS2_H #include <linux/tracepoint.h> struct nilfs_sc_info; #define show_collection_stage(type) \ __print_symbolic(type, \ { NILFS_ST_INIT, "ST_INIT" }, \ { NILFS_ST_GC, "ST_GC" }, \ { NILFS_ST_FILE, "ST_FILE" }, \ { NILFS_ST_IFILE, "ST_IFILE" }, \ { NILFS_ST_CPFILE, "ST_CPFILE" }, \ { NILFS_ST_SUFILE, "ST_SUFILE" }, \ { NILFS_ST_DAT, "ST_DAT" }, \ { NILFS_ST_SR, "ST_SR" }, \ { NILFS_ST_DSYNC, "ST_DSYNC" }, \ { NILFS_ST_DONE, "ST_DONE"}) TRACE_EVENT(nilfs2_collection_stage_transition, TP_PROTO(struct nilfs_sc_info *sci), TP_ARGS(sci), TP_STRUCT__entry( __field(void *, sci) __field(int, stage) ), TP_fast_assign( __entry->sci = sci; __entry->stage = sci->sc_stage.scnt; ), TP_printk("sci = %p stage = %s", __entry->sci, show_collection_stage(__entry->stage)) ); #ifndef TRACE_HEADER_MULTI_READ enum nilfs2_transaction_transition_state { TRACE_NILFS2_TRANSACTION_BEGIN, TRACE_NILFS2_TRANSACTION_COMMIT, TRACE_NILFS2_TRANSACTION_ABORT, TRACE_NILFS2_TRANSACTION_TRYLOCK, TRACE_NILFS2_TRANSACTION_LOCK, TRACE_NILFS2_TRANSACTION_UNLOCK, }; #endif #define show_transaction_state(type) \ __print_symbolic(type, \ { TRACE_NILFS2_TRANSACTION_BEGIN, "BEGIN" }, \ { TRACE_NILFS2_TRANSACTION_COMMIT, "COMMIT" }, \ { TRACE_NILFS2_TRANSACTION_ABORT, "ABORT" }, \ { TRACE_NILFS2_TRANSACTION_TRYLOCK, "TRYLOCK" }, \ { TRACE_NILFS2_TRANSACTION_LOCK, "LOCK" }, \ { TRACE_NILFS2_TRANSACTION_UNLOCK, "UNLOCK" }) TRACE_EVENT(nilfs2_transaction_transition, TP_PROTO(struct super_block *sb, struct nilfs_transaction_info *ti, int count, unsigned int flags, enum nilfs2_transaction_transition_state state), TP_ARGS(sb, ti, count, flags, state), TP_STRUCT__entry( __field(void *, sb) __field(void *, ti) __field(int, count) __field(unsigned int, flags) __field(int, state) ), TP_fast_assign( __entry->sb = sb; __entry->ti = ti; __entry->count = count; __entry->flags = flags; __entry->state = state; ), TP_printk("sb = %p ti = %p count = %d flags = %x state = %s", __entry->sb, __entry->ti, __entry->count, __entry->flags, show_transaction_state(__entry->state)) ); TRACE_EVENT(nilfs2_segment_usage_check, TP_PROTO(struct inode *sufile, __u64 segnum, unsigned long cnt), TP_ARGS(sufile, segnum, cnt), TP_STRUCT__entry( __field(struct inode *, sufile) __field(__u64, segnum) __field(unsigned long, cnt) ), TP_fast_assign( __entry->sufile = sufile; __entry->segnum = segnum; __entry->cnt = cnt; ), TP_printk("sufile = %p segnum = %llu cnt = %lu", __entry->sufile, __entry->segnum, __entry->cnt) ); TRACE_EVENT(nilfs2_segment_usage_allocated, TP_PROTO(struct inode *sufile, __u64 
segnum), TP_ARGS(sufile, segnum), TP_STRUCT__entry( __field(struct inode *, sufile) __field(__u64, segnum) ), TP_fast_assign( __entry->sufile = sufile; __entry->segnum = segnum; ), TP_printk("sufile = %p segnum = %llu", __entry->sufile, __entry->segnum) ); TRACE_EVENT(nilfs2_segment_usage_freed, TP_PROTO(struct inode *sufile, __u64 segnum), TP_ARGS(sufile, segnum), TP_STRUCT__entry( __field(struct inode *, sufile) __field(__u64, segnum) ), TP_fast_assign( __entry->sufile = sufile; __entry->segnum = segnum; ), TP_printk("sufile = %p segnum = %llu", __entry->sufile, __entry->segnum) ); TRACE_EVENT(nilfs2_mdt_insert_new_block, TP_PROTO(struct inode *inode, unsigned long ino, unsigned long block), TP_ARGS(inode, ino, block), TP_STRUCT__entry( __field(struct inode *, inode) __field(unsigned long, ino) __field(unsigned long, block) ), TP_fast_assign( __entry->inode = inode; __entry->ino = ino; __entry->block = block; ), TP_printk("inode = %p ino = %lu block = %lu", __entry->inode, __entry->ino, __entry->block) ); TRACE_EVENT(nilfs2_mdt_submit_block, TP_PROTO(struct inode *inode, unsigned long ino, unsigned long blkoff, enum req_op mode), TP_ARGS(inode, ino, blkoff, mode), TP_STRUCT__entry( __field(struct inode *, inode) __field(unsigned long, ino) __field(unsigned long, blkoff) /* * Use field_struct() to avoid is_signed_type() on the * bitwise type enum req_op. */ __field_struct(enum req_op, mode) ), TP_fast_assign( __entry->inode = inode; __entry->ino = ino; __entry->blkoff = blkoff; __entry->mode = mode; ), TP_printk("inode = %p ino = %lu blkoff = %lu mode = %x", __entry->inode, __entry->ino, __entry->blkoff, __entry->mode) ); #endif /* _TRACE_NILFS2_H */ /* This part must be outside protection */ #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE nilfs2 #include <trace/define_trace.h>
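Each TRACE_EVENT() above expands into a trace_<name>() helper that the filesystem code calls, e.g. trace_nilfs2_transaction_transition(sb, ti, count, flags, state), and the event is then exposed through tracefs in the TP_printk() format. The sketch below enables that event and streams it from user space; it assumes root privileges, a kernel built with these tracepoints, and tracefs mounted at /sys/kernel/tracing.

/*
 * User-space sketch: enable the nilfs2_transaction_transition event defined
 * above and stream it from tracefs.
 */
#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/nilfs2/nilfs2_transaction_transition/enable";
	char line[512];
	FILE *f;

	f = fopen(enable, "w");
	if (!f) {
		perror(enable);
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	/*
	 * Each emitted line carries the TP_printk() format from the event:
	 * "sb = %p ti = %p count = %d flags = %x state = %s"
	 */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}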
5 5 5 18 7 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 /* SPDX-License-Identifier: GPL-2.0 */ /* XDP user-space ring structure * Copyright(c) 2018 Intel Corporation. */ #ifndef _LINUX_XSK_QUEUE_H #define _LINUX_XSK_QUEUE_H #include <linux/types.h> #include <linux/if_xdp.h> #include <net/xdp_sock.h> #include <net/xsk_buff_pool.h> #include "xsk.h" struct xdp_ring { u32 producer ____cacheline_aligned_in_smp; /* Hinder the adjacent cache prefetcher to prefetch the consumer * pointer if the producer pointer is touched and vice versa. */ u32 pad1 ____cacheline_aligned_in_smp; u32 consumer ____cacheline_aligned_in_smp; u32 pad2 ____cacheline_aligned_in_smp; u32 flags; u32 pad3 ____cacheline_aligned_in_smp; }; /* Used for the RX and TX queues for packets */ struct xdp_rxtx_ring { struct xdp_ring ptrs; struct xdp_desc desc[] ____cacheline_aligned_in_smp; }; /* Used for the fill and completion queues for buffers */ struct xdp_umem_ring { struct xdp_ring ptrs; u64 desc[] ____cacheline_aligned_in_smp; }; struct xsk_queue { u32 ring_mask; u32 nentries; u32 cached_prod; u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; u64 queue_empty_descs; size_t ring_vmalloc_size; }; struct parsed_desc { u32 mb; u32 valid; }; /* The structure of the shared state of the rings are a simple * circular buffer, as outlined in * Documentation/core-api/circular-buffers.rst. For the Rx and * completion ring, the kernel is the producer and user space is the * consumer. For the Tx and fill rings, the kernel is the consumer and * user space is the producer. * * producer consumer * * if (LOAD ->consumer) { (A) LOAD.acq ->producer (C) * STORE $data LOAD $data * STORE.rel ->producer (B) STORE.rel ->consumer (D) * } * * (A) pairs with (D), and (B) pairs with (C). * * Starting with (B), it protects the data from being written after * the producer pointer. 
If this barrier was missing, the consumer * could observe the producer pointer being set and thus load the data * before the producer has written the new data. The consumer would in * this case load the old data. * * (C) protects the consumer from speculatively loading the data before * the producer pointer actually has been read. If we do not have this * barrier, some architectures could load old data as speculative loads * are not discarded as the CPU does not know there is a dependency * between ->producer and data. * * (A) is a control dependency that separates the load of ->consumer * from the stores of $data. In case ->consumer indicates there is no * room in the buffer to store $data we do not. The dependency will * order both of the stores after the loads. So no barrier is needed. * * (D) protects the load of the data to be observed to happen after the * store of the consumer pointer. If we did not have this memory * barrier, the producer could observe the consumer pointer being set * and overwrite the data with a new value before the consumer got the * chance to read the old value. The consumer would thus miss reading * the old entry and very likely read the new entry twice, once right * now and again after circling through the ring. */ /* The operations on the rings are the following: * * producer consumer * * RESERVE entries PEEK in the ring for entries * WRITE data into the ring READ data from the ring * SUBMIT entries RELEASE entries * * The producer reserves one or more entries in the ring. It can then * fill in these entries and finally submit them so that they can be * seen and read by the consumer. * * The consumer peeks into the ring to see if the producer has written * any new entries. If so, the consumer can then read these entries * and when it is done reading them release them back to the producer * so that the producer can use these slots to fill in new entries. * * The function names below reflect these operations. */ /* Functions that read and validate content from consumer rings. 
*/ static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; u32 idx = cached_cons & q->ring_mask; *addr = ring->desc[idx]; } static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) { if (q->cached_cons != q->cached_prod) { __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); return true; } return false; } static inline bool xp_unused_options_set(u32 options) { return options & ~(XDP_PKT_CONTD | XDP_TX_METADATA); } static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { u64 addr = desc->addr - pool->tx_metadata_len; u64 len = desc->len + pool->tx_metadata_len; u64 offset = addr & (pool->chunk_size - 1); if (!desc->len) return false; if (offset + len > pool->chunk_size) return false; if (addr >= pool->addrs_cnt) return false; if (xp_unused_options_set(desc->options)) return false; return true; } static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len; u64 len = desc->len + pool->tx_metadata_len; if (!desc->len) return false; if (len > pool->chunk_size) return false; if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt || xp_desc_crosses_non_contig_pg(pool, addr, len)) return false; if (xp_unused_options_set(desc->options)) return false; return true; } static inline bool xp_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) : xp_aligned_validate_desc(pool, desc); } static inline bool xskq_has_descs(struct xsk_queue *q) { return q->cached_cons != q->cached_prod; } static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, struct xsk_buff_pool *pool) { if (!xp_validate_desc(pool, d)) { q->invalid_descs++; return false; } return true; } static inline bool xskq_cons_read_desc(struct xsk_queue *q, struct xdp_desc *desc, struct xsk_buff_pool *pool) { if (q->cached_cons != q->cached_prod) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; u32 idx = q->cached_cons & q->ring_mask; *desc = ring->desc[idx]; return xskq_cons_is_valid_desc(q, desc, pool); } q->queue_empty_descs++; return false; } static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt) { q->cached_cons += cnt; } static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool, struct xdp_desc *desc, struct parsed_desc *parsed) { parsed->valid = xskq_cons_is_valid_desc(q, desc, pool); parsed->mb = xp_mb_desc(desc); } static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, u32 max) { u32 cached_cons = q->cached_cons, nb_entries = 0; struct xdp_desc *descs = pool->tx_descs; u32 total_descs = 0, nr_frags = 0; /* track first entry, if stumble upon *any* invalid descriptor, rewind * current packet that consists of frags and stop the processing */ while (cached_cons != q->cached_prod && nb_entries < max) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; u32 idx = cached_cons & q->ring_mask; struct parsed_desc parsed; descs[nb_entries] = ring->desc[idx]; cached_cons++; parse_desc(q, pool, &descs[nb_entries], &parsed); if (unlikely(!parsed.valid)) break; if (likely(!parsed.mb)) { total_descs += (nr_frags + 1); nr_frags = 0; } else { nr_frags++; if (nr_frags == pool->xdp_zc_max_segs) { nr_frags = 0; break; } } nb_entries++; } cached_cons -= nr_frags; /* 
Release valid plus any invalid entries */ xskq_cons_release_n(q, cached_cons - q->cached_cons); return total_descs; } /* Functions for consumers */ static inline void __xskq_cons_release(struct xsk_queue *q) { smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matchees A */ } static inline void __xskq_cons_peek(struct xsk_queue *q) { /* Refresh the local pointer */ q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */ } static inline void xskq_cons_get_entries(struct xsk_queue *q) { __xskq_cons_release(q); __xskq_cons_peek(q); } static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max) { u32 entries = q->cached_prod - q->cached_cons; if (entries >= max) return max; __xskq_cons_peek(q); entries = q->cached_prod - q->cached_cons; return entries >= max ? max : entries; } static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr) { if (q->cached_prod == q->cached_cons) xskq_cons_get_entries(q); return xskq_cons_read_addr_unchecked(q, addr); } static inline bool xskq_cons_peek_desc(struct xsk_queue *q, struct xdp_desc *desc, struct xsk_buff_pool *pool) { if (q->cached_prod == q->cached_cons) xskq_cons_get_entries(q); return xskq_cons_read_desc(q, desc, pool); } /* To improve performance in the xskq_cons_release functions, only update local state here. * Reflect this to global state when we get new entries from the ring in * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop. */ static inline void xskq_cons_release(struct xsk_queue *q) { q->cached_cons++; } static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt) { q->cached_cons -= cnt; } static inline u32 xskq_cons_present_entries(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); } /* Functions for producers */ static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max) { u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); if (free_entries >= max) return max; /* Refresh the local tail pointer */ q->cached_cons = READ_ONCE(q->ring->consumer); free_entries = q->nentries - (q->cached_prod - q->cached_cons); return free_entries >= max ? max : free_entries; } static inline bool xskq_prod_is_full(struct xsk_queue *q) { return xskq_prod_nb_free(q, 1) ? 
false : true; } static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt) { q->cached_prod -= cnt; } static inline int xskq_prod_reserve(struct xsk_queue *q) { if (xskq_prod_is_full(q)) return -ENOSPC; /* A, matches D */ q->cached_prod++; return 0; } static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; if (xskq_prod_is_full(q)) return -ENOSPC; /* A, matches D */ ring->desc[q->cached_prod++ & q->ring_mask] = addr; return 0; } static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs, u32 nb_entries) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; u32 i, cached_prod; /* A, matches D */ cached_prod = q->cached_prod; for (i = 0; i < nb_entries; i++) ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr; q->cached_prod = cached_prod; } static inline int xskq_prod_reserve_desc(struct xsk_queue *q, u64 addr, u32 len, u32 flags) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; u32 idx; if (xskq_prod_is_full(q)) return -ENOBUFS; /* A, matches D */ idx = q->cached_prod++ & q->ring_mask; ring->desc[idx].addr = addr; ring->desc[idx].len = len; ring->desc[idx].options = flags; return 0; } static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx) { smp_store_release(&q->ring->producer, idx); /* B, matches C */ } static inline void xskq_prod_submit(struct xsk_queue *q) { __xskq_prod_submit(q, q->cached_prod); } static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries) { __xskq_prod_submit(q, q->ring->producer + nb_entries); } static inline bool xskq_prod_is_empty(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer); } /* For both producers and consumers */ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) { return q ? q->invalid_descs : 0; } static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) { return q ? q->queue_empty_descs : 0; } struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); void xskq_destroy(struct xsk_queue *q_ops); #endif /* _LINUX_XSK_QUEUE_H */
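The barrier pairing documented above (A with D, B with C) is the classic single-producer/single-consumer ring protocol, and it can be reproduced outside the kernel with C11 atomics: the producer publishes entries with a release store of ->producer, the consumer observes them with an acquire load and hands slots back with a release store of ->consumer. One deliberate difference: the kernel relies on a plain load plus a control dependency at (A), while the portable sketch below uses an acquire load there, which is slightly stronger. The ring size and all names are illustrative.

/*
 * Standalone C11 sketch of the SPSC ring protocol described above.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* power of two, like the kernel rings */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint64_t desc[RING_SIZE];
};

/* producer side */
static bool ring_produce(struct ring *r, uint64_t val)
{
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_acquire); /* A */

	if (prod - cons == RING_SIZE)
		return false;				/* full */

	r->desc[prod & RING_MASK] = val;		/* write the data ... */
	atomic_store_explicit(&r->producer, prod + 1,
			      memory_order_release);	/* ... then publish it (B) */
	return true;
}

/* consumer side */
static bool ring_consume(struct ring *r, uint64_t *val)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire); /* C */

	if (cons == prod)
		return false;				/* empty */

	*val = r->desc[cons & RING_MASK];		/* read the data ... */
	atomic_store_explicit(&r->consumer, cons + 1,
			      memory_order_release);	/* ... then release the slot (D) */
	return true;
}

int main(void)
{
	struct ring r = { 0 };
	uint64_t v;

	for (uint64_t i = 0; i < 10; i++)
		if (!ring_produce(&r, i))
			break;			/* ring only holds RING_SIZE entries */
	while (ring_consume(&r, &v))
		printf("%llu ", (unsigned long long)v);
	printf("\n");
	return 0;
}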
3 63 1 166 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. * All Rights Reserved. */ #ifndef __XFS_LOG_PRIV_H__ #define __XFS_LOG_PRIV_H__ #include "xfs_extent_busy.h" /* for struct xfs_busy_extents */ struct xfs_buf; struct xlog; struct xlog_ticket; struct xfs_mount; /* * get client id from packed copy. * * this hack is here because the xlog_pack code copies four bytes * of xlog_op_header containing the fields oh_clientid, oh_flags * and oh_res2 into the packed copy. * * later on this four byte chunk is treated as an int and the * client id is pulled out. * * this has endian issues, of course. 
*/ static inline uint xlog_get_client_id(__be32 i) { return be32_to_cpu(i) >> 24; } /* * In core log state */ enum xlog_iclog_state { XLOG_STATE_ACTIVE, /* Current IC log being written to */ XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */ XLOG_STATE_SYNCING, /* This IC log is syncing */ XLOG_STATE_DONE_SYNC, /* Done syncing to disk */ XLOG_STATE_CALLBACK, /* Callback functions now */ XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */ }; #define XLOG_STATE_STRINGS \ { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \ { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \ { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \ { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \ { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \ { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" } /* * In core log flags */ #define XLOG_ICL_NEED_FLUSH (1u << 0) /* iclog needs REQ_PREFLUSH */ #define XLOG_ICL_NEED_FUA (1u << 1) /* iclog needs REQ_FUA */ #define XLOG_ICL_STRINGS \ { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \ { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" } /* * Log ticket flags */ #define XLOG_TIC_PERM_RESERV (1u << 0) /* permanent reservation */ #define XLOG_TIC_FLAGS \ { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" } /* * Below are states for covering allocation transactions. * By covering, we mean changing the h_tail_lsn in the last on-disk * log write such that no allocation transactions will be re-done during * recovery after a system crash. Recovery starts at the last on-disk * log write. * * These states are used to insert dummy log entries to cover * space allocation transactions which can undo non-transactional changes * after a crash. Writes to a file with space * already allocated do not result in any transactions. Allocations * might include space beyond the EOF. So if we just push the EOF a * little, the last transaction for the file could contain the wrong * size. If there is no file system activity, after an allocation * transaction, and the system crashes, the allocation transaction * will get replayed and the file will be truncated. This could * be hours/days/... after the allocation occurred. * * The fix for this is to do two dummy transactions when the * system is idle. We need two dummy transaction because the h_tail_lsn * in the log record header needs to point beyond the last possible * non-dummy transaction. The first dummy changes the h_tail_lsn to * the first transaction before the dummy. The second dummy causes * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn. * * These dummy transactions get committed when everything * is idle (after there has been some activity). * * There are 5 states used to control this. * * IDLE -- no logging has been done on the file system or * we are done covering previous transactions. * NEED -- logging has occurred and we need a dummy transaction * when the log becomes idle. * DONE -- we were in the NEED state and have committed a dummy * transaction. * NEED2 -- we detected that a dummy transaction has gone to the * on disk log with no other transactions. * DONE2 -- we committed a dummy transaction when in the NEED2 state. * * There are two places where we switch states: * * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2. * We commit the dummy transaction and switch to DONE or DONE2, * respectively. In all other states, we don't do anything. * * 2.) When we finish writing the on-disk log (xlog_state_clean_log). 
* * No matter what state we are in, if this isn't the dummy * transaction going out, the next state is NEED. * So, if we aren't in the DONE or DONE2 states, the next state * is NEED. We can't be finishing a write of the dummy record * unless it was committed and the state switched to DONE or DONE2. * * If we are in the DONE state and this was a write of the * dummy transaction, we move to NEED2. * * If we are in the DONE2 state and this was a write of the * dummy transaction, we move to IDLE. * * * Writing only one dummy transaction can get appended to * one file space allocation. When this happens, the log recovery * code replays the space allocation and a file could be truncated. * This is why we have the NEED2 and DONE2 states before going idle. */ #define XLOG_STATE_COVER_IDLE 0 #define XLOG_STATE_COVER_NEED 1 #define XLOG_STATE_COVER_DONE 2 #define XLOG_STATE_COVER_NEED2 3 #define XLOG_STATE_COVER_DONE2 4 #define XLOG_COVER_OPS 5 typedef struct xlog_ticket { struct list_head t_queue; /* reserve/write queue */ struct task_struct *t_task; /* task that owns this ticket */ xlog_tid_t t_tid; /* transaction identifier */ atomic_t t_ref; /* ticket reference count */ int t_curr_res; /* current reservation */ int t_unit_res; /* unit reservation */ char t_ocnt; /* original unit count */ char t_cnt; /* current unit count */ uint8_t t_flags; /* properties of reservation */ int t_iclog_hdrs; /* iclog hdrs in t_curr_res */ } xlog_ticket_t; /* * - A log record header is 512 bytes. There is plenty of room to grow the * xlog_rec_header_t into the reserved space. * - ic_data follows, so a write to disk can start at the beginning of * the iclog. * - ic_forcewait is used to implement synchronous forcing of the iclog to disk. * - ic_next is the pointer to the next iclog in the ring. * - ic_log is a pointer back to the global log structure. * - ic_size is the full size of the log buffer, minus the cycle headers. * - ic_offset is the current number of bytes written to in this iclog. * - ic_refcnt is bumped when someone is writing to the log. * - ic_state is the state of the iclog. * * Because of cacheline contention on large machines, we need to separate * various resources onto different cachelines. To start with, make the * structure cacheline aligned. The following fields can be contended on * by independent processes: * * - ic_callbacks * - ic_refcnt * - fields protected by the global l_icloglock * * so we need to ensure that these fields are located in separate cachelines. * We'll put all the read-only and l_icloglock fields in the first cacheline, * and move everything else out to subsequent cachelines. */ typedef struct xlog_in_core { wait_queue_head_t ic_force_wait; wait_queue_head_t ic_write_wait; struct xlog_in_core *ic_next; struct xlog_in_core *ic_prev; struct xlog *ic_log; u32 ic_size; u32 ic_offset; enum xlog_iclog_state ic_state; unsigned int ic_flags; void *ic_datap; /* pointer to iclog data */ struct list_head ic_callbacks; /* reference counts need their own cacheline */ atomic_t ic_refcnt ____cacheline_aligned_in_smp; xlog_in_core_2_t *ic_data; #define ic_header ic_data->hic_header #ifdef DEBUG bool ic_fail_crc : 1; #endif struct semaphore ic_sema; struct work_struct ic_end_io_work; struct bio ic_bio; struct bio_vec ic_bvec[]; } xlog_in_core_t; /* * The CIL context is used to aggregate per-transaction details as well be * passed to the iclog for checkpoint post-commit processing. 
After being * passed to the iclog, another context needs to be allocated for tracking the * next set of transactions to be aggregated into a checkpoint. */ struct xfs_cil; struct xfs_cil_ctx { struct xfs_cil *cil; xfs_csn_t sequence; /* chkpt sequence # */ xfs_lsn_t start_lsn; /* first LSN of chkpt commit */ xfs_lsn_t commit_lsn; /* chkpt commit record lsn */ struct xlog_in_core *commit_iclog; struct xlog_ticket *ticket; /* chkpt ticket */ atomic_t space_used; /* aggregate size of regions */ struct xfs_busy_extents busy_extents; struct list_head log_items; /* log items in chkpt */ struct list_head lv_chain; /* logvecs being pushed */ struct list_head iclog_entry; struct list_head committing; /* ctx committing list */ struct work_struct push_work; atomic_t order_id; /* * CPUs that could have added items to the percpu CIL data. Access is * coordinated with xc_ctx_lock. */ struct cpumask cil_pcpmask; }; /* * Per-cpu CIL tracking items */ struct xlog_cil_pcp { int32_t space_used; uint32_t space_reserved; struct list_head busy_extents; struct list_head log_items; }; /* * Committed Item List structure * * This structure is used to track log items that have been committed but not * yet written into the log. It is used only when the delayed logging mount * option is enabled. * * This structure tracks the list of committing checkpoint contexts so * we can avoid the problem of having to hold out new transactions during a * flush until we have a the commit record LSN of the checkpoint. We can * traverse the list of committing contexts in xlog_cil_push_lsn() to find a * sequence match and extract the commit LSN directly from there. If the * checkpoint is still in the process of committing, we can block waiting for * the commit LSN to be determined as well. This should make synchronous * operations almost as efficient as the old logging methods. */ struct xfs_cil { struct xlog *xc_log; unsigned long xc_flags; atomic_t xc_iclog_hdrs; struct workqueue_struct *xc_push_wq; struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp; struct xfs_cil_ctx *xc_ctx; spinlock_t xc_push_lock ____cacheline_aligned_in_smp; xfs_csn_t xc_push_seq; bool xc_push_commit_stable; struct list_head xc_committing; wait_queue_head_t xc_commit_wait; wait_queue_head_t xc_start_wait; xfs_csn_t xc_current_sequence; wait_queue_head_t xc_push_wait; /* background push throttle */ void __percpu *xc_pcp; /* percpu CIL structures */ } ____cacheline_aligned_in_smp; /* xc_flags bit values */ #define XLOG_CIL_EMPTY 1 #define XLOG_CIL_PCP_SPACE 2 /* * The amount of log space we allow the CIL to aggregate is difficult to size. * Whatever we choose, we have to make sure we can get a reservation for the * log space effectively, that it is large enough to capture sufficient * relogging to reduce log buffer IO significantly, but it is not too large for * the log or induces too much latency when writing out through the iclogs. We * track both space consumed and the number of vectors in the checkpoint * context, so we need to decide which to use for limiting. * * Every log buffer we write out during a push needs a header reserved, which * is at least one sector and more for v2 logs. Hence we need a reservation of * at least 512 bytes per 32k of log space just for the LR headers. That means * 16KB of reservation per megabyte of delayed logging space we will consume, * plus various headers. 
The number of headers will vary based on the number of * io vectors, so limiting on a specific number of vectors is going to result * in transactions of varying size. IOWs, it is more consistent to track and * limit space consumed in the log rather than by the number of objects being * logged in order to prevent checkpoint ticket overruns. * * Further, use of static reservations through the log grant mechanism is * problematic. It introduces a lot of complexity (e.g. reserve grant vs write * grant) and a significant deadlock potential because regranting write space * can block on log pushes. Hence if we have to regrant log space during a log * push, we can deadlock. * * However, we can avoid this by use of a dynamic "reservation stealing" * technique during transaction commit whereby unused reservation space in the * transaction ticket is transferred to the CIL ctx commit ticket to cover the * space needed by the checkpoint transaction. This means that we never need to * specifically reserve space for the CIL checkpoint transaction, nor do we * need to regrant space once the checkpoint completes. This also means the * checkpoint transaction ticket is specific to the checkpoint context, rather * than the CIL itself. * * With dynamic reservations, we can effectively make up arbitrary limits for * the checkpoint size so long as they don't violate any other size rules. * Recovery imposes a rule that no transaction exceed half the log, so we are * limited by that. Furthermore, the log transaction reservation subsystem * tries to keep 25% of the log free, so we need to keep below that limit or we * risk running out of free log space to start any new transactions. * * In order to keep background CIL push efficient, we only need to ensure the * CIL is large enough to maintain sufficient in-memory relogging to avoid * repeated physical writes of frequently modified metadata. If we allow the CIL * to grow to a substantial fraction of the log, then we may be pinning hundreds * of megabytes of metadata in memory until the CIL flushes. This can cause * issues when we are running low on memory - pinned memory cannot be reclaimed, * and the CIL consumes a lot of memory. Hence we need to set an upper physical * size limit for the CIL that limits the maximum amount of memory pinned by the * CIL but does not limit performance by reducing relogging efficiency * significantly. * * As such, the CIL push threshold ends up being the smaller of two thresholds: * - a threshold large enough that it allows CIL to be pushed and progress to be * made without excessive blocking of incoming transaction commits. This is * defined to be 12.5% of the log space - half the 25% push threshold of the * AIL. * - small enough that it doesn't pin excessive amounts of memory but maintains * close to peak relogging efficiency. This is defined to be 16x the iclog * buffer window (32MB) as measurements have shown this to be roughly the * point of diminishing performance increases under highly concurrent * modification workloads. * * To prevent the CIL from overflowing upper commit size bounds, we introduce a * new threshold at which we block committing transactions until the background * CIL commit commences and switches to a new context. While this is not a hard * limit, it forces the process committing a transaction to the CIL to block and * yield the CPU, giving the CIL push work a chance to be scheduled and start * work.
This prevents a process running lots of transactions from overfilling * the CIL because it is not yielding the CPU. We set the blocking limit at * twice the background push space threshold so we keep in line with the AIL * push thresholds. * * Note: this is not a -hard- limit as blocking is applied after the transaction * is inserted into the CIL and the push has been triggered. It is largely a * throttling mechanism that allows the CIL push to be scheduled and run. A hard * limit will be difficult to implement without introducing global serialisation * in the CIL commit fast path, and it's not at all clear that we actually need * such hard limits given the ~7 years we've run without a hard limit before * finding the first situation where a checkpoint size overflow actually * occurred. Hence the simple throttle, and an ASSERT check to tell us that * we've overrun the max size. */ #define XLOG_CIL_SPACE_LIMIT(log) \ min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4) #define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \ (XLOG_CIL_SPACE_LIMIT(log) * 2) /* * ticket grant locks, queues and accounting have their own cachelines * as these are quite hot and can be operated on concurrently. */ struct xlog_grant_head { spinlock_t lock ____cacheline_aligned_in_smp; struct list_head waiters; atomic64_t grant; }; /* * The reservation head lsn is not made up of a cycle number and block number. * Instead, it uses a cycle number and byte number. Logs don't expect to * overflow 31 bits worth of byte offset, so using a byte number will mean * that round off problems won't occur when releasing partial reservations. */ struct xlog { /* The following fields don't need locking */ struct xfs_mount *l_mp; /* mount point */ struct xfs_ail *l_ailp; /* AIL log is working with */ struct xfs_cil *l_cilp; /* CIL log is working with */ struct xfs_buftarg *l_targ; /* buftarg of log */ struct workqueue_struct *l_ioend_workqueue; /* for I/O completions */ struct delayed_work l_work; /* background flush work */ long l_opstate; /* operational state */ uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ struct list_head *l_buf_cancel_table; struct list_head r_dfops; /* recovered log intent items */ int l_iclog_hsize; /* size of iclog header */ int l_iclog_heads; /* # of iclog header sectors */ uint l_sectBBsize; /* sector size in BBs (2^n) */ int l_iclog_size; /* size of log in bytes */ int l_iclog_bufs; /* number of iclog buffers */ xfs_daddr_t l_logBBstart; /* start block of log */ int l_logsize; /* size of log in bytes */ int l_logBBsize; /* size of log in BB chunks */ /* The following block of fields are changed while holding icloglock */ wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp; /* waiting for iclog flush */ int l_covered_state;/* state of "covering disk * log entries" */ xlog_in_core_t *l_iclog; /* head log queue */ spinlock_t l_icloglock; /* grab to change iclog state */ int l_curr_cycle; /* Cycle number of log writes */ int l_prev_cycle; /* Cycle number before last * block increment */ int l_curr_block; /* current logical log block */ int l_prev_block; /* previous logical log block */ /* * l_tail_lsn is atomic so it can be set and read without needing to * hold specific locks. To avoid operations contending with other hot * objects, it is kept on a separate cacheline.
*/ /* lsn of 1st LR with unflushed * buffers */ atomic64_t l_tail_lsn ____cacheline_aligned_in_smp; struct xlog_grant_head l_reserve_head; struct xlog_grant_head l_write_head; uint64_t l_tail_space; struct xfs_kobj l_kobj; /* log recovery lsn tracking (for buffer submission */ xfs_lsn_t l_recovery_lsn; uint32_t l_iclog_roundoff;/* padding roundoff */ }; /* * Bits for operational state */ #define XLOG_ACTIVE_RECOVERY 0 /* in the middle of recovery */ #define XLOG_RECOVERY_NEEDED 1 /* log was recovered */ #define XLOG_IO_ERROR 2 /* log hit an I/O error, and being shutdown */ #define XLOG_TAIL_WARN 3 /* log tail verify warning issued */ #define XLOG_SHUTDOWN_STARTED 4 /* xlog_force_shutdown() exclusion */ static inline bool xlog_recovery_needed(struct xlog *log) { return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); } static inline bool xlog_in_recovery(struct xlog *log) { return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); } static inline bool xlog_is_shutdown(struct xlog *log) { return test_bit(XLOG_IO_ERROR, &log->l_opstate); } /* * Wait until the xlog_force_shutdown() has marked the log as shut down * so xlog_is_shutdown() will always return true. */ static inline void xlog_shutdown_wait( struct xlog *log) { wait_var_event(&log->l_opstate, xlog_is_shutdown(log)); } /* common routines */ extern int xlog_recover( struct xlog *log); extern int xlog_recover_finish( struct xlog *log); extern void xlog_recover_cancel(struct xlog *); extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead, char *dp, int size); extern struct kmem_cache *xfs_log_ticket_cache; struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes, int count, bool permanent); void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); void xlog_print_trans(struct xfs_trans *); int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx, struct list_head *lv_chain, struct xlog_ticket *tic, uint32_t len); void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog, int eventual_size); int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog, struct xlog_ticket *ticket); /* * When we crack an atomic LSN, we sample it first so that the value will not * change while we are cracking it into the component values. This means we * will always get consistent component values to work from. This should always * be used to sample and crack LSNs that are stored and updated in atomic * variables. */ static inline void xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block) { xfs_lsn_t val = atomic64_read(lsn); *cycle = CYCLE_LSN(val); *block = BLOCK_LSN(val); } /* * Calculate and assign a value to an atomic LSN variable from component pieces. 
*/ static inline void xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block) { atomic64_set(lsn, xlog_assign_lsn(cycle, block)); } /* * Committed Item List interfaces */ int xlog_cil_init(struct xlog *log); void xlog_cil_init_post_recovery(struct xlog *log); void xlog_cil_destroy(struct xlog *log); bool xlog_cil_empty(struct xlog *log); void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp, xfs_csn_t *commit_seq, bool regrant); void xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx, struct xlog_in_core *iclog); /* * CIL force routines */ void xlog_cil_flush(struct xlog *log); xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence); static inline void xlog_cil_force(struct xlog *log) { xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence); } /* * Wrapper function for waiting on a wait queue serialised against wakeups * by a spinlock. This matches the semantics of all the wait queues used in the * log code. */ static inline void xlog_wait( struct wait_queue_head *wq, struct spinlock *lock) __releases(lock) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(wq, &wait); __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock(lock); schedule(); remove_wait_queue(wq, &wait); } int xlog_wait_on_iclog(struct xlog_in_core *iclog) __releases(iclog->ic_log->l_icloglock); /* Calculate the distance between two LSNs in bytes */ static inline uint64_t xlog_lsn_sub( struct xlog *log, xfs_lsn_t high, xfs_lsn_t low) { uint32_t hi_cycle = CYCLE_LSN(high); uint32_t hi_block = BLOCK_LSN(high); uint32_t lo_cycle = CYCLE_LSN(low); uint32_t lo_block = BLOCK_LSN(low); if (hi_cycle == lo_cycle) return BBTOB(hi_block - lo_block); ASSERT((hi_cycle == lo_cycle + 1) || xlog_is_shutdown(log)); return (uint64_t)log->l_logsize - BBTOB(lo_block - hi_block); } void xlog_grant_return_space(struct xlog *log, xfs_lsn_t old_head, xfs_lsn_t new_head); /* * The LSN is valid so long as it is behind the current LSN. If it isn't, this * means that the next log record that includes this metadata could have a * smaller LSN. In turn, this means that the modification in the log would not * replay. */ static inline bool xlog_valid_lsn( struct xlog *log, xfs_lsn_t lsn) { int cur_cycle; int cur_block; bool valid = true; /* * First, sample the current lsn without locking to avoid added * contention from metadata I/O. The current cycle and block are updated * (in xlog_state_switch_iclogs()) and read here in a particular order * to avoid false negatives (e.g., thinking the metadata LSN is valid * when it is not). * * The current block is always rewound before the cycle is bumped in * xlog_state_switch_iclogs() to ensure the current LSN is never seen in * a transiently forward state. Instead, we can see the LSN in a * transiently behind state if we happen to race with a cycle wrap. */ cur_cycle = READ_ONCE(log->l_curr_cycle); smp_rmb(); cur_block = READ_ONCE(log->l_curr_block); if ((CYCLE_LSN(lsn) > cur_cycle) || (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { /* * If the metadata LSN appears invalid, it's possible the check * above raced with a wrap to the next log cycle. Grab the lock * to check for sure. */ spin_lock(&log->l_icloglock); cur_cycle = log->l_curr_cycle; cur_block = log->l_curr_block; spin_unlock(&log->l_icloglock); if ((CYCLE_LSN(lsn) > cur_cycle) || (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) valid = false; } return valid; } /* * Log vector and shadow buffers can be large, so we need to use kvmalloc() here * to ensure success. 
Unfortunately, kvmalloc() only allows GFP_KERNEL contexts * to fall back to vmalloc, so we can't actually do anything useful with gfp * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc() * will do direct reclaim and compaction in the slow path, both of which are * horrendously expensive. We just want kmalloc to fail fast and fall back to * vmalloc if it can't get something straight away from the free lists or * buddy allocator. Hence we have to open code kvmalloc ourselves here. * * This assumes that the caller uses memalloc_nofs_save task context here, so * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS * allocations. This is actually the only way to make vmalloc() do GFP_NOFS * allocations, so let's just all pretend this is a GFP_KERNEL context * operation.... */ static inline void * xlog_kvmalloc( size_t buf_size) { gfp_t flags = GFP_KERNEL; void *p; flags &= ~__GFP_DIRECT_RECLAIM; flags |= __GFP_NOWARN | __GFP_NORETRY; do { p = kmalloc(buf_size, flags); if (!p) p = vmalloc(buf_size); } while (!p); return p; } #endif /* __XFS_LOG_PRIV_H__ */
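As a back-of-the-envelope illustration of how the two CIL thresholds described above interact, the following standalone sketch (plain userspace C, not kernel code; the 32 MiB constant is an assumed stand-in for the "16x the iclog buffer window" cap, and the log sizes are made-up examples) computes the background push limit as the smaller of 12.5% of the log size and that cap, with the blocking limit at twice the push limit:

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for the "16x the iclog buffer window" cap (~32 MiB). */
#define EXAMPLE_CIL_CAP		(32ull << 20)

/* Background push threshold: min(12.5% of log size, fixed cap). */
static uint64_t example_cil_space_limit(uint64_t logsize)
{
	uint64_t pct = logsize >> 3;		/* 12.5% of the log */

	return pct < EXAMPLE_CIL_CAP ? pct : EXAMPLE_CIL_CAP;
}

int main(void)
{
	/* Made-up log sizes: 64 MiB, 512 MiB and 2 GiB. */
	uint64_t logs[] = { 64ull << 20, 512ull << 20, 2ull << 30 };

	for (int i = 0; i < 3; i++) {
		uint64_t push = example_cil_space_limit(logs[i]);

		/* Blocking threshold is simply twice the push threshold. */
		printf("log %4llu MiB: push at %llu MiB, block at %llu MiB\n",
		       (unsigned long long)(logs[i] >> 20),
		       (unsigned long long)(push >> 20),
		       (unsigned long long)((push * 2) >> 20));
	}
	return 0;
}

For a small 64 MiB log the 12.5% term governs (push at 8 MiB, block at 16 MiB), while for logs of roughly 256 MiB and larger the fixed cap wins and the limits settle at 32 MiB and 64 MiB.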
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 * Phillip Lougher <phillip@squashfs.org.uk> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pagemap.h> #include "squashfs_fs_sb.h" #include "decompressor.h" #include "page_actor.h" /* * This file contains implementations of page_actor for decompressing into * an intermediate buffer, and for decompressing directly into the * page cache. * * Calling code should avoid sleeping between calls to squashfs_first_page() * and squashfs_finish_page(). */ /* Implementation of page_actor for decompressing into intermediate buffer */ static void *cache_first_page(struct squashfs_page_actor *actor) { actor->next_page = 1; return actor->buffer[0]; } static void *cache_next_page(struct squashfs_page_actor *actor) { if (actor->next_page == actor->pages) return NULL; return actor->buffer[actor->next_page++]; } static void cache_finish_page(struct squashfs_page_actor *actor) { /* empty */ } struct squashfs_page_actor *squashfs_page_actor_init(void **buffer, int pages, int length) { struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); if (actor == NULL) return NULL; actor->length = length ? : pages * PAGE_SIZE; actor->buffer = buffer; actor->pages = pages; actor->next_page = 0; actor->tmp_buffer = NULL; actor->squashfs_first_page = cache_first_page; actor->squashfs_next_page = cache_next_page; actor->squashfs_finish_page = cache_finish_page; return actor; } /* Implementation of page_actor for decompressing directly into page cache. */ static loff_t page_next_index(struct squashfs_page_actor *actor) { return page_folio(actor->page[actor->next_page])->index; } static void *handle_next_page(struct squashfs_page_actor *actor) { int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT; if (actor->returned_pages == max_pages) return NULL; if ((actor->next_page == actor->pages) || (actor->next_index != page_next_index(actor))) { actor->next_index++; actor->returned_pages++; actor->last_page = NULL; return actor->alloc_buffer ?
actor->tmp_buffer : ERR_PTR(-ENOMEM); } actor->next_index++; actor->returned_pages++; actor->last_page = actor->page[actor->next_page]; return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]); } static void *direct_first_page(struct squashfs_page_actor *actor) { return handle_next_page(actor); } static void *direct_next_page(struct squashfs_page_actor *actor) { if (actor->pageaddr) { kunmap_local(actor->pageaddr); actor->pageaddr = NULL; } return handle_next_page(actor); } static void direct_finish_page(struct squashfs_page_actor *actor) { if (actor->pageaddr) kunmap_local(actor->pageaddr); } struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk, struct page **page, int pages, int length, loff_t start_index) { struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); if (actor == NULL) return NULL; if (msblk->decompressor->alloc_buffer) { actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (actor->tmp_buffer == NULL) { kfree(actor); return NULL; } } else actor->tmp_buffer = NULL; actor->length = length ? : pages * PAGE_SIZE; actor->page = page; actor->pages = pages; actor->next_page = 0; actor->returned_pages = 0; actor->next_index = start_index >> PAGE_SHIFT; actor->pageaddr = NULL; actor->last_page = NULL; actor->alloc_buffer = msblk->decompressor->alloc_buffer; actor->squashfs_first_page = direct_first_page; actor->squashfs_next_page = direct_next_page; actor->squashfs_finish_page = direct_finish_page; return actor; }
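To show how the two actor implementations above are consumed, here is a minimal, hypothetical sketch (an illustrative helper, not one of the real squashfs decompressor back ends) that copies an already-decompressed buffer into whatever destinations the actor hands out, one page-sized chunk at a time, honouring the file's note about not sleeping between the first_page and finish_page calls:

/*
 * Hypothetical example: walk a page actor and fill it from 'src'.
 * Assumes the usual kernel headers already pulled in by this file
 * (PAGE_SIZE, memcpy(), min_t(), IS_ERR_OR_NULL()).
 */
static int example_fill_actor(struct squashfs_page_actor *actor,
			      const void *src, int length)
{
	/* Never write more than the actor was sized for. */
	int bytes = min_t(int, length, actor->length);
	void *dst = actor->squashfs_first_page(actor);

	for (;;) {
		int avail = min_t(int, bytes, PAGE_SIZE);

		/*
		 * The direct actor may return NULL or an ERR_PTR for pages
		 * it cannot supply; a real back end handles that case, here
		 * those chunks are simply skipped.
		 */
		if (!IS_ERR_OR_NULL(dst))
			memcpy(dst, src, avail);

		bytes -= avail;
		if (bytes <= 0)
			break;
		src += avail;
		dst = actor->squashfs_next_page(actor);
	}
	actor->squashfs_finish_page(actor);
	return 0;
}

With the cache actor set up by squashfs_page_actor_init(), the destinations handed out here are the intermediate buffers; with the special actor they are kmapped page cache pages (or its temporary buffer), as implemented above.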
// SPDX-License-Identifier: GPL-2.0-or-later /* * Sonix sn9c102p sn9c105 sn9c120 (jpeg) subdriver * * Copyright (C) 2009-2011 Jean-François Moine <http://moinejf.free.fr> * Copyright (C) 2005 Michel Xhaard mxhaard@magic.fr */ #define pr_fmt(fmt) KBUILD_MODNAME ": 
" fmt #define MODULE_NAME "sonixj" #include <linux/input.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ atomic_t avg_lum; struct v4l2_ctrl *brightness; struct v4l2_ctrl *contrast; struct v4l2_ctrl *saturation; struct { /* red/blue balance control cluster */ struct v4l2_ctrl *red_bal; struct v4l2_ctrl *blue_bal; }; struct { /* hflip/vflip control cluster */ struct v4l2_ctrl *vflip; struct v4l2_ctrl *hflip; }; struct v4l2_ctrl *gamma; struct v4l2_ctrl *illum; struct v4l2_ctrl *sharpness; struct v4l2_ctrl *freq; u32 exposure; struct work_struct work; u32 pktsz; /* (used by pkt_scan) */ u16 npkt; s8 nchg; s8 short_mark; u8 quality; /* image quality */ #define QUALITY_MIN 25 #define QUALITY_MAX 90 #define QUALITY_DEF 70 u8 reg01; u8 reg17; u8 reg18; u8 flags; s8 ag_cnt; #define AG_CNT_START 13 u8 bridge; #define BRIDGE_SN9C102P 0 #define BRIDGE_SN9C105 1 #define BRIDGE_SN9C110 2 #define BRIDGE_SN9C120 3 u8 sensor; /* Type of image sensor chip */ u8 i2c_addr; u8 jpeg_hdr[JPEG_HDR_SZ]; }; enum sensors { SENSOR_ADCM1700, SENSOR_GC0307, SENSOR_HV7131R, SENSOR_MI0360, SENSOR_MI0360B, SENSOR_MO4000, SENSOR_MT9V111, SENSOR_OM6802, SENSOR_OV7630, SENSOR_OV7648, SENSOR_OV7660, SENSOR_PO1030, SENSOR_PO2030N, SENSOR_SOI768, SENSOR_SP80708, }; static void qual_upd(struct work_struct *work); /* device flags */ #define F_PDN_INV 0x01 /* inverse pin S_PWR_DN / sn_xxx tables */ #define F_ILLUM 0x02 /* presence of illuminator */ /* sn9c1xx definitions */ /* register 0x01 */ #define S_PWR_DN 0x01 /* sensor power down */ #define S_PDN_INV 0x02 /* inverse pin S_PWR_DN */ #define V_TX_EN 0x04 /* video transfer enable */ #define LED 0x08 /* output to pin LED */ #define SCL_SEL_OD 0x20 /* open-drain mode */ #define SYS_SEL_48M 0x40 /* system clock 0: 24MHz, 1: 48MHz */ /* register 0x17 */ #define MCK_SIZE_MASK 0x1f /* sensor master clock */ #define SEN_CLK_EN 0x20 /* enable sensor clock */ #define DEF_EN 0x80 /* defect pixel by 0: soft, 1: hard */ static const struct v4l2_pix_format cif_mode[] = { {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2}, {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! 
*/ .sizeimage = 640 * 480 * 3 / 4 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; static const u8 sn_adcm1700[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x43, 0x60, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x80, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x05, 0x01, 0x05, 0x16, 0x12, 0x42, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_gc0307[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x62, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x80, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x03, 0x01, 0x08, 0x28, 0x1e, 0x02, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_hv7131[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x03, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x03, 0x28, 0x1e, 0x41, /* reg18 reg19 reg1a reg1b */ 0x0a, 0x00, 0x00, 0x00 }; static const u8 sn_mi0360[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x61, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_mi0360b[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_mo4000[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x23, 0x60, 0x00, 0x1a, 0x00, 0x20, 0x18, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x0b, 0x0f, 0x14, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x08, 0x00, 0x00, 0x00 }; static const u8 sn_mt9v111[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x02, 0x1c, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_om6802[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x23, 0x72, 0x00, 0x1a, 0x20, 0x20, 0x19, /* reg8 reg9 rega regb regc regd rege regf */ 0x80, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x51, 0x01, 0x00, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x05, 0x00, 0x00, 0x00 }; static const u8 sn_ov7630[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x04, 0x01, 0x0a, 0x28, 0x1e, 0xc2, /* reg18 reg19 reg1a reg1b */ 0x0b, 0x00, 0x00, 0x00 }; static const u8 
sn_ov7648[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x00, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x0b, 0x00, 0x00, 0x00 }; static const u8 sn_ov7660[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x01, 0x01, 0x08, 0x28, 0x1e, 0x20, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_po1030[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x06, 0x06, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_po2030n[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x14, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_soi768[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x08, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_sp80708[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x03, 0x04, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; /* sequence specific to the sensors - !! 
index = SENSOR_xxx */ static const u8 *sn_tb[] = { [SENSOR_ADCM1700] = sn_adcm1700, [SENSOR_GC0307] = sn_gc0307, [SENSOR_HV7131R] = sn_hv7131, [SENSOR_MI0360] = sn_mi0360, [SENSOR_MI0360B] = sn_mi0360b, [SENSOR_MO4000] = sn_mo4000, [SENSOR_MT9V111] = sn_mt9v111, [SENSOR_OM6802] = sn_om6802, [SENSOR_OV7630] = sn_ov7630, [SENSOR_OV7648] = sn_ov7648, [SENSOR_OV7660] = sn_ov7660, [SENSOR_PO1030] = sn_po1030, [SENSOR_PO2030N] = sn_po2030n, [SENSOR_SOI768] = sn_soi768, [SENSOR_SP80708] = sn_sp80708, }; /* default gamma table */ static const u8 gamma_def[17] = { 0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99, 0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5, 0xff }; /* gamma for sensor ADCM1700 */ static const u8 gamma_spec_0[17] = { 0x0f, 0x39, 0x5a, 0x74, 0x86, 0x95, 0xa6, 0xb4, 0xbd, 0xc4, 0xcc, 0xd4, 0xd5, 0xde, 0xe4, 0xed, 0xf5 }; /* gamma for sensors HV7131R and MT9V111 */ static const u8 gamma_spec_1[17] = { 0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d, 0xa9, 0xb4, 0xbe, 0xc8, 0xd2, 0xdb, 0xe4, 0xed, 0xf5 }; /* gamma for sensor GC0307 */ static const u8 gamma_spec_2[17] = { 0x14, 0x37, 0x50, 0x6a, 0x7c, 0x8d, 0x9d, 0xab, 0xb5, 0xbf, 0xc2, 0xcb, 0xd1, 0xd6, 0xdb, 0xe1, 0xeb }; /* gamma for sensor SP80708 */ static const u8 gamma_spec_3[17] = { 0x0a, 0x2d, 0x4e, 0x68, 0x7d, 0x8f, 0x9f, 0xab, 0xb7, 0xc2, 0xcc, 0xd3, 0xd8, 0xde, 0xe2, 0xe5, 0xe6 }; /* color matrix and offsets */ static const u8 reg84[] = { 0x14, 0x00, 0x27, 0x00, 0x07, 0x00, /* YR YG YB gains */ 0xe8, 0x0f, 0xda, 0x0f, 0x40, 0x00, /* UR UG UB */ 0x3e, 0x00, 0xcd, 0x0f, 0xf7, 0x0f, /* VR VG VB */ 0x00, 0x00, 0x00 /* YUV offsets */ }; #define DELAY 0xdd static const u8 adcm1700_sensor_init[][8] = { {0xa0, 0x51, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x04, 0x08, 0x00, 0x00, 0x00, 0x10}, /* reset */ {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x0c, 0xe0, 0x2e, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x10, 0x02, 0x02, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x14, 0x0e, 0x0e, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x1c, 0x00, 0x80, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x20, 0x01, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x04, 0x04, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x04, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x14, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 adcm1700_sensor_param1[][8] = { {0xb0, 0x51, 0x26, 0xf9, 0x01, 0x00, 0x00, 0x10}, /* exposure? */ {0xd0, 0x51, 0x1e, 0x8e, 0x8e, 0x8e, 0x8e, 0x10}, {0xa0, 0x51, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x32, 0x00, 0x72, 0x00, 0x00, 0x10}, {0xd0, 0x51, 0x1e, 0xbe, 0xd7, 0xe8, 0xbe, 0x10}, /* exposure? 
*/ {0xa0, 0x51, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10}, {} }; static const u8 gc0307_sensor_init[][8] = { {0xa0, 0x21, 0x43, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x44, 0xa2, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x01, 0x6a, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x02, 0x70, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x11, 0x05, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x09, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0a, 0xe8, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0b, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0d, 0x22, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/ {0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x17, 0x52, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x18, 0x50, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1e, 0x0d, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1f, 0x32, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x61, 0x90, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x63, 0x70, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x65, 0x98, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x67, 0x90, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x04, 0x96, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x45, 0x27, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x47, 0x2c, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x43, 0x47, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x44, 0xd8, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 gc0307_sensor_param1[][8] = { {0xa0, 0x21, 0x68, 0x13, 0x00, 0x00, 0x00, 0x10}, {0xd0, 0x21, 0x61, 0x80, 0x00, 0x80, 0x00, 0x10}, {0xc0, 0x21, 0x65, 0x80, 0x00, 0x80, 0x00, 0x10}, {0xc0, 0x21, 0x63, 0xa0, 0x00, 0xa6, 0x00, 0x10}, /*param3*/ {0xa0, 0x21, 0x01, 0x6e, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x02, 0x88, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 hv7131r_sensor_init[][8] = { {0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10}, {0xd1, 0x11, 0x40, 0xff, 0x7f, 0x7f, 0x7f, 0x10}, /* {0x91, 0x11, 0x44, 0x00, 0x00, 0x00, 0x00, 0x10}, */ {0xd1, 0x11, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x11, 0x14, 0x01, 0xe2, 0x02, 0x82, 0x10}, /* {0x91, 0x11, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10}, */ {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x11, 0x25, 0x00, 0x61, 0xa8, 0x00, 0x10}, {0xa1, 0x11, 0x30, 0x22, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x11, 0x31, 0x20, 0x2e, 0x20, 0x00, 0x10}, {0xc1, 0x11, 0x25, 0x00, 0xc3, 0x50, 0x00, 0x10}, {0xa1, 0x11, 0x30, 0x07, 0x00, 0x00, 0x00, 0x10}, /* gain14 */ {0xc1, 0x11, 0x31, 0x10, 0x10, 0x10, 0x00, 0x10}, /* r g b 101a10 */ {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x21, 0xd0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x22, 0x00, 
0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x23, 0x09, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x21, 0xd0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x23, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x01, 0x18, 0x00, 0x00, 0x00, 0x10}, /* set sensor clock */ {} }; static const u8 mi0360_sensor_init[][8] = { {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x01, 0x00, 0x08, 0x00, 0x16, 0x10}, {0xd1, 0x5d, 0x03, 0x01, 0xe2, 0x02, 0x82, 0x10}, {0xd1, 0x5d, 0x05, 0x00, 0x09, 0x00, 0x53, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x20, 0x91, 0x01, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10}, {0xd1, 0x5d, 0x2f, 0xf7, 0xb0, 0x00, 0x04, 0x10}, {0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10}, {0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x40, 0x01, 0xe0, 0x00, 0xd1, 0x10}, {0xb1, 0x5d, 0x44, 0x00, 0x82, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x58, 0x00, 0x78, 0x00, 0x43, 0x10}, {0xd1, 0x5d, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x5e, 0x00, 0x00, 0xa3, 0x1d, 0x10}, {0xb1, 0x5d, 0x62, 0x04, 0x11, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x20, 0x91, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x09, 0x00, 0x64, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x2b, 0x00, 0xa0, 0x00, 0xb0, 0x10}, {0xd1, 0x5d, 0x2d, 0x00, 0xa0, 0x00, 0xa0, 0x10}, {0xb1, 0x5d, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor clck ?2 */ {0xb1, 0x5d, 0x06, 0x00, 0x30, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x05, 0x00, 0x0a, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x09, 0x02, 0x35, 0x00, 0x00, 0x10}, /* exposure 2 */ {0xd1, 0x5d, 0x2b, 0x00, 0xb9, 0x00, 0xe3, 0x10}, {0xd1, 0x5d, 0x2d, 0x00, 0x5f, 0x00, 0xb9, 0x10}, /* 42 */ /* {0xb1, 0x5d, 0x35, 0x00, 0x67, 0x00, 0x00, 0x10}, * gain orig */ /* {0xb1, 0x5d, 0x35, 0x00, 0x20, 0x00, 0x00, 0x10}, * gain */ {0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10}, /* update */ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */ {} }; static const u8 mi0360b_sensor_init[][8] = { {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/ {0xb1, 0x5d, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/ {0xd1, 0x5d, 0x01, 0x00, 0x08, 0x00, 0x16, 0x10}, {0xd1, 0x5d, 0x03, 0x01, 0xe2, 0x02, 0x82, 0x10}, {0xd1, 0x5d, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 
0x0c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10}, {0xd1, 0x5d, 0x2f, 0xf7, 0xb0, 0x00, 0x04, 0x10}, {0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10}, {0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x40, 0x01, 0xe0, 0x00, 0xd1, 0x10}, {0xb1, 0x5d, 0x44, 0x00, 0x82, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x58, 0x00, 0x78, 0x00, 0x43, 0x10}, {0xd1, 0x5d, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x5e, 0x00, 0x00, 0xa3, 0x1d, 0x10}, {0xb1, 0x5d, 0x62, 0x04, 0x11, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x09, 0x00, 0x64, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x2b, 0x00, 0x33, 0x00, 0xa0, 0x10}, {0xd1, 0x5d, 0x2d, 0x00, 0xa0, 0x00, 0x33, 0x10}, {} }; static const u8 mi0360b_sensor_param1[][8] = { {0xb1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x06, 0x00, 0x53, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x05, 0x00, 0x09, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x09, 0x02, 0x35, 0x00, 0x00, 0x10}, /* exposure 2 */ {0xd1, 0x5d, 0x2b, 0x00, 0xd1, 0x01, 0xc9, 0x10}, {0xd1, 0x5d, 0x2d, 0x00, 0xed, 0x00, 0xd1, 0x10}, {0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10}, /* update */ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */ {} }; static const u8 mo4000_sensor_init[][8] = { {0xa1, 0x21, 0x01, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x02, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x05, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x06, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x06, 0x81, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x11, 0x20, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x11, 0x30, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x0f, 0x20, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 mt9v111_sensor_init[][8] = { {0xb1, 0x5c, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, /* reset? 
*/ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xb1, 0x5c, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5c, 0x01, 0x00, 0x01, 0x00, 0x00, 0x10}, /* IFP select */ {0xb1, 0x5c, 0x08, 0x04, 0x80, 0x00, 0x00, 0x10}, /* output fmt ctrl */ {0xb1, 0x5c, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10}, /* op mode ctrl */ {0xb1, 0x5c, 0x01, 0x00, 0x04, 0x00, 0x00, 0x10}, /* sensor select */ {0xb1, 0x5c, 0x08, 0x00, 0x08, 0x00, 0x00, 0x10}, /* row start */ {0xb1, 0x5c, 0x02, 0x00, 0x16, 0x00, 0x00, 0x10}, /* col start */ {0xb1, 0x5c, 0x03, 0x01, 0xe7, 0x00, 0x00, 0x10}, /* window height */ {0xb1, 0x5c, 0x04, 0x02, 0x87, 0x00, 0x00, 0x10}, /* window width */ {0xb1, 0x5c, 0x07, 0x30, 0x02, 0x00, 0x00, 0x10}, /* output ctrl */ {0xb1, 0x5c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10}, /* shutter delay */ {0xb1, 0x5c, 0x12, 0x00, 0xb0, 0x00, 0x00, 0x10}, /* zoom col start */ {0xb1, 0x5c, 0x13, 0x00, 0x7c, 0x00, 0x00, 0x10}, /* zoom row start */ {0xb1, 0x5c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x10}, /* digital zoom */ {0xb1, 0x5c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, /* read mode */ {0xb1, 0x5c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 mt9v111_sensor_param1[][8] = { {0xd1, 0x5c, 0x2b, 0x00, 0x33, 0x00, 0xad, 0x10}, /* G1 and B gains */ {0xd1, 0x5c, 0x2d, 0x00, 0xad, 0x00, 0x33, 0x10}, /* R and G2 gains */ {0xb1, 0x5c, 0x06, 0x00, 0x40, 0x00, 0x00, 0x10}, /* vert blanking */ {0xb1, 0x5c, 0x05, 0x00, 0x09, 0x00, 0x00, 0x10}, /* horiz blanking */ {0xb1, 0x5c, 0x35, 0x01, 0xc0, 0x00, 0x00, 0x10}, /* global gain */ {} }; static const u8 om6802_init0[2][8] = { /*fixme: variable*/ {0xa0, 0x34, 0x29, 0x0e, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x23, 0xb0, 0x00, 0x00, 0x00, 0x10}, }; static const u8 om6802_sensor_init[][8] = { {0xa0, 0x34, 0xdf, 0x6d, 0x00, 0x00, 0x00, 0x10}, /* factory mode */ {0xa0, 0x34, 0xdd, 0x18, 0x00, 0x00, 0x00, 0x10}, /* output raw RGB */ {0xa0, 0x34, 0x5a, 0xc0, 0x00, 0x00, 0x00, 0x10}, /* {0xa0, 0x34, 0xfb, 0x11, 0x00, 0x00, 0x00, 0x10}, */ {0xa0, 0x34, 0xf0, 0x04, 0x00, 0x00, 0x00, 0x10}, /* auto-exposure speed (0) / white balance mode (auto RGB) */ /* {0xa0, 0x34, 0xf1, 0x02, 0x00, 0x00, 0x00, 0x10}, * set color mode */ /* {0xa0, 0x34, 0xfe, 0x5b, 0x00, 0x00, 0x00, 0x10}, * max AGC value in AE */ /* {0xa0, 0x34, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x10}, * preset AGC */ /* {0xa0, 0x34, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x10}, * preset brightness */ /* {0xa0, 0x34, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x10}, * preset contrast */ /* {0xa0, 0x34, 0xe8, 0x31, 0x00, 0x00, 0x00, 0x10}, * preset gamma */ {0xa0, 0x34, 0xe9, 0x0f, 0x00, 0x00, 0x00, 0x10}, /* luminance mode (0x4f -> AutoExpo on) */ {0xa0, 0x34, 0xe4, 0xff, 0x00, 0x00, 0x00, 0x10}, /* preset shutter */ /* {0xa0, 0x34, 0xef, 0x00, 0x00, 0x00, 0x00, 0x10}, * auto frame rate */ /* {0xa0, 0x34, 0xfb, 0xee, 0x00, 0x00, 0x00, 0x10}, */ {0xa0, 0x34, 0x5d, 0x80, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 om6802_sensor_param1[][8] = { {0xa0, 0x34, 0x71, 0x84, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x72, 0x05, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x68, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x69, 0x01, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 ov7630_sensor_init[][8] = { {0xa1, 0x21, 0x76, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 
0x48, 0x00, 0x00, 0x00, 0x10}, /* win: i2c_r from 00 to 80 */ {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10}, {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10}, /* HDG: 0x11 was 0x00 change to 0x01 for better exposure (15 fps instead of 30) 0x13 was 0xc0 change to 0xc3 for auto gain and exposure */ {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10}, {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x1f, 0x00, 0x80, 0x80, 0x80, 0x10}, {0xd1, 0x21, 0x23, 0xde, 0x10, 0x8a, 0xa0, 0x10}, {0xc1, 0x21, 0x27, 0xca, 0xa2, 0x74, 0x00, 0x10}, {0xd1, 0x21, 0x2a, 0x88, 0x00, 0x88, 0x01, 0x10}, {0xc1, 0x21, 0x2e, 0x80, 0x00, 0x18, 0x00, 0x10}, {0xa1, 0x21, 0x21, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x32, 0xc2, 0x08, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x60, 0x05, 0x40, 0x12, 0x57, 0x10}, {0xa1, 0x21, 0x64, 0x73, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x65, 0x00, 0x55, 0x01, 0xac, 0x10}, {0xa1, 0x21, 0x69, 0x38, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x6f, 0x1f, 0x01, 0x00, 0x10, 0x10}, {0xd1, 0x21, 0x73, 0x50, 0x20, 0x02, 0x01, 0x10}, {0xd1, 0x21, 0x77, 0xf3, 0x90, 0x98, 0x98, 0x10}, {0xc1, 0x21, 0x7b, 0x00, 0x4c, 0xf7, 0x00, 0x10}, {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 ov7630_sensor_param1[][8] = { {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, /*fixme: + 0x12, 0x04*/ /* {0xa1, 0x21, 0x75, 0x82, 0x00, 0x00, 0x00, 0x10}, * COMN * set by setvflip */ {0xa1, 0x21, 0x10, 0x32, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10}, /* */ /* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */ /* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */ /* */ {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10}, /* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */ {} }; static const u8 ov7648_sensor_init[][8] = { {0xa1, 0x21, 0x76, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x03, 0xa4, 0x30, 0x88, 0x00, 0x10}, {0xb1, 0x21, 0x11, 0x80, 0x08, 0x00, 0x00, 0x10}, {0xc1, 0x21, 0x13, 0xa0, 0x04, 0x84, 0x00, 0x10}, {0xd1, 0x21, 0x17, 0x1a, 0x02, 0xba, 0xf4, 0x10}, {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x1f, 0x41, 0xc0, 0x80, 0x80, 0x10}, {0xd1, 0x21, 0x23, 0xde, 0xa0, 0x80, 0x32, 0x10}, {0xd1, 0x21, 0x27, 0xfe, 0xa0, 0x00, 0x91, 0x10}, {0xd1, 0x21, 0x2b, 0x00, 0x88, 0x85, 0x80, 0x10}, {0xc1, 0x21, 0x2f, 0x9c, 0x00, 0xc4, 0x00, 0x10}, {0xd1, 0x21, 0x60, 0xa6, 0x60, 0x88, 0x12, 0x10}, {0xd1, 0x21, 0x64, 0x88, 0x00, 0x00, 0x94, 0x10}, {0xd1, 0x21, 0x68, 0x7a, 0x0c, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x6c, 0x11, 0x33, 0x22, 0x00, 0x10}, {0xd1, 0x21, 0x70, 0x11, 0x00, 0x10, 0x50, 0x10}, {0xd1, 0x21, 0x74, 0x20, 0x06, 0x00, 0xb5, 0x10}, {0xd1, 0x21, 0x78, 0x8a, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x7c, 0x00, 0x43, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10}, /* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */ /* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */ /* {0xb1, 0x21, 0x2d, 
0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */ {} }; static const u8 ov7648_sensor_param1[][8] = { /* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN * set by setvflip */ {0xa1, 0x21, 0x19, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x10, 0x32, 0x00, 0x00, 0x00, 0x10}, /* {0xa1, 0x21, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, * GAIN - def */ /* {0xb1, 0x21, 0x01, 0x6c, 0x6c, 0x00, 0x00, 0x10}, * B R - def: 80 */ /*...*/ {0xa1, 0x21, 0x11, 0x81, 0x00, 0x00, 0x00, 0x10}, /* CLKRC */ /* {0xa1, 0x21, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x2a, 0x91, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xb1, 0x21, 0x01, 0x64, 0x84, 0x00, 0x00, 0x10}, * B R - def: 80 */ {} }; static const u8 ov7660_sensor_init[][8] = { {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x05, 0x00, 0x00, 0x00, 0x10}, /* Outformat = rawRGB */ {0xa1, 0x21, 0x13, 0xb8, 0x00, 0x00, 0x00, 0x10}, /* init COM8 */ {0xd1, 0x21, 0x00, 0x01, 0x74, 0x92, 0x00, 0x10}, /* GAIN BLUE RED VREF */ {0xd1, 0x21, 0x04, 0x00, 0x7d, 0x62, 0x00, 0x10}, /* COM 1 BAVE GEAVE AECHH */ {0xb1, 0x21, 0x08, 0x83, 0x01, 0x00, 0x00, 0x10}, /* RAVE COM2 */ {0xd1, 0x21, 0x0c, 0x00, 0x08, 0x04, 0x4f, 0x10}, /* COM 3 4 5 6 */ {0xd1, 0x21, 0x10, 0x7f, 0x40, 0x05, 0xff, 0x10}, /* AECH CLKRC COM7 COM8 */ {0xc1, 0x21, 0x14, 0x2c, 0x00, 0x02, 0x00, 0x10}, /* COM9 COM10 */ {0xd1, 0x21, 0x17, 0x10, 0x60, 0x02, 0x7b, 0x10}, /* HSTART HSTOP VSTRT VSTOP */ {0xa1, 0x21, 0x1b, 0x02, 0x00, 0x00, 0x00, 0x10}, /* PSHFT */ {0xb1, 0x21, 0x1e, 0x01, 0x0e, 0x00, 0x00, 0x10}, /* MVFP LAEC */ {0xd1, 0x21, 0x20, 0x07, 0x07, 0x07, 0x07, 0x10}, /* BOS GBOS GROS ROS (BGGR offset) */ /* {0xd1, 0x21, 0x24, 0x68, 0x58, 0xd4, 0x80, 0x10}, */ {0xd1, 0x21, 0x24, 0x78, 0x68, 0xd4, 0x80, 0x10}, /* AEW AEB VPT BBIAS */ {0xd1, 0x21, 0x28, 0x80, 0x30, 0x00, 0x00, 0x10}, /* GbBIAS RSVD EXHCH EXHCL */ {0xd1, 0x21, 0x2c, 0x80, 0x00, 0x00, 0x62, 0x10}, /* RBIAS ADVFL ASDVFH YAVE */ {0xc1, 0x21, 0x30, 0x08, 0x30, 0xb4, 0x00, 0x10}, /* HSYST HSYEN HREF */ {0xd1, 0x21, 0x33, 0x00, 0x07, 0x84, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x37, 0x0c, 0x02, 0x43, 0x00, 0x10}, /* ADC ACOM OFON TSLB */ {0xd1, 0x21, 0x3b, 0x02, 0x6c, 0x19, 0x0e, 0x10}, /* COM11 COM12 COM13 COM14 */ {0xd1, 0x21, 0x3f, 0x41, 0xc1, 0x22, 0x08, 0x10}, /* EDGE COM15 COM16 COM17 */ {0xd1, 0x21, 0x43, 0xf0, 0x10, 0x78, 0xa8, 0x10}, /* reserved */ {0xd1, 0x21, 0x47, 0x60, 0x80, 0x00, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x4f, 0x46, 0x36, 0x0f, 0x17, 0x10}, /* MTX 1 2 3 4 */ {0xd1, 0x21, 0x53, 0x7f, 0x96, 0x40, 0x40, 0x10}, /* MTX 5 6 7 8 */ {0xb1, 0x21, 0x57, 0x40, 0x0f, 0x00, 0x00, 0x10}, /* MTX9 MTXS */ {0xd1, 0x21, 0x59, 0xba, 0x9a, 0x22, 0xb9, 0x10}, /* reserved */ {0xd1, 0x21, 0x5d, 0x9b, 0x10, 0xf0, 0x05, 0x10}, /* reserved */ {0xa1, 0x21, 0x61, 0x60, 0x00, 0x00, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x62, 0x00, 0x00, 0x50, 0x30, 0x10}, /* LCC1 LCC2 LCC3 LCC4 */ {0xa1, 0x21, 0x66, 0x00, 0x00, 0x00, 0x00, 0x10}, /* LCC5 */ {0xd1, 0x21, 0x67, 0x80, 0x7a, 0x90, 0x80, 0x10}, /* MANU */ {0xa1, 0x21, 0x6b, 0x0a, 0x00, 0x00, 0x00, 0x10}, /* band gap reference [0:3] DBLV */ {0xd1, 
0x21, 0x6c, 0x30, 0x48, 0x80, 0x74, 0x10}, /* gamma curve */
	{0xd1, 0x21, 0x70, 0x64, 0x60, 0x5c, 0x58, 0x10}, /* gamma curve */
	{0xd1, 0x21, 0x74, 0x54, 0x4c, 0x40, 0x38, 0x10}, /* gamma curve */
	{0xd1, 0x21, 0x78, 0x34, 0x30, 0x2f, 0x2b, 0x10}, /* gamma curve */
	{0xd1, 0x21, 0x7c, 0x03, 0x07, 0x17, 0x34, 0x10}, /* gamma curve */
	{0xd1, 0x21, 0x80, 0x41, 0x4d, 0x58, 0x63, 0x10}, /* gamma curve */
	{0xd1, 0x21, 0x84, 0x6e, 0x77, 0x87, 0x95, 0x10}, /* gamma curve */
	{0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */
	{0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */
	{0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, /* DM_LNL/H */
/* not in all ms-win traces*/
	{0xa1, 0x21, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x10},
	{}
};
static const u8 ov7660_sensor_param1[][8] = {
	{0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, /* MVFP */
						/* bits[3..0] reserved */
	{0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, /* VREF vertical frame ctrl */
	{0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10}, /* AECH 0x20 */
	{0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, /* ADVFL */
	{0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, /* ADVFH */
	{0xa1, 0x21, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x10}, /* GAIN */
/*	{0xb1, 0x21, 0x01, 0x78, 0x78, 0x00, 0x00, 0x10},	 * BLUE */
/****** (some exchanges in the win trace) ******/
/*fixme:param2*/
	{0xa1, 0x21, 0x93, 0x00, 0x00, 0x00, 0x00, 0x10}, /* dummy line height */
	{0xa1, 0x21, 0x92, 0x25, 0x00, 0x00, 0x00, 0x10}, /* dummy line low */
	{0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, /* EXHCH */
	{0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* EXHCL */
/*	{0xa1, 0x21, 0x02, 0x90, 0x00, 0x00, 0x00, 0x10},	 * RED */
/****** (some exchanges in the win trace) ******/
/******!!
startsensor KO if changed !!****/ /*fixme: param3*/ {0xa1, 0x21, 0x93, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x92, 0xff, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2b, 0xc3, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 po1030_sensor_init[][8] = { /* the sensor registers are described in m5602/m5602_po1030.h */ {0xa1, 0x6e, 0x3f, 0x20, 0x00, 0x00, 0x00, 0x10}, /* sensor reset */ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x6e, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x04, 0x02, 0xb1, 0x02, 0x39, 0x10}, {0xd1, 0x6e, 0x08, 0x00, 0x01, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x0c, 0x02, 0x7f, 0x01, 0xe0, 0x10}, {0xd1, 0x6e, 0x12, 0x03, 0x02, 0x00, 0x03, 0x10}, {0xd1, 0x6e, 0x16, 0x85, 0x40, 0x4a, 0x40, 0x10}, /* r/g1/b/g2 gains */ {0xc1, 0x6e, 0x1a, 0x00, 0x80, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x1d, 0x08, 0x03, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x23, 0x00, 0xb0, 0x00, 0x94, 0x10}, {0xd1, 0x6e, 0x27, 0x58, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x6e, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x2d, 0x14, 0x35, 0x61, 0x84, 0x10}, /* gamma corr */ {0xd1, 0x6e, 0x31, 0xa2, 0xbd, 0xd8, 0xff, 0x10}, {0xd1, 0x6e, 0x35, 0x06, 0x1e, 0x12, 0x02, 0x10}, /* color matrix */ {0xd1, 0x6e, 0x39, 0xaa, 0x53, 0x37, 0xd5, 0x10}, {0xa1, 0x6e, 0x3d, 0xf2, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x3e, 0x00, 0x00, 0x80, 0x03, 0x10}, {0xd1, 0x6e, 0x42, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x6e, 0x46, 0x00, 0x80, 0x80, 0x00, 0x10}, {0xd1, 0x6e, 0x4b, 0x02, 0xef, 0x08, 0xcd, 0x10}, {0xd1, 0x6e, 0x4f, 0x00, 0xd0, 0x00, 0xa0, 0x10}, {0xd1, 0x6e, 0x53, 0x01, 0xaa, 0x01, 0x40, 0x10}, {0xd1, 0x6e, 0x5a, 0x50, 0x04, 0x30, 0x03, 0x10}, /* raw rgb bayer */ {0xa1, 0x6e, 0x5e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x5f, 0x10, 0x40, 0xff, 0x00, 0x10}, {0xd1, 0x6e, 0x63, 0x40, 0x40, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x6f, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x6e, 0x73, 0x10, 0x80, 0xeb, 0x00, 0x10}, {} }; static const u8 po1030_sensor_param1[][8] = { /* from ms-win traces - these values change with auto gain/expo/wb.. 
*/ {0xa1, 0x6e, 0x1e, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x1e, 0x03, 0x00, 0x00, 0x00, 0x10}, /* mean values */ {0xc1, 0x6e, 0x1a, 0x02, 0xd4, 0xa4, 0x00, 0x10}, /* integlines */ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, /* global gain */ {0xc1, 0x6e, 0x16, 0x40, 0x40, 0x40, 0x00, 0x10}, /* r/g1/b gains */ {0xa1, 0x6e, 0x1d, 0x08, 0x00, 0x00, 0x00, 0x10}, /* control1 */ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, /* frameheight */ {0xa1, 0x6e, 0x07, 0xd5, 0x00, 0x00, 0x00, 0x10}, /* {0xc1, 0x6e, 0x16, 0x49, 0x40, 0x45, 0x00, 0x10}, */ {} }; static const u8 po2030n_sensor_init[][8] = { {0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */ {0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */ {0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x08, 0x00, 0xd0, 0x00, 0x08, 0x10}, {0xd1, 0x6e, 0x0c, 0x03, 0x50, 0x01, 0xe8, 0x10}, {0xd1, 0x6e, 0x1d, 0x20, 0x0a, 0x19, 0x44, 0x10}, {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x29, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x35, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x41, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x45, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x49, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x4d, 0x00, 0x00, 0x00, 0xed, 0x10}, {0xd1, 0x6e, 0x51, 0x17, 0x4a, 0x2f, 0xc0, 0x10}, {0xd1, 0x6e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x59, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x61, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x69, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x71, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x75, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x79, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x81, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x85, 0x00, 0x00, 0x00, 0x08, 0x10}, {0xd1, 0x6e, 0x89, 0x01, 0xe8, 0x00, 0x01, 0x10}, {0xa1, 0x6e, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x01, 0x10}, {0xd1, 0x6e, 0x29, 0xe6, 0x00, 0xbd, 0x03, 0x10}, {0xd1, 0x6e, 0x2d, 0x41, 0x38, 0x68, 0x40, 0x10}, {0xd1, 0x6e, 0x31, 0x2b, 0x00, 0x36, 0x00, 0x10}, {0xd1, 0x6e, 0x35, 0x30, 0x30, 0x08, 0x00, 0x10}, {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x33, 0x06, 0x10}, {0xb1, 0x6e, 0x3d, 0x06, 0x02, 0x00, 0x00, 0x10}, {} }; static const u8 po2030n_sensor_param1[][8] = { {0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */ {0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x16, 0x40, 0x40, 0x40, 0x40, 0x10}, /* RGBG gains */ /*param2*/ {0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10}, 
{0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x05, 0x6f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 soi768_sensor_init[][8] = { {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */ {DELAY, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x19, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 soi768_sensor_param1[][8] = { {0xa1, 0x21, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x01, 0x7f, 0x7f, 0x00, 0x00, 0x10}, /* */ /* {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, */ /* {0xa1, 0x21, 0x2d, 0x25, 0x00, 0x00, 0x00, 0x10}, */ {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* {0xb1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, */ {0xa1, 0x21, 0x02, 0x8d, 0x00, 0x00, 0x00, 0x10}, /* the next sequence should be used for auto gain */ {0xa1, 0x21, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10}, /* global gain ? : 07 - change with 0x15 at the end */ {0xa1, 0x21, 0x10, 0x3f, 0x00, 0x00, 0x00, 0x10}, /* ???? : 063f */ {0xa1, 0x21, 0x04, 0x06, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x2d, 0x63, 0x03, 0x00, 0x00, 0x10}, /* exposure ? : 0200 - change with 0x1e at the end */ {} }; static const u8 sp80708_sensor_init[][8] = { {0xa1, 0x18, 0x06, 0xf9, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x09, 0x1f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0d, 0xc0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x10, 0x40, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x11, 0x4e, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x12, 0x53, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x15, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x19, 0x18, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1a, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1c, 0x28, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1d, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1e, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x26, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x27, 0x1e, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x28, 0x5a, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x29, 0x28, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2a, 0x78, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2b, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2c, 0xf7, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2d, 0x2d, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2e, 0xd5, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x39, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3a, 0x67, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3b, 0x87, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3c, 0xa3, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3d, 0xb0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3e, 0xbc, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3f, 0xc8, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x40, 0xd4, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x41, 0xdf, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x42, 0xea, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x43, 0xf5, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x45, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x46, 0x60, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 
0x47, 0x50, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x48, 0x30, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x49, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4d, 0xae, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4e, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4f, 0x66, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x50, 0x1c, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x44, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4a, 0x30, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x51, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x52, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x53, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x54, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x55, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x56, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x57, 0xe0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x58, 0xc0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x59, 0xab, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5a, 0xa0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5b, 0x99, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5c, 0x90, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5e, 0x24, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x60, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x61, 0x73, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x63, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x64, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x65, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x66, 0x24, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x67, 0x24, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x68, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2f, 0xc9, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 sp80708_sensor_param1[][8] = { {0xa1, 0x18, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x03, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x04, 0xa4, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x14, 0x3f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5d, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x18, 0x11, 0x40, 0x40, 0x00, 0x00, 0x10}, {} }; static const u8 (*sensor_init[])[8] = { [SENSOR_ADCM1700] = adcm1700_sensor_init, [SENSOR_GC0307] = gc0307_sensor_init, [SENSOR_HV7131R] = hv7131r_sensor_init, [SENSOR_MI0360] = mi0360_sensor_init, [SENSOR_MI0360B] = mi0360b_sensor_init, [SENSOR_MO4000] = mo4000_sensor_init, [SENSOR_MT9V111] = mt9v111_sensor_init, [SENSOR_OM6802] = om6802_sensor_init, [SENSOR_OV7630] = ov7630_sensor_init, [SENSOR_OV7648] = ov7648_sensor_init, [SENSOR_OV7660] = ov7660_sensor_init, [SENSOR_PO1030] = po1030_sensor_init, [SENSOR_PO2030N] = po2030n_sensor_init, [SENSOR_SOI768] = soi768_sensor_init, [SENSOR_SP80708] = sp80708_sensor_init, }; /* read <len> bytes to gspca_dev->usb_buf */ static void reg_r(struct gspca_dev *gspca_dev, u16 value, int len) { int ret; if (gspca_dev->usb_err < 0) return; if (len > USB_BUF_SZ) { gspca_err(gspca_dev, "reg_r: buffer overflow\n"); return; } ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, value, 0, gspca_dev->usb_buf, len, 500); gspca_dbg(gspca_dev, D_USBI, "reg_r [%02x] -> %02x\n", value, gspca_dev->usb_buf[0]); if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; /* * Make sure the buffer is zeroed to avoid uninitialized * values. 
 */
		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
	}
}

static void reg_w1(struct gspca_dev *gspca_dev,
		   u16 value,
		   u8 data)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	gspca_dbg(gspca_dev, D_USBO, "reg_w1 [%04x] = %02x\n", value, data);
	gspca_dev->usb_buf[0] = data;
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			value, 0,
			gspca_dev->usb_buf, 1,
			500);
	if (ret < 0) {
		pr_err("reg_w1 err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

static void reg_w(struct gspca_dev *gspca_dev,
		  u16 value,
		  const u8 *buffer,
		  int len)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	gspca_dbg(gspca_dev, D_USBO, "reg_w [%04x] = %02x %02x ..\n",
		  value, buffer[0], buffer[1]);

	if (len > USB_BUF_SZ) {
		gspca_err(gspca_dev, "reg_w: buffer overflow\n");
		return;
	}

	memcpy(gspca_dev->usb_buf, buffer, len);
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			value, 0,
			gspca_dev->usb_buf, len,
			500);
	if (ret < 0) {
		pr_err("reg_w err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

/* I2C write 1 byte */
static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	gspca_dbg(gspca_dev, D_USBO, "i2c_w1 [%02x] = %02x\n", reg, val);
	switch (sd->sensor) {
	case SENSOR_ADCM1700:
	case SENSOR_OM6802:
	case SENSOR_GC0307:		/* i2c command = a0 (100 kHz) */
		gspca_dev->usb_buf[0] = 0x80 | (2 << 4);
		break;
	default:			/* i2c command = a1 (400 kHz) */
		gspca_dev->usb_buf[0] = 0x81 | (2 << 4);
		break;
	}
	gspca_dev->usb_buf[1] = sd->i2c_addr;
	gspca_dev->usb_buf[2] = reg;
	gspca_dev->usb_buf[3] = val;
	gspca_dev->usb_buf[4] = 0;
	gspca_dev->usb_buf[5] = 0;
	gspca_dev->usb_buf[6] = 0;
	gspca_dev->usb_buf[7] = 0x10;
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0x08,			/* value = i2c */
			0, gspca_dev->usb_buf, 8, 500);
	msleep(2);
	if (ret < 0) {
		pr_err("i2c_w1 err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

/* I2C write 8 bytes */
static void i2c_w8(struct gspca_dev *gspca_dev,
		   const u8 *buffer)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	gspca_dbg(gspca_dev, D_USBO, "i2c_w8 [%02x] = %02x ..\n",
		  buffer[2], buffer[3]);
	memcpy(gspca_dev->usb_buf, buffer, 8);
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0x08, 0,		/* value, index */
			gspca_dev->usb_buf, 8, 500);
	msleep(2);
	if (ret < 0) {
		pr_err("i2c_w8 err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

/* sensor read 'len' (1..5) bytes in gspca_dev->usb_buf */
static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 mode[8];

	switch (sd->sensor) {
	case SENSOR_ADCM1700:
	case SENSOR_OM6802:
	case SENSOR_GC0307:		/* i2c command = a0 (100 kHz) */
		mode[0] = 0x80 | 0x10;
		break;
	default:			/* i2c command = 91 (400 kHz) */
		mode[0] = 0x81 | 0x10;
		break;
	}
	mode[1] = sd->i2c_addr;
	mode[2] = reg;
	mode[3] = 0;
	mode[4] = 0;
	mode[5] = 0;
	mode[6] = 0;
	mode[7] = 0x10;
	i2c_w8(gspca_dev, mode);
	msleep(2);
	mode[0] = (mode[0] & 0x81) | (len << 4) | 0x02;
	mode[2] = 0;
	i2c_w8(gspca_dev, mode);
	msleep(2);
	reg_r(gspca_dev, 0x0a, 5);
}

static void i2c_w_seq(struct gspca_dev *gspca_dev,
			const u8 (*data)[8])
{
	while ((*data)[0] != 0) {
		if ((*data)[0] != DELAY)
			i2c_w8(gspca_dev, *data);
		else
			msleep((*data)[1]);
		data++;
	}
}

/* check the ID of the hv7131 sensor */
/* this sequence is needed
because it activates the sensor */ static void hv7131r_probe(struct gspca_dev *gspca_dev) { i2c_w1(gspca_dev, 0x02, 0); /* sensor wakeup */ msleep(10); reg_w1(gspca_dev, 0x02, 0x66); /* Gpio on */ msleep(10); i2c_r(gspca_dev, 0, 5); /* read sensor id */ if (gspca_dev->usb_buf[0] == 0x02 /* chip ID (02 is R) */ && gspca_dev->usb_buf[1] == 0x09 && gspca_dev->usb_buf[2] == 0x01) { gspca_dbg(gspca_dev, D_PROBE, "Sensor HV7131R found\n"); return; } pr_warn("Erroneous HV7131R ID 0x%02x 0x%02x 0x%02x\n", gspca_dev->usb_buf[0], gspca_dev->usb_buf[1], gspca_dev->usb_buf[2]); } static void mi0360_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, j; u16 val = 0; static const u8 probe_tb[][4][8] = { { /* mi0360 */ {0xb0, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, {0x90, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa2, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10} }, { /* mt9v111 */ {0xb0, 0x5c, 0x01, 0x00, 0x04, 0x00, 0x00, 0x10}, {0x90, 0x5c, 0x36, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa2, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }, }; for (i = 0; i < ARRAY_SIZE(probe_tb); i++) { reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x08); for (j = 0; j < 3; j++) i2c_w8(gspca_dev, probe_tb[i][j]); msleep(2); reg_r(gspca_dev, 0x0a, 5); val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; if (probe_tb[i][3][0] != 0) i2c_w8(gspca_dev, probe_tb[i][3]); reg_w1(gspca_dev, 0x01, 0x29); reg_w1(gspca_dev, 0x17, 0x42); if (val != 0xffff) break; } if (gspca_dev->usb_err < 0) return; switch (val) { case 0x8221: gspca_dbg(gspca_dev, D_PROBE, "Sensor mi0360b\n"); sd->sensor = SENSOR_MI0360B; break; case 0x823a: gspca_dbg(gspca_dev, D_PROBE, "Sensor mt9v111\n"); sd->sensor = SENSOR_MT9V111; break; case 0x8243: gspca_dbg(gspca_dev, D_PROBE, "Sensor mi0360\n"); break; default: gspca_dbg(gspca_dev, D_PROBE, "Unknown sensor %04x - forced to mi0360\n", val); break; } } static void ov7630_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 val; /* check ov76xx */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x08); sd->i2c_addr = 0x21; i2c_r(gspca_dev, 0x0a, 2); val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); reg_w1(gspca_dev, 0x17, 0x42); if (gspca_dev->usb_err < 0) return; if (val == 0x7628) { /* soi768 */ sd->sensor = SENSOR_SOI768; /*fixme: only valid for 0c45:613e?*/ gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; gspca_dbg(gspca_dev, D_PROBE, "Sensor soi768\n"); return; } gspca_dbg(gspca_dev, D_PROBE, "Sensor ov%04x\n", val); } static void ov7648_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 val; /* check ov76xx */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x08); sd->i2c_addr = 0x21; i2c_r(gspca_dev, 0x0a, 2); val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); reg_w1(gspca_dev, 0x17, 0x42); if ((val & 0xff00) == 0x7600) { /* ov76xx */ gspca_dbg(gspca_dev, D_PROBE, "Sensor ov%04x\n", val); return; } /* check po1030 */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x08); sd->i2c_addr = 0x6e; i2c_r(gspca_dev, 0x00, 2); val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); reg_w1(gspca_dev, 0x17, 0x42); if (gspca_dev->usb_err < 0) return; if (val == 0x1030) { /* po1030 */ gspca_dbg(gspca_dev, D_PROBE, "Sensor po1030\n"); sd->sensor = SENSOR_PO1030; return; } pr_err("Unknown sensor %04x\n", val); } 
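/*
 * The *_probe() helpers above and the one below share one bracketed pattern:
 * enable the sensor clock/GPIO (reg 0x17 = 0x62, reg 0x01 = 0x08), read an
 * ID word over i2c into gspca_dev->usb_buf[3..4], then restore the bridge
 * state (reg 0x01 = 0x29, reg 0x17 = 0x42) before selecting sd->sensor from
 * the returned ID. A minimal sketch of that flow, using a hypothetical
 * helper name (not part of the driver) and the reg_w1()/i2c_r() calls
 * defined earlier in this file:
 *
 *	static u16 probe_read_id(struct gspca_dev *gspca_dev, u8 addr, u8 reg)
 *	{
 *		struct sd *sd = (struct sd *) gspca_dev;
 *
 *		reg_w1(gspca_dev, 0x17, 0x62);
 *		reg_w1(gspca_dev, 0x01, 0x08);
 *		sd->i2c_addr = addr;
 *		i2c_r(gspca_dev, reg, 2);
 *		reg_w1(gspca_dev, 0x01, 0x29);
 *		reg_w1(gspca_dev, 0x17, 0x42);
 *		return (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
 *	}
 *
 * The individual probes differ only in the i2c address, the register read,
 * and how the ID word is mapped to a SENSOR_* value.
 */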
/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */ static void po2030n_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 val; /* check gc0307 */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x08); reg_w1(gspca_dev, 0x02, 0x22); sd->i2c_addr = 0x21; i2c_r(gspca_dev, 0x00, 1); val = gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); /* reset */ reg_w1(gspca_dev, 0x17, 0x42); if (val == 0x99) { /* gc0307 (?) */ gspca_dbg(gspca_dev, D_PROBE, "Sensor gc0307\n"); sd->sensor = SENSOR_GC0307; return; } /* check po2030n */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x0a); sd->i2c_addr = 0x6e; i2c_r(gspca_dev, 0x00, 2); val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); reg_w1(gspca_dev, 0x17, 0x42); if (gspca_dev->usb_err < 0) return; if (val == 0x2030) { gspca_dbg(gspca_dev, D_PROBE, "Sensor po2030n\n"); /* sd->sensor = SENSOR_PO2030N; */ } else { pr_err("Unknown sensor ID %04x\n", val); } } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->bridge = id->driver_info >> 16; sd->sensor = id->driver_info >> 8; sd->flags = id->driver_info; cam = &gspca_dev->cam; if (sd->sensor == SENSOR_ADCM1700) { cam->cam_mode = cif_mode; cam->nmodes = ARRAY_SIZE(cif_mode); } else { cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); } cam->npkt = 24; /* 24 packets per ISOC message */ sd->ag_cnt = -1; sd->quality = QUALITY_DEF; INIT_WORK(&sd->work, qual_upd); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const u8 *sn9c1xx; u8 regGpio[] = { 0x29, 0x70 }; /* no audio */ u8 regF1; /* setup a selector by bridge */ reg_w1(gspca_dev, 0xf1, 0x01); reg_r(gspca_dev, 0x00, 1); reg_w1(gspca_dev, 0xf1, 0x00); reg_r(gspca_dev, 0x00, 1); /* get sonix chip id */ regF1 = gspca_dev->usb_buf[0]; if (gspca_dev->usb_err < 0) return gspca_dev->usb_err; gspca_dbg(gspca_dev, D_PROBE, "Sonix chip id: %02x\n", regF1); if (gspca_dev->audio) regGpio[1] |= 0x04; /* with audio */ switch (sd->bridge) { case BRIDGE_SN9C102P: case BRIDGE_SN9C105: if (regF1 != 0x11) return -ENODEV; break; default: /* case BRIDGE_SN9C110: */ /* case BRIDGE_SN9C120: */ if (regF1 != 0x12) return -ENODEV; } switch (sd->sensor) { case SENSOR_MI0360: mi0360_probe(gspca_dev); break; case SENSOR_OV7630: ov7630_probe(gspca_dev); break; case SENSOR_OV7648: ov7648_probe(gspca_dev); break; case SENSOR_PO2030N: po2030n_probe(gspca_dev); break; } switch (sd->bridge) { case BRIDGE_SN9C102P: reg_w1(gspca_dev, 0x02, regGpio[1]); break; default: reg_w(gspca_dev, 0x01, regGpio, 2); break; } /* Note we do not disable the sensor clock here (power saving mode), as that also disables the button on the cam. 
*/ reg_w1(gspca_dev, 0xf1, 0x00); /* set the i2c address */ sn9c1xx = sn_tb[sd->sensor]; sd->i2c_addr = sn9c1xx[9]; return gspca_dev->usb_err; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; /* this function is called at probe time */ static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 14); sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); #define CONTRAST_MAX 127 sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, CONTRAST_MAX, 1, 20); #define COLORS_DEF 25 sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 40, 1, COLORS_DEF); sd->red_bal = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 24, 40, 1, 32); sd->blue_bal = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 24, 40, 1, 32); #define GAMMA_DEF 20 sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAMMA, 0, 40, 1, GAMMA_DEF); if (sd->sensor == SENSOR_OM6802) sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 255, 1, 16); else sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 255, 1, 90); if (sd->flags & F_ILLUM) sd->illum = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0); if (sd->sensor == SENSOR_PO2030N) { gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 500, 1500, 1, 1024); gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 4, 49, 1, 15); sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); } if (sd->sensor != SENSOR_ADCM1700 && sd->sensor != SENSOR_OV7660 && sd->sensor != SENSOR_PO1030 && sd->sensor != SENSOR_SOI768 && sd->sensor != SENSOR_SP80708) gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (sd->sensor == SENSOR_HV7131R || sd->sensor == SENSOR_OV7630 || sd->sensor == SENSOR_OV7648 || sd->sensor == SENSOR_PO2030N) sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (sd->sensor == SENSOR_OV7630 || sd->sensor == SENSOR_OV7648 || sd->sensor == SENSOR_OV7660) sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, V4L2_CID_POWER_LINE_FREQUENCY_50HZ); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->red_bal); if (sd->sensor == SENSOR_PO2030N) { v4l2_ctrl_cluster(2, &sd->vflip); v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); } return 0; } static u32 expo_adjust(struct gspca_dev *gspca_dev, u32 expo) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->sensor) { case SENSOR_GC0307: { int a, b; /* expo = 0..255 -> a = 19..43 */ a = 19 + expo * 25 / 256; i2c_w1(gspca_dev, 0x68, a); a -= 12; b = a * a * 4; /* heuristic */ i2c_w1(gspca_dev, 0x03, b >> 8); i2c_w1(gspca_dev, 0x04, b); break; } case SENSOR_HV7131R: { u8 Expodoit[] = { 0xc1, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x16 }; Expodoit[3] = expo >> 16; Expodoit[4] = expo >> 8; Expodoit[5] = expo; i2c_w8(gspca_dev, Expodoit); break; } case SENSOR_MI0360: case SENSOR_MI0360B: { u8 expoMi[] = /* exposure 0x0635 -> 4 fp/s 0x10 */ { 0xb1, 0x5d, 0x09, 0x00, 0x00, 0x00, 0x00, 0x16 }; static const u8 doit[] = /* update sensor */ { 0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10 
}; static const u8 sensorgo[] = /* sensor on */ { 0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10 }; if (expo > 0x0635) expo = 0x0635; else if (expo < 0x0001) expo = 0x0001; expoMi[3] = expo >> 8; expoMi[4] = expo; i2c_w8(gspca_dev, expoMi); i2c_w8(gspca_dev, doit); i2c_w8(gspca_dev, sensorgo); break; } case SENSOR_MO4000: { u8 expoMof[] = { 0xa1, 0x21, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x10 }; u8 expoMo10[] = { 0xa1, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10 }; static const u8 gainMo[] = { 0xa1, 0x21, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1d }; if (expo > 0x1fff) expo = 0x1fff; else if (expo < 0x0001) expo = 0x0001; expoMof[3] = (expo & 0x03fc) >> 2; i2c_w8(gspca_dev, expoMof); expoMo10[3] = ((expo & 0x1c00) >> 10) | ((expo & 0x0003) << 4); i2c_w8(gspca_dev, expoMo10); i2c_w8(gspca_dev, gainMo); gspca_dbg(gspca_dev, D_FRAM, "set exposure %d\n", ((expoMo10[3] & 0x07) << 10) | (expoMof[3] << 2) | ((expoMo10[3] & 0x30) >> 4)); break; } case SENSOR_MT9V111: { u8 expo_c1[] = { 0xb1, 0x5c, 0x09, 0x00, 0x00, 0x00, 0x00, 0x10 }; if (expo > 0x0390) expo = 0x0390; else if (expo < 0x0060) expo = 0x0060; expo_c1[3] = expo >> 8; expo_c1[4] = expo; i2c_w8(gspca_dev, expo_c1); break; } case SENSOR_OM6802: { u8 gainOm[] = { 0xa0, 0x34, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x10 }; /* preset AGC - works when AutoExpo = off */ if (expo > 0x03ff) expo = 0x03ff; if (expo < 0x0001) expo = 0x0001; gainOm[3] = expo >> 2; i2c_w8(gspca_dev, gainOm); reg_w1(gspca_dev, 0x96, expo >> 5); gspca_dbg(gspca_dev, D_FRAM, "set exposure %d\n", gainOm[3]); break; } } return expo; } static void setbrightness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; unsigned int expo; int brightness = sd->brightness->val; u8 k2; k2 = (brightness - 0x80) >> 2; switch (sd->sensor) { case SENSOR_ADCM1700: if (k2 > 0x1f) k2 = 0; /* only positive Y offset */ break; case SENSOR_HV7131R: expo = brightness << 12; if (expo > 0x002dc6c0) expo = 0x002dc6c0; else if (expo < 0x02a0) expo = 0x02a0; sd->exposure = expo_adjust(gspca_dev, expo); break; case SENSOR_MI0360: case SENSOR_MO4000: expo = brightness << 4; sd->exposure = expo_adjust(gspca_dev, expo); break; case SENSOR_MI0360B: expo = brightness << 2; sd->exposure = expo_adjust(gspca_dev, expo); break; case SENSOR_GC0307: expo = brightness; sd->exposure = expo_adjust(gspca_dev, expo); return; /* don't set the Y offset */ case SENSOR_MT9V111: expo = brightness << 2; sd->exposure = expo_adjust(gspca_dev, expo); return; /* don't set the Y offset */ case SENSOR_OM6802: expo = brightness << 2; sd->exposure = expo_adjust(gspca_dev, expo); return; /* Y offset already set */ } reg_w1(gspca_dev, 0x96, k2); /* color matrix Y offset */ } static void setcontrast(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 k2; u8 contrast[6]; k2 = sd->contrast->val * 37 / (CONTRAST_MAX + 1) + 37; /* 37..73 */ contrast[0] = (k2 + 1) / 2; /* red */ contrast[1] = 0; contrast[2] = k2; /* green */ contrast[3] = 0; contrast[4] = k2 / 5; /* blue */ contrast[5] = 0; reg_w(gspca_dev, 0x84, contrast, sizeof contrast); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, v, colors; const s16 *uv; u8 reg8a[12]; /* U & V gains */ static const s16 uv_com[6] = { /* same as reg84 in signed decimal */ -24, -38, 64, /* UR UG UB */ 62, -51, -9 /* VR VG VB */ }; static const s16 uv_mi0360b[6] = { -20, -38, 64, /* UR UG UB */ 60, -51, -9 /* VR VG VB */ }; colors = sd->saturation->val; if (sd->sensor == SENSOR_MI0360B) uv = uv_mi0360b; else uv = uv_com; for (i 
= 0; i < 6; i++) { v = uv[i] * colors / COLORS_DEF; reg8a[i * 2] = v; reg8a[i * 2 + 1] = (v >> 8) & 0x0f; } reg_w(gspca_dev, 0x8a, reg8a, sizeof reg8a); } static void setredblue(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PO2030N) { u8 rg1b[] = /* red green1 blue (no g2) */ {0xc1, 0x6e, 0x16, 0x00, 0x40, 0x00, 0x00, 0x10}; /* 0x40 = normal value = gain x 1 */ rg1b[3] = sd->red_bal->val * 2; rg1b[5] = sd->blue_bal->val * 2; i2c_w8(gspca_dev, rg1b); return; } reg_w1(gspca_dev, 0x05, sd->red_bal->val); /* reg_w1(gspca_dev, 0x07, 32); */ reg_w1(gspca_dev, 0x06, sd->blue_bal->val); } static void setgamma(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, val; u8 gamma[17]; const u8 *gamma_base; static const u8 delta[17] = { 0x00, 0x14, 0x1c, 0x1c, 0x1c, 0x1c, 0x1b, 0x1a, 0x18, 0x13, 0x10, 0x0e, 0x08, 0x07, 0x04, 0x02, 0x00 }; switch (sd->sensor) { case SENSOR_ADCM1700: gamma_base = gamma_spec_0; break; case SENSOR_HV7131R: case SENSOR_MI0360B: case SENSOR_MT9V111: gamma_base = gamma_spec_1; break; case SENSOR_GC0307: gamma_base = gamma_spec_2; break; case SENSOR_SP80708: gamma_base = gamma_spec_3; break; default: gamma_base = gamma_def; break; } val = sd->gamma->val; for (i = 0; i < sizeof gamma; i++) gamma[i] = gamma_base[i] + delta[i] * (val - GAMMA_DEF) / 32; reg_w(gspca_dev, 0x20, gamma, sizeof gamma); } static void setexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PO2030N) { u8 rexpo[] = /* 1a: expo H, 1b: expo M */ {0xa1, 0x6e, 0x1a, 0x00, 0x40, 0x00, 0x00, 0x10}; rexpo[3] = gspca_dev->exposure->val >> 8; i2c_w8(gspca_dev, rexpo); msleep(6); rexpo[2] = 0x1b; rexpo[3] = gspca_dev->exposure->val; i2c_w8(gspca_dev, rexpo); } } static void setautogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->sensor) { case SENSOR_OV7630: case SENSOR_OV7648: { u8 comb; if (sd->sensor == SENSOR_OV7630) comb = 0xc0; else comb = 0xa0; if (gspca_dev->autogain->val) comb |= 0x03; i2c_w1(&sd->gspca_dev, 0x13, comb); return; } } if (gspca_dev->autogain->val) sd->ag_cnt = AG_CNT_START; else sd->ag_cnt = -1; } static void setgain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PO2030N) { u8 rgain[] = /* 15: gain */ {0xa1, 0x6e, 0x15, 0x00, 0x40, 0x00, 0x00, 0x15}; rgain[3] = gspca_dev->gain->val; i2c_w8(gspca_dev, rgain); } } static void sethvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 comn; switch (sd->sensor) { case SENSOR_HV7131R: comn = 0x18; /* clkdiv = 1, ablcen = 1 */ if (sd->vflip->val) comn |= 0x01; i2c_w1(gspca_dev, 0x01, comn); /* sctra */ break; case SENSOR_OV7630: comn = 0x02; if (!sd->vflip->val) comn |= 0x80; i2c_w1(gspca_dev, 0x75, comn); break; case SENSOR_OV7648: comn = 0x06; if (sd->vflip->val) comn |= 0x80; i2c_w1(gspca_dev, 0x75, comn); break; case SENSOR_PO2030N: /* Reg. 
0x1E: Timing Generator Control Register 2 (Tgcontrol2) * (reset value: 0x0A) * bit7: HM: Horizontal Mirror: 0: disable, 1: enable * bit6: VM: Vertical Mirror: 0: disable, 1: enable * bit5: ST: Shutter Selection: 0: electrical, 1: mechanical * bit4: FT: Single Frame Transfer: 0: disable, 1: enable * bit3-0: X */ comn = 0x0a; if (sd->hflip->val) comn |= 0x80; if (sd->vflip->val) comn |= 0x40; i2c_w1(&sd->gspca_dev, 0x1e, comn); break; } } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w1(gspca_dev, 0x99, sd->sharpness->val); } static void setillum(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->sensor) { case SENSOR_ADCM1700: reg_w1(gspca_dev, 0x02, /* gpio */ sd->illum->val ? 0x64 : 0x60); break; case SENSOR_MT9V111: reg_w1(gspca_dev, 0x02, sd->illum->val ? 0x77 : 0x74); /* should have been: */ /* 0x55 : 0x54); * 370i */ /* 0x66 : 0x64); * Clip */ break; } } static void setfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV7660) { u8 com8; com8 = 0xdf; /* auto gain/wb/expo */ switch (sd->freq->val) { case 0: /* Banding filter disabled */ i2c_w1(gspca_dev, 0x13, com8 | 0x20); break; case 1: /* 50 hz */ i2c_w1(gspca_dev, 0x13, com8); i2c_w1(gspca_dev, 0x3b, 0x0a); break; case 2: /* 60 hz */ i2c_w1(gspca_dev, 0x13, com8); i2c_w1(gspca_dev, 0x3b, 0x02); break; } } else { u8 reg2a = 0, reg2b = 0, reg2d = 0; /* Get reg2a / reg2d base values */ switch (sd->sensor) { case SENSOR_OV7630: reg2a = 0x08; reg2d = 0x01; break; case SENSOR_OV7648: reg2a = 0x11; reg2d = 0x81; break; } switch (sd->freq->val) { case 0: /* Banding filter disabled */ break; case 1: /* 50 hz (filter on and framerate adj) */ reg2a |= 0x80; reg2b = 0xac; reg2d |= 0x04; break; case 2: /* 60 hz (filter on, no framerate adj) */ reg2a |= 0x80; reg2d |= 0x04; break; } i2c_w1(gspca_dev, 0x2a, reg2a); i2c_w1(gspca_dev, 0x2b, reg2b); i2c_w1(gspca_dev, 0x2d, reg2d); } } static void setjpegqual(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; jpeg_set_qual(sd->jpeg_hdr, sd->quality); #if USB_BUF_SZ < 64 #error "No room enough in usb_buf for quantization table" #endif memcpy(gspca_dev->usb_buf, &sd->jpeg_hdr[JPEG_QT0_OFFSET], 64); usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x08, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0x0100, 0, gspca_dev->usb_buf, 64, 500); memcpy(gspca_dev->usb_buf, &sd->jpeg_hdr[JPEG_QT1_OFFSET], 64); usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x08, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0x0140, 0, gspca_dev->usb_buf, 64, 500); sd->reg18 ^= 0x40; reg_w1(gspca_dev, 0x18, sd->reg18); } /* JPEG quality update */ /* This function is executed from a work queue. 
*/ static void qual_upd(struct work_struct *work) { struct sd *sd = container_of(work, struct sd, work); struct gspca_dev *gspca_dev = &sd->gspca_dev; /* To protect gspca_dev->usb_buf and gspca_dev->usb_err */ mutex_lock(&gspca_dev->usb_lock); gspca_dbg(gspca_dev, D_STREAM, "qual_upd %d%%\n", sd->quality); gspca_dev->usb_err = 0; setjpegqual(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; u8 reg01, reg17; u8 reg0102[2]; const u8 *sn9c1xx; const u8 (*init)[8]; const u8 *reg9a; int mode; static const u8 reg9a_def[] = {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; static const u8 reg9a_spec[] = {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; static const u8 regd4[] = {0x60, 0x00, 0x00}; static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; static const u8 CA_adcm1700[] = { 0x14, 0xec, 0x0a, 0xf6 }; static const u8 CA_po2030n[] = { 0x1e, 0xe2, 0x14, 0xec }; static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ static const u8 CE_gc0307[] = { 0x32, 0xce, 0x2d, 0xd3 }; static const u8 CE_ov76xx[] = { 0x32, 0xdd, 0x32, 0xdd }; static const u8 CE_po2030n[] = { 0x14, 0xe7, 0x1e, 0xdd }; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ /* initialize the bridge */ sn9c1xx = sn_tb[sd->sensor]; /* sensor clock already enabled in sd_init */ /* reg_w1(gspca_dev, 0xf1, 0x00); */ reg01 = sn9c1xx[1]; if (sd->flags & F_PDN_INV) reg01 ^= S_PDN_INV; /* power down inverted */ reg_w1(gspca_dev, 0x01, reg01); /* configure gpio */ reg0102[0] = reg01; reg0102[1] = sn9c1xx[2]; if (gspca_dev->audio) reg0102[1] |= 0x04; /* keep the audio connection */ reg_w(gspca_dev, 0x01, reg0102, 2); reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); switch (sd->sensor) { case SENSOR_GC0307: case SENSOR_OV7660: case SENSOR_PO1030: case SENSOR_PO2030N: case SENSOR_SOI768: case SENSOR_SP80708: reg9a = reg9a_spec; break; default: reg9a = reg9a_def; break; } reg_w(gspca_dev, 0x9a, reg9a, 6); reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); reg17 = sn9c1xx[0x17]; switch (sd->sensor) { case SENSOR_GC0307: msleep(50); /*fixme: is it useful? */ break; case SENSOR_OM6802: msleep(10); reg_w1(gspca_dev, 0x02, 0x73); reg17 |= SEN_CLK_EN; reg_w1(gspca_dev, 0x17, reg17); reg_w1(gspca_dev, 0x01, 0x22); msleep(100); reg01 = SCL_SEL_OD | S_PDN_INV; reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x04; /* clock / 4 */ break; } reg01 |= SYS_SEL_48M; reg_w1(gspca_dev, 0x01, reg01); reg17 |= SEN_CLK_EN; reg_w1(gspca_dev, 0x17, reg17); reg01 &= ~S_PWR_DN; /* sensor power on */ reg_w1(gspca_dev, 0x01, reg01); reg01 &= ~SCL_SEL_OD; /* remove open-drain mode */ reg_w1(gspca_dev, 0x01, reg01); switch (sd->sensor) { case SENSOR_HV7131R: hv7131r_probe(gspca_dev); /*fixme: is it useful? 
*/ break; case SENSOR_OM6802: msleep(10); reg_w1(gspca_dev, 0x01, reg01); i2c_w8(gspca_dev, om6802_init0[0]); i2c_w8(gspca_dev, om6802_init0[1]); msleep(15); reg_w1(gspca_dev, 0x02, 0x71); msleep(150); break; case SENSOR_SP80708: msleep(100); reg_w1(gspca_dev, 0x02, 0x62); break; } /* initialize the sensor */ i2c_w_seq(gspca_dev, sensor_init[sd->sensor]); reg_w1(gspca_dev, 0x15, sn9c1xx[0x15]); reg_w1(gspca_dev, 0x16, sn9c1xx[0x16]); reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]); reg_w1(gspca_dev, 0x13, sn9c1xx[0x13]); reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); if (sd->sensor == SENSOR_ADCM1700) { reg_w1(gspca_dev, 0xd2, 0x3a); /* AE_H_SIZE = 116 */ reg_w1(gspca_dev, 0xd3, 0x30); /* AE_V_SIZE = 96 */ } else { reg_w1(gspca_dev, 0xd2, 0x6a); /* AE_H_SIZE = 212 */ reg_w1(gspca_dev, 0xd3, 0x50); /* AE_V_SIZE = 160 */ } reg_w1(gspca_dev, 0xc6, 0x00); reg_w1(gspca_dev, 0xc7, 0x00); if (sd->sensor == SENSOR_ADCM1700) { reg_w1(gspca_dev, 0xc8, 0x2c); /* AW_H_STOP = 352 */ reg_w1(gspca_dev, 0xc9, 0x24); /* AW_V_STOP = 288 */ } else { reg_w1(gspca_dev, 0xc8, 0x50); /* AW_H_STOP = 640 */ reg_w1(gspca_dev, 0xc9, 0x3c); /* AW_V_STOP = 480 */ } reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); switch (sd->sensor) { case SENSOR_OM6802: /* case SENSOR_OV7648: * fixme: sometimes */ break; default: reg17 |= DEF_EN; break; } reg_w1(gspca_dev, 0x17, reg17); reg_w1(gspca_dev, 0x05, 0x00); /* red */ reg_w1(gspca_dev, 0x07, 0x00); /* green */ reg_w1(gspca_dev, 0x06, 0x00); /* blue */ reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]); setgamma(gspca_dev); /*fixme: 8 times with all zeroes and 1 or 2 times with normal values */ for (i = 0; i < 8; i++) reg_w(gspca_dev, 0x84, reg84, sizeof reg84); switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_OV7660: case SENSOR_SP80708: reg_w1(gspca_dev, 0x9a, 0x05); break; case SENSOR_GC0307: case SENSOR_MT9V111: case SENSOR_MI0360B: reg_w1(gspca_dev, 0x9a, 0x07); break; case SENSOR_OV7630: case SENSOR_OV7648: reg_w1(gspca_dev, 0x9a, 0x0a); break; case SENSOR_PO2030N: case SENSOR_SOI768: reg_w1(gspca_dev, 0x9a, 0x06); break; default: reg_w1(gspca_dev, 0x9a, 0x08); break; } setsharpness(gspca_dev); reg_w(gspca_dev, 0x84, reg84, sizeof reg84); reg_w1(gspca_dev, 0x05, 0x20); /* red */ reg_w1(gspca_dev, 0x07, 0x20); /* green */ reg_w1(gspca_dev, 0x06, 0x20); /* blue */ init = NULL; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; reg01 |= SYS_SEL_48M | V_TX_EN; reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x02; /* clock / 2 */ switch (sd->sensor) { case SENSOR_ADCM1700: init = adcm1700_sensor_param1; break; case SENSOR_GC0307: init = gc0307_sensor_param1; break; case SENSOR_HV7131R: case SENSOR_MI0360: if (!mode) reg01 &= ~SYS_SEL_48M; /* 640x480: clk 24Mhz */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x01; /* clock / 1 */ break; case SENSOR_MI0360B: init = mi0360b_sensor_param1; break; case SENSOR_MO4000: if (mode) { /* if 320x240 */ reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x01; /* clock / 1 */ } break; case SENSOR_MT9V111: init = mt9v111_sensor_param1; break; case SENSOR_OM6802: init = om6802_sensor_param1; if (!mode) { /* if 640x480 */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x04; /* clock / 4 */ } else { reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x02; /* clock / 2 */ } break; case SENSOR_OV7630: init = ov7630_sensor_param1; break; case SENSOR_OV7648: init = ov7648_sensor_param1; reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x01; /* clock / 1 */ break; case SENSOR_OV7660: init = ov7660_sensor_param1; break; case SENSOR_PO1030: init = po1030_sensor_param1; break; case 
SENSOR_PO2030N: init = po2030n_sensor_param1; break; case SENSOR_SOI768: init = soi768_sensor_param1; break; case SENSOR_SP80708: init = sp80708_sensor_param1; break; } /* more sensor initialization - param1 */ if (init != NULL) { i2c_w_seq(gspca_dev, init); /* init = NULL; */ } reg_w(gspca_dev, 0xc0, C0, 6); switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_GC0307: case SENSOR_SOI768: reg_w(gspca_dev, 0xca, CA_adcm1700, 4); break; case SENSOR_PO2030N: reg_w(gspca_dev, 0xca, CA_po2030n, 4); break; default: reg_w(gspca_dev, 0xca, CA, 4); break; } switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_OV7630: case SENSOR_OV7648: case SENSOR_OV7660: case SENSOR_SOI768: reg_w(gspca_dev, 0xce, CE_ov76xx, 4); break; case SENSOR_GC0307: reg_w(gspca_dev, 0xce, CE_gc0307, 4); break; case SENSOR_PO2030N: reg_w(gspca_dev, 0xce, CE_po2030n, 4); break; default: reg_w(gspca_dev, 0xce, CE, 4); /* ?? {0x1e, 0xdd, 0x2d, 0xe7} */ break; } /* here change size mode 0 -> VGA; 1 -> CIF */ sd->reg18 = sn9c1xx[0x18] | (mode << 4) | 0x40; reg_w1(gspca_dev, 0x18, sd->reg18); setjpegqual(gspca_dev); reg_w1(gspca_dev, 0x17, reg17); reg_w1(gspca_dev, 0x01, reg01); sd->reg01 = reg01; sd->reg17 = reg17; sd->pktsz = sd->npkt = 0; sd->nchg = sd->short_mark = 0; return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const u8 stophv7131[] = { 0xa1, 0x11, 0x02, 0x09, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopmi0360[] = { 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopov7648[] = { 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopsoi768[] = { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 }; u8 reg01; u8 reg17; reg01 = sd->reg01; reg17 = sd->reg17 & ~SEN_CLK_EN; switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_GC0307: case SENSOR_PO2030N: case SENSOR_SP80708: reg01 |= LED; reg_w1(gspca_dev, 0x01, reg01); reg01 &= ~(LED | V_TX_EN); reg_w1(gspca_dev, 0x01, reg01); /* reg_w1(gspca_dev, 0x02, 0x??); * LED off ? */ break; case SENSOR_HV7131R: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); i2c_w8(gspca_dev, stophv7131); break; case SENSOR_MI0360: case SENSOR_MI0360B: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); /* reg_w1(gspca_dev, 0x02, 0x40); * LED off ? 
 */
		i2c_w8(gspca_dev, stopmi0360);
		break;
	case SENSOR_MT9V111:
	case SENSOR_OM6802:
	case SENSOR_PO1030:
		reg01 &= ~V_TX_EN;
		reg_w1(gspca_dev, 0x01, reg01);
		break;
	case SENSOR_OV7630:
	case SENSOR_OV7648:
		reg01 &= ~V_TX_EN;
		reg_w1(gspca_dev, 0x01, reg01);
		i2c_w8(gspca_dev, stopov7648);
		break;
	case SENSOR_OV7660:
		reg01 &= ~V_TX_EN;
		reg_w1(gspca_dev, 0x01, reg01);
		break;
	case SENSOR_SOI768:
		i2c_w8(gspca_dev, stopsoi768);
		break;
	}
	reg01 |= SCL_SEL_OD;
	reg_w1(gspca_dev, 0x01, reg01);
	reg01 |= S_PWR_DN;		/* sensor power down */
	reg_w1(gspca_dev, 0x01, reg01);
	reg_w1(gspca_dev, 0x17, reg17);
	reg01 &= ~SYS_SEL_48M;		/* clock 24MHz */
	reg_w1(gspca_dev, 0x01, reg01);
	reg01 |= LED;
	reg_w1(gspca_dev, 0x01, reg01);
	/* Don't disable sensor clock as that disables the button on the cam */
	/* reg_w1(gspca_dev, 0xf1, 0x01); */
}

/* called on streamoff with alt==0 and on disconnect */
/* the usb_lock is held at entry - restore on exit */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	mutex_unlock(&gspca_dev->usb_lock);
	flush_work(&sd->work);
	mutex_lock(&gspca_dev->usb_lock);
}

static void do_autogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int delta;
	int expotimes;
	u8 luma_mean = 130;
	u8 luma_delta = 20;

	/* Thanks S., without your advice, autobright would not work :) */
	if (sd->ag_cnt < 0)
		return;
	if (--sd->ag_cnt >= 0)
		return;
	sd->ag_cnt = AG_CNT_START;

	delta = atomic_read(&sd->avg_lum);
	gspca_dbg(gspca_dev, D_FRAM, "mean lum %d\n", delta);

	if (sd->sensor == SENSOR_PO2030N) {
		gspca_expo_autogain(gspca_dev, delta, luma_mean, luma_delta,
					15, 1024);
		return;
	}

	if (delta < luma_mean - luma_delta ||
	    delta > luma_mean + luma_delta) {
		switch (sd->sensor) {
		case SENSOR_GC0307:
			expotimes = sd->exposure;
			expotimes += (luma_mean - delta) >> 6;
			if (expotimes < 0)
				expotimes = 0;
			sd->exposure = expo_adjust(gspca_dev,
						   (unsigned int) expotimes);
			break;
		case SENSOR_HV7131R:
			expotimes = sd->exposure >> 8;
			expotimes += (luma_mean - delta) >> 4;
			if (expotimes < 0)
				expotimes = 0;
			sd->exposure = expo_adjust(gspca_dev,
					(unsigned int) (expotimes << 8));
			break;
		case SENSOR_OM6802:
		case SENSOR_MT9V111:
			expotimes = sd->exposure;
			expotimes += (luma_mean - delta) >> 2;
			if (expotimes < 0)
				expotimes = 0;
			sd->exposure = expo_adjust(gspca_dev,
						   (unsigned int) expotimes);
			setredblue(gspca_dev);
			break;
		default:
/* case SENSOR_MO4000: */
/* case SENSOR_MI0360: */
/* case SENSOR_MI0360B: */
			expotimes = sd->exposure;
			expotimes += (luma_mean - delta) >> 6;
			if (expotimes < 0)
				expotimes = 0;
			sd->exposure = expo_adjust(gspca_dev,
						   (unsigned int) expotimes);
			setredblue(gspca_dev);
			break;
		}
	}
}

/* set the average luminosity from an isoc marker */
static void set_lum(struct sd *sd,
		    u8 *data)
{
	int avg_lum;
	/*	w0 w1 w2
	 *	w3 w4 w5
	 *	w6 w7 w8
	 */
	avg_lum = (data[27] << 8) + data[28]		/* w3 */
		+ (data[31] << 8) + data[32]		/* w5 */
		+ (data[23] << 8) + data[24]		/* w1 */
		+ (data[35] << 8) + data[36]		/* w7 */
		+ (data[29] << 10) + (data[30] << 2);	/* w4 * 4 */
	avg_lum >>= 10;
	atomic_set(&sd->avg_lum, avg_lum);
}

/* scan the URB packets */
/* This function is run at interrupt level. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i, new_qual;

	/*
	 * A frame ends on the marker
	 *		ff ff 00 c4 c4 96 ..
	 * which is 62 bytes long and is followed by various information
	 * including statuses and luminosity.
	 *
	 * A marker may be split on two packets.
* * The 6th byte of a marker contains the bits: * 0x08: USB full * 0xc0: frame sequence * When the bit 'USB full' is set, the frame must be discarded; * this is also the case when the 2 bytes before the marker are * not the JPEG end of frame ('ff d9'). */ /* count the packets and their size */ sd->npkt++; sd->pktsz += len; /*fixme: assumption about the following code: * - there can be only one marker in a packet */ /* skip the remaining bytes of a short marker */ i = sd->short_mark; if (i != 0) { sd->short_mark = 0; if (i < 0 /* if 'ff' at end of previous packet */ && data[0] == 0xff && data[1] == 0x00) goto marker_found; if (data[0] == 0xff && data[1] == 0xff) { i = 0; goto marker_found; } len -= i; if (len <= 0) return; data += i; } /* search backwards if there is a marker in the packet */ for (i = len - 1; --i >= 0; ) { if (data[i] != 0xff) { i--; continue; } if (data[i + 1] == 0xff) { /* (there may be 'ff ff' inside a marker) */ if (i + 2 >= len || data[i + 2] == 0x00) goto marker_found; } } /* no marker found */ /* add the JPEG header if first fragment */ if (data[len - 1] == 0xff) sd->short_mark = -1; if (gspca_dev->last_packet_type == LAST_PACKET) gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); return; /* marker found */ /* if some error, discard the frame and decrease the quality */ marker_found: new_qual = 0; if (i > 2) { if (data[i - 2] != 0xff || data[i - 1] != 0xd9) { gspca_dev->last_packet_type = DISCARD_PACKET; new_qual = -3; } } else if (i + 6 < len) { if (data[i + 6] & 0x08) { gspca_dev->last_packet_type = DISCARD_PACKET; new_qual = -5; } } gspca_frame_add(gspca_dev, LAST_PACKET, data, i); /* compute the filling rate and a new JPEG quality */ if (new_qual == 0) { int r; r = (sd->pktsz * 100) / (sd->npkt * gspca_dev->urb[0]->iso_frame_desc[0].length); if (r >= 85) new_qual = -3; else if (r < 75) new_qual = 2; } if (new_qual != 0) { sd->nchg += new_qual; if (sd->nchg < -6 || sd->nchg >= 12) { sd->nchg = 0; new_qual += sd->quality; if (new_qual < QUALITY_MIN) new_qual = QUALITY_MIN; else if (new_qual > QUALITY_MAX) new_qual = QUALITY_MAX; if (new_qual != sd->quality) { sd->quality = new_qual; schedule_work(&sd->work); } } } else { sd->nchg = 0; } sd->pktsz = sd->npkt = 0; /* if the marker is smaller than 62 bytes, * memorize the number of bytes to skip in the next packet */ if (i + 62 > len) { /* no more usable data */ sd->short_mark = i + 62 - len; return; } if (sd->ag_cnt >= 0) set_lum(sd, data + i); /* if more data, start a new frame */ i += 62; if (i < len) { data += i; len -= i; gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev); break; case V4L2_CID_SATURATION: setcolors(gspca_dev); break; case V4L2_CID_RED_BALANCE: setredblue(gspca_dev); break; case V4L2_CID_GAMMA: setgamma(gspca_dev); break; case V4L2_CID_AUTOGAIN: setautogain(gspca_dev); setexposure(gspca_dev); setgain(gspca_dev); break; case V4L2_CID_VFLIP: sethvflip(gspca_dev); break; case V4L2_CID_SHARPNESS: setsharpness(gspca_dev); break; case V4L2_CID_ILLUMINATORS_1: setillum(gspca_dev); break; case V4L2_CID_POWER_LINE_FREQUENCY: 
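		/* the mains frequency selects the sensor banding filter;
		 * setfreq() programs it on the OV76xx sensors */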
setfreq(gspca_dev); break; default: return -EINVAL; } return gspca_dev->usb_err; } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; if (len == 1 && data[0] == 1) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } return ret; } #endif /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ #define BS(bridge, sensor) \ .driver_info = (BRIDGE_ ## bridge << 16) \ | (SENSOR_ ## sensor << 8) #define BSF(bridge, sensor, flags) \ .driver_info = (BRIDGE_ ## bridge << 16) \ | (SENSOR_ ## sensor << 8) \ | (flags) static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)}, {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)}, {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)}, {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)}, {USB_DEVICE(0x06f8, 0x3004), BS(SN9C105, OV7660)}, {USB_DEVICE(0x06f8, 0x3008), BS(SN9C105, OV7660)}, /* {USB_DEVICE(0x0c45, 0x603a), BS(SN9C102P, OV7648)}, */ {USB_DEVICE(0x0c45, 0x6040), BS(SN9C102P, HV7131R)}, /* {USB_DEVICE(0x0c45, 0x607a), BS(SN9C102P, OV7648)}, */ /* {USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */ {USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)}, /* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */ {USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)}, /* or MT9V111 */ /* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */ /* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */ /* {USB_DEVICE(0x0c45, 0x60cc), BS(SN9C105, HV7131GP)}, */ {USB_DEVICE(0x0c45, 0x60ce), BS(SN9C105, SP80708)}, {USB_DEVICE(0x0c45, 0x60ec), BS(SN9C105, MO4000)}, /* {USB_DEVICE(0x0c45, 0x60ef), BS(SN9C105, ICM105C)}, */ /* {USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */ /* {USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */ {USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)}, {USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)}, {USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)}, {USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/ /* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */ {USB_DEVICE(0x0c45, 0x610a), BS(SN9C120, OV7648)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x610b), BS(SN9C120, OV7660)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x610c), BS(SN9C120, HV7131R)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x610e), BS(SN9C120, OV7630)}, /*sn9c128*/ /* {USB_DEVICE(0x0c45, 0x610f), BS(SN9C120, S5K53BEB)}, */ /* {USB_DEVICE(0x0c45, 0x6122), BS(SN9C110, ICM105C)}, */ /* {USB_DEVICE(0x0c45, 0x6123), BS(SN9C110, SanyoCCD)}, */ {USB_DEVICE(0x0c45, 0x6128), BS(SN9C120, OM6802)}, /*sn9c325?*/ /*bw600.inf:*/ {USB_DEVICE(0x0c45, 0x612a), BS(SN9C120, OV7648)}, /*sn9c325?*/ {USB_DEVICE(0x0c45, 0x612b), BS(SN9C110, ADCM1700)}, {USB_DEVICE(0x0c45, 0x612c), BS(SN9C110, MO4000)}, {USB_DEVICE(0x0c45, 
0x612e), BS(SN9C110, OV7630)}, /* {USB_DEVICE(0x0c45, 0x612f), BS(SN9C110, ICM105C)}, */ {USB_DEVICE(0x0c45, 0x6130), BS(SN9C120, MI0360)}, /* or MT9V111 / MI0360B */ /* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */ {USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)}, {USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)}, {USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)}, {USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)}, {USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)}, {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/ /* or GC0305 / GC0307 */ {USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/ {USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/ {USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)}, /* {USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */ /*sn9c120b*/ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
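The set_lum() helper above folds five of the sensor's 3x3 statistics windows into a single brightness figure: the four edge windows (w1, w3, w5, w7) count once each, the centre window (w4) counts four times, and the total is scaled down by 1024. The following standalone C sketch (illustration only, not driver code; the window values are hypothetical) reproduces that weighting so the arithmetic is easy to check.
/* Standalone sketch of the weighted window average used by set_lum() above.
 * The window values are hypothetical 16-bit sums, for illustration only.
 */
#include <stdio.h>

static unsigned int avg_lum_sketch(const unsigned int w[9])
{
	unsigned int sum;

	sum  = w[1] + w[3] + w[5] + w[7];	/* edge windows, weight 1 */
	sum += w[4] * 4;			/* centre window, weight 4 */
	return sum >> 10;			/* same scaling as set_lum() */
}

int main(void)
{
	const unsigned int w[9] = {
		0, 30000, 0,
		31000, 33000, 32000,
		0, 34000, 0,
	};

	printf("mean luminance: %u\n", avg_lum_sketch(w));
	return 0;
}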
// SPDX-License-Identifier: GPL-2.0+ /* * ext4_jbd2.h * * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * * Copyright 1998--1999 Red Hat corp --- All Rights Reserved * * Ext4-specific journaling extensions. */ #ifndef _EXT4_JBD2_H #define _EXT4_JBD2_H #include <linux/fs.h> #include <linux/jbd2.h> #include "ext4.h" #define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal) /* Define the number of blocks we need to account to a transaction to * modify one block of data. * * We may have to touch one inode, one bitmap buffer, up to three * indirection blocks, the group and superblock summaries, and the data * block to complete the transaction. * * For extents-enabled fs we may have to allocate and modify up to * 5 levels of tree, data block (for each of these we need bitmap + group * summaries), root which is stored in the inode, sb */ #define EXT4_SINGLEDATA_TRANS_BLOCKS(sb) \ (ext4_has_feature_extents(sb) ? 20U : 8U) /* Extended attribute operations touch at most two data buffers, * two bitmap buffers, and two group summaries, in addition to the inode * and the superblock, which are already accounted for. */ #define EXT4_XATTR_TRANS_BLOCKS 6U /* Define the minimum size for a transaction which modifies data. This * needs to take into account the fact that we may end up modifying two * quota files too (one for the group, one for the user quota). The * superblock only gets updated once, of course, so don't bother * counting that again for the quota updates.
*/ #define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \ EXT4_XATTR_TRANS_BLOCKS - 2 + \ EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) /* * Define the number of metadata blocks we need to account to modify data. * * This include super block, inode block, quota blocks and xattr blocks */ #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \ EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) /* Define an arbitrary limit for the amount of data we will anticipate * writing to any given transaction. For unbounded transactions such as * write(2) and truncate(2) we can write more than this, but we always * start off at the maximum transaction size and grow the transaction * optimistically as we go. */ #define EXT4_MAX_TRANS_DATA 64U /* We break up a large truncate or write transaction once the handle's * buffer credits gets this low, we need either to extend the * transaction or to start a new one. Reserve enough space here for * inode, bitmap, superblock, group and indirection updates for at least * one block, plus two quota updates. Quota allocations are not * needed. */ #define EXT4_RESERVE_TRANS_BLOCKS 12U /* * Number of credits needed if we need to insert an entry into a * directory. For each new index block, we need 4 blocks (old index * block, new index block, bitmap block, bg summary). For normal * htree directories there are 2 levels; if the largedir feature * enabled it's 3 levels. */ #define EXT4_INDEX_EXTRA_TRANS_BLOCKS 12U #ifdef CONFIG_QUOTA /* Amount of blocks needed for quota update - we know that the structure was * allocated so we need to update only data block */ #define EXT4_QUOTA_TRANS_BLOCKS(sb) ((ext4_quota_capable(sb)) ? 1 : 0) /* Amount of blocks needed for quota insert/delete - we do some block writes * but inode, sb and group updates are done only once */ #define EXT4_QUOTA_INIT_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\ (DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\ +3+DQUOT_INIT_REWRITE) : 0) #define EXT4_QUOTA_DEL_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\ (DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\ +3+DQUOT_DEL_REWRITE) : 0) #else #define EXT4_QUOTA_TRANS_BLOCKS(sb) 0 #define EXT4_QUOTA_INIT_BLOCKS(sb) 0 #define EXT4_QUOTA_DEL_BLOCKS(sb) 0 #endif #define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) /* * Ext4 handle operation types -- for logging purposes */ #define EXT4_HT_MISC 0 #define EXT4_HT_INODE 1 #define EXT4_HT_WRITE_PAGE 2 #define EXT4_HT_MAP_BLOCKS 3 #define EXT4_HT_DIR 4 #define EXT4_HT_TRUNCATE 5 #define EXT4_HT_QUOTA 6 #define EXT4_HT_RESIZE 7 #define EXT4_HT_MIGRATE 8 #define EXT4_HT_MOVE_EXTENTS 9 #define EXT4_HT_XATTR 10 #define EXT4_HT_EXT_CONVERT 11 #define EXT4_HT_MAX 12 /** * struct ext4_journal_cb_entry - Base structure for callback information. * * This struct is a 'seed' structure for a using with your own callback * structs. If you are using callbacks you must allocate one of these * or another struct of your own definition which has this struct * as it's first element and pass it to ext4_journal_callback_add(). 
*/ struct ext4_journal_cb_entry { /* list information for other callbacks attached to the same handle */ struct list_head jce_list; /* Function to call with this callback structure */ void (*jce_func)(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error); /* user data goes here */ }; /** * ext4_journal_callback_add: add a function to call after transaction commit * @handle: active journal transaction handle to register callback on * @func: callback function to call after the transaction has committed: * @sb: superblock of current filesystem for transaction * @jce: returned journal callback data * @rc: journal state at commit (0 = transaction committed properly) * @jce: journal callback data (internal and function private data struct) * * The registered function will be called in the context of the journal thread * after the transaction for which the handle was created has completed. * * No locks are held when the callback function is called, so it is safe to * call blocking functions from within the callback, but the callback should * not block or run for too long, or the filesystem will be blocked waiting for * the next transaction to commit. No journaling functions can be used, or * there is a risk of deadlock. * * There is no guaranteed calling order of multiple registered callbacks on * the same transaction. */ static inline void _ext4_journal_callback_add(handle_t *handle, struct ext4_journal_cb_entry *jce) { /* Add the jce to transaction's private list */ list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list); } static inline void ext4_journal_callback_add(handle_t *handle, void (*func)(struct super_block *sb, struct ext4_journal_cb_entry *jce, int rc), struct ext4_journal_cb_entry *jce) { struct ext4_sb_info *sbi = EXT4_SB(handle->h_transaction->t_journal->j_private); /* Add the jce to transaction's private list */ jce->jce_func = func; spin_lock(&sbi->s_md_lock); _ext4_journal_callback_add(handle, jce); spin_unlock(&sbi->s_md_lock); } /** * ext4_journal_callback_del: delete a registered callback * @handle: active journal transaction handle on which callback was registered * @jce: registered journal callback entry to unregister * Return true if object was successfully removed */ static inline bool ext4_journal_callback_try_del(handle_t *handle, struct ext4_journal_cb_entry *jce) { bool deleted; struct ext4_sb_info *sbi = EXT4_SB(handle->h_transaction->t_journal->j_private); spin_lock(&sbi->s_md_lock); deleted = !list_empty(&jce->jce_list); list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); return deleted; } int ext4_mark_iloc_dirty(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); /* * On success, We end up with an outstanding reference count against * iloc->bh. This _must_ be cleaned up later. */ int ext4_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); #define ext4_mark_inode_dirty(__h, __i) \ __ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__) int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode, const char *func, unsigned int line); int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc *iloc); /* * Wrapper functions with which ext4 calls into JBD. 
*/ int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr); int __ext4_journal_get_create_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_handle_dirty_metadata(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct buffer_head *bh); #define ext4_journal_get_write_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_write_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ __ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \ (bh), (block_nr)) #define ext4_journal_get_create_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_create_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_handle_dirty_metadata(handle, inode, bh) \ __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ (bh)) handle_t *__ext4_journal_start_sb(struct inode *inode, struct super_block *sb, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) /* Note: Do not use this for NULL handles. This is only to determine if * a properly allocated handle is using a journal or not. */ static inline int ext4_handle_valid(handle_t *handle) { if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT) return 0; return 1; } static inline void ext4_handle_sync(handle_t *handle) { if (ext4_handle_valid(handle)) handle->h_sync = 1; } static inline int ext4_handle_is_aborted(handle_t *handle) { if (ext4_handle_valid(handle)) return is_handle_aborted(handle); return 0; } static inline int ext4_free_metadata_revoke_credits(struct super_block *sb, int blocks) { /* Freeing each metadata block can result in freeing one cluster */ return blocks * EXT4_SB(sb)->s_cluster_ratio; } static inline int ext4_trans_default_revoke_credits(struct super_block *sb) { return ext4_free_metadata_revoke_credits(sb, 8); } #define ext4_journal_start_sb(sb, type, nblocks) \ __ext4_journal_start_sb(NULL, (sb), __LINE__, (type), (nblocks), 0,\ ext4_trans_default_revoke_credits(sb)) #define ext4_journal_start(inode, type, nblocks) \ __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \ (revoke_creds)) static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds) { return __ext4_journal_start_sb(inode, inode->i_sb, line, type, blocks, rsv_blocks, revoke_creds); } #define ext4_journal_stop(handle) \ __ext4_journal_stop(__func__, __LINE__, (handle)) #define ext4_journal_start_reserved(handle, type) \ __ext4_journal_start_reserved((handle), __LINE__, (type)) handle_t 
*__ext4_journal_start_reserved(handle_t *handle, unsigned int line, int type); static inline handle_t *ext4_journal_current_handle(void) { return journal_current_handle(); } static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke) { if (ext4_handle_valid(handle)) return jbd2_journal_extend(handle, nblocks, revoke); return 0; } static inline int ext4_journal_restart(handle_t *handle, int nblocks, int revoke) { if (ext4_handle_valid(handle)) return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS); return 0; } int __ext4_journal_ensure_credits(handle_t *handle, int check_cred, int extend_cred, int revoke_cred); /* * Ensure @handle has at least @check_creds credits available. If not, * transaction will be extended or restarted to contain at least @extend_cred * credits. Before restarting transaction @fn is executed to allow for cleanup * before the transaction is restarted. * * The return value is < 0 in case of error, 0 in case the handle has enough * credits or transaction extension succeeded, 1 in case transaction had to be * restarted. */ #define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \ revoke_cred, fn) \ ({ \ __label__ __ensure_end; \ int err = __ext4_journal_ensure_credits((handle), (check_cred), \ (extend_cred), (revoke_cred)); \ \ if (err <= 0) \ goto __ensure_end; \ err = (fn); \ if (err < 0) \ goto __ensure_end; \ err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \ if (err == 0) \ err = 1; \ __ensure_end: \ err; \ }) /* * Ensure given handle has at least requested amount of credits available, * possibly restarting transaction if needed. We also make sure the transaction * has space for at least ext4_trans_default_revoke_credits(sb) revoke records * as freeing one or two blocks is very common pattern and requesting this is * very cheap. 
*/ static inline int ext4_journal_ensure_credits(handle_t *handle, int credits, int revoke_creds) { return ext4_journal_ensure_credits_fn(handle, credits, credits, revoke_creds, 0); } static inline int ext4_journal_blocks_per_page(struct inode *inode) { if (EXT4_JOURNAL(inode) != NULL) return jbd2_journal_blocks_per_page(inode); return 0; } static inline int ext4_journal_force_commit(journal_t *journal) { if (journal) return jbd2_journal_force_commit(journal); return 0; } static inline int ext4_jbd2_inode_add_write(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_write(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline int ext4_jbd2_inode_add_wait(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_wait(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline void ext4_update_inode_fsync_trans(handle_t *handle, struct inode *inode, int datasync) { struct ext4_inode_info *ei = EXT4_I(inode); if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; if (datasync) ei->i_datasync_tid = handle->h_transaction->t_tid; } } /* super.c */ int ext4_force_commit(struct super_block *sb); /* * Ext4 inode journal modes */ #define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */ #define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */ #define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */ int ext4_inode_journal_mode(struct inode *inode); static inline int ext4_should_journal_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE; } static inline int ext4_should_order_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE; } static inline int ext4_should_writeback_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE; } static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks) { if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) return 0; if (!ext4_should_journal_data(inode)) return 0; /* * Data blocks in one extent are contiguous, just account for partial * clusters at extent boundaries */ return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1); } /* * This function controls whether or not we should try to go down the * dioread_nolock code paths, which makes it safe to avoid taking * i_rwsem for direct I/O reads. This only works for extent-based * files, and it doesn't work if data journaling is enabled, since the * dioread_nolock code uses b_private to pass information back to the * I/O completion handler, and this conflicts with the jbd's use of * b_private. */ static inline int ext4_should_dioread_nolock(struct inode *inode) { if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) return 0; if (!S_ISREG(inode->i_mode)) return 0; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return 0; if (ext4_should_journal_data(inode)) return 0; /* temporary fix to prevent generic/422 test failures */ if (!test_opt(inode->i_sb, DELALLOC)) return 0; return 1; } #endif /* _EXT4_JBD2_H */
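The credit macros and the ensure-credits helper above are easiest to see from a caller's side. Below is a hedged usage sketch, not code taken from ext4 itself: example_modify_inode() and example_cleanup() are hypothetical names invented for illustration, and the sketch assumes only the wrappers declared in this header (ext4_journal_start(), ext4_journal_ensure_credits_fn(), ext4_journal_stop()). The ensure-credits macro's return convention is the one documented above: < 0 on error, 0 when enough credits remain, 1 when the handle had to be restarted.
/* Hedged usage sketch of the journalling wrappers in this header.
 * example_cleanup() is a hypothetical callback run before a restart.
 */
#include <linux/err.h>
#include "ext4_jbd2.h"

static int example_cleanup(struct inode *inode)
{
	/* hypothetical: drop state that must not span a handle restart */
	return 0;
}

static int example_modify_inode(struct inode *inode)
{
	handle_t *handle;
	int err, credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* extend or restart the handle if fewer than 'credits' remain */
	err = ext4_journal_ensure_credits_fn(handle, credits, credits,
			ext4_trans_default_revoke_credits(inode->i_sb),
			example_cleanup(inode));
	if (err < 0)
		goto out;

	/* ... dirty metadata under the (possibly restarted) handle ... */
	err = 0;
out:
	ext4_journal_stop(handle);
	return err;
}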
// SPDX-License-Identifier: GPL-2.0-or-later /* * Clock domain and sample rate management functions */ #include <linux/bitops.h> #include <linux/init.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <linux/usb/audio-v3.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include "usbaudio.h" #include "card.h" #include "helper.h" #include "clock.h" #include "quirks.h" union uac23_clock_source_desc { struct uac_clock_source_descriptor v2; struct uac3_clock_source_descriptor v3; }; union uac23_clock_selector_desc { struct uac_clock_selector_descriptor v2; struct uac3_clock_selector_descriptor v3; }; union uac23_clock_multiplier_desc { struct uac_clock_multiplier_descriptor v2; struct uac3_clock_multiplier_descriptor v3; }; /* check whether the descriptor bLength has the minimal
length */ #define DESC_LENGTH_CHECK(p, proto) \ ((proto) == UAC_VERSION_3 ? \ ((p)->v3.bLength >= sizeof((p)->v3)) : \ ((p)->v2.bLength >= sizeof((p)->v2))) #define GET_VAL(p, proto, field) \ ((proto) == UAC_VERSION_3 ? (p)->v3.field : (p)->v2.field) static void *find_uac_clock_desc(struct usb_host_interface *iface, int id, bool (*validator)(void *, int, int), u8 type, int proto) { void *cs = NULL; while ((cs = snd_usb_find_csint_desc(iface->extra, iface->extralen, cs, type))) { if (validator(cs, id, proto)) return cs; } return NULL; } static bool validate_clock_source(void *p, int id, int proto) { union uac23_clock_source_desc *cs = p; if (!DESC_LENGTH_CHECK(cs, proto)) return false; return GET_VAL(cs, proto, bClockID) == id; } static bool validate_clock_selector(void *p, int id, int proto) { union uac23_clock_selector_desc *cs = p; if (!DESC_LENGTH_CHECK(cs, proto)) return false; if (GET_VAL(cs, proto, bClockID) != id) return false; /* additional length check for baCSourceID array (in bNrInPins size) * and two more fields (which sizes depend on the protocol) */ if (proto == UAC_VERSION_3) return cs->v3.bLength >= sizeof(cs->v3) + cs->v3.bNrInPins + 4 /* bmControls */ + 2 /* wCSelectorDescrStr */; else return cs->v2.bLength >= sizeof(cs->v2) + cs->v2.bNrInPins + 1 /* bmControls */ + 1 /* iClockSelector */; } static bool validate_clock_multiplier(void *p, int id, int proto) { union uac23_clock_multiplier_desc *cs = p; if (!DESC_LENGTH_CHECK(cs, proto)) return false; return GET_VAL(cs, proto, bClockID) == id; } #define DEFINE_FIND_HELPER(name, obj, validator, type2, type3) \ static obj *name(struct snd_usb_audio *chip, int id, \ const struct audioformat *fmt) \ { \ struct usb_host_interface *ctrl_intf = \ snd_usb_find_ctrl_interface(chip, fmt->iface); \ return find_uac_clock_desc(ctrl_intf, id, validator, \ fmt->protocol == UAC_VERSION_3 ? 
(type3) : (type2), \ fmt->protocol); \ } DEFINE_FIND_HELPER(snd_usb_find_clock_source, union uac23_clock_source_desc, validate_clock_source, UAC2_CLOCK_SOURCE, UAC3_CLOCK_SOURCE); DEFINE_FIND_HELPER(snd_usb_find_clock_selector, union uac23_clock_selector_desc, validate_clock_selector, UAC2_CLOCK_SELECTOR, UAC3_CLOCK_SELECTOR); DEFINE_FIND_HELPER(snd_usb_find_clock_multiplier, union uac23_clock_multiplier_desc, validate_clock_multiplier, UAC2_CLOCK_MULTIPLIER, UAC3_CLOCK_MULTIPLIER); static int uac_clock_selector_get_val(struct snd_usb_audio *chip, int selector_id, int iface_no) { struct usb_host_interface *ctrl_intf; unsigned char buf; int ret; ctrl_intf = snd_usb_find_ctrl_interface(chip, iface_no); ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), UAC2_CS_CUR, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, UAC2_CX_CLOCK_SELECTOR << 8, snd_usb_ctrl_intf(ctrl_intf) | (selector_id << 8), &buf, sizeof(buf)); if (ret < 0) return ret; return buf; } static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_id, unsigned char pin, int iface_no) { struct usb_host_interface *ctrl_intf; int ret; ctrl_intf = snd_usb_find_ctrl_interface(chip, iface_no); ret = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), UAC2_CS_CUR, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, UAC2_CX_CLOCK_SELECTOR << 8, snd_usb_ctrl_intf(ctrl_intf) | (selector_id << 8), &pin, sizeof(pin)); if (ret < 0) return ret; if (ret != sizeof(pin)) { usb_audio_err(chip, "setting selector (id %d) unexpected length %d\n", selector_id, ret); return -EINVAL; } ret = uac_clock_selector_get_val(chip, selector_id, iface_no); if (ret < 0) return ret; if (ret != pin) { usb_audio_err(chip, "setting selector (id %d) to %x failed (current: %d)\n", selector_id, pin, ret); return -EINVAL; } return ret; } static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, const struct audioformat *fmt, int source_id) { bool ret = false; int count; unsigned char data; struct usb_device *dev = chip->dev; union uac23_clock_source_desc *cs_desc; struct usb_host_interface *ctrl_intf; ctrl_intf = snd_usb_find_ctrl_interface(chip, fmt->iface); cs_desc = snd_usb_find_clock_source(chip, source_id, fmt); if (!cs_desc) return false; if (fmt->protocol == UAC_VERSION_2) { /* * Assume the clock is valid if clock source supports only one * single sample rate, the terminal is connected directly to it * (there is no clock selector) and clock type is internal. * This is to deal with some Denon DJ controllers that always * reports that clock is invalid. */ if (fmt->nr_rates == 1 && (fmt->clock & 0xff) == cs_desc->v2.bClockID && (cs_desc->v2.bmAttributes & 0x3) != UAC_CLOCK_SOURCE_TYPE_EXT) return true; } /* * MOTU MicroBook IIc * Sample rate changes takes more than 2 seconds for this device. Clock * validity request returns false during that period. 
*/ if (chip->usb_id == USB_ID(0x07fd, 0x0004)) { count = 0; while ((!ret) && (count < 50)) { int err; msleep(100); err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_CS_CONTROL_CLOCK_VALID << 8, snd_usb_ctrl_intf(ctrl_intf) | (source_id << 8), &data, sizeof(data)); if (err < 0) { dev_warn(&dev->dev, "%s(): cannot get clock validity for id %d\n", __func__, source_id); return false; } ret = !!data; count++; } } return ret; } static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, const struct audioformat *fmt, int source_id) { int err; unsigned char data; struct usb_device *dev = chip->dev; u32 bmControls; union uac23_clock_source_desc *cs_desc; struct usb_host_interface *ctrl_intf; ctrl_intf = snd_usb_find_ctrl_interface(chip, fmt->iface); cs_desc = snd_usb_find_clock_source(chip, source_id, fmt); if (!cs_desc) return false; if (fmt->protocol == UAC_VERSION_3) bmControls = le32_to_cpu(cs_desc->v3.bmControls); else bmControls = cs_desc->v2.bmControls; /* If a clock source can't tell us whether it's valid, we assume it is */ if (!uac_v2v3_control_is_readable(bmControls, UAC2_CS_CONTROL_CLOCK_VALID)) return true; err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_CS_CONTROL_CLOCK_VALID << 8, snd_usb_ctrl_intf(ctrl_intf) | (source_id << 8), &data, sizeof(data)); if (err < 0) { dev_warn(&dev->dev, "%s(): cannot get clock validity for id %d\n", __func__, source_id); return false; } if (data) return true; else return uac_clock_source_is_valid_quirk(chip, fmt, source_id); } static int __uac_clock_find_source(struct snd_usb_audio *chip, const struct audioformat *fmt, int entity_id, unsigned long *visited, bool validate) { union uac23_clock_source_desc *source; union uac23_clock_selector_desc *selector; union uac23_clock_multiplier_desc *multiplier; int ret, i, cur, err, pins, clock_id; const u8 *sources; int proto = fmt->protocol; bool readable, writeable; u32 bmControls; entity_id &= 0xff; if (test_and_set_bit(entity_id, visited)) { usb_audio_warn(chip, "%s(): recursive clock topology detected, id %d.\n", __func__, entity_id); return -EINVAL; } /* first, see if the ID we're looking at is a clock source already */ source = snd_usb_find_clock_source(chip, entity_id, fmt); if (source) { entity_id = GET_VAL(source, proto, bClockID); if (validate && !uac_clock_source_is_valid(chip, fmt, entity_id)) { usb_audio_err(chip, "clock source %d is not valid, cannot use\n", entity_id); return -ENXIO; } return entity_id; } selector = snd_usb_find_clock_selector(chip, entity_id, fmt); if (selector) { pins = GET_VAL(selector, proto, bNrInPins); clock_id = GET_VAL(selector, proto, bClockID); sources = GET_VAL(selector, proto, baCSourceID); cur = 0; if (proto == UAC_VERSION_3) bmControls = le32_to_cpu(*(__le32 *)(&selector->v3.baCSourceID[0] + pins)); else bmControls = *(__u8 *)(&selector->v2.baCSourceID[0] + pins); readable = uac_v2v3_control_is_readable(bmControls, UAC2_CX_CLOCK_SELECTOR); writeable = uac_v2v3_control_is_writeable(bmControls, UAC2_CX_CLOCK_SELECTOR); if (pins == 1) { ret = 1; goto find_source; } /* for now just warn about buggy device */ if (!readable) usb_audio_warn(chip, "%s(): clock selector control is not readable, id %d\n", __func__, clock_id); /* the entity ID we are looking at is a selector. 
* find out what it currently selects */ ret = uac_clock_selector_get_val(chip, clock_id, fmt->iface); if (ret < 0) { if (!chip->autoclock) return ret; goto find_others; } /* Selector values are one-based */ if (ret > pins || ret < 1) { usb_audio_err(chip, "%s(): selector reported illegal value, id %d, ret %d\n", __func__, clock_id, ret); if (!chip->autoclock) return -EINVAL; goto find_others; } find_source: cur = ret; ret = __uac_clock_find_source(chip, fmt, sources[ret - 1], visited, validate); if (ret > 0) { /* Skip setting clock selector again for some devices */ if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR || !writeable) return ret; err = uac_clock_selector_set_val(chip, entity_id, cur, fmt->iface); if (err < 0) { if (pins == 1) { usb_audio_dbg(chip, "%s(): selector returned an error, " "assuming a firmware bug, id %d, ret %d\n", __func__, clock_id, err); return ret; } return err; } } if (!validate || ret > 0 || !chip->autoclock) return ret; find_others: if (!writeable) return -ENXIO; /* The current clock source is invalid, try others. */ for (i = 1; i <= pins; i++) { if (i == cur) continue; ret = __uac_clock_find_source(chip, fmt, sources[i - 1], visited, true); if (ret < 0) continue; err = uac_clock_selector_set_val(chip, entity_id, i, fmt->iface); if (err < 0) continue; usb_audio_info(chip, "found and selected valid clock source %d\n", ret); return ret; } return -ENXIO; } /* FIXME: multipliers only act as pass-thru element for now */ multiplier = snd_usb_find_clock_multiplier(chip, entity_id, fmt); if (multiplier) return __uac_clock_find_source(chip, fmt, GET_VAL(multiplier, proto, bCSourceID), visited, validate); return -EINVAL; } /* * For all kinds of sample rate settings and other device queries, * the clock source (end-leaf) must be used. However, clock selectors, * clock multipliers and sample rate converters may be specified as * clock source input to terminal. This functions walks the clock path * to its end and tries to find the source. * * The 'visited' bitfield is used internally to detect recursive loops. * * Returns the clock source UnitID (>=0) on success, or an error. 
*/ int snd_usb_clock_find_source(struct snd_usb_audio *chip, const struct audioformat *fmt, bool validate) { DECLARE_BITMAP(visited, 256); memset(visited, 0, sizeof(visited)); switch (fmt->protocol) { case UAC_VERSION_2: case UAC_VERSION_3: return __uac_clock_find_source(chip, fmt, fmt->clock, visited, validate); default: return -EINVAL; } } static int set_sample_rate_v1(struct snd_usb_audio *chip, const struct audioformat *fmt, int rate) { struct usb_device *dev = chip->dev; unsigned char data[3]; int err, crate; /* if endpoint doesn't have sampling rate control, bail out */ if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE)) return 0; data[0] = rate; data[1] = rate >> 8; data[2] = rate >> 16; err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT, UAC_EP_CS_ATTR_SAMPLE_RATE << 8, fmt->endpoint, data, sizeof(data)); if (err < 0) { dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n", fmt->iface, fmt->altsetting, rate, fmt->endpoint); return err; } /* Don't check the sample rate for devices which we know don't * support reading */ if (chip->quirk_flags & QUIRK_FLAG_GET_SAMPLE_RATE) return 0; /* the firmware is likely buggy, don't repeat to fail too many times */ if (chip->sample_rate_read_error > 2) return 0; err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR, USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN, UAC_EP_CS_ATTR_SAMPLE_RATE << 8, fmt->endpoint, data, sizeof(data)); if (err < 0) { dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n", fmt->iface, fmt->altsetting, fmt->endpoint); chip->sample_rate_read_error++; return 0; /* some devices don't support reading */ } crate = data[0] | (data[1] << 8) | (data[2] << 16); if (!crate) { dev_info(&dev->dev, "failed to read current rate; disabling the check\n"); chip->sample_rate_read_error = 3; /* three strikes, see above */ return 0; } if (crate != rate) { dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate); // runtime->rate = crate; } return 0; } static int get_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, int altsetting, int clock) { struct usb_device *dev = chip->dev; __le32 data; int err; struct usb_host_interface *ctrl_intf; ctrl_intf = snd_usb_find_ctrl_interface(chip, iface); err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_CS_CONTROL_SAM_FREQ << 8, snd_usb_ctrl_intf(ctrl_intf) | (clock << 8), &data, sizeof(data)); if (err < 0) { dev_warn(&dev->dev, "%d:%d: cannot get freq (v2/v3): err %d\n", iface, altsetting, err); return 0; } return le32_to_cpu(data); } /* * Try to set the given sample rate: * * Return 0 if the clock source is read-only, the actual rate on success, * or a negative error code. * * This function gets called from format.c to validate each sample rate, too. 
* Hence no message is shown upon error */ int snd_usb_set_sample_rate_v2v3(struct snd_usb_audio *chip, const struct audioformat *fmt, int clock, int rate) { bool writeable; u32 bmControls; __le32 data; int err; union uac23_clock_source_desc *cs_desc; struct usb_host_interface *ctrl_intf; ctrl_intf = snd_usb_find_ctrl_interface(chip, fmt->iface); cs_desc = snd_usb_find_clock_source(chip, clock, fmt); if (!cs_desc) return 0; if (fmt->protocol == UAC_VERSION_3) bmControls = le32_to_cpu(cs_desc->v3.bmControls); else bmControls = cs_desc->v2.bmControls; writeable = uac_v2v3_control_is_writeable(bmControls, UAC2_CS_CONTROL_SAM_FREQ); if (!writeable) return 0; data = cpu_to_le32(rate); err = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, UAC2_CS_CONTROL_SAM_FREQ << 8, snd_usb_ctrl_intf(ctrl_intf) | (clock << 8), &data, sizeof(data)); if (err < 0) return err; return get_sample_rate_v2v3(chip, fmt->iface, fmt->altsetting, clock); } static int set_sample_rate_v2v3(struct snd_usb_audio *chip, const struct audioformat *fmt, int rate) { int cur_rate, prev_rate; int clock; /* First, try to find a valid clock. This may trigger * automatic clock selection if the current clock is not * valid. */ clock = snd_usb_clock_find_source(chip, fmt, true); if (clock < 0) { /* We did not find a valid clock, but that might be * because the current sample rate does not match an * external clock source. Try again without validation * and we will do another validation after setting the * rate. */ clock = snd_usb_clock_find_source(chip, fmt, false); /* Hardcoded sample rates */ if (chip->quirk_flags & QUIRK_FLAG_IGNORE_CLOCK_SOURCE) return 0; if (clock < 0) return clock; } prev_rate = get_sample_rate_v2v3(chip, fmt->iface, fmt->altsetting, clock); if (prev_rate == rate) goto validation; cur_rate = snd_usb_set_sample_rate_v2v3(chip, fmt, clock, rate); if (cur_rate < 0) { usb_audio_err(chip, "%d:%d: cannot set freq %d (v2/v3): err %d\n", fmt->iface, fmt->altsetting, rate, cur_rate); return cur_rate; } if (!cur_rate) cur_rate = prev_rate; if (cur_rate != rate) { usb_audio_dbg(chip, "%d:%d: freq mismatch: req %d, clock runs @%d\n", fmt->iface, fmt->altsetting, rate, cur_rate); /* continue processing */ } /* FIXME - TEAC devices require the immediate interface setup */ if (USB_ID_VENDOR(chip->usb_id) == 0x0644) { bool cur_base_48k = (rate % 48000 == 0); bool prev_base_48k = (prev_rate % 48000 == 0); if (cur_base_48k != prev_base_48k) { usb_set_interface(chip->dev, fmt->iface, fmt->altsetting); if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY) msleep(50); } } validation: /* validate clock after rate change */ if (!uac_clock_source_is_valid(chip, fmt, clock)) return -ENXIO; return 0; } int snd_usb_init_sample_rate(struct snd_usb_audio *chip, const struct audioformat *fmt, int rate) { usb_audio_dbg(chip, "%d:%d Set sample rate %d, clock %d\n", fmt->iface, fmt->altsetting, rate, fmt->clock); switch (fmt->protocol) { case UAC_VERSION_1: default: return set_sample_rate_v1(chip, fmt, rate); case UAC_VERSION_3: if (chip->badd_profile >= UAC3_FUNCTION_SUBCLASS_GENERIC_IO) { if (rate != UAC3_BADD_SAMPLING_RATE) return -ENXIO; else return 0; } fallthrough; case UAC_VERSION_2: return set_sample_rate_v2v3(chip, fmt, rate); } }
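For UAC1 devices, set_sample_rate_v1() above transports the rate as a 3-byte little-endian value in a class-specific endpoint request and then reads it back to verify. The short standalone sketch below (illustration only, not driver code) shows just that byte packing, using 44100 Hz as the example value.
/* Standalone sketch of the 3-byte little-endian sample-rate encoding
 * used by the UAC1 SET_CUR/GET_CUR requests in set_sample_rate_v1().
 */
#include <stdio.h>

static void pack_rate(unsigned int rate, unsigned char data[3])
{
	data[0] = rate;		/* bits  7..0  */
	data[1] = rate >> 8;	/* bits 15..8  */
	data[2] = rate >> 16;	/* bits 23..16 */
}

static unsigned int unpack_rate(const unsigned char data[3])
{
	return data[0] | (data[1] << 8) | (data[2] << 16);
}

int main(void)
{
	unsigned char buf[3];

	pack_rate(44100, buf);	/* 0x44 0xac 0x00 */
	printf("%02x %02x %02x -> %u Hz\n", buf[0], buf[1], buf[2],
	       unpack_rate(buf));
	return 0;
}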
// SPDX-License-Identifier: GPL-2.0 /* * RTC subsystem, dev interface * * Copyright (C) 2005 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * based on arch/arm/common/rtctime.c */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/compat.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/sched/signal.h> #include "rtc-core.h" static dev_t rtc_devt; #define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone...
*/ static int rtc_dev_open(struct inode *inode, struct file *file) { struct rtc_device *rtc = container_of(inode->i_cdev, struct rtc_device, char_dev); if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) return -EBUSY; file->private_data = rtc; spin_lock_irq(&rtc->irq_lock); rtc->irq_data = 0; spin_unlock_irq(&rtc->irq_lock); return 0; } #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL /* * Routine to poll RTC seconds field for change as often as possible, * after first RTC_UIE use timer to reduce polling */ static void rtc_uie_task(struct work_struct *work) { struct rtc_device *rtc = container_of(work, struct rtc_device, uie_task); struct rtc_time tm; int num = 0; int err; err = rtc_read_time(rtc, &tm); spin_lock_irq(&rtc->irq_lock); if (rtc->stop_uie_polling || err) { rtc->uie_task_active = 0; } else if (rtc->oldsecs != tm.tm_sec) { num = (tm.tm_sec + 60 - rtc->oldsecs) % 60; rtc->oldsecs = tm.tm_sec; rtc->uie_timer.expires = jiffies + HZ - (HZ / 10); rtc->uie_timer_active = 1; rtc->uie_task_active = 0; add_timer(&rtc->uie_timer); } else if (schedule_work(&rtc->uie_task) == 0) { rtc->uie_task_active = 0; } spin_unlock_irq(&rtc->irq_lock); if (num) rtc_handle_legacy_irq(rtc, num, RTC_UF); } static void rtc_uie_timer(struct timer_list *t) { struct rtc_device *rtc = from_timer(rtc, t, uie_timer); unsigned long flags; spin_lock_irqsave(&rtc->irq_lock, flags); rtc->uie_timer_active = 0; rtc->uie_task_active = 1; if ((schedule_work(&rtc->uie_task) == 0)) rtc->uie_task_active = 0; spin_unlock_irqrestore(&rtc->irq_lock, flags); } static int clear_uie(struct rtc_device *rtc) { spin_lock_irq(&rtc->irq_lock); if (rtc->uie_irq_active) { rtc->stop_uie_polling = 1; if (rtc->uie_timer_active) { spin_unlock_irq(&rtc->irq_lock); del_timer_sync(&rtc->uie_timer); spin_lock_irq(&rtc->irq_lock); rtc->uie_timer_active = 0; } if (rtc->uie_task_active) { spin_unlock_irq(&rtc->irq_lock); flush_work(&rtc->uie_task); spin_lock_irq(&rtc->irq_lock); } rtc->uie_irq_active = 0; } spin_unlock_irq(&rtc->irq_lock); return 0; } static int set_uie(struct rtc_device *rtc) { struct rtc_time tm; int err; err = rtc_read_time(rtc, &tm); if (err) return err; spin_lock_irq(&rtc->irq_lock); if (!rtc->uie_irq_active) { rtc->uie_irq_active = 1; rtc->stop_uie_polling = 0; rtc->oldsecs = tm.tm_sec; rtc->uie_task_active = 1; if (schedule_work(&rtc->uie_task) == 0) rtc->uie_task_active = 0; } rtc->irq_data = 0; spin_unlock_irq(&rtc->irq_lock); return 0; } int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled) { if (enabled) return set_uie(rtc); else return clear_uie(rtc); } EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul); #endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */ static ssize_t rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct rtc_device *rtc = file->private_data; DECLARE_WAITQUEUE(wait, current); unsigned long data; ssize_t ret; if (count != sizeof(unsigned int) && count < sizeof(unsigned long)) return -EINVAL; add_wait_queue(&rtc->irq_queue, &wait); do { __set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&rtc->irq_lock); data = rtc->irq_data; rtc->irq_data = 0; spin_unlock_irq(&rtc->irq_lock); if (data != 0) { ret = 0; break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } schedule(); } while (1); set_current_state(TASK_RUNNING); remove_wait_queue(&rtc->irq_queue, &wait); if (ret == 0) { if (sizeof(int) != sizeof(long) && count == sizeof(unsigned int)) ret = put_user(data, (unsigned int __user *)buf) ?: 
sizeof(unsigned int); else ret = put_user(data, (unsigned long __user *)buf) ?: sizeof(unsigned long); } return ret; } static __poll_t rtc_dev_poll(struct file *file, poll_table *wait) { struct rtc_device *rtc = file->private_data; unsigned long data; poll_wait(file, &rtc->irq_queue, wait); data = rtc->irq_data; return (data != 0) ? (EPOLLIN | EPOLLRDNORM) : 0; } static long rtc_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; struct rtc_device *rtc = file->private_data; const struct rtc_class_ops *ops = rtc->ops; struct rtc_time tm; struct rtc_wkalrm alarm; struct rtc_param param; void __user *uarg = (void __user *)arg; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; /* check that the calling task has appropriate permissions * for certain ioctls. doing this check here is useful * to avoid duplicate code in each driver. */ switch (cmd) { case RTC_EPOCH_SET: case RTC_SET_TIME: case RTC_PARAM_SET: if (!capable(CAP_SYS_TIME)) err = -EACCES; break; case RTC_IRQP_SET: if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE)) err = -EACCES; break; case RTC_PIE_ON: if (rtc->irq_freq > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE)) err = -EACCES; break; } if (err) goto done; /* * Drivers *SHOULD NOT* provide ioctl implementations * for these requests. Instead, provide methods to * support the following code, so that the RTC's main * features are accessible without using ioctls. * * RTC and alarm times will be in UTC, by preference, * but dual-booting with MS-Windows implies RTCs must * use the local wall clock time. */ switch (cmd) { case RTC_ALM_READ: mutex_unlock(&rtc->ops_lock); err = rtc_read_alarm(rtc, &alarm); if (err < 0) return err; if (copy_to_user(uarg, &alarm.time, sizeof(tm))) err = -EFAULT; return err; case RTC_ALM_SET: mutex_unlock(&rtc->ops_lock); if (copy_from_user(&alarm.time, uarg, sizeof(tm))) return -EFAULT; alarm.enabled = 0; alarm.pending = 0; alarm.time.tm_wday = -1; alarm.time.tm_yday = -1; alarm.time.tm_isdst = -1; /* RTC_ALM_SET alarms may be up to 24 hours in the future. * Rather than expecting every RTC to implement "don't care" * for day/month/year fields, just force the alarm to have * the right values for those fields. * * RTC_WKALM_SET should be used instead. Not only does it * eliminate the need for a separate RTC_AIE_ON call, it * doesn't have the "alarm 23:59:59 in the future" race. * * NOTE: some legacy code may have used invalid fields as * wildcards, exposing hardware "periodic alarm" capabilities. * Not supported here. 
*/ { time64_t now, then; err = rtc_read_time(rtc, &tm); if (err < 0) return err; now = rtc_tm_to_time64(&tm); alarm.time.tm_mday = tm.tm_mday; alarm.time.tm_mon = tm.tm_mon; alarm.time.tm_year = tm.tm_year; err = rtc_valid_tm(&alarm.time); if (err < 0) return err; then = rtc_tm_to_time64(&alarm.time); /* alarm may need to wrap into tomorrow */ if (then < now) { rtc_time64_to_tm(now + 24 * 60 * 60, &tm); alarm.time.tm_mday = tm.tm_mday; alarm.time.tm_mon = tm.tm_mon; alarm.time.tm_year = tm.tm_year; } } return rtc_set_alarm(rtc, &alarm); case RTC_RD_TIME: mutex_unlock(&rtc->ops_lock); err = rtc_read_time(rtc, &tm); if (err < 0) return err; if (copy_to_user(uarg, &tm, sizeof(tm))) err = -EFAULT; return err; case RTC_SET_TIME: mutex_unlock(&rtc->ops_lock); if (copy_from_user(&tm, uarg, sizeof(tm))) return -EFAULT; return rtc_set_time(rtc, &tm); case RTC_PIE_ON: err = rtc_irq_set_state(rtc, 1); break; case RTC_PIE_OFF: err = rtc_irq_set_state(rtc, 0); break; case RTC_AIE_ON: mutex_unlock(&rtc->ops_lock); return rtc_alarm_irq_enable(rtc, 1); case RTC_AIE_OFF: mutex_unlock(&rtc->ops_lock); return rtc_alarm_irq_enable(rtc, 0); case RTC_UIE_ON: mutex_unlock(&rtc->ops_lock); return rtc_update_irq_enable(rtc, 1); case RTC_UIE_OFF: mutex_unlock(&rtc->ops_lock); return rtc_update_irq_enable(rtc, 0); case RTC_IRQP_SET: err = rtc_irq_set_freq(rtc, arg); break; case RTC_IRQP_READ: err = put_user(rtc->irq_freq, (unsigned long __user *)uarg); break; case RTC_WKALM_SET: mutex_unlock(&rtc->ops_lock); if (copy_from_user(&alarm, uarg, sizeof(alarm))) return -EFAULT; return rtc_set_alarm(rtc, &alarm); case RTC_WKALM_RD: mutex_unlock(&rtc->ops_lock); err = rtc_read_alarm(rtc, &alarm); if (err < 0) return err; if (copy_to_user(uarg, &alarm, sizeof(alarm))) err = -EFAULT; return err; case RTC_PARAM_GET: if (copy_from_user(&param, uarg, sizeof(param))) { mutex_unlock(&rtc->ops_lock); return -EFAULT; } switch(param.param) { case RTC_PARAM_FEATURES: if (param.index != 0) err = -EINVAL; param.uvalue = rtc->features[0]; break; case RTC_PARAM_CORRECTION: { long offset; mutex_unlock(&rtc->ops_lock); if (param.index != 0) return -EINVAL; err = rtc_read_offset(rtc, &offset); mutex_lock(&rtc->ops_lock); if (err == 0) param.svalue = offset; break; } default: if (rtc->ops->param_get) err = rtc->ops->param_get(rtc->dev.parent, &param); else err = -EINVAL; } if (!err) if (copy_to_user(uarg, &param, sizeof(param))) err = -EFAULT; break; case RTC_PARAM_SET: if (copy_from_user(&param, uarg, sizeof(param))) { mutex_unlock(&rtc->ops_lock); return -EFAULT; } switch(param.param) { case RTC_PARAM_FEATURES: err = -EINVAL; break; case RTC_PARAM_CORRECTION: mutex_unlock(&rtc->ops_lock); if (param.index != 0) return -EINVAL; return rtc_set_offset(rtc, param.svalue); default: if (rtc->ops->param_set) err = rtc->ops->param_set(rtc->dev.parent, &param); else err = -EINVAL; } break; default: /* Finally try the driver's ioctl interface */ if (ops->ioctl) { err = ops->ioctl(rtc->dev.parent, cmd, arg); if (err == -ENOIOCTLCMD) err = -ENOTTY; } else { err = -ENOTTY; } break; } done: mutex_unlock(&rtc->ops_lock); return err; } #ifdef CONFIG_COMPAT #define RTC_IRQP_SET32 _IOW('p', 0x0c, __u32) #define RTC_IRQP_READ32 _IOR('p', 0x0b, __u32) #define RTC_EPOCH_SET32 _IOW('p', 0x0e, __u32) static long rtc_dev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct rtc_device *rtc = file->private_data; void __user *uarg = compat_ptr(arg); switch (cmd) { case RTC_IRQP_READ32: return put_user(rtc->irq_freq, (__u32 __user *)uarg); case 
RTC_IRQP_SET32: /* arg is a plain integer, not pointer */ return rtc_dev_ioctl(file, RTC_IRQP_SET, arg); case RTC_EPOCH_SET32: /* arg is a plain integer, not pointer */ return rtc_dev_ioctl(file, RTC_EPOCH_SET, arg); } return rtc_dev_ioctl(file, cmd, (unsigned long)uarg); } #endif static int rtc_dev_fasync(int fd, struct file *file, int on) { struct rtc_device *rtc = file->private_data; return fasync_helper(fd, file, on, &rtc->async_queue); } static int rtc_dev_release(struct inode *inode, struct file *file) { struct rtc_device *rtc = file->private_data; /* We shut down the repeating IRQs that userspace enabled, * since nothing is listening to them. * - Update (UIE) ... currently only managed through ioctls * - Periodic (PIE) ... also used through rtc_*() interface calls * * Leave the alarm alone; it may be set to trigger a system wakeup * later, or be used by kernel code, and is a one-shot event anyway. */ /* Keep ioctl until all drivers are converted */ rtc_dev_ioctl(file, RTC_UIE_OFF, 0); rtc_update_irq_enable(rtc, 0); rtc_irq_set_state(rtc, 0); clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return 0; } static const struct file_operations rtc_dev_fops = { .owner = THIS_MODULE, .read = rtc_dev_read, .poll = rtc_dev_poll, .unlocked_ioctl = rtc_dev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = rtc_dev_compat_ioctl, #endif .open = rtc_dev_open, .release = rtc_dev_release, .fasync = rtc_dev_fasync, }; /* insertion/removal hooks */ void rtc_dev_prepare(struct rtc_device *rtc) { if (!rtc_devt) return; if (rtc->id >= RTC_DEV_MAX) { dev_dbg(&rtc->dev, "too many RTC devices\n"); return; } rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL INIT_WORK(&rtc->uie_task, rtc_uie_task); timer_setup(&rtc->uie_timer, rtc_uie_timer, 0); #endif cdev_init(&rtc->char_dev, &rtc_dev_fops); rtc->char_dev.owner = rtc->owner; } void __init rtc_dev_init(void) { int err; err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc"); if (err < 0) pr_err("failed to allocate char dev region\n"); }
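The character-device interface implemented above is normally driven from userspace through /dev/rtcN. The following hedged userspace sketch (not kernel code, with minimal error handling) reads the current time with RTC_RD_TIME and then blocks in read() for a single update interrupt via RTC_UIE_ON/RTC_UIE_OFF, exercising rtc_dev_ioctl() and rtc_dev_read().
/* Userspace sketch: query the time and wait for one update interrupt
 * through /dev/rtc0, using the standard <linux/rtc.h> ioctls.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);

	/* one update (1 Hz) interrupt: enable UIE, then block in read() */
	if (ioctl(fd, RTC_UIE_ON, 0) == 0) {
		if (read(fd, &data, sizeof(data)) > 0)
			printf("irq data: 0x%lx\n", data);	/* flags + count */
		ioctl(fd, RTC_UIE_OFF, 0);
	}

	close(fd);
	return 0;
}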
// SPDX-License-Identifier: GPL-2.0-only /* * IPv6 library code, needed by static components when full IPv6 support is * not configured or static. These functions are needed by GSO/GRO implementation. */ #include <linux/export.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_fib.h> #include <net/addrconf.h> #include <net/secure_seq.h> #include <linux/netfilter.h> static u32 __ipv6_select_ident(struct net *net, const struct in6_addr *dst, const struct in6_addr *src) { return get_random_u32_above(0); } /* This function exists only for tap drivers that must support broken * clients requesting UFO without specifying an IPv6 fragment ID. * * This is similar to ipv6_select_ident() but we use an independent hash * seed to limit information leakage. * * The network header must be set before calling this. */ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) { struct in6_addr buf[2]; struct in6_addr *addrs; u32 id; addrs = skb_header_pointer(skb, skb_network_offset(skb) + offsetof(struct ipv6hdr, saddr), sizeof(buf), buf); if (!addrs) return 0; id = __ipv6_select_ident(net, &addrs[1], &addrs[0]); return htonl(id); } EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr) { u32 id; id = __ipv6_select_ident(net, daddr, saddr); return htonl(id); } EXPORT_SYMBOL(ipv6_select_ident); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { unsigned int offset = sizeof(struct ipv6hdr); unsigned int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); int found_rhdr = 0; *nexthdr = &ipv6_hdr(skb)->nexthdr; while (offset <= packet_len) { struct ipv6_opt_hdr *exthdr; switch (**nexthdr) { case NEXTHDR_HOP: break; case NEXTHDR_ROUTING: found_rhdr = 1; break; case NEXTHDR_DEST: #if IS_ENABLED(CONFIG_IPV6_MIP6) if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) break; #endif if (found_rhdr) return offset; break; default: return offset; } if (offset + sizeof(struct ipv6_opt_hdr) > packet_len) return -EINVAL; exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + offset); offset += ipv6_optlen(exthdr); if (offset > IPV6_MAXPLEN) return -EINVAL; *nexthdr = &exthdr->nexthdr; } return -EINVAL; } EXPORT_SYMBOL(ip6_find_1stfragopt); #if IS_ENABLED(CONFIG_IPV6) int ip6_dst_hoplimit(struct dst_entry *dst) { int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); if (hoplimit == 0) { struct net_device *dev = dst->dev; struct inet6_dev *idev; rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) hoplimit = READ_ONCE(idev->cnf.hop_limit); else hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit); rcu_read_unlock(); } return hoplimit; } EXPORT_SYMBOL(ip6_dst_hoplimit); #endif int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int len; len = skb->len - sizeof(struct ipv6hdr); if (len > IPV6_MAXPLEN) len = 0; ipv6_hdr(skb)->payload_len = htons(len); IP6CB(skb)->nhoff =
offsetof(struct ipv6hdr, nexthdr); /* if egress device is enslaved to an L3 master device pass the * skb to its handler for processing */ skb = l3mdev_ip6_out(sk, skb); if (unlikely(!skb)) return 0; skb->protocol = htons(ETH_P_IPV6); return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); } EXPORT_SYMBOL_GPL(__ip6_local_out); int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; err = __ip6_local_out(net, sk, skb); if (likely(err == 1)) err = dst_output(net, sk, skb); return err; } EXPORT_SYMBOL_GPL(ip6_local_out);
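/*
 * Illustrative userspace sketch (not kernel code) of the extension-header walk
 * that ip6_find_1stfragopt() performs above: starting right after the fixed
 * 40-byte IPv6 header, skip Hop-by-Hop, Routing and leading Destination
 * Options headers and return the offset at which a Fragment header would be
 * inserted.  The protocol numbers and the (hdrlen + 1) * 8 length rule follow
 * RFC 8200; the CONFIG_IPV6_MIP6 home-address special case is omitted and the
 * buffer layout and error handling are simplified assumptions.
 */
#include <stddef.h>
#include <stdint.h>

#define IPV6_HDR_LEN	40
#define NEXTHDR_HOP	0
#define NEXTHDR_ROUTING	43
#define NEXTHDR_DEST	60

/* Returns the insertion offset for a Fragment header, or -1 on malformed input. */
static int find_frag_insert_offset(const uint8_t *pkt, size_t pkt_len)
{
	size_t off = IPV6_HDR_LEN;
	int found_rhdr = 0;
	uint8_t nexthdr;

	if (pkt_len < IPV6_HDR_LEN)
		return -1;
	nexthdr = pkt[6];	/* Next Header field of the fixed IPv6 header */

	while (off + 2 <= pkt_len) {
		switch (nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* A Destination Options header after a Routing header
			 * already belongs to the fragmentable part. */
			if (found_rhdr)
				return (int)off;
			break;
		default:
			return (int)off;	/* first non-extension header */
		}
		/* Skip the current extension header: next-header byte, then
		 * a length expressed in 8-octet units minus one. */
		nexthdr = pkt[off];
		off += ((size_t)pkt[off + 1] + 1) * 8;
	}
	return -1;
}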
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2008-2014 Mathieu Desnoyers */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/jhash.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/tracepoint.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/sched/task.h>
#include <linux/static_key.h> enum tp_func_state { TP_FUNC_0, TP_FUNC_1, TP_FUNC_2, TP_FUNC_N, }; extern tracepoint_ptr_t __start___tracepoints_ptrs[]; extern tracepoint_ptr_t __stop___tracepoints_ptrs[]; enum tp_transition_sync { TP_TRANSITION_SYNC_1_0_1, TP_TRANSITION_SYNC_N_2_1, _NR_TP_TRANSITION_SYNC, }; struct tp_transition_snapshot { unsigned long rcu; bool ongoing; }; /* Protected by tracepoints_mutex */ static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC]; static void tp_rcu_get_state(enum tp_transition_sync sync) { struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync]; /* Keep the latest get_state snapshot. */ snapshot->rcu = get_state_synchronize_rcu(); snapshot->ongoing = true; } static void tp_rcu_cond_sync(enum tp_transition_sync sync) { struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync]; if (!snapshot->ongoing) return; cond_synchronize_rcu(snapshot->rcu); snapshot->ongoing = false; } /* Set to 1 to enable tracepoint debug output */ static const int tracepoint_debug; #ifdef CONFIG_MODULES /* * Tracepoint module list mutex protects the local module list. */ static DEFINE_MUTEX(tracepoint_module_list_mutex); /* Local list of struct tp_module */ static LIST_HEAD(tracepoint_module_list); #endif /* CONFIG_MODULES */ /* * tracepoints_mutex protects the builtin and module tracepoints. * tracepoints_mutex nests inside tracepoint_module_list_mutex. */ static DEFINE_MUTEX(tracepoints_mutex); /* * Note about RCU : * It is used to delay the free of multiple probes array until a quiescent * state is reached. */ struct tp_probes { struct rcu_head rcu; struct tracepoint_func probes[]; }; /* Called in removal of a func but failed to allocate a new tp_funcs */ static void tp_stub_func(void) { return; } static inline void *allocate_probes(int count) { struct tp_probes *p = kmalloc(struct_size(p, probes, count), GFP_KERNEL); return p == NULL ? NULL : p->probes; } static void rcu_free_old_probes(struct rcu_head *head) { kfree(container_of(head, struct tp_probes, rcu)); } static inline void release_probes(struct tracepoint *tp, struct tracepoint_func *old) { if (old) { struct tp_probes *tp_probes = container_of(old, struct tp_probes, probes[0]); if (tracepoint_is_faultable(tp)) call_rcu_tasks_trace(&tp_probes->rcu, rcu_free_old_probes); else call_rcu(&tp_probes->rcu, rcu_free_old_probes); } } static void debug_print_probes(struct tracepoint_func *funcs) { int i; if (!tracepoint_debug || !funcs) return; for (i = 0; funcs[i].func; i++) printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); } static struct tracepoint_func * func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, int prio) { struct tracepoint_func *old, *new; int iter_probes; /* Iterate over old probe array. */ int nr_probes = 0; /* Counter for probes */ int pos = -1; /* Insertion position into new array */ if (WARN_ON(!tp_func->func)) return ERR_PTR(-EINVAL); debug_print_probes(*funcs); old = *funcs; if (old) { /* (N -> N+1), (N != 0, 1) probes */ for (iter_probes = 0; old[iter_probes].func; iter_probes++) { if (old[iter_probes].func == tp_stub_func) continue; /* Skip stub functions. 
*/ if (old[iter_probes].func == tp_func->func && old[iter_probes].data == tp_func->data) return ERR_PTR(-EEXIST); nr_probes++; } } /* + 2 : one for new probe, one for NULL func */ new = allocate_probes(nr_probes + 2); if (new == NULL) return ERR_PTR(-ENOMEM); if (old) { nr_probes = 0; for (iter_probes = 0; old[iter_probes].func; iter_probes++) { if (old[iter_probes].func == tp_stub_func) continue; /* Insert before probes of lower priority */ if (pos < 0 && old[iter_probes].prio < prio) pos = nr_probes++; new[nr_probes++] = old[iter_probes]; } if (pos < 0) pos = nr_probes++; /* nr_probes now points to the end of the new array */ } else { pos = 0; nr_probes = 1; /* must point at end of array */ } new[pos] = *tp_func; new[nr_probes].func = NULL; *funcs = new; debug_print_probes(*funcs); return old; } static void *func_remove(struct tracepoint_func **funcs, struct tracepoint_func *tp_func) { int nr_probes = 0, nr_del = 0, i; struct tracepoint_func *old, *new; old = *funcs; if (!old) return ERR_PTR(-ENOENT); debug_print_probes(*funcs); /* (N -> M), (N > 1, M >= 0) probes */ if (tp_func->func) { for (nr_probes = 0; old[nr_probes].func; nr_probes++) { if ((old[nr_probes].func == tp_func->func && old[nr_probes].data == tp_func->data) || old[nr_probes].func == tp_stub_func) nr_del++; } } /* * If probe is NULL, then nr_probes = nr_del = 0, and then the * entire entry will be removed. */ if (nr_probes - nr_del == 0) { /* N -> 0, (N > 1) */ *funcs = NULL; debug_print_probes(*funcs); return old; } else { int j = 0; /* N -> M, (N > 1, M > 0) */ /* + 1 for NULL */ new = allocate_probes(nr_probes - nr_del + 1); if (new) { for (i = 0; old[i].func; i++) { if ((old[i].func != tp_func->func || old[i].data != tp_func->data) && old[i].func != tp_stub_func) new[j++] = old[i]; } new[nr_probes - nr_del].func = NULL; *funcs = new; } else { /* * Failed to allocate, replace the old function * with calls to tp_stub_func. */ for (i = 0; old[i].func; i++) { if (old[i].func == tp_func->func && old[i].data == tp_func->data) WRITE_ONCE(old[i].func, tp_stub_func); } *funcs = old; } } debug_print_probes(*funcs); return old; } /* * Count the number of functions (enum tp_func_state) in a tp_funcs array. */ static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs) { if (!tp_funcs) return TP_FUNC_0; if (!tp_funcs[1].func) return TP_FUNC_1; if (!tp_funcs[2].func) return TP_FUNC_2; return TP_FUNC_N; /* 3 or more */ } static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs) { void *func = tp->iterator; /* Synthetic events do not have static call sites */ if (!tp->static_call_key) return; if (nr_func_state(tp_funcs) == TP_FUNC_1) func = tp_funcs[0].func; __static_call_update(tp->static_call_key, tp->static_call_tramp, func); } /* * Add the probe function to a tracepoint. */ static int tracepoint_add_func(struct tracepoint *tp, struct tracepoint_func *func, int prio, bool warn) { struct tracepoint_func *old, *tp_funcs; int ret; if (tp->ext && tp->ext->regfunc && !static_key_enabled(&tp->key)) { ret = tp->ext->regfunc(); if (ret < 0) return ret; } tp_funcs = rcu_dereference_protected(tp->funcs, lockdep_is_held(&tracepoints_mutex)); old = func_add(&tp_funcs, func, prio); if (IS_ERR(old)) { WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM); return PTR_ERR(old); } /* * rcu_assign_pointer has as smp_store_release() which makes sure * that the new probe callbacks array is consistent before setting * a pointer to it. 
This array is referenced by __DO_TRACE from * include/linux/tracepoint.h using rcu_dereference_sched(). */ switch (nr_func_state(tp_funcs)) { case TP_FUNC_1: /* 0->1 */ /* * Make sure new static func never uses old data after a * 1->0->1 transition sequence. */ tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1); /* Set static call to first function */ tracepoint_update_call(tp, tp_funcs); /* Both iterator and static call handle NULL tp->funcs */ rcu_assign_pointer(tp->funcs, tp_funcs); static_branch_enable(&tp->key); break; case TP_FUNC_2: /* 1->2 */ /* Set iterator static call */ tracepoint_update_call(tp, tp_funcs); /* * Iterator callback installed before updating tp->funcs. * Requires ordering between RCU assign/dereference and * static call update/call. */ fallthrough; case TP_FUNC_N: /* N->N+1 (N>1) */ rcu_assign_pointer(tp->funcs, tp_funcs); /* * Make sure static func never uses incorrect data after a * N->...->2->1 (N>1) transition sequence. */ if (tp_funcs[0].data != old[0].data) tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1); break; default: WARN_ON_ONCE(1); break; } release_probes(tp, old); return 0; } /* * Remove a probe function from a tracepoint. * Note: only waiting an RCU period after setting elem->call to the empty * function insures that the original callback is not used anymore. This insured * by preempt_disable around the call site. */ static int tracepoint_remove_func(struct tracepoint *tp, struct tracepoint_func *func) { struct tracepoint_func *old, *tp_funcs; tp_funcs = rcu_dereference_protected(tp->funcs, lockdep_is_held(&tracepoints_mutex)); old = func_remove(&tp_funcs, func); if (WARN_ON_ONCE(IS_ERR(old))) return PTR_ERR(old); if (tp_funcs == old) /* Failed allocating new tp_funcs, replaced func with stub */ return 0; switch (nr_func_state(tp_funcs)) { case TP_FUNC_0: /* 1->0 */ /* Removed last function */ if (tp->ext && tp->ext->unregfunc && static_key_enabled(&tp->key)) tp->ext->unregfunc(); static_branch_disable(&tp->key); /* Set iterator static call */ tracepoint_update_call(tp, tp_funcs); /* Both iterator and static call handle NULL tp->funcs */ rcu_assign_pointer(tp->funcs, NULL); /* * Make sure new static func never uses old data after a * 1->0->1 transition sequence. */ tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1); break; case TP_FUNC_1: /* 2->1 */ rcu_assign_pointer(tp->funcs, tp_funcs); /* * Make sure static func never uses incorrect data after a * N->...->2->1 (N>2) transition sequence. If the first * element's data has changed, then force the synchronization * to prevent current readers that have loaded the old data * from calling the new function. */ if (tp_funcs[0].data != old[0].data) tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1); tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1); /* Set static call to first function */ tracepoint_update_call(tp, tp_funcs); break; case TP_FUNC_2: /* N->N-1 (N>2) */ fallthrough; case TP_FUNC_N: rcu_assign_pointer(tp->funcs, tp_funcs); /* * Make sure static func never uses incorrect data after a * N->...->2->1 (N>2) transition sequence. */ if (tp_funcs[0].data != old[0].data) tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1); break; default: WARN_ON_ONCE(1); break; } release_probes(tp, old); return 0; } /** * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority * @tp: tracepoint * @probe: probe handler * @data: tracepoint data * @prio: priority of this function over other registered functions * * Same as tracepoint_probe_register_prio() except that it will not warn * if the tracepoint is already registered. 
*/ int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data, int prio) { struct tracepoint_func tp_func; int ret; mutex_lock(&tracepoints_mutex); tp_func.func = probe; tp_func.data = data; tp_func.prio = prio; ret = tracepoint_add_func(tp, &tp_func, prio, false); mutex_unlock(&tracepoints_mutex); return ret; } EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist); /** * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority * @tp: tracepoint * @probe: probe handler * @data: tracepoint data * @prio: priority of this function over other registered functions * * Returns 0 if ok, error value on error. * Note: if @tp is within a module, the caller is responsible for * unregistering the probe before the module is gone. This can be * performed either with a tracepoint module going notifier, or from * within module exit functions. */ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, int prio) { struct tracepoint_func tp_func; int ret; mutex_lock(&tracepoints_mutex); tp_func.func = probe; tp_func.data = data; tp_func.prio = prio; ret = tracepoint_add_func(tp, &tp_func, prio, true); mutex_unlock(&tracepoints_mutex); return ret; } EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio); /** * tracepoint_probe_register - Connect a probe to a tracepoint * @tp: tracepoint * @probe: probe handler * @data: tracepoint data * * Returns 0 if ok, error value on error. * Note: if @tp is within a module, the caller is responsible for * unregistering the probe before the module is gone. This can be * performed either with a tracepoint module going notifier, or from * within module exit functions. */ int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) { return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO); } EXPORT_SYMBOL_GPL(tracepoint_probe_register); /** * tracepoint_probe_unregister - Disconnect a probe from a tracepoint * @tp: tracepoint * @probe: probe function pointer * @data: tracepoint data * * Returns 0 if ok, error value on error. */ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) { struct tracepoint_func tp_func; int ret; mutex_lock(&tracepoints_mutex); tp_func.func = probe; tp_func.data = data; ret = tracepoint_remove_func(tp, &tp_func); mutex_unlock(&tracepoints_mutex); return ret; } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); static void for_each_tracepoint_range( tracepoint_ptr_t *begin, tracepoint_ptr_t *end, void (*fct)(struct tracepoint *tp, void *priv), void *priv) { tracepoint_ptr_t *iter; if (!begin) return; for (iter = begin; iter < end; iter++) fct(tracepoint_ptr_deref(iter), priv); } #ifdef CONFIG_MODULES bool trace_module_has_bad_taint(struct module *mod) { return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) | (1 << TAINT_UNSIGNED_MODULE) | (1 << TAINT_TEST) | (1 << TAINT_LIVEPATCH)); } static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); /** * register_tracepoint_module_notifier - register tracepoint coming/going notifier * @nb: notifier block * * Notifiers registered with this function are called on module * coming/going with the tracepoint_module_list_mutex held. * The notifier block callback should expect a "struct tp_module" data * pointer. 
*/ int register_tracepoint_module_notifier(struct notifier_block *nb) { struct tp_module *tp_mod; int ret; mutex_lock(&tracepoint_module_list_mutex); ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb); if (ret) goto end; list_for_each_entry(tp_mod, &tracepoint_module_list, list) (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod); end: mutex_unlock(&tracepoint_module_list_mutex); return ret; } EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier); /** * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier * @nb: notifier block * * The notifier block callback should expect a "struct tp_module" data * pointer. */ int unregister_tracepoint_module_notifier(struct notifier_block *nb) { struct tp_module *tp_mod; int ret; mutex_lock(&tracepoint_module_list_mutex); ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb); if (ret) goto end; list_for_each_entry(tp_mod, &tracepoint_module_list, list) (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod); end: mutex_unlock(&tracepoint_module_list_mutex); return ret; } EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier); /* * Ensure the tracer unregistered the module's probes before the module * teardown is performed. Prevents leaks of probe and data pointers. */ static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv) { WARN_ON_ONCE(tp->funcs); } static int tracepoint_module_coming(struct module *mod) { struct tp_module *tp_mod; if (!mod->num_tracepoints) return 0; /* * We skip modules that taint the kernel, especially those with different * module headers (for forced load), to make sure we don't cause a crash. * Staging, out-of-tree, unsigned GPL, and test modules are fine. */ if (trace_module_has_bad_taint(mod)) return 0; tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); if (!tp_mod) return -ENOMEM; tp_mod->mod = mod; mutex_lock(&tracepoint_module_list_mutex); list_add_tail(&tp_mod->list, &tracepoint_module_list); blocking_notifier_call_chain(&tracepoint_notify_list, MODULE_STATE_COMING, tp_mod); mutex_unlock(&tracepoint_module_list_mutex); return 0; } static void tracepoint_module_going(struct module *mod) { struct tp_module *tp_mod; if (!mod->num_tracepoints) return; mutex_lock(&tracepoint_module_list_mutex); list_for_each_entry(tp_mod, &tracepoint_module_list, list) { if (tp_mod->mod == mod) { blocking_notifier_call_chain(&tracepoint_notify_list, MODULE_STATE_GOING, tp_mod); list_del(&tp_mod->list); kfree(tp_mod); /* * Called the going notifier before checking for * quiescence. */ for_each_tracepoint_range(mod->tracepoints_ptrs, mod->tracepoints_ptrs + mod->num_tracepoints, tp_module_going_check_quiescent, NULL); break; } } /* * In the case of modules that were tainted at "coming", we'll simply * walk through the list without finding it. We cannot use the "tainted" * flag on "going", in case a module taints the kernel only after being * loaded. 
*/ mutex_unlock(&tracepoint_module_list_mutex); } static int tracepoint_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; int ret = 0; switch (val) { case MODULE_STATE_COMING: ret = tracepoint_module_coming(mod); break; case MODULE_STATE_LIVE: break; case MODULE_STATE_GOING: tracepoint_module_going(mod); break; case MODULE_STATE_UNFORMED: break; } return notifier_from_errno(ret); } static struct notifier_block tracepoint_module_nb = { .notifier_call = tracepoint_module_notify, .priority = 0, }; static __init int init_tracepoints(void) { int ret; ret = register_module_notifier(&tracepoint_module_nb); if (ret) pr_warn("Failed to register tracepoint module enter notifier\n"); return ret; } __initcall(init_tracepoints); /** * for_each_tracepoint_in_module - iteration on all tracepoints in a module * @mod: module * @fct: callback * @priv: private data */ void for_each_tracepoint_in_module(struct module *mod, void (*fct)(struct tracepoint *tp, struct module *mod, void *priv), void *priv) { tracepoint_ptr_t *begin, *end, *iter; lockdep_assert_held(&tracepoint_module_list_mutex); if (!mod) return; begin = mod->tracepoints_ptrs; end = mod->tracepoints_ptrs + mod->num_tracepoints; for (iter = begin; iter < end; iter++) fct(tracepoint_ptr_deref(iter), mod, priv); } /** * for_each_module_tracepoint - iteration on all tracepoints in all modules * @fct: callback * @priv: private data */ void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp, struct module *mod, void *priv), void *priv) { struct tp_module *tp_mod; mutex_lock(&tracepoint_module_list_mutex); list_for_each_entry(tp_mod, &tracepoint_module_list, list) for_each_tracepoint_in_module(tp_mod->mod, fct, priv); mutex_unlock(&tracepoint_module_list_mutex); } #endif /* CONFIG_MODULES */ /** * for_each_kernel_tracepoint - iteration on all kernel tracepoints * @fct: callback * @priv: private data */ void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), void *priv) { for_each_tracepoint_range(__start___tracepoints_ptrs, __stop___tracepoints_ptrs, fct, priv); } EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint); #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ static int sys_tracepoint_refcount; int syscall_regfunc(void) { struct task_struct *p, *t; if (!sys_tracepoint_refcount) { read_lock(&tasklist_lock); for_each_process_thread(p, t) { set_task_syscall_work(t, SYSCALL_TRACEPOINT); } read_unlock(&tasklist_lock); } sys_tracepoint_refcount++; return 0; } void syscall_unregfunc(void) { struct task_struct *p, *t; sys_tracepoint_refcount--; if (!sys_tracepoint_refcount) { read_lock(&tasklist_lock); for_each_process_thread(p, t) { clear_task_syscall_work(t, SYSCALL_TRACEPOINT); } read_unlock(&tasklist_lock); } } #endif
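/*
 * Example (not part of this file): a minimal module sketch showing how the
 * registration API above is typically consumed.  It walks the built-in
 * tracepoints with for_each_kernel_tracepoint() to count them and look one up
 * by name; the tracepoint name "sched_switch" is only an illustrative
 * assumption.  Attaching a probe would then use tracepoint_probe_register()
 * with a probe whose signature matches the event's TP_PROTO, preceded by the
 * registered data pointer as the first argument.
 */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include <linux/string.h>

struct tp_lookup {
	const char *name;
	struct tracepoint *tp;
	unsigned int count;
};

static void tp_lookup_cb(struct tracepoint *tp, void *priv)
{
	struct tp_lookup *lookup = priv;

	lookup->count++;
	if (!lookup->tp && !strcmp(tp->name, lookup->name))
		lookup->tp = tp;
}

static int __init tp_lookup_init(void)
{
	struct tp_lookup lookup = { .name = "sched_switch" };

	for_each_kernel_tracepoint(tp_lookup_cb, &lookup);
	pr_info("%u kernel tracepoints, %s %sfound\n",
		lookup.count, lookup.name, lookup.tp ? "" : "not ");
	/*
	 * With a matching probe prototype, this would be followed by
	 * tracepoint_probe_register(lookup.tp, probe, data) and a paired
	 * tracepoint_probe_unregister() in the module exit path.
	 */
	return 0;
}

static void __exit tp_lookup_exit(void)
{
}

module_init(tp_lookup_init);
module_exit(tp_lookup_exit);
MODULE_LICENSE("GPL");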
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics RDMA target. * Copyright (c) 2015-2016 HGST, a Western Digital Company.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/atomic.h> #include <linux/blk-integrity.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/nvme.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/wait.h> #include <linux/inet.h> #include <linux/unaligned.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <rdma/rw.h> #include <rdma/ib_cm.h> #include <linux/nvme-rdma.h> #include "nvmet.h" /* * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data */ #define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE #define NVMET_RDMA_MAX_INLINE_SGE 4 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE) /* Assume mpsmin == device_page_size == 4KB */ #define NVMET_RDMA_MAX_MDTS 8 #define NVMET_RDMA_MAX_METADATA_MDTS 5 #define NVMET_RDMA_BACKLOG 128 #define NVMET_RDMA_DISCRETE_RSP_TAG -1 struct nvmet_rdma_srq; struct nvmet_rdma_cmd { struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1]; struct ib_cqe cqe; struct ib_recv_wr wr; struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE]; struct nvme_command *nvme_cmd; struct nvmet_rdma_queue *queue; struct nvmet_rdma_srq *nsrq; }; enum { NVMET_RDMA_REQ_INLINE_DATA = (1 << 0), }; struct nvmet_rdma_rsp { struct ib_sge send_sge; struct ib_cqe send_cqe; struct ib_send_wr send_wr; struct nvmet_rdma_cmd *cmd; struct nvmet_rdma_queue *queue; struct ib_cqe read_cqe; struct ib_cqe write_cqe; struct rdma_rw_ctx rw; struct nvmet_req req; bool allocated; u8 n_rdma; u32 flags; u32 invalidate_rkey; struct list_head wait_list; int tag; }; enum nvmet_rdma_queue_state { NVMET_RDMA_Q_CONNECTING, NVMET_RDMA_Q_LIVE, NVMET_RDMA_Q_DISCONNECTING, }; struct nvmet_rdma_queue { struct rdma_cm_id *cm_id; struct ib_qp *qp; struct nvmet_port *port; struct ib_cq *cq; atomic_t sq_wr_avail; struct nvmet_rdma_device *dev; struct nvmet_rdma_srq *nsrq; spinlock_t state_lock; enum nvmet_rdma_queue_state state; struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvmet_rdma_rsp *rsps; struct sbitmap rsp_tags; struct nvmet_rdma_cmd *cmds; struct work_struct release_work; struct list_head rsp_wait_list; struct list_head rsp_wr_wait_list; spinlock_t rsp_wr_wait_lock; int idx; int host_qid; int comp_vector; int recv_queue_size; int send_queue_size; struct list_head queue_list; }; struct nvmet_rdma_port { struct nvmet_port *nport; struct sockaddr_storage addr; struct rdma_cm_id *cm_id; struct delayed_work repair_work; }; struct nvmet_rdma_srq { struct ib_srq *srq; struct nvmet_rdma_cmd *cmds; struct nvmet_rdma_device *ndev; }; struct nvmet_rdma_device { struct ib_device *device; struct ib_pd *pd; struct nvmet_rdma_srq **srqs; int srq_count; size_t srq_size; struct kref ref; struct list_head entry; int inline_data_size; int inline_page_count; }; static bool nvmet_rdma_use_srq; module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444); MODULE_PARM_DESC(use_srq, "Use shared receive queue."); static int srq_size_set(const char *val, const struct kernel_param *kp); static const struct kernel_param_ops srq_size_ops = { .set = srq_size_set, .get = param_get_int, }; static int nvmet_rdma_srq_size = 1024; module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644); MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)"); static DEFINE_IDA(nvmet_rdma_queue_ida); static LIST_HEAD(nvmet_rdma_queue_list); static DEFINE_MUTEX(nvmet_rdma_queue_mutex); static LIST_HEAD(device_list); static 
DEFINE_MUTEX(device_list_mutex); static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp); static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r); static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r, int tag); static const struct nvmet_fabrics_ops nvmet_rdma_ops; static int srq_size_set(const char *val, const struct kernel_param *kp) { int n = 0, ret; ret = kstrtoint(val, 10, &n); if (ret != 0 || n < 256) return -EINVAL; return param_set_int(val, kp); } static int num_pages(int len) { return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT); } static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) { return nvme_is_write(rsp->req.cmd) && rsp->req.transfer_len && !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); } static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) { return !nvme_is_write(rsp->req.cmd) && rsp->req.transfer_len && !rsp->req.cqe->status && !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); } static inline struct nvmet_rdma_rsp * nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_rsp *rsp = NULL; int tag; tag = sbitmap_get(&queue->rsp_tags); if (tag >= 0) rsp = &queue->rsps[tag]; if (unlikely(!rsp)) { int ret; rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, NVMET_RDMA_DISCRETE_RSP_TAG); if (unlikely(ret)) { kfree(rsp); return NULL; } } return rsp; } static inline void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) { nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); } static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c) { struct scatterlist *sg; struct ib_sge *sge; int i; if (!ndev->inline_data_size) return; sg = c->inline_sg; sge = &c->sge[1]; for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { if (sge->length) ib_dma_unmap_page(ndev->device, sge->addr, sge->length, DMA_FROM_DEVICE); if (sg_page(sg)) __free_page(sg_page(sg)); } } static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c) { struct scatterlist *sg; struct ib_sge *sge; struct page *pg; int len; int i; if (!ndev->inline_data_size) return 0; sg = c->inline_sg; sg_init_table(sg, ndev->inline_page_count); sge = &c->sge[1]; len = ndev->inline_data_size; for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { pg = alloc_page(GFP_KERNEL); if (!pg) goto out_err; sg_assign_page(sg, pg); sge->addr = ib_dma_map_page(ndev->device, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ndev->device, sge->addr)) goto out_err; sge->length = min_t(int, len, PAGE_SIZE); sge->lkey = ndev->pd->local_dma_lkey; len -= sge->length; } return 0; out_err: for (; i >= 0; i--, sg--, sge--) { if (sge->length) ib_dma_unmap_page(ndev->device, sge->addr, sge->length, DMA_FROM_DEVICE); if (sg_page(sg)) __free_page(sg_page(sg)); } return -ENOMEM; } static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, 
struct nvmet_rdma_cmd *c, bool admin) { /* NVMe command / RDMA RECV */ c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL); if (!c->nvme_cmd) goto out; c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); if (ib_dma_mapping_error(ndev->device, c->sge[0].addr)) goto out_free_cmd; c->sge[0].length = sizeof(*c->nvme_cmd); c->sge[0].lkey = ndev->pd->local_dma_lkey; if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c)) goto out_unmap_cmd; c->cqe.done = nvmet_rdma_recv_done; c->wr.wr_cqe = &c->cqe; c->wr.sg_list = c->sge; c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1; return 0; out_unmap_cmd: ib_dma_unmap_single(ndev->device, c->sge[0].addr, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); out_free_cmd: kfree(c->nvme_cmd); out: return -ENOMEM; } static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c, bool admin) { if (!admin) nvmet_rdma_free_inline_pages(ndev, c); ib_dma_unmap_single(ndev->device, c->sge[0].addr, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); kfree(c->nvme_cmd); } static struct nvmet_rdma_cmd * nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev, int nr_cmds, bool admin) { struct nvmet_rdma_cmd *cmds; int ret = -EINVAL, i; cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); if (!cmds) goto out; for (i = 0; i < nr_cmds; i++) { ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin); if (ret) goto out_free; } return cmds; out_free: while (--i >= 0) nvmet_rdma_free_cmd(ndev, cmds + i, admin); kfree(cmds); out: return ERR_PTR(ret); } static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin) { int i; for (i = 0; i < nr_cmds; i++) nvmet_rdma_free_cmd(ndev, cmds + i, admin); kfree(cmds); } static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r, int tag) { /* NVMe CQE / RDMA SEND */ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL); if (!r->req.cqe) goto out; r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe, sizeof(*r->req.cqe), DMA_TO_DEVICE); if (ib_dma_mapping_error(ndev->device, r->send_sge.addr)) goto out_free_rsp; if (ib_dma_pci_p2p_dma_supported(ndev->device)) r->req.p2p_client = &ndev->device->dev; r->send_sge.length = sizeof(*r->req.cqe); r->send_sge.lkey = ndev->pd->local_dma_lkey; r->send_cqe.done = nvmet_rdma_send_done; r->send_wr.wr_cqe = &r->send_cqe; r->send_wr.sg_list = &r->send_sge; r->send_wr.num_sge = 1; r->send_wr.send_flags = IB_SEND_SIGNALED; /* Data In / RDMA READ */ r->read_cqe.done = nvmet_rdma_read_data_done; /* Data Out / RDMA WRITE */ r->write_cqe.done = nvmet_rdma_write_data_done; r->tag = tag; return 0; out_free_rsp: kfree(r->req.cqe); out: return -ENOMEM; } static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r) { ib_dma_unmap_single(ndev->device, r->send_sge.addr, sizeof(*r->req.cqe), DMA_TO_DEVICE); kfree(r->req.cqe); } static int nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int nr_rsps = queue->recv_queue_size * 2; int ret = -ENOMEM, i; if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL, NUMA_NO_NODE, false, true)) goto out; queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), GFP_KERNEL); if (!queue->rsps) goto out_free_sbitmap; for (i = 0; i < nr_rsps; i++) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; ret = nvmet_rdma_alloc_rsp(ndev, rsp, i); if (ret) goto out_free; } return 0; out_free: while (--i >= 0) nvmet_rdma_free_rsp(ndev, 
&queue->rsps[i]); kfree(queue->rsps); out_free_sbitmap: sbitmap_free(&queue->rsp_tags); out: return ret; } static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int i, nr_rsps = queue->recv_queue_size * 2; for (i = 0; i < nr_rsps; i++) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); sbitmap_free(&queue->rsp_tags); } static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *cmd) { int ret; ib_dma_sync_single_for_device(ndev->device, cmd->sge[0].addr, cmd->sge[0].length, DMA_FROM_DEVICE); if (cmd->nsrq) ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL); else ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); if (unlikely(ret)) pr_err("post_recv cmd failed\n"); return ret; } static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) { spin_lock(&queue->rsp_wr_wait_lock); while (!list_empty(&queue->rsp_wr_wait_list)) { struct nvmet_rdma_rsp *rsp; bool ret; rsp = list_entry(queue->rsp_wr_wait_list.next, struct nvmet_rdma_rsp, wait_list); list_del(&rsp->wait_list); spin_unlock(&queue->rsp_wr_wait_lock); ret = nvmet_rdma_execute_command(rsp); spin_lock(&queue->rsp_wr_wait_lock); if (!ret) { list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); break; } } spin_unlock(&queue->rsp_wr_wait_lock); } static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr) { struct ib_mr_status mr_status; int ret; u16 status = 0; ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { pr_err("ib_check_mr_status failed, ret %d\n", ret); return NVME_SC_INVALID_PI; } if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { switch (mr_status.sig_err.err_type) { case IB_SIG_BAD_GUARD: status = NVME_SC_GUARD_CHECK; break; case IB_SIG_BAD_REFTAG: status = NVME_SC_REFTAG_CHECK; break; case IB_SIG_BAD_APPTAG: status = NVME_SC_APPTAG_CHECK; break; } pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n", mr_status.sig_err.err_type, mr_status.sig_err.expected, mr_status.sig_err.actual); } return status; } static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi, struct nvme_command *cmd, struct ib_sig_domain *domain, u16 control, u8 pi_type) { domain->sig_type = IB_SIG_TYPE_T10_DIF; domain->sig.dif.bg_type = IB_T10DIF_CRC; domain->sig.dif.pi_interval = 1 << bi->interval_exp; domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat); domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; } static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req, struct ib_sig_attrs *sig_attrs) { struct nvme_command *cmd = req->cmd; u16 control = le16_to_cpu(cmd->rw.control); u8 pi_type = req->ns->pi_type; struct blk_integrity *bi; bi = bdev_get_integrity(req->ns->bdev); memset(sig_attrs, 0, sizeof(*sig_attrs)); if (control & NVME_RW_PRINFO_PRACT) { /* for WRITE_INSERT/READ_STRIP no wire domain */ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, pi_type); /* Clear the PRACT bit since HCA will generate/verify the PI */ control &= ~NVME_RW_PRINFO_PRACT; cmd->rw.control = cpu_to_le16(control); /* PI is added by the HW */ req->transfer_len += req->metadata_len; } else { /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, 
pi_type); nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, pi_type); } if (control & NVME_RW_PRINFO_PRCHK_REF) sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; if (control & NVME_RW_PRINFO_PRCHK_GUARD) sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; if (control & NVME_RW_PRINFO_PRCHK_APP) sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; } static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key, struct ib_sig_attrs *sig_attrs) { struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct nvmet_req *req = &rsp->req; int ret; if (req->metadata_len) ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, req->metadata_sg, req->metadata_sg_cnt, sig_attrs, addr, key, nvmet_data_dir(req)); else ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, 0, addr, key, nvmet_data_dir(req)); return ret; } static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp) { struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct nvmet_req *req = &rsp->req; if (req->metadata_len) rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, req->metadata_sg, req->metadata_sg_cnt, nvmet_data_dir(req)); else rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, nvmet_data_dir(req)); } static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) { struct nvmet_rdma_queue *queue = rsp->queue; atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); if (rsp->n_rdma) nvmet_rdma_rw_ctx_destroy(rsp); if (rsp->req.sg != rsp->cmd->inline_sg) nvmet_req_free_sgls(&rsp->req); if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) nvmet_rdma_process_wr_wait_list(queue); nvmet_rdma_put_rsp(rsp); } static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) { if (queue->nvme_sq.ctrl) { nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); } else { /* * we didn't setup the controller yet in case * of admin connect error, just disconnect and * cleanup the queue */ nvmet_rdma_queue_disconnect(queue); } } static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; nvmet_rdma_release_rsp(rsp); if (unlikely(wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)) { pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } } static void nvmet_rdma_queue_response(struct nvmet_req *req) { struct nvmet_rdma_rsp *rsp = container_of(req, struct nvmet_rdma_rsp, req); struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct ib_send_wr *first_wr; if (rsp->invalidate_rkey) { rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; } else { rsp->send_wr.opcode = IB_WR_SEND; } if (nvmet_rdma_need_data_out(rsp)) { if (rsp->req.metadata_len) first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, cm_id->port_num, &rsp->write_cqe, NULL); else first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, cm_id->port_num, NULL, &rsp->send_wr); } else { first_wr = &rsp->send_wr; } nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); ib_dma_sync_single_for_device(rsp->queue->dev->device, rsp->send_sge.addr, rsp->send_sge.length, DMA_TO_DEVICE); if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); } } static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) { struct 
nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; u16 status = 0; WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); rsp->n_rdma = 0; if (unlikely(wc->status != IB_WC_SUCCESS)) { nvmet_rdma_rw_ctx_destroy(rsp); nvmet_req_uninit(&rsp->req); nvmet_rdma_release_rsp(rsp); if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; } if (rsp->req.metadata_len) status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); nvmet_rdma_rw_ctx_destroy(rsp); if (unlikely(status)) nvmet_req_complete(&rsp->req, status); else rsp->req.execute(&rsp->req); } static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; struct rdma_cm_id *cm_id = rsp->queue->cm_id; u16 status; if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) return; WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); rsp->n_rdma = 0; if (unlikely(wc->status != IB_WC_SUCCESS)) { nvmet_rdma_rw_ctx_destroy(rsp); nvmet_req_uninit(&rsp->req); nvmet_rdma_release_rsp(rsp); if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_info("RDMA WRITE for CQE failed with status %s (%d).\n", ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; } /* * Upon RDMA completion check the signature status * - if succeeded send good NVMe response * - if failed send bad NVMe response with appropriate error */ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); if (unlikely(status)) rsp->req.cqe->status = cpu_to_le16(status << 1); nvmet_rdma_rw_ctx_destroy(rsp); if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); } } static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, u64 off) { int sg_count = num_pages(len); struct scatterlist *sg; int i; sg = rsp->cmd->inline_sg; for (i = 0; i < sg_count; i++, sg++) { if (i < sg_count - 1) sg_unmark_end(sg); else sg_mark_end(sg); sg->offset = off; sg->length = min_t(int, len, PAGE_SIZE - off); len -= sg->length; if (!i) off = 0; } rsp->req.sg = rsp->cmd->inline_sg; rsp->req.sg_cnt = sg_count; } static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) { struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; u64 off = le64_to_cpu(sgl->addr); u32 len = le32_to_cpu(sgl->length); if (!nvme_is_write(rsp->req.cmd)) { rsp->req.error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; } /* no data command? */ if (!len) return 0; nvmet_rdma_use_inline_sg(rsp, len, off); rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; rsp->req.transfer_len += len; return 0; } static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, struct nvme_keyed_sgl_desc *sgl, bool invalidate) { u64 addr = le64_to_cpu(sgl->addr); u32 key = get_unaligned_le32(sgl->key); struct ib_sig_attrs sig_attrs; int ret; rsp->req.transfer_len = get_unaligned_le24(sgl->length); /* no data command? 
*/ if (!rsp->req.transfer_len) return 0; if (rsp->req.metadata_len) nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs); ret = nvmet_req_alloc_sgls(&rsp->req); if (unlikely(ret < 0)) goto error_out; ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs); if (unlikely(ret < 0)) goto error_out; rsp->n_rdma += ret; if (invalidate) rsp->invalidate_rkey = key; return 0; error_out: rsp->req.transfer_len = 0; return NVME_SC_INTERNAL; } static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) { struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; switch (sgl->type >> 4) { case NVME_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { case NVME_SGL_FMT_OFFSET: return nvmet_rdma_map_sgl_inline(rsp); default: pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } case NVME_KEY_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE: return nvmet_rdma_map_sgl_keyed(rsp, sgl, true); case NVME_SGL_FMT_ADDRESS: return nvmet_rdma_map_sgl_keyed(rsp, sgl, false); default: pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } default: pr_err("invalid SGL type: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; } } static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) { struct nvmet_rdma_queue *queue = rsp->queue; if (unlikely(atomic_sub_return(1 + rsp->n_rdma, &queue->sq_wr_avail) < 0)) { pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n", 1 + rsp->n_rdma, queue->idx, queue->nvme_sq.ctrl->cntlid); atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); return false; } if (nvmet_rdma_need_data_in(rsp)) { if (rdma_rw_ctx_post(&rsp->rw, queue->qp, queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { rsp->req.execute(&rsp->req); } return true; } static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, struct nvmet_rdma_rsp *cmd) { u16 status; ib_dma_sync_single_for_cpu(queue->dev->device, cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, DMA_FROM_DEVICE); ib_dma_sync_single_for_cpu(queue->dev->device, cmd->send_sge.addr, cmd->send_sge.length, DMA_TO_DEVICE); if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, &queue->nvme_sq, &nvmet_rdma_ops)) return; status = nvmet_rdma_map_sgl(cmd); if (status) goto out_err; if (unlikely(!nvmet_rdma_execute_command(cmd))) { spin_lock(&queue->rsp_wr_wait_lock); list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); spin_unlock(&queue->rsp_wr_wait_lock); } return; out_err: nvmet_req_complete(&cmd->req, status); } static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_cmd *cmd = container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; struct nvmet_rdma_rsp *rsp; if (unlikely(wc->status != IB_WC_SUCCESS)) { if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_err("RECV for CQE 0x%p failed with status %s (%d)\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; } if (unlikely(wc->byte_len < sizeof(struct nvme_command))) { pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n"); nvmet_rdma_error_comp(queue); return; } cmd->queue = queue; rsp = nvmet_rdma_get_rsp(queue); if (unlikely(!rsp)) { /* * we get here only under memory 
pressure, * silently drop and have the host retry * as we can't even fail it. */ nvmet_rdma_post_recv(queue->dev, cmd); return; } rsp->queue = queue; rsp->cmd = cmd; rsp->flags = 0; rsp->req.cmd = cmd->nvme_cmd; rsp->req.port = queue->port; rsp->n_rdma = 0; rsp->invalidate_rkey = 0; if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { unsigned long flags; spin_lock_irqsave(&queue->state_lock, flags); if (queue->state == NVMET_RDMA_Q_CONNECTING) list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); else nvmet_rdma_put_rsp(rsp); spin_unlock_irqrestore(&queue->state_lock, flags); return; } nvmet_rdma_handle_command(queue, rsp); } static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq) { nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size, false); ib_destroy_srq(nsrq->srq); kfree(nsrq); } static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev) { int i; if (!ndev->srqs) return; for (i = 0; i < ndev->srq_count; i++) nvmet_rdma_destroy_srq(ndev->srqs[i]); kfree(ndev->srqs); } static struct nvmet_rdma_srq * nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev) { struct ib_srq_init_attr srq_attr = { NULL, }; size_t srq_size = ndev->srq_size; struct nvmet_rdma_srq *nsrq; struct ib_srq *srq; int ret, i; nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL); if (!nsrq) return ERR_PTR(-ENOMEM); srq_attr.attr.max_wr = srq_size; srq_attr.attr.max_sge = 1 + ndev->inline_page_count; srq_attr.attr.srq_limit = 0; srq_attr.srq_type = IB_SRQT_BASIC; srq = ib_create_srq(ndev->pd, &srq_attr); if (IS_ERR(srq)) { ret = PTR_ERR(srq); goto out_free; } nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false); if (IS_ERR(nsrq->cmds)) { ret = PTR_ERR(nsrq->cmds); goto out_destroy_srq; } nsrq->srq = srq; nsrq->ndev = ndev; for (i = 0; i < srq_size; i++) { nsrq->cmds[i].nsrq = nsrq; ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]); if (ret) goto out_free_cmds; } return nsrq; out_free_cmds: nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false); out_destroy_srq: ib_destroy_srq(srq); out_free: kfree(nsrq); return ERR_PTR(ret); } static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev) { int i, ret; if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) { /* * If SRQs aren't supported we just go ahead and use normal * non-shared receive queues. 
*/ pr_info("SRQ requested but not supported.\n"); return 0; } ndev->srq_size = min(ndev->device->attrs.max_srq_wr, nvmet_rdma_srq_size); ndev->srq_count = min(ndev->device->num_comp_vectors, ndev->device->attrs.max_srq); ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL); if (!ndev->srqs) return -ENOMEM; for (i = 0; i < ndev->srq_count; i++) { ndev->srqs[i] = nvmet_rdma_init_srq(ndev); if (IS_ERR(ndev->srqs[i])) { ret = PTR_ERR(ndev->srqs[i]); goto err_srq; } } return 0; err_srq: while (--i >= 0) nvmet_rdma_destroy_srq(ndev->srqs[i]); kfree(ndev->srqs); return ret; } static void nvmet_rdma_free_dev(struct kref *ref) { struct nvmet_rdma_device *ndev = container_of(ref, struct nvmet_rdma_device, ref); mutex_lock(&device_list_mutex); list_del(&ndev->entry); mutex_unlock(&device_list_mutex); nvmet_rdma_destroy_srqs(ndev); ib_dealloc_pd(ndev->pd); kfree(ndev); } static struct nvmet_rdma_device * nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id) { struct nvmet_rdma_port *port = cm_id->context; struct nvmet_port *nport = port->nport; struct nvmet_rdma_device *ndev; int inline_page_count; int inline_sge_count; int ret; mutex_lock(&device_list_mutex); list_for_each_entry(ndev, &device_list, entry) { if (ndev->device->node_guid == cm_id->device->node_guid && kref_get_unless_zero(&ndev->ref)) goto out_unlock; } ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); if (!ndev) goto out_err; inline_page_count = num_pages(nport->inline_data_size); inline_sge_count = max(cm_id->device->attrs.max_sge_rd, cm_id->device->attrs.max_recv_sge) - 1; if (inline_page_count > inline_sge_count) { pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n", nport->inline_data_size, cm_id->device->name, inline_sge_count * PAGE_SIZE); nport->inline_data_size = inline_sge_count * PAGE_SIZE; inline_page_count = inline_sge_count; } ndev->inline_data_size = nport->inline_data_size; ndev->inline_page_count = inline_page_count; if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)) { pr_warn("T10-PI is not supported by device %s. Disabling it\n", cm_id->device->name); nport->pi_enable = false; } ndev->device = cm_id->device; kref_init(&ndev->ref); ndev->pd = ib_alloc_pd(ndev->device, 0); if (IS_ERR(ndev->pd)) goto out_free_dev; if (nvmet_rdma_use_srq) { ret = nvmet_rdma_init_srqs(ndev); if (ret) goto out_free_pd; } list_add(&ndev->entry, &device_list); out_unlock: mutex_unlock(&device_list_mutex); pr_debug("added %s.\n", ndev->device->name); return ndev; out_free_pd: ib_dealloc_pd(ndev->pd); out_free_dev: kfree(ndev); out_err: mutex_unlock(&device_list_mutex); return NULL; } static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) { struct ib_qp_init_attr qp_attr = { }; struct nvmet_rdma_device *ndev = queue->dev; int nr_cqe, ret, i, factor; /* * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND. 
*/ nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, queue->comp_vector, IB_POLL_WORKQUEUE); if (IS_ERR(queue->cq)) { ret = PTR_ERR(queue->cq); pr_err("failed to create CQ cqe= %d ret= %d\n", nr_cqe + 1, ret); goto out; } qp_attr.qp_context = queue; qp_attr.event_handler = nvmet_rdma_qp_event; qp_attr.send_cq = queue->cq; qp_attr.recv_cq = queue->cq; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; /* +1 for drain */ qp_attr.cap.max_send_wr = queue->send_queue_size + 1; factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, 1 << NVMET_RDMA_MAX_MDTS); qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd, ndev->device->attrs.max_send_sge); if (queue->nsrq) { qp_attr.srq = queue->nsrq->srq; } else { /* +1 for drain */ qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count; } if (queue->port->pi_enable && queue->host_qid) qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); if (ret) { pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } queue->qp = queue->cm_id->qp; atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n", __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, qp_attr.cap.max_send_wr, queue->cm_id); if (!queue->nsrq) { for (i = 0; i < queue->recv_queue_size; i++) { queue->cmds[i].queue = queue; ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); if (ret) goto err_destroy_qp; } } out: return ret; err_destroy_qp: rdma_destroy_qp(queue->cm_id); err_destroy_cq: ib_cq_pool_put(queue->cq, nr_cqe + 1); goto out; } static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { ib_drain_qp(queue->qp); if (queue->cm_id) rdma_destroy_id(queue->cm_id); ib_destroy_qp(queue->qp); ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * queue->send_queue_size + 1); } static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) { pr_debug("freeing queue %d\n", queue->idx); nvmet_sq_destroy(&queue->nvme_sq); nvmet_rdma_destroy_queue_ib(queue); if (!queue->nsrq) { nvmet_rdma_free_cmds(queue->dev, queue->cmds, queue->recv_queue_size, !queue->host_qid); } nvmet_rdma_free_rsps(queue); ida_free(&nvmet_rdma_queue_ida, queue->idx); kfree(queue); } static void nvmet_rdma_release_queue_work(struct work_struct *w) { struct nvmet_rdma_queue *queue = container_of(w, struct nvmet_rdma_queue, release_work); struct nvmet_rdma_device *dev = queue->dev; nvmet_rdma_free_queue(queue); kref_put(&dev->ref, nvmet_rdma_free_dev); } static int nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn, struct nvmet_rdma_queue *queue) { struct nvme_rdma_cm_req *req; req = (struct nvme_rdma_cm_req *)conn->private_data; if (!req || conn->private_data_len == 0) return NVME_RDMA_CM_INVALID_LEN; if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0) return NVME_RDMA_CM_INVALID_RECFMT; queue->host_qid = le16_to_cpu(req->qid); /* * req->hsqsize corresponds to our recv queue size plus 1 * req->hrqsize corresponds to our send queue size */ queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; queue->send_queue_size = le16_to_cpu(req->hrqsize); if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) return NVME_RDMA_CM_INVALID_HSQSIZE; /* XXX: Should we enforce some kind of max for IO queues? 
*/ return 0; } static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, enum nvme_rdma_cm_status status) { struct nvme_rdma_cm_rej rej; pr_debug("rejecting connect request: status %d (%s)\n", status, nvme_rdma_cm_msg(status)); rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); rej.sts = cpu_to_le16(status); return rdma_reject(cm_id, (void *)&rej, sizeof(rej), IB_CM_REJ_CONSUMER_DEFINED); } static struct nvmet_rdma_queue * nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct nvmet_rdma_port *port = cm_id->context; struct nvmet_rdma_queue *queue; int ret; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) { ret = NVME_RDMA_CM_NO_RSC; goto out_reject; } ret = nvmet_sq_init(&queue->nvme_sq); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_queue; } ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); if (ret) goto out_destroy_sq; /* * Schedules the actual release because calling rdma_destroy_id from * inside a CM callback would trigger a deadlock. (great API design..) */ INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); queue->dev = ndev; queue->cm_id = cm_id; queue->port = port->nport; spin_lock_init(&queue->state_lock); queue->state = NVMET_RDMA_Q_CONNECTING; INIT_LIST_HEAD(&queue->rsp_wait_list); INIT_LIST_HEAD(&queue->rsp_wr_wait_list); spin_lock_init(&queue->rsp_wr_wait_lock); INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); if (queue->idx < 0) { ret = NVME_RDMA_CM_NO_RSC; goto out_destroy_sq; } /* * Spread the io queues across completion vectors, * but still keep all admin queues on vector 0. */ queue->comp_vector = !queue->host_qid ? 0 : queue->idx % ndev->device->num_comp_vectors; ret = nvmet_rdma_alloc_rsps(queue); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_ida_remove; } if (ndev->srqs) { queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; } else { queue->cmds = nvmet_rdma_alloc_cmds(ndev, queue->recv_queue_size, !queue->host_qid); if (IS_ERR(queue->cmds)) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_responses; } } ret = nvmet_rdma_create_queue_ib(queue); if (ret) { pr_err("%s: creating RDMA queue failed (%d).\n", __func__, ret); ret = NVME_RDMA_CM_NO_RSC; goto out_free_cmds; } return queue; out_free_cmds: if (!queue->nsrq) { nvmet_rdma_free_cmds(queue->dev, queue->cmds, queue->recv_queue_size, !queue->host_qid); } out_free_responses: nvmet_rdma_free_rsps(queue); out_ida_remove: ida_free(&nvmet_rdma_queue_ida, queue->idx); out_destroy_sq: nvmet_sq_destroy(&queue->nvme_sq); out_free_queue: kfree(queue); out_reject: nvmet_rdma_cm_reject(cm_id, ret); return NULL; } static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) { struct nvmet_rdma_queue *queue = priv; switch (event->event) { case IB_EVENT_COMM_EST: rdma_notify(queue->cm_id, event->event); break; case IB_EVENT_QP_LAST_WQE_REACHED: pr_debug("received last WQE reached event for queue=0x%p\n", queue); break; default: pr_err("received IB QP event: %s (%d)\n", ib_event_msg(event->event), event->event); break; } } static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue, struct rdma_conn_param *p) { struct rdma_conn_param param = { }; struct nvme_rdma_cm_rep priv = { }; int ret = -ENOMEM; param.rnr_retry_count = 7; param.flow_control = 1; param.initiator_depth = min_t(u8, p->initiator_depth, queue->dev->device->attrs.max_qp_init_rd_atom); param.private_data = &priv; param.private_data_len = sizeof(priv); priv.recfmt = 
cpu_to_le16(NVME_RDMA_CM_FMT_1_0); priv.crqsize = cpu_to_le16(queue->recv_queue_size); ret = rdma_accept(cm_id, &param); if (ret) pr_err("rdma_accept failed (error code = %d)\n", ret); return ret; } static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct nvmet_rdma_device *ndev; struct nvmet_rdma_queue *queue; int ret = -EINVAL; ndev = nvmet_rdma_find_get_device(cm_id); if (!ndev) { nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); return -ECONNREFUSED; } queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); if (!queue) { ret = -ENOMEM; goto put_device; } if (queue->host_qid == 0) { struct nvmet_rdma_queue *q; int pending = 0; /* Check for pending controller teardown */ mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) { if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl && q->state == NVMET_RDMA_Q_DISCONNECTING) pending++; } mutex_unlock(&nvmet_rdma_queue_mutex); if (pending > NVMET_RDMA_BACKLOG) return NVME_SC_CONNECT_CTRL_BUSY; } ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { /* * Don't destroy the cm_id in free path, as we implicitly * destroy the cm_id here with non-zero ret code. */ queue->cm_id = NULL; goto free_queue; } mutex_lock(&nvmet_rdma_queue_mutex); list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); mutex_unlock(&nvmet_rdma_queue_mutex); return 0; free_queue: nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev); return ret; } static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) { unsigned long flags; spin_lock_irqsave(&queue->state_lock, flags); if (queue->state != NVMET_RDMA_Q_CONNECTING) { pr_warn("trying to establish a connected queue\n"); goto out_unlock; } queue->state = NVMET_RDMA_Q_LIVE; while (!list_empty(&queue->rsp_wait_list)) { struct nvmet_rdma_rsp *cmd; cmd = list_first_entry(&queue->rsp_wait_list, struct nvmet_rdma_rsp, wait_list); list_del(&cmd->wait_list); spin_unlock_irqrestore(&queue->state_lock, flags); nvmet_rdma_handle_command(queue, cmd); spin_lock_irqsave(&queue->state_lock, flags); } out_unlock: spin_unlock_irqrestore(&queue->state_lock, flags); } static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) { bool disconnect = false; unsigned long flags; pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); spin_lock_irqsave(&queue->state_lock, flags); switch (queue->state) { case NVMET_RDMA_Q_CONNECTING: while (!list_empty(&queue->rsp_wait_list)) { struct nvmet_rdma_rsp *rsp; rsp = list_first_entry(&queue->rsp_wait_list, struct nvmet_rdma_rsp, wait_list); list_del(&rsp->wait_list); nvmet_rdma_put_rsp(rsp); } fallthrough; case NVMET_RDMA_Q_LIVE: queue->state = NVMET_RDMA_Q_DISCONNECTING; disconnect = true; break; case NVMET_RDMA_Q_DISCONNECTING: break; } spin_unlock_irqrestore(&queue->state_lock, flags); if (disconnect) { rdma_disconnect(queue->cm_id); queue_work(nvmet_wq, &queue->release_work); } } static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) { bool disconnect = false; mutex_lock(&nvmet_rdma_queue_mutex); if (!list_empty(&queue->queue_list)) { list_del_init(&queue->queue_list); disconnect = true; } mutex_unlock(&nvmet_rdma_queue_mutex); if (disconnect) __nvmet_rdma_queue_disconnect(queue); } static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue) { WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); mutex_lock(&nvmet_rdma_queue_mutex); if (!list_empty(&queue->queue_list)) 
list_del_init(&queue->queue_list); mutex_unlock(&nvmet_rdma_queue_mutex); pr_err("failed to connect queue %d\n", queue->idx); queue_work(nvmet_wq, &queue->release_work); } /** * nvmet_rdma_device_removal() - Handle RDMA device removal * @cm_id: rdma_cm id, used for nvmet port * @queue: nvmet rdma queue (cm id qp_context) * * DEVICE_REMOVAL event notifies us that the RDMA device is about * to unplug. Note that this event can be generated on a normal * queue cm_id and/or a device bound listener cm_id (where in this * case queue will be null). * * We registered an ib_client to handle device removal for queues, * so we only need to handle the listening port cm_ids. In this case * we nullify the priv to prevent double cm_id destruction and destroying * the cm_id implicitely by returning a non-zero rc to the callout. */ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue) { struct nvmet_rdma_port *port; if (queue) { /* * This is a queue cm_id. we have registered * an ib_client to handle queues removal * so don't interfear and just return. */ return 0; } port = cm_id->context; /* * This is a listener cm_id. Make sure that * future remove_port won't invoke a double * cm_id destroy. use atomic xchg to make sure * we don't compete with remove_port. */ if (xchg(&port->cm_id, NULL) != cm_id) return 0; /* * We need to return 1 so that the core will destroy * it's own ID. What a great API design.. */ return 1; } static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct nvmet_rdma_queue *queue = NULL; int ret = 0; if (cm_id->qp) queue = cm_id->qp->qp_context; pr_debug("%s (%d): status %d id %p\n", rdma_event_msg(event->event), event->event, event->status, cm_id); switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = nvmet_rdma_queue_connect(cm_id, event); break; case RDMA_CM_EVENT_ESTABLISHED: nvmet_rdma_queue_established(queue); break; case RDMA_CM_EVENT_ADDR_CHANGE: if (!queue) { struct nvmet_rdma_port *port = cm_id->context; queue_delayed_work(nvmet_wq, &port->repair_work, 0); break; } fallthrough; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: nvmet_rdma_queue_disconnect(queue); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: ret = nvmet_rdma_device_removal(cm_id, queue); break; case RDMA_CM_EVENT_REJECTED: pr_debug("Connection rejected: %s\n", rdma_reject_msg(cm_id, event->status)); fallthrough; case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_CONNECT_ERROR: nvmet_rdma_queue_connect_fail(cm_id, queue); break; default: pr_err("received unrecognized RDMA CM event %d\n", event->event); break; } return ret; } static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) { struct nvmet_rdma_queue *queue, *n; mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) { if (queue->nvme_sq.ctrl != ctrl) continue; list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); } static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port) { struct nvmet_rdma_queue *queue, *tmp; struct nvmet_port *nport = port->nport; mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, queue_list) { if (queue->port != nport) continue; list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); } static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port) { struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL); 
if (cm_id) rdma_destroy_id(cm_id); /* * Destroy the remaining queues, which are not belong to any * controller yet. Do it here after the RDMA-CM was destroyed * guarantees that no new queue will be created. */ nvmet_rdma_destroy_port_queues(port); } static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port) { struct sockaddr *addr = (struct sockaddr *)&port->addr; struct rdma_cm_id *cm_id; int ret; cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) { pr_err("CM ID creation failed\n"); return PTR_ERR(cm_id); } /* * Allow both IPv4 and IPv6 sockets to bind a single port * at the same time. */ ret = rdma_set_afonly(cm_id, 1); if (ret) { pr_err("rdma_set_afonly failed (%d)\n", ret); goto out_destroy_id; } ret = rdma_bind_addr(cm_id, addr); if (ret) { pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret); goto out_destroy_id; } ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG); if (ret) { pr_err("listening to %pISpcs failed (%d)\n", addr, ret); goto out_destroy_id; } port->cm_id = cm_id; return 0; out_destroy_id: rdma_destroy_id(cm_id); return ret; } static void nvmet_rdma_repair_port_work(struct work_struct *w) { struct nvmet_rdma_port *port = container_of(to_delayed_work(w), struct nvmet_rdma_port, repair_work); int ret; nvmet_rdma_disable_port(port); ret = nvmet_rdma_enable_port(port); if (ret) queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ); } static int nvmet_rdma_add_port(struct nvmet_port *nport) { struct nvmet_rdma_port *port; __kernel_sa_family_t af; int ret; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; nport->priv = port; port->nport = nport; INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work); switch (nport->disc_addr.adrfam) { case NVMF_ADDR_FAMILY_IP4: af = AF_INET; break; case NVMF_ADDR_FAMILY_IP6: af = AF_INET6; break; default: pr_err("address family %d not supported\n", nport->disc_addr.adrfam); ret = -EINVAL; goto out_free_port; } if (nport->inline_data_size < 0) { nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { pr_warn("inline_data_size %u is too large, reducing to %u\n", nport->inline_data_size, NVMET_RDMA_MAX_INLINE_DATA_SIZE); nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; } if (nport->max_queue_size < 0) { nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE; } else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) { pr_warn("max_queue_size %u is too large, reducing to %u\n", nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE); nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE; } ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, nport->disc_addr.trsvcid, &port->addr); if (ret) { pr_err("malformed ip/port passed: %s:%s\n", nport->disc_addr.traddr, nport->disc_addr.trsvcid); goto out_free_port; } ret = nvmet_rdma_enable_port(port); if (ret) goto out_free_port; pr_info("enabling port %d (%pISpcs)\n", le16_to_cpu(nport->disc_addr.portid), (struct sockaddr *)&port->addr); return 0; out_free_port: kfree(port); return ret; } static void nvmet_rdma_remove_port(struct nvmet_port *nport) { struct nvmet_rdma_port *port = nport->priv; cancel_delayed_work_sync(&port->repair_work); nvmet_rdma_disable_port(port); kfree(port); } static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, struct nvmet_port *nport, char *traddr) { struct nvmet_rdma_port *port = nport->priv; struct rdma_cm_id *cm_id = port->cm_id; if (inet_addr_is_any((struct sockaddr 
*)&cm_id->route.addr.src_addr)) { struct nvmet_rdma_rsp *rsp = container_of(req, struct nvmet_rdma_rsp, req); struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; sprintf(traddr, "%pISc", addr); } else { memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); } } static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl, char *traddr, size_t traddr_len) { struct nvmet_sq *nvme_sq = ctrl->sqs[0]; struct nvmet_rdma_queue *queue = container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq); return snprintf(traddr, traddr_len, "%pISc", (struct sockaddr *)&queue->cm_id->route.addr.dst_addr); } static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) { if (ctrl->pi_support) return NVMET_RDMA_MAX_METADATA_MDTS; return NVMET_RDMA_MAX_MDTS; } static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl) { if (ctrl->pi_support) return NVME_RDMA_MAX_METADATA_QUEUE_SIZE; return NVME_RDMA_MAX_QUEUE_SIZE; } static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_RDMA, .msdbd = 1, .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED, .add_port = nvmet_rdma_add_port, .remove_port = nvmet_rdma_remove_port, .queue_response = nvmet_rdma_queue_response, .delete_ctrl = nvmet_rdma_delete_ctrl, .disc_traddr = nvmet_rdma_disc_port_addr, .host_traddr = nvmet_rdma_host_port_addr, .get_mdts = nvmet_rdma_get_mdts, .get_max_queue_size = nvmet_rdma_get_max_queue_size, }; static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) { struct nvmet_rdma_queue *queue, *tmp; struct nvmet_rdma_device *ndev; bool found = false; mutex_lock(&device_list_mutex); list_for_each_entry(ndev, &device_list, entry) { if (ndev->device == ib_device) { found = true; break; } } mutex_unlock(&device_list_mutex); if (!found) return; /* * IB Device that is used by nvmet controllers is being removed, * delete all queues using this device. */ mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, queue_list) { if (queue->dev->device != ib_device) continue; pr_info("Removing queue %d\n", queue->idx); list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); flush_workqueue(nvmet_wq); } static struct ib_client nvmet_rdma_ib_client = { .name = "nvmet_rdma", .remove = nvmet_rdma_remove_one }; static int __init nvmet_rdma_init(void) { int ret; ret = ib_register_client(&nvmet_rdma_ib_client); if (ret) return ret; ret = nvmet_register_transport(&nvmet_rdma_ops); if (ret) goto err_ib_client; return 0; err_ib_client: ib_unregister_client(&nvmet_rdma_ib_client); return ret; } static void __exit nvmet_rdma_exit(void) { nvmet_unregister_transport(&nvmet_rdma_ops); ib_unregister_client(&nvmet_rdma_ib_client); WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); ida_destroy(&nvmet_rdma_queue_ida); } module_init(nvmet_rdma_init); module_exit(nvmet_rdma_exit); MODULE_DESCRIPTION("NVMe target RDMA transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
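/*
 * Illustrative sketch only (not part of the driver above): the on-wire layout
 * of the 16-byte NVMe keyed SGL data block descriptor that nvmet_rdma_map_sgl()
 * and nvmet_rdma_map_sgl_keyed() parse: a 64-bit little-endian address, a
 * 24-bit little-endian length, a 32-bit key and a type byte. Standalone
 * userspace C; decode_keyed_sgl(), struct keyed_sgl_fields and the sample
 * descriptor bytes are made up for illustration, with open-coded byte
 * extraction standing in for le64_to_cpu(), get_unaligned_le24() and
 * get_unaligned_le32().
 */
#include <stdint.h>
#include <stdio.h>

struct keyed_sgl_fields {
        uint64_t addr;   /* remote buffer address */
        uint32_t length; /* transfer length in bytes (24-bit field) */
        uint32_t key;    /* RDMA rkey to use (and possibly invalidate) */
        uint8_t  type;   /* descriptor type (high nibble) and subtype */
};

static struct keyed_sgl_fields decode_keyed_sgl(const uint8_t d[16])
{
        struct keyed_sgl_fields f;
        int i;

        f.addr = 0;
        for (i = 7; i >= 0; i--)                        /* bytes 0..7, LE */
                f.addr = (f.addr << 8) | d[i];
        f.length = d[8] | (d[9] << 8) | (d[10] << 16);  /* bytes 8..10, LE */
        f.key = d[11] | (d[12] << 8) | (d[13] << 16) |
                ((uint32_t)d[14] << 24);                /* bytes 11..14, LE */
        f.type = d[15];                                 /* byte 15 */
        return f;
}

int main(void)
{
        /* Example: 4 KiB transfer at address 0x1000 with rkey 0x11223344. */
        const uint8_t desc[16] = {
                0x00, 0x10, 0, 0, 0, 0, 0, 0,   /* addr   */
                0x00, 0x10, 0x00,               /* length */
                0x44, 0x33, 0x22, 0x11,         /* key    */
                0x40,                           /* keyed data block type */
        };
        struct keyed_sgl_fields f = decode_keyed_sgl(desc);

        printf("addr=0x%llx len=%u key=0x%x type=0x%x\n",
               (unsigned long long)f.addr, (unsigned)f.length,
               (unsigned)f.key, (unsigned)f.type);
        return 0;
}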
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_usb.c
 * Comedi USB driver specific functions.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 */

#include <linux/module.h>
#include <linux/comedi/comedi_usb.h>

/**
 * comedi_to_usb_interface() - Return USB interface attached to COMEDI device
 * @dev: COMEDI device.
 *
 * Assuming @dev->hw_dev is non-%NULL, it is assumed to be pointing to a
 * &struct device embedded in a &struct usb_interface.
 *
 * Return: Attached USB interface if @dev->hw_dev is non-%NULL.
 * Return %NULL if @dev->hw_dev is %NULL.
 */
struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev)
{
        return dev->hw_dev ? to_usb_interface(dev->hw_dev) : NULL;
}
EXPORT_SYMBOL_GPL(comedi_to_usb_interface);

/**
 * comedi_to_usb_dev() - Return USB device attached to COMEDI device
 * @dev: COMEDI device.
 *
 * Assuming @dev->hw_dev is non-%NULL, it is assumed to be pointing to a
 * &struct device embedded in a &struct usb_interface.
 *
 * Return: USB device to which the USB interface belongs if @dev->hw_dev is
 * non-%NULL. Return %NULL if @dev->hw_dev is %NULL.
 */
struct usb_device *comedi_to_usb_dev(struct comedi_device *dev)
{
        struct usb_interface *intf = comedi_to_usb_interface(dev);

        return intf ? interface_to_usbdev(intf) : NULL;
}
EXPORT_SYMBOL_GPL(comedi_to_usb_dev);

/**
 * comedi_usb_auto_config() - Configure/probe a USB COMEDI driver
 * @intf: USB interface.
 * @driver: Registered COMEDI driver.
 * @context: Driver specific data, passed to comedi_auto_config().
 *
 * Typically called from the usb_driver (*probe) function. Auto-configure a
 * COMEDI device, using a pointer to the &struct device embedded in *@intf as
 * the hardware device. The @context value gets passed through to @driver's
 * "auto_attach" handler. The "auto_attach" handler may call
 * comedi_to_usb_interface() on the passed in COMEDI device to recover @intf.
 *
 * Return: The result of calling comedi_auto_config() (%0 on success, or
 * a negative error number on failure).
 */
int comedi_usb_auto_config(struct usb_interface *intf,
                           struct comedi_driver *driver,
                           unsigned long context)
{
        return comedi_auto_config(&intf->dev, driver, context);
}
EXPORT_SYMBOL_GPL(comedi_usb_auto_config);

/**
 * comedi_usb_auto_unconfig() - Unconfigure/disconnect a USB COMEDI device
 * @intf: USB interface.
 *
 * Typically called from the usb_driver (*disconnect) function.
 * Auto-unconfigure a COMEDI device attached to this USB interface, using a
 * pointer to the &struct device embedded in *@intf as the hardware device.
 * The COMEDI driver's "detach" handler will be called during unconfiguration
 * of the COMEDI device.
 *
 * Note that the COMEDI device may have already been unconfigured using the
 * %COMEDI_DEVCONFIG ioctl, in which case this attempt to unconfigure it
 * again should be ignored.
 */
void comedi_usb_auto_unconfig(struct usb_interface *intf)
{
        comedi_auto_unconfig(&intf->dev);
}
EXPORT_SYMBOL_GPL(comedi_usb_auto_unconfig);

/**
 * comedi_usb_driver_register() - Register a USB COMEDI driver
 * @comedi_driver: COMEDI driver to be registered.
 * @usb_driver: USB driver to be registered.
 *
 * This function is called from the module_init() of USB COMEDI driver modules
 * to register the COMEDI driver and the USB driver. Do not call it directly,
 * use the module_comedi_usb_driver() helper macro instead.
 *
 * Return: %0 on success, or a negative error number on failure.
 */
int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
                               struct usb_driver *usb_driver)
{
        int ret;

        ret = comedi_driver_register(comedi_driver);
        if (ret < 0)
                return ret;

        ret = usb_register(usb_driver);
        if (ret < 0) {
                comedi_driver_unregister(comedi_driver);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_register);

/**
 * comedi_usb_driver_unregister() - Unregister a USB COMEDI driver
 * @comedi_driver: COMEDI driver to be unregistered.
 * @usb_driver: USB driver to be unregistered.
 *
 * This function is called from the module_exit() of USB COMEDI driver modules
 * to unregister the USB driver and the COMEDI driver. Do not call it
 * directly, use the module_comedi_usb_driver() helper macro instead.
 */
void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
                                  struct usb_driver *usb_driver)
{
        usb_deregister(usb_driver);
        comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);

static int __init comedi_usb_init(void)
{
        return 0;
}
module_init(comedi_usb_init);

static void __exit comedi_usb_exit(void)
{
}
module_exit(comedi_usb_exit);

MODULE_AUTHOR("https://www.comedi.org");
MODULE_DESCRIPTION("Comedi USB interface module");
MODULE_LICENSE("GPL");
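/*
 * Illustrative sketch only: a minimal, hypothetical USB COMEDI driver showing
 * how the helpers above are intended to be used from a driver module via the
 * module_comedi_usb_driver() macro. The sketch_* names and the 0x1234:0x5678
 * device ID are placeholders, and the empty auto_attach()/detach() bodies
 * stand in for real hardware setup; a real module would also carry its own
 * MODULE_DESCRIPTION()/MODULE_LICENSE().
 */
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/comedi/comedi_usb.h>

static int sketch_auto_attach(struct comedi_device *dev, unsigned long context)
{
        struct usb_interface *intf = comedi_to_usb_interface(dev);

        /* A real driver would allocate private data and subdevices here. */
        dev_info(&intf->dev, "attached (context=%lu)\n", context);
        return 0;
}

static void sketch_detach(struct comedi_device *dev)
{
        /* Undo whatever auto_attach set up. */
}

static struct comedi_driver sketch_comedi_driver = {
        .driver_name    = "comedi_usb_sketch",
        .module         = THIS_MODULE,
        .auto_attach    = sketch_auto_attach,
        .detach         = sketch_detach,
};

static int sketch_usb_probe(struct usb_interface *intf,
                            const struct usb_device_id *id)
{
        /* Hand the interface over to the COMEDI core. */
        return comedi_usb_auto_config(intf, &sketch_comedi_driver,
                                      id->driver_info);
}

/* Placeholder vendor/product IDs. */
static const struct usb_device_id sketch_usb_table[] = {
        { USB_DEVICE(0x1234, 0x5678) },
        { }
};
MODULE_DEVICE_TABLE(usb, sketch_usb_table);

static struct usb_driver sketch_usb_driver = {
        .name           = "comedi_usb_sketch",
        .id_table       = sketch_usb_table,
        .probe          = sketch_usb_probe,
        .disconnect     = comedi_usb_auto_unconfig,
};
module_comedi_usb_driver(sketch_comedi_driver, sketch_usb_driver);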
// SPDX-License-Identifier: GPL-2.0-only /* * Overlayfs NFS export support.
* * Amir Goldstein <amir73il@gmail.com> * * Copyright (C) 2017-2018 CTERA Networks. All Rights Reserved. */ #include <linux/fs.h> #include <linux/cred.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/exportfs.h> #include <linux/ratelimit.h> #include "overlayfs.h" static int ovl_encode_maybe_copy_up(struct dentry *dentry) { int err; if (ovl_dentry_upper(dentry)) return 0; err = ovl_copy_up(dentry); if (err) { pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n", dentry, err); } return err; } /* * Before encoding a non-upper directory file handle from real layer N, we need * to check if it will be possible to reconnect an overlay dentry from the real * lower decoded dentry. This is done by following the overlay ancestry up to a * "layer N connected" ancestor and verifying that all parents along the way are * "layer N connectable". If an ancestor that is NOT "layer N connectable" is * found, we need to copy up an ancestor, which is "layer N connectable", thus * making that ancestor "layer N connected". For example: * * layer 1: /a * layer 2: /a/b/c * * The overlay dentry /a is NOT "layer 2 connectable", because if dir /a is * copied up and renamed, upper dir /a will be indexed by lower dir /a from * layer 1. The dir /a from layer 2 will never be indexed, so the algorithm (*) * in ovl_lookup_real_ancestor() will not be able to lookup a connected overlay * dentry from the connected lower dentry /a/b/c. * * To avoid this problem on decode time, we need to copy up an ancestor of * /a/b/c, which is "layer 2 connectable", on encode time. That ancestor is * /a/b. After copy up (and index) of /a/b, it will become "layer 2 connected" * and when the time comes to decode the file handle from lower dentry /a/b/c, * ovl_lookup_real_ancestor() will find the indexed ancestor /a/b and decoding * a connected overlay dentry will be accomplished. * * (*) the algorithm in ovl_lookup_real_ancestor() can be improved to lookup an * entry /a in the lower layers above layer N and find the indexed dir /a from * layer 1. If that improvement is made, then the check for "layer N connected" * will need to verify there are no redirects in lower layers above N. In the * example above, /a will be "layer 2 connectable". However, if layer 2 dir /a * is a target of a layer 1 redirect, then /a will NOT be "layer 2 connectable": * * layer 1: /A (redirect = /a) * layer 2: /a/b/c */ /* Return the lowest layer for encoding a connectable file handle */ static int ovl_connectable_layer(struct dentry *dentry) { struct ovl_entry *oe = OVL_E(dentry); /* We can get overlay root from root of any layer */ if (dentry == dentry->d_sb->s_root) return ovl_numlower(oe); /* * If it's an unindexed merge dir, then it's not connectable with any * lower layer */ if (ovl_dentry_upper(dentry) && !ovl_test_flag(OVL_INDEX, d_inode(dentry))) return 0; /* We can get upper/overlay path from indexed/lower dentry */ return ovl_lowerstack(oe)->layer->idx; } /* * @dentry is "connected" if all ancestors up to root or a "connected" ancestor * have the same uppermost lower layer as the origin's layer. We may need to * copy up a "connectable" ancestor to make it "connected". A "connected" dentry * cannot become non "connected", so cache positive result in dentry flags. * * Return the connected origin layer or < 0 on error. 
*/ static int ovl_connect_layer(struct dentry *dentry) { struct dentry *next, *parent = NULL; struct ovl_entry *oe = OVL_E(dentry); int origin_layer; int err = 0; if (WARN_ON(dentry == dentry->d_sb->s_root) || WARN_ON(!ovl_dentry_lower(dentry))) return -EIO; origin_layer = ovl_lowerstack(oe)->layer->idx; if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry)) return origin_layer; /* Find the topmost origin layer connectable ancestor of @dentry */ next = dget(dentry); for (;;) { parent = dget_parent(next); if (WARN_ON(parent == next)) { err = -EIO; break; } /* * If @parent is not origin layer connectable, then copy up * @next which is origin layer connectable and we are done. */ if (ovl_connectable_layer(parent) < origin_layer) { err = ovl_encode_maybe_copy_up(next); break; } /* If @parent is connected or indexed we are done */ if (ovl_dentry_test_flag(OVL_E_CONNECTED, parent) || ovl_test_flag(OVL_INDEX, d_inode(parent))) break; dput(next); next = parent; } dput(parent); dput(next); if (!err) ovl_dentry_set_flag(OVL_E_CONNECTED, dentry); return err ?: origin_layer; } /* * We only need to encode origin if there is a chance that the same object was * encoded pre copy up and then we need to stay consistent with the same * encoding also after copy up. If non-pure upper is not indexed, then it was * copied up before NFS export was enabled. In that case we don't need to worry * about staying consistent with pre copy up encoding and we encode an upper * file handle. Overlay root dentry is a private case of non-indexed upper. * * The following table summarizes the different file handle encodings used for * different overlay object types: * * Object type | Encoding * -------------------------------- * Pure upper | U * Non-indexed upper | U * Indexed upper | L (*) * Non-upper | L (*) * * U = upper file handle * L = lower file handle * * (*) Decoding a connected overlay dir from real lower dentry is not always * possible when there are redirects in lower layers and non-indexed merge dirs. * To mitigate those case, we may copy up the lower dir ancestor before encode * of a decodable file handle for non-upper dir. * * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error. */ static int ovl_check_encode_origin(struct inode *inode) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); bool decodable = ofs->config.nfs_export; struct dentry *dentry; int err; /* No upper layer? */ if (!ovl_upper_mnt(ofs)) return 1; /* Lower file handle for non-upper non-decodable */ if (!ovl_inode_upper(inode) && !decodable) return 1; /* Upper file handle for pure upper */ if (!ovl_inode_lower(inode)) return 0; /* * Root is never indexed, so if there's an upper layer, encode upper for * root. */ if (inode == d_inode(inode->i_sb->s_root)) return 0; /* * Upper decodable file handle for non-indexed upper. */ if (ovl_inode_upper(inode) && decodable && !ovl_test_flag(OVL_INDEX, inode)) return 0; /* * Decoding a merge dir, whose origin's ancestor is under a redirected * lower dir or under a non-indexed upper is not always possible. * ovl_connect_layer() will try to make origin's layer "connected" by * copying up a "connectable" ancestor. 
*/ if (!decodable || !S_ISDIR(inode->i_mode)) return 1; dentry = d_find_any_alias(inode); if (!dentry) return -ENOENT; err = ovl_connect_layer(dentry); dput(dentry); if (err < 0) return err; /* Lower file handle for indexed and non-upper dir/non-dir */ return 1; } static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct inode *inode, u32 *fid, int buflen) { struct ovl_fh *fh = NULL; int err, enc_lower; int len; /* * Check if we should encode a lower or upper file handle and maybe * copy up an ancestor to make lower file handle connectable. */ err = enc_lower = ovl_check_encode_origin(inode); if (enc_lower < 0) goto fail; /* Encode an upper or lower file handle */ fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_inode_lower(inode) : ovl_inode_upper(inode), !enc_lower); if (IS_ERR(fh)) return PTR_ERR(fh); len = OVL_FH_LEN(fh); if (len <= buflen) memcpy(fid, fh, len); err = len; out: kfree(fh); return err; fail: pr_warn_ratelimited("failed to encode file handle (ino=%lu, err=%i)\n", inode->i_ino, err); goto out; } static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len, struct inode *parent) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); int bytes, buflen = *max_len << 2; /* TODO: encode connectable file handles */ if (parent) return FILEID_INVALID; bytes = ovl_dentry_to_fid(ofs, inode, fid, buflen); if (bytes <= 0) return FILEID_INVALID; *max_len = bytes >> 2; if (bytes > buflen) return FILEID_INVALID; return OVL_FILEID_V1; } /* * Find or instantiate an overlay dentry from real dentries and index. */ static struct dentry *ovl_obtain_alias(struct super_block *sb, struct dentry *upper_alias, struct ovl_path *lowerpath, struct dentry *index) { struct dentry *lower = lowerpath ? lowerpath->dentry : NULL; struct dentry *upper = upper_alias ?: index; struct inode *inode = NULL; struct ovl_entry *oe; struct ovl_inode_params oip = { .index = index, }; /* We get overlay directory dentries with ovl_lookup_real() */ if (d_is_dir(upper ?: lower)) return ERR_PTR(-EIO); oe = ovl_alloc_entry(!!lower); if (!oe) return ERR_PTR(-ENOMEM); oip.upperdentry = dget(upper); if (lower) { ovl_lowerstack(oe)->dentry = dget(lower); ovl_lowerstack(oe)->layer = lowerpath->layer; } oip.oe = oe; inode = ovl_get_inode(sb, &oip); if (IS_ERR(inode)) { ovl_free_entry(oe); dput(upper); return ERR_CAST(inode); } if (upper) ovl_set_flag(OVL_UPPERDATA, inode); return d_obtain_alias(inode); } /* Get the upper or lower dentry in stack whose on layer @idx */ static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerstack = ovl_lowerstack(oe); int i; if (!idx) return ovl_dentry_upper(dentry); for (i = 0; i < ovl_numlower(oe); i++) { if (lowerstack[i].layer->idx == idx) return lowerstack[i].dentry; } return NULL; } /* * Lookup a child overlay dentry to get a connected overlay dentry whose real * dentry is @real. If @real is on upper layer, we lookup a child overlay * dentry with the same name as the real dentry. Otherwise, we need to consult * index for lookup. */ static struct dentry *ovl_lookup_real_one(struct dentry *connected, struct dentry *real, const struct ovl_layer *layer) { struct inode *dir = d_inode(connected); struct dentry *this, *parent = NULL; struct name_snapshot name; int err; /* * Lookup child overlay dentry by real name. The dir mutex protects us * from racing with overlay rename. 
If the overlay dentry that is above * real has already been moved to a parent that is not under the * connected overlay dir, we return -ECHILD and restart the lookup of * connected real path from the top. */ inode_lock_nested(dir, I_MUTEX_PARENT); err = -ECHILD; parent = dget_parent(real); if (ovl_dentry_real_at(connected, layer->idx) != parent) goto fail; /* * We also need to take a snapshot of real dentry name to protect us * from racing with underlying layer rename. In this case, we don't * care about returning ESTALE, only from dereferencing a free name * pointer because we hold no lock on the real dentry. */ take_dentry_name_snapshot(&name, real); /* * No idmap handling here: it's an internal lookup. Could skip * permission checking altogether, but for now just use non-idmap * transformed ids. */ this = lookup_one_len(name.name.name, connected, name.name.len); release_dentry_name_snapshot(&name); err = PTR_ERR(this); if (IS_ERR(this)) { goto fail; } else if (!this || !this->d_inode) { dput(this); err = -ENOENT; goto fail; } else if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); err = -ESTALE; goto fail; } out: dput(parent); inode_unlock(dir); return this; fail: pr_warn_ratelimited("failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n", real, layer->idx, connected, err); this = ERR_PTR(err); goto out; } static struct dentry *ovl_lookup_real(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer); /* * Lookup an indexed or hashed overlay dentry by real inode. */ static struct dentry *ovl_lookup_real_inode(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer) { struct ovl_fs *ofs = OVL_FS(sb); struct dentry *index = NULL; struct dentry *this = NULL; struct inode *inode; /* * Decoding upper dir from index is expensive, so first try to lookup * overlay dentry in inode/dcache. */ inode = ovl_lookup_inode(sb, real, !layer->idx); if (IS_ERR(inode)) return ERR_CAST(inode); if (inode) { this = d_find_any_alias(inode); iput(inode); } /* * For decoded lower dir file handle, lookup index by origin to check * if lower dir was copied up and and/or removed. */ if (!this && layer->idx && ovl_indexdir(sb) && !WARN_ON(!d_is_dir(real))) { index = ovl_lookup_index(ofs, NULL, real, false); if (IS_ERR(index)) return index; } /* Get connected upper overlay dir from index */ if (index) { struct dentry *upper = ovl_index_upper(ofs, index, true); dput(index); if (IS_ERR_OR_NULL(upper)) return upper; /* * ovl_lookup_real() in lower layer may call recursively once to * ovl_lookup_real() in upper layer. The first level call walks * back lower parents to the topmost indexed parent. The second * recursive call walks back from indexed upper to the topmost * connected/hashed upper parent (or up to root). */ this = ovl_lookup_real(sb, upper, &ofs->layers[0]); dput(upper); } if (IS_ERR_OR_NULL(this)) return this; if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); this = ERR_PTR(-EIO); } return this; } /* * Lookup an indexed or hashed overlay dentry, whose real dentry is an * ancestor of @real. 
*/ static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer) { struct dentry *next, *parent = NULL; struct dentry *ancestor = ERR_PTR(-EIO); if (real == layer->mnt->mnt_root) return dget(sb->s_root); /* Find the topmost indexed or hashed ancestor */ next = dget(real); for (;;) { parent = dget_parent(next); /* * Lookup a matching overlay dentry in inode/dentry * cache or in index by real inode. */ ancestor = ovl_lookup_real_inode(sb, next, layer); if (ancestor) break; if (parent == layer->mnt->mnt_root) { ancestor = dget(sb->s_root); break; } /* * If @real has been moved out of the layer root directory, * we will eventully hit the real fs root. This cannot happen * by legit overlay rename, so we return error in that case. */ if (parent == next) { ancestor = ERR_PTR(-EXDEV); break; } dput(next); next = parent; } dput(parent); dput(next); return ancestor; } /* * Lookup a connected overlay dentry whose real dentry is @real. * If @real is on upper layer, we lookup a child overlay dentry with the same * path the real dentry. Otherwise, we need to consult index for lookup. */ static struct dentry *ovl_lookup_real(struct super_block *sb, struct dentry *real, const struct ovl_layer *layer) { struct dentry *connected; int err = 0; connected = ovl_lookup_real_ancestor(sb, real, layer); if (IS_ERR(connected)) return connected; while (!err) { struct dentry *next, *this; struct dentry *parent = NULL; struct dentry *real_connected = ovl_dentry_real_at(connected, layer->idx); if (real_connected == real) break; /* Find the topmost dentry not yet connected */ next = dget(real); for (;;) { parent = dget_parent(next); if (parent == real_connected) break; /* * If real has been moved out of 'real_connected', * we will not find 'real_connected' and hit the layer * root. In that case, we need to restart connecting. * This game can go on forever in the worst case. We * may want to consider taking s_vfs_rename_mutex if * this happens more than once. */ if (parent == layer->mnt->mnt_root) { dput(connected); connected = dget(sb->s_root); break; } /* * If real file has been moved out of the layer root * directory, we will eventully hit the real fs root. * This cannot happen by legit overlay rename, so we * return error in that case. */ if (parent == next) { err = -EXDEV; break; } dput(next); next = parent; } if (!err) { this = ovl_lookup_real_one(connected, next, layer); if (IS_ERR(this)) err = PTR_ERR(this); /* * Lookup of child in overlay can fail when racing with * overlay rename of child away from 'connected' parent. * In this case, we need to restart the lookup from the * top, because we cannot trust that 'real_connected' is * still an ancestor of 'real'. There is a good chance * that the renamed overlay ancestor is now in cache, so * ovl_lookup_real_ancestor() will find it and we can * continue to connect exactly from where lookup failed. */ if (err == -ECHILD) { this = ovl_lookup_real_ancestor(sb, real, layer); err = PTR_ERR_OR_ZERO(this); } if (!err) { dput(connected); connected = this; } } dput(parent); dput(next); } if (err) goto fail; return connected; fail: pr_warn_ratelimited("failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n", real, layer->idx, connected, err); dput(connected); return ERR_PTR(err); } /* * Get an overlay dentry from upper/lower real dentries and index. 
*/ static struct dentry *ovl_get_dentry(struct super_block *sb, struct dentry *upper, struct ovl_path *lowerpath, struct dentry *index) { struct ovl_fs *ofs = OVL_FS(sb); const struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer; struct dentry *real = upper ?: (index ?: lowerpath->dentry); /* * Obtain a disconnected overlay dentry from a non-dir real dentry * and index. */ if (!d_is_dir(real)) return ovl_obtain_alias(sb, upper, lowerpath, index); /* Removed empty directory? */ if ((real->d_flags & DCACHE_DISCONNECTED) || d_unhashed(real)) return ERR_PTR(-ENOENT); /* * If real dentry is connected and hashed, get a connected overlay * dentry whose real dentry is @real. */ return ovl_lookup_real(sb, real, layer); } static struct dentry *ovl_upper_fh_to_d(struct super_block *sb, struct ovl_fh *fh) { struct ovl_fs *ofs = OVL_FS(sb); struct dentry *dentry; struct dentry *upper; if (!ovl_upper_mnt(ofs)) return ERR_PTR(-EACCES); upper = ovl_decode_real_fh(ofs, fh, ovl_upper_mnt(ofs), true); if (IS_ERR_OR_NULL(upper)) return upper; dentry = ovl_get_dentry(sb, upper, NULL, NULL); dput(upper); return dentry; } static struct dentry *ovl_lower_fh_to_d(struct super_block *sb, struct ovl_fh *fh) { struct ovl_fs *ofs = OVL_FS(sb); struct ovl_path origin = { }; struct ovl_path *stack = &origin; struct dentry *dentry = NULL; struct dentry *index = NULL; struct inode *inode; int err; /* First lookup overlay inode in inode cache by origin fh */ err = ovl_check_origin_fh(ofs, fh, false, NULL, &stack); if (err) return ERR_PTR(err); if (!d_is_dir(origin.dentry) || !(origin.dentry->d_flags & DCACHE_DISCONNECTED)) { inode = ovl_lookup_inode(sb, origin.dentry, false); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_err; if (inode) { dentry = d_find_any_alias(inode); iput(inode); if (dentry) goto out; } } /* Then lookup indexed upper/whiteout by origin fh */ if (ovl_indexdir(sb)) { index = ovl_get_index_fh(ofs, fh); err = PTR_ERR(index); if (IS_ERR(index)) { index = NULL; goto out_err; } } /* Then try to get a connected upper dir by index */ if (index && d_is_dir(index)) { struct dentry *upper = ovl_index_upper(ofs, index, true); err = PTR_ERR(upper); if (IS_ERR_OR_NULL(upper)) goto out_err; dentry = ovl_get_dentry(sb, upper, NULL, NULL); dput(upper); goto out; } /* Find origin.dentry again with ovl_acceptable() layer check */ if (d_is_dir(origin.dentry)) { dput(origin.dentry); origin.dentry = NULL; err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack); if (err) goto out_err; } if (index) { err = ovl_verify_origin(ofs, index, origin.dentry, false); if (err) goto out_err; } /* Get a connected non-upper dir or disconnected non-dir */ dentry = ovl_get_dentry(sb, NULL, &origin, index); out: dput(origin.dentry); dput(index); return dentry; out_err: dentry = ERR_PTR(err); goto out; } static struct ovl_fh *ovl_fid_to_fh(struct fid *fid, int buflen, int fh_type) { struct ovl_fh *fh; /* If on-wire inner fid is aligned - nothing to do */ if (fh_type == OVL_FILEID_V1) return (struct ovl_fh *)fid; if (fh_type != OVL_FILEID_V0) return ERR_PTR(-EINVAL); if (buflen <= OVL_FH_WIRE_OFFSET) return ERR_PTR(-EINVAL); fh = kzalloc(buflen, GFP_KERNEL); if (!fh) return ERR_PTR(-ENOMEM); /* Copy unaligned inner fh into aligned buffer */ memcpy(fh->buf, fid, buflen - OVL_FH_WIRE_OFFSET); return fh; } static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct dentry *dentry = NULL; struct ovl_fh *fh = NULL; int len = fh_len << 2; unsigned int flags = 0; int err; fh = 
ovl_fid_to_fh(fid, len, fh_type); err = PTR_ERR(fh); if (IS_ERR(fh)) goto out_err; err = ovl_check_fh_len(fh, len); if (err) goto out_err; flags = fh->fb.flags; dentry = (flags & OVL_FH_FLAG_PATH_UPPER) ? ovl_upper_fh_to_d(sb, fh) : ovl_lower_fh_to_d(sb, fh); err = PTR_ERR(dentry); if (IS_ERR(dentry) && err != -ESTALE) goto out_err; out: /* We may have needed to re-align OVL_FILEID_V0 */ if (!IS_ERR_OR_NULL(fh) && fh != (void *)fid) kfree(fh); return dentry; out_err: pr_warn_ratelimited("failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n", fh_len, fh_type, flags, err); dentry = ERR_PTR(err); goto out; } static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { pr_warn_ratelimited("connectable file handles not supported; use 'no_subtree_check' exportfs option.\n"); return ERR_PTR(-EACCES); } static int ovl_get_name(struct dentry *parent, char *name, struct dentry *child) { /* * ovl_fh_to_dentry() returns connected dir overlay dentries and * ovl_fh_to_parent() is not implemented, so we should not get here. */ WARN_ON_ONCE(1); return -EIO; } static struct dentry *ovl_get_parent(struct dentry *dentry) { /* * ovl_fh_to_dentry() returns connected dir overlay dentries, so we * should not get here. */ WARN_ON_ONCE(1); return ERR_PTR(-EIO); } const struct export_operations ovl_export_operations = { .encode_fh = ovl_encode_fh, .fh_to_dentry = ovl_fh_to_dentry, .fh_to_parent = ovl_fh_to_parent, .get_name = ovl_get_name, .get_parent = ovl_get_parent, }; /* encode_fh() encodes non-decodable file handles with nfs_export=off */ const struct export_operations ovl_export_fid_operations = { .encode_fh = ovl_encode_fh, };
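
The decode path above is what ultimately runs when a file handle obtained on an overlayfs mount is opened again. A minimal userspace sketch, assuming an overlayfs mount at /mnt/ovl with nfs_export=on (the paths are placeholders): name_to_handle_at() ends up in ovl_encode_fh(), and open_by_handle_at() drives ovl_fh_to_dentry().

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct file_handle *fh;
	int mount_id, mfd, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* Encode an overlay file handle (ovl_encode_fh). */
	if (name_to_handle_at(AT_FDCWD, "/mnt/ovl/file", fh, &mount_id, 0)) {
		perror("name_to_handle_at");
		return 1;
	}

	/* Decoding re-enters ovl_fh_to_dentry(); needs CAP_DAC_READ_SEARCH. */
	mfd = open("/mnt/ovl", O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mfd, fh, O_RDONLY);
	printf("handle_type=%d fd=%d\n", fh->handle_type, fd);

	free(fh);
	return fd < 0;
}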
33452 87 241 241 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM x86_fpu #if !defined(_TRACE_FPU_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_FPU_H #include <linux/tracepoint.h> DECLARE_EVENT_CLASS(x86_fpu, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu), TP_STRUCT__entry( __field(struct fpu *, fpu) __field(bool, load_fpu) __field(u64, xfeatures) __field(u64, xcomp_bv) ), TP_fast_assign( __entry->fpu = fpu; __entry->load_fpu = test_thread_flag(TIF_NEED_FPU_LOAD); if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { __entry->xfeatures = fpu->fpstate->regs.xsave.header.xfeatures; __entry->xcomp_bv = fpu->fpstate->regs.xsave.header.xcomp_bv; } ), TP_printk("x86/fpu: %p load: %d xfeatures: %llx xcomp_bv: %llx", __entry->fpu, __entry->load_fpu, __entry->xfeatures, __entry->xcomp_bv ) ); DEFINE_EVENT(x86_fpu, x86_fpu_before_save, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_after_save, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_before_restore, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_after_restore, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_regs_activated, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_init_state, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_dropped, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_copy_src, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH asm/trace/ #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE fpu #endif /* _TRACE_FPU_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
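
Each DEFINE_EVENT above generates a trace_<event>() stub that the FPU core invokes at the matching point; once registered, the events show up under /sys/kernel/tracing/events/x86_fpu/. A minimal sketch of the calling side, assuming the usual tracepoint convention that exactly one compilation unit defines CREATE_TRACE_POINTS before including this header (example_fpu_activate is a hypothetical caller):

#include <asm/trace/fpu.h>

/* Hypothetical caller: emits x86_fpu_regs_activated when the event is enabled. */
static void example_fpu_activate(struct fpu *fpu)
{
	trace_x86_fpu_regs_activated(fpu);
}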
61 20 20 20 295 236 61 61 61 5 2 2 1 1 7 7 6 7 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 // SPDX-License-Identifier: GPL-2.0 #include "cgroup-internal.h" #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/nsproxy.h> #include <linux/proc_ns.h> /* cgroup namespaces */ static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns) { return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES); } static void dec_cgroup_namespaces(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES); } static struct cgroup_namespace *alloc_cgroup_ns(void) { struct cgroup_namespace *new_ns; int ret; new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL_ACCOUNT); if (!new_ns) return ERR_PTR(-ENOMEM); ret = ns_alloc_inum(&new_ns->ns); if (ret) { kfree(new_ns); return ERR_PTR(ret); } refcount_set(&new_ns->ns.count, 1); new_ns->ns.ops = &cgroupns_operations; return new_ns; } void free_cgroup_ns(struct cgroup_namespace *ns) { put_css_set(ns->root_cset); dec_cgroup_namespaces(ns->ucounts); put_user_ns(ns->user_ns); ns_free_inum(&ns->ns); kfree(ns); } EXPORT_SYMBOL(free_cgroup_ns); struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, struct cgroup_namespace *old_ns) { struct cgroup_namespace *new_ns; struct ucounts *ucounts; struct css_set *cset; BUG_ON(!old_ns); if (!(flags & CLONE_NEWCGROUP)) { get_cgroup_ns(old_ns); return old_ns; } /* Allow only sysadmin to create cgroup namespace. */ if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); ucounts = inc_cgroup_namespaces(user_ns); if (!ucounts) return ERR_PTR(-ENOSPC); /* It is not safe to take cgroup_mutex here */ spin_lock_irq(&css_set_lock); cset = task_css_set(current); get_css_set(cset); spin_unlock_irq(&css_set_lock); new_ns = alloc_cgroup_ns(); if (IS_ERR(new_ns)) { put_css_set(cset); dec_cgroup_namespaces(ucounts); return new_ns; } new_ns->user_ns = get_user_ns(user_ns); new_ns->ucounts = ucounts; new_ns->root_cset = cset; return new_ns; } static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns) { return container_of(ns, struct cgroup_namespace, ns); } static int cgroupns_install(struct nsset *nsset, struct ns_common *ns) { struct nsproxy *nsproxy = nsset->nsproxy; struct cgroup_namespace *cgroup_ns = to_cg_ns(ns); if (!ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN) || !ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; /* Don't need to do anything if we are attaching to our own cgroupns. */ if (cgroup_ns == nsproxy->cgroup_ns) return 0; get_cgroup_ns(cgroup_ns); put_cgroup_ns(nsproxy->cgroup_ns); nsproxy->cgroup_ns = cgroup_ns; return 0; } static struct ns_common *cgroupns_get(struct task_struct *task) { struct cgroup_namespace *ns = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) { ns = nsproxy->cgroup_ns; get_cgroup_ns(ns); } task_unlock(task); return ns ? 
&ns->ns : NULL; } static void cgroupns_put(struct ns_common *ns) { put_cgroup_ns(to_cg_ns(ns)); } static struct user_namespace *cgroupns_owner(struct ns_common *ns) { return to_cg_ns(ns)->user_ns; } const struct proc_ns_operations cgroupns_operations = { .name = "cgroup", .type = CLONE_NEWCGROUP, .get = cgroupns_get, .put = cgroupns_put, .install = cgroupns_install, .owner = cgroupns_owner, };
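
The namespace code above is normally reached from userspace rather than called directly. A minimal sketch, assuming CAP_SYS_ADMIN in the relevant user namespace: unshare(CLONE_NEWCGROUP) reaches copy_cgroup_ns(), and setns() on a /proc/<pid>/ns/cgroup descriptor goes through cgroupns_install().

#define _GNU_SOURCE
#include <sched.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd;

	/* New cgroup namespace rooted at the caller's current css_set. */
	if (unshare(CLONE_NEWCGROUP)) {
		perror("unshare");
		return 1;
	}

	/* Switch to init's cgroup namespace via the cgroupns_install path. */
	fd = open("/proc/1/ns/cgroup", O_RDONLY);
	if (fd < 0 || setns(fd, CLONE_NEWCGROUP)) {
		perror("setns");
		return 1;
	}
	puts("cgroup namespace switched");
	return 0;
}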
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	INET		An implementation of the TCP/IP protocol suite for the LINUX
 *			operating system.  INET is implemented using the BSD Socket
 *			interface as the means of communication with the user level.
 *
 *	Global definitions for the Ethernet IEEE 802.3 interface.
 *
 * Version:	@(#)if_ether.h	1.0.1a	02/08/94
 *
 * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
 */
#ifndef _LINUX_IF_ETHER_H
#define _LINUX_IF_ETHER_H

#include <linux/skbuff.h>
#include <uapi/linux/if_ether.h>

static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
	return (struct ethhdr *)skb_mac_header(skb);
}

/* Prefer this version in TX path, instead of
 * skb_reset_mac_header() + eth_hdr()
 */
static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
{
	return (struct ethhdr *)skb->data;
}

static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
	return (struct ethhdr *)skb_inner_mac_header(skb);
}

int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);

extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);

#endif	/* _LINUX_IF_ETHER_H */
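
A small kernel-side sketch of how these accessors are typically used on receive, assuming the mac header has already been set on the skb (example_is_ipv4_frame is a hypothetical helper, not part of this header):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static bool example_is_ipv4_frame(const struct sk_buff *skb)
{
	const struct ethhdr *eth = eth_hdr(skb);	/* valid once mac header is set */

	return eth->h_proto == htons(ETH_P_IP);
}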
3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_ALLOC_BACKGROUND_H #define _BCACHEFS_ALLOC_BACKGROUND_H #include "bcachefs.h" #include "alloc_types.h" #include "buckets.h" #include "debug.h" #include "super.h" /* How out of date a pointer gen is allowed to be: */ #define BUCKET_GC_GEN_MAX 96U static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos) { rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode); bool ret = ca && bucket_valid(ca, pos.offset); rcu_read_unlock(); return ret; } static inline u64 bucket_to_u64(struct bpos bucket) { return (bucket.inode << 48) | bucket.offset; } static inline struct bpos u64_to_bucket(u64 bucket) { return POS(bucket >> 48, bucket & ~(~0ULL << 48)); } static inline u8 alloc_gc_gen(struct bch_alloc_v4 a) { return a.gen - a.oldest_gen; } static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src) { dst->gen = src.gen; dst->data_type = src.data_type; dst->stripe_sectors = src.stripe_sectors; dst->dirty_sectors = src.dirty_sectors; dst->cached_sectors = src.cached_sectors; dst->stripe = src.stripe; } static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src) { dst->gen = src.gen; dst->data_type = src.data_type; dst->stripe_sectors = src.stripe_sectors; dst->dirty_sectors = src.dirty_sectors; dst->cached_sectors = src.cached_sectors; dst->stripe = src.stripe; } static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b) { struct bch_alloc_v4 ret = {}; __bucket_m_to_alloc(&ret, b); return ret; } static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type) { switch (data_type) { case BCH_DATA_cached: case BCH_DATA_stripe: return BCH_DATA_user; default: return data_type; } } static inline bool bucket_data_type_mismatch(enum bch_data_type bucket, enum bch_data_type ptr) { return !data_type_is_empty(bucket) && bucket_data_type(bucket) != bucket_data_type(ptr); } /* * It is my general preference to use unsigned types for unsigned quantities - * however, these helpers are used in disk accounting calculations run by * triggers where the output will be negated and added to an s64. 
unsigned is * right out even though all these quantities will fit in 32 bits, since it * won't be sign extended correctly; u64 will negate "correctly", but s64 is the * simpler option here. */ static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a) { return a.stripe_sectors + a.dirty_sectors + a.cached_sectors; } static inline s64 bch2_bucket_sectors_dirty(struct bch_alloc_v4 a) { return a.stripe_sectors + a.dirty_sectors; } static inline s64 bch2_bucket_sectors(struct bch_alloc_v4 a) { return a.data_type == BCH_DATA_cached ? a.cached_sectors : bch2_bucket_sectors_dirty(a); } static inline s64 bch2_bucket_sectors_fragmented(struct bch_dev *ca, struct bch_alloc_v4 a) { int d = bch2_bucket_sectors(a); return d ? max(0, ca->mi.bucket_size - d) : 0; } static inline s64 bch2_gc_bucket_sectors_fragmented(struct bch_dev *ca, struct bucket a) { int d = a.stripe_sectors + a.dirty_sectors; return d ? max(0, ca->mi.bucket_size - d) : 0; } static inline s64 bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a) { return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0; } static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a, enum bch_data_type data_type) { if (a.stripe) return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe; if (bch2_bucket_sectors_dirty(a)) return data_type; if (a.cached_sectors) return BCH_DATA_cached; if (BCH_ALLOC_V4_NEED_DISCARD(&a)) return BCH_DATA_need_discard; if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX) return BCH_DATA_need_gc_gens; return BCH_DATA_free; } static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type) { a->data_type = alloc_data_type(*a, data_type); } static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a) { return a.data_type == BCH_DATA_cached ? 
a.io_time[READ] & LRU_TIME_MAX : 0; } #define DATA_TYPES_MOVABLE \ ((1U << BCH_DATA_btree)| \ (1U << BCH_DATA_user)| \ (1U << BCH_DATA_stripe)) static inline bool data_type_movable(enum bch_data_type type) { return (1U << type) & DATA_TYPES_MOVABLE; } static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a, struct bch_dev *ca) { if (a.data_type >= BCH_DATA_NR) return 0; if (!data_type_movable(a.data_type) || !bch2_bucket_sectors_fragmented(ca, a)) return 0; /* * avoid overflowing LRU_TIME_BITS on a corrupted fs, when * bucket_sectors_dirty is (much) bigger than bucket_size */ u64 d = min_t(s64, bch2_bucket_sectors_dirty(a), ca->mi.bucket_size); return div_u64(d * (1ULL << 31), ca->mi.bucket_size); } static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a) { return ((u64) alloc_gc_gen(a) >> 4) << 56; } static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a) { pos.offset |= alloc_freespace_genbits(a); return pos; } static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a) { return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?: BCH_ALLOC_V4_U64s_V0) + BCH_ALLOC_V4_NR_BACKPOINTERS(a) * (sizeof(struct bch_backpointer) / sizeof(u64)); } static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a) { unsigned ret = alloc_v4_u64s_noerror(a); BUG_ON(ret > U8_MAX - BKEY_U64s); return ret; } static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a) { set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v)); } struct bkey_i_alloc_v4 * bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos); struct bkey_i_alloc_v4 * bch2_trans_start_alloc_update(struct btree_trans *, struct bpos, enum btree_iter_update_trigger_flags); void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *); static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert) { const struct bch_alloc_v4 *ret; if (unlikely(k.k->type != KEY_TYPE_alloc_v4)) goto slowpath; ret = bkey_s_c_to_alloc_v4(k).v; if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s) goto slowpath; return ret; slowpath: __bch2_alloc_to_v4(k, convert); return convert; } struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c); int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int); int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); void bch2_alloc_v4_swab(struct bkey_s); void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_alloc ((struct bkey_ops) { \ .key_validate = bch2_alloc_v1_validate, \ .val_to_text = bch2_alloc_to_text, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 8, \ }) #define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \ .key_validate = bch2_alloc_v2_validate, \ .val_to_text = bch2_alloc_to_text, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 8, \ }) #define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \ .key_validate = bch2_alloc_v3_validate, \ .val_to_text = bch2_alloc_to_text, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 16, \ }) #define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \ .key_validate = bch2_alloc_v4_validate, \ .val_to_text = bch2_alloc_to_text, \ .swab = 
bch2_alloc_v4_swab, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 48, \ }) int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \ .key_validate = bch2_bucket_gens_validate, \ .val_to_text = bch2_bucket_gens_to_text, \ }) int bch2_bucket_gens_init(struct bch_fs *); static inline bool bkey_is_alloc(const struct bkey *k) { return k->type == KEY_TYPE_alloc || k->type == KEY_TYPE_alloc_v2 || k->type == KEY_TYPE_alloc_v3; } int bch2_alloc_read(struct bch_fs *); int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *, const struct bch_alloc_v4 *, const struct bch_alloc_v4 *, unsigned); int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, enum btree_iter_update_trigger_flags); int bch2_check_discard_freespace_key(struct btree_trans *, struct btree_iter *, u8 *, bool); int bch2_check_alloc_info(struct bch_fs *); int bch2_check_alloc_to_lru_refs(struct bch_fs *); void bch2_dev_do_discards(struct bch_dev *); void bch2_do_discards(struct bch_fs *); static inline u64 should_invalidate_buckets(struct bch_dev *ca, struct bch_dev_usage u) { u64 want_free = ca->mi.nbuckets >> 7; u64 free = max_t(s64, 0, u.d[BCH_DATA_free].buckets + u.d[BCH_DATA_need_discard].buckets - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe)); return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets); } void bch2_dev_do_invalidates(struct bch_dev *); void bch2_do_invalidates(struct bch_fs *); static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a) { return (void *) ((u64 *) &a->v + (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?: BCH_ALLOC_V4_U64s_V0)); } static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a) { return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a)); } int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64); int bch2_fs_freespace_init(struct bch_fs *); int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *); void bch2_recalc_capacity(struct bch_fs *); u64 bch2_min_rw_member_capacity(struct bch_fs *); void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *); void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *); void bch2_dev_allocator_background_exit(struct bch_dev *); void bch2_dev_allocator_background_init(struct bch_dev *); void bch2_fs_allocator_background_init(struct bch_fs *); #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
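
Illustration of the packing performed by bucket_to_u64()/u64_to_bucket() above: the device index occupies the top 16 bits and the bucket offset the low 48 bits, so a round trip preserves both fields. The check below is a hypothetical sketch, not part of this header:

static void example_bucket_pack_roundtrip(void)
{
	struct bpos b = POS(3, 0x123456789aULL);	/* device 3, offset < 2^48 */
	u64 packed = bucket_to_u64(b);			/* (3ULL << 48) | 0x123456789a */
	struct bpos back = u64_to_bucket(packed);

	BUG_ON(back.inode != b.inode || back.offset != b.offset);
}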
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ratelimit.c - Do something with rate limit.
 *
 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
 *
 * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
 * parameter. Now every user can use their own standalone ratelimit_state.
 */

#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/export.h>

/*
 * __ratelimit - rate limiting
 * @rs: ratelimit_state data
 * @func: name of calling function
 *
 * This enforces a rate limit: not more than @rs->burst callbacks
 * in every @rs->interval
 *
 * RETURNS:
 * 0 means callbacks will be suppressed.
 * 1 means go ahead and do it.
 */
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
	/* Paired with WRITE_ONCE() in .proc_handler().
	 * Changing two values separately could be inconsistent
	 * and some message could be lost.  (See: net_ratelimit_state).
	 */
	int interval = READ_ONCE(rs->interval);
	int burst = READ_ONCE(rs->burst);
	unsigned long flags;
	int ret;

	if (!interval)
		return 1;

	/*
	 * If we contend on this state's lock then almost
	 * by definition we are too busy to print a message,
	 * in addition to the one that will be printed by
	 * the entity that is holding the lock already:
	 */
	if (!raw_spin_trylock_irqsave(&rs->lock, flags))
		return 0;

	if (!rs->begin)
		rs->begin = jiffies;

	if (time_is_before_jiffies(rs->begin + interval)) {
		if (rs->missed) {
			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
				printk_deferred(KERN_WARNING
						"%s: %d callbacks suppressed\n",
						func, rs->missed);
				rs->missed = 0;
			}
		}
		rs->begin   = jiffies;
		rs->printed = 0;
	}
	if (burst && burst > rs->printed) {
		rs->printed++;
		ret = 1;
	} else {
		rs->missed++;
		ret = 0;
	}
	raw_spin_unlock_irqrestore(&rs->lock, flags);

	return ret;
}
EXPORT_SYMBOL(___ratelimit);
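
A typical caller pattern for the helper above (sketch): DEFINE_RATELIMIT_STATE() sets the interval and burst, and __ratelimit() expands to ___ratelimit(&rs, __func__). printk_ratelimited() wraps the same idea for one-off messages.

#include <linux/ratelimit.h>
#include <linux/printk.h>

static void example_noisy_path(void)
{
	/* At most 10 messages every 5 seconds from this call site. */
	static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 10);

	if (__ratelimit(&example_rs))
		pr_warn("example: something noisy happened\n");
}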
3 2 1 3 85 82 21 83 1 6 87 81 58 72 1 83 80 24 1 1 80 23 7 11 11 10 1 34 34 105 105 104 21 21 58 57 58 58 53 52 54 54 6 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 // SPDX-License-Identifier: GPL-2.0-or-later /* * ALSA sequencer Memory Manager * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl> * Jaroslav Kysela <perex@perex.cz> * 2000 by Takashi Iwai <tiwai@suse.de> */ #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/mm.h> #include <sound/core.h> #include <sound/seq_kernel.h> #include "seq_memory.h" #include "seq_queue.h" #include "seq_info.h" #include "seq_lock.h" static inline int snd_seq_pool_available(struct snd_seq_pool *pool) { return pool->total_elements - atomic_read(&pool->counter); } static inline int snd_seq_output_ok(struct snd_seq_pool *pool) { return snd_seq_pool_available(pool) >= pool->room; } /* * Variable length event: * The event like sysex uses variable length type. * The external data may be stored in three different formats. * 1) kernel space * This is the normal case. * ext.data.len = length * ext.data.ptr = buffer pointer * 2) user space * When an event is generated via read(), the external data is * kept in user space until expanded. 
* ext.data.len = length | SNDRV_SEQ_EXT_USRPTR * ext.data.ptr = userspace pointer * 3) chained cells * When the variable length event is enqueued (in prioq or fifo), * the external data is decomposed to several cells. * ext.data.len = length | SNDRV_SEQ_EXT_CHAINED * ext.data.ptr = the additiona cell head * -> cell.next -> cell.next -> .. */ /* * exported: * call dump function to expand external data. */ static int get_var_len(const struct snd_seq_event *event) { if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) return -EINVAL; return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; } static int dump_var_event(const struct snd_seq_event *event, snd_seq_dump_func_t func, void *private_data, int offset, int maxlen) { int len, err; struct snd_seq_event_cell *cell; len = get_var_len(event); if (len <= 0) return len; if (len <= offset) return 0; if (maxlen && len > offset + maxlen) len = offset + maxlen; if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { char buf[32]; char __user *curptr = (char __force __user *)event->data.ext.ptr; curptr += offset; len -= offset; while (len > 0) { int size = sizeof(buf); if (len < size) size = len; if (copy_from_user(buf, curptr, size)) return -EFAULT; err = func(private_data, buf, size); if (err < 0) return err; curptr += size; len -= size; } return 0; } if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) return func(private_data, event->data.ext.ptr + offset, len - offset); cell = (struct snd_seq_event_cell *)event->data.ext.ptr; for (; len > 0 && cell; cell = cell->next) { int size = sizeof(struct snd_seq_event); char *curptr = (char *)&cell->event; if (offset >= size) { offset -= size; len -= size; continue; } if (len < size) size = len; err = func(private_data, curptr + offset, size - offset); if (err < 0) return err; offset = 0; len -= size; } return 0; } int snd_seq_dump_var_event(const struct snd_seq_event *event, snd_seq_dump_func_t func, void *private_data) { return dump_var_event(event, func, private_data, 0, 0); } EXPORT_SYMBOL(snd_seq_dump_var_event); /* * exported: * expand the variable length event to linear buffer space. */ static int seq_copy_in_kernel(void *ptr, void *src, int size) { char **bufptr = ptr; memcpy(*bufptr, src, size); *bufptr += size; return 0; } static int seq_copy_in_user(void *ptr, void *src, int size) { char __user **bufptr = ptr; if (copy_to_user(*bufptr, src, size)) return -EFAULT; *bufptr += size; return 0; } static int expand_var_event(const struct snd_seq_event *event, int offset, int size, char *buf, bool in_kernel) { if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) { if (! in_kernel) return -EINVAL; if (copy_from_user(buf, (char __force __user *)event->data.ext.ptr + offset, size)) return -EFAULT; return 0; } return dump_var_event(event, in_kernel ? 
seq_copy_in_kernel : seq_copy_in_user, &buf, offset, size); } int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf, int in_kernel, int size_aligned) { int len, newlen, err; len = get_var_len(event); if (len < 0) return len; newlen = len; if (size_aligned > 0) newlen = roundup(len, size_aligned); if (count < newlen) return -EAGAIN; err = expand_var_event(event, 0, len, buf, in_kernel); if (err < 0) return err; if (len != newlen) { if (in_kernel) memset(buf + len, 0, newlen - len); else if (clear_user((__force void __user *)buf + len, newlen - len)) return -EFAULT; } return newlen; } EXPORT_SYMBOL(snd_seq_expand_var_event); int snd_seq_expand_var_event_at(const struct snd_seq_event *event, int count, char *buf, int offset) { int len, err; len = get_var_len(event); if (len < 0) return len; if (len <= offset) return 0; len -= offset; if (len > count) len = count; err = expand_var_event(event, offset, count, buf, true); if (err < 0) return err; return len; } EXPORT_SYMBOL_GPL(snd_seq_expand_var_event_at); /* * release this cell, free extended data if available */ static inline void free_cell(struct snd_seq_pool *pool, struct snd_seq_event_cell *cell) { cell->next = pool->free; pool->free = cell; atomic_dec(&pool->counter); } void snd_seq_cell_free(struct snd_seq_event_cell * cell) { struct snd_seq_pool *pool; if (snd_BUG_ON(!cell)) return; pool = cell->pool; if (snd_BUG_ON(!pool)) return; guard(spinlock_irqsave)(&pool->lock); free_cell(pool, cell); if (snd_seq_ev_is_variable(&cell->event)) { if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) { struct snd_seq_event_cell *curp, *nextptr; curp = cell->event.data.ext.ptr; for (; curp; curp = nextptr) { nextptr = curp->next; curp->next = pool->free; free_cell(pool, curp); } } } if (waitqueue_active(&pool->output_sleep)) { /* has enough space now? */ if (snd_seq_output_ok(pool)) wake_up(&pool->output_sleep); } } /* * allocate an event cell. */ static int snd_seq_cell_alloc(struct snd_seq_pool *pool, struct snd_seq_event_cell **cellp, int nonblock, struct file *file, struct mutex *mutexp) { struct snd_seq_event_cell *cell; unsigned long flags; int err = -EAGAIN; wait_queue_entry_t wait; if (pool == NULL) return -EINVAL; *cellp = NULL; init_waitqueue_entry(&wait, current); spin_lock_irqsave(&pool->lock, flags); if (pool->ptr == NULL) { /* not initialized */ pr_debug("ALSA: seq: pool is not initialized\n"); err = -EINVAL; goto __error; } while (pool->free == NULL && ! nonblock && ! pool->closing) { set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&pool->output_sleep, &wait); spin_unlock_irqrestore(&pool->lock, flags); if (mutexp) mutex_unlock(mutexp); schedule(); if (mutexp) mutex_lock(mutexp); spin_lock_irqsave(&pool->lock, flags); remove_wait_queue(&pool->output_sleep, &wait); /* interrupted? */ if (signal_pending(current)) { err = -ERESTARTSYS; goto __error; } } if (pool->closing) { /* closing.. */ err = -ENOMEM; goto __error; } cell = pool->free; if (cell) { int used; pool->free = cell->next; atomic_inc(&pool->counter); used = atomic_read(&pool->counter); if (pool->max_used < used) pool->max_used = used; pool->event_alloc_success++; /* clear cell pointers */ cell->next = NULL; err = 0; } else pool->event_alloc_failures++; *cellp = cell; __error: spin_unlock_irqrestore(&pool->lock, flags); return err; } /* * duplicate the event to a cell. * if the event has external data, the data is decomposed to additional * cells. 
*/ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, struct snd_seq_event_cell **cellp, int nonblock, struct file *file, struct mutex *mutexp) { int ncells, err; unsigned int extlen; struct snd_seq_event_cell *cell; int size; *cellp = NULL; ncells = 0; extlen = 0; if (snd_seq_ev_is_variable(event)) { extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK; ncells = DIV_ROUND_UP(extlen, sizeof(struct snd_seq_event)); } if (ncells >= pool->total_elements) return -ENOMEM; err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp); if (err < 0) return err; /* copy the event */ size = snd_seq_event_packet_size(event); memcpy(&cell->ump, event, size); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) if (size < sizeof(cell->event)) cell->ump.raw.extra = 0; #endif /* decompose */ if (snd_seq_ev_is_variable(event)) { int len = extlen; int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED; int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR; struct snd_seq_event_cell *src, *tmp, *tail; char *buf; cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED; cell->event.data.ext.ptr = NULL; src = (struct snd_seq_event_cell *)event->data.ext.ptr; buf = (char *)event->data.ext.ptr; tail = NULL; while (ncells-- > 0) { size = sizeof(struct snd_seq_event); if (len < size) size = len; err = snd_seq_cell_alloc(pool, &tmp, nonblock, file, mutexp); if (err < 0) goto __error; if (cell->event.data.ext.ptr == NULL) cell->event.data.ext.ptr = tmp; if (tail) tail->next = tmp; tail = tmp; /* copy chunk */ if (is_chained && src) { tmp->event = src->event; src = src->next; } else if (is_usrptr) { if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) { err = -EFAULT; goto __error; } } else { memcpy(&tmp->event, buf, size); } buf += size; len -= size; } } *cellp = cell; return 0; __error: snd_seq_cell_free(cell); return err; } /* poll wait */ int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait) { poll_wait(file, &pool->output_sleep, wait); return snd_seq_output_ok(pool); } /* allocate room specified number of events */ int snd_seq_pool_init(struct snd_seq_pool *pool) { int cell; struct snd_seq_event_cell *cellptr; if (snd_BUG_ON(!pool)) return -EINVAL; cellptr = kvmalloc_array(pool->size, sizeof(struct snd_seq_event_cell), GFP_KERNEL); if (!cellptr) return -ENOMEM; /* add new cells to the free cell list */ guard(spinlock_irq)(&pool->lock); if (pool->ptr) { kvfree(cellptr); return 0; } pool->ptr = cellptr; pool->free = NULL; for (cell = 0; cell < pool->size; cell++) { cellptr = pool->ptr + cell; cellptr->pool = pool; cellptr->next = pool->free; pool->free = cellptr; } pool->room = (pool->size + 1) / 2; /* init statistics */ pool->max_used = 0; pool->total_elements = pool->size; return 0; } /* refuse the further insertion to the pool */ void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) { if (snd_BUG_ON(!pool)) return; guard(spinlock_irqsave)(&pool->lock); pool->closing = 1; } /* remove events */ int snd_seq_pool_done(struct snd_seq_pool *pool) { struct snd_seq_event_cell *ptr; if (snd_BUG_ON(!pool)) return -EINVAL; /* wait for closing all threads */ if (waitqueue_active(&pool->output_sleep)) wake_up(&pool->output_sleep); while (atomic_read(&pool->counter) > 0) schedule_timeout_uninterruptible(1); /* release all resources */ scoped_guard(spinlock_irq, &pool->lock) { ptr = pool->ptr; pool->ptr = NULL; pool->free = NULL; pool->total_elements = 0; } kvfree(ptr); guard(spinlock_irq)(&pool->lock); pool->closing = 0; return 0; } /* init new memory 
pool */ struct snd_seq_pool *snd_seq_pool_new(int poolsize) { struct snd_seq_pool *pool; /* create pool block */ pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return NULL; spin_lock_init(&pool->lock); pool->ptr = NULL; pool->free = NULL; pool->total_elements = 0; atomic_set(&pool->counter, 0); pool->closing = 0; init_waitqueue_head(&pool->output_sleep); pool->size = poolsize; /* init statistics */ pool->max_used = 0; return pool; } /* remove memory pool */ int snd_seq_pool_delete(struct snd_seq_pool **ppool) { struct snd_seq_pool *pool = *ppool; *ppool = NULL; if (pool == NULL) return 0; snd_seq_pool_mark_closing(pool); snd_seq_pool_done(pool); kfree(pool); return 0; } /* exported to seq_clientmgr.c */ void snd_seq_info_pool(struct snd_info_buffer *buffer, struct snd_seq_pool *pool, char *space) { if (pool == NULL) return; snd_iprintf(buffer, "%sPool size : %d\n", space, pool->total_elements); snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter)); snd_iprintf(buffer, "%sPeak cells in use : %d\n", space, pool->max_used); snd_iprintf(buffer, "%sAlloc success : %d\n", space, pool->event_alloc_success); snd_iprintf(buffer, "%sAlloc failures : %d\n", space, pool->event_alloc_failures); }
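
Sketch of a kernel-side consumer of the expansion helper exported above: copy a variable-length (e.g. sysex) event's payload into a freshly allocated linear buffer. example_copy_sysex is hypothetical; in_kernel=1 because the destination is a kernel buffer, and the helper itself rejects non-variable events.

#include <sound/seq_kernel.h>
#include <linux/slab.h>

static int example_copy_sysex(const struct snd_seq_event *ev, char **out)
{
	/* Assumes ev carries variable-length data; snd_seq_expand_var_event()
	 * returns -EINVAL otherwise. */
	int len = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	len = snd_seq_expand_var_event(ev, len, buf, 1, 0);	/* in_kernel=1 */
	if (len < 0) {
		kfree(buf);
		return len;
	}
	*out = buf;
	return len;
}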
3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 // SPDX-License-Identifier: GPL-2.0-or-later /* * NET4: Implementation of BSD Unix domain sockets. * * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> * * Fixes: * Linus Torvalds : Assorted bug cures. * Niibe Yutaka : async I/O support. * Carsten Paeth : PF_UNIX check, address fixes. * Alan Cox : Limit size of allocated blocks. * Alan Cox : Fixed the stupid socketpair bug. * Alan Cox : BSD compatibility fine tuning. * Alan Cox : Fixed a bug in connect when interrupted. * Alan Cox : Sorted out a proper draft version of * file descriptor passing hacked up from * Mike Shaver's work. * Marty Leisner : Fixes to fd passing * Nick Nevin : recvmsg bugfix. * Alan Cox : Started proper garbage collector * Heiko EiBfeldt : Missing verify_area check * Alan Cox : Started POSIXisms * Andreas Schwab : Replace inode by dentry for proper * reference counting * Kirk Petersen : Made this a module * Christoph Rohland : Elegant non-blocking accept/connect algorithm. * Lots of bug fixes. * Alexey Kuznetosv : Repaired (I hope) bugs introduces * by above two patches. * Andrea Arcangeli : If possible we block in connect(2) * if the max backlog of the listen socket * is been reached. This won't break * old apps and it will avoid huge amount * of socks hashed (this for unix_gc() * performances reasons). * Security fix that limits the max * number of socks to 2*max_files and * the number of skb queueable in the * dgram receiver. * Artur Skawina : Hash function optimizations * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8) * Malcolm Beattie : Set peercred for socketpair * Michal Ostrowski : Module initialization cleanup. * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT, * the core infrastructure is doing that * for all net proto families now (2.5.69+) * * Known differences from reference BSD that was tested: * * [TO FIX] * ECONNREFUSED is not returned from one end of a connected() socket to the * other the moment one end closes. 
* fstat() doesn't return st_dev=0, and give the blksize as high water mark * and a fake inode identifier (nor the BSD first socket fstat twice bug). * [NOT TO FIX] * accept() returns a path name even if the connecting socket has closed * in the meantime (BSD loses the path and gives up). * accept() returns 0 length path for an unbound connector. BSD returns 16 * and a null first byte in the path (but not for gethost/peername - BSD bug ??) * socketpair(...SOCK_RAW..) doesn't panic the kernel. * BSD af_unix apparently has connect forgetting to block properly. * (need to check this with the POSIX spec in detail) * * Differences from 2.0.0-11-... (ANK) * Bug fixes and improvements. * - client shutdown killed server socket. * - removed all useless cli/sti pairs. * * Semantic changes/extensions. * - generic control message passing. * - SCM_CREDENTIALS control message. * - "Abstract" (not FS based) socket bindings. * Abstract names are sequences of bytes (not zero terminated) * started by 0, so that this name space does not intersect * with BSD names. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/fcntl.h> #include <linux/filter.h> #include <linux/termios.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/af_unix.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/scm.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/mount.h> #include <net/checksum.h> #include <linux/security.h> #include <linux/splice.h> #include <linux/freezer.h> #include <linux/file.h> #include <linux/btf_ids.h> #include <linux/bpf-cgroup.h> static atomic_long_t unix_nr_socks; static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; /* SMP locking strategy: * hash table is protected with spinlock. * each socket state is protected by separate spinlock. */ #ifdef CONFIG_PROVE_LOCKING #define cmp_ptr(l, r) (((l) > (r)) - ((l) < (r))) static int unix_table_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b) { return cmp_ptr(a, b); } static int unix_state_lock_cmp_fn(const struct lockdep_map *_a, const struct lockdep_map *_b) { const struct unix_sock *a, *b; a = container_of(_a, struct unix_sock, lock.dep_map); b = container_of(_b, struct unix_sock, lock.dep_map); if (a->sk.sk_state == TCP_LISTEN) { /* unix_stream_connect(): Before the 2nd unix_state_lock(), * * 1. a is TCP_LISTEN. * 2. b is not a. * 3. concurrent connect(b -> a) must fail. * * Except for 2. & 3., the b's state can be any possible * value due to concurrent connect() or listen(). * * 2. is detected in debug_spin_lock_before(), and 3. cannot * be expressed as lock_cmp_fn. */ switch (b->sk.sk_state) { case TCP_CLOSE: case TCP_ESTABLISHED: case TCP_LISTEN: return -1; default: /* Invalid case. */ return 0; } } /* Should never happen. Just to be symmetric. 
*/ if (b->sk.sk_state == TCP_LISTEN) { switch (b->sk.sk_state) { case TCP_CLOSE: case TCP_ESTABLISHED: return 1; default: return 0; } } /* unix_state_double_lock(): ascending address order. */ return cmp_ptr(a, b); } static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a, const struct lockdep_map *_b) { const struct sock *a, *b; a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map); b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map); /* unix_collect_skb(): listener -> embryo order. */ if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a) return -1; /* Should never happen. Just to be symmetric. */ if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b) return 1; return 0; } #endif static unsigned int unix_unbound_hash(struct sock *sk) { unsigned long hash = (unsigned long)sk; hash ^= hash >> 16; hash ^= hash >> 8; hash ^= sk->sk_type; return hash & UNIX_HASH_MOD; } static unsigned int unix_bsd_hash(struct inode *i) { return i->i_ino & UNIX_HASH_MOD; } static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr, int addr_len, int type) { __wsum csum = csum_partial(sunaddr, addr_len, 0); unsigned int hash; hash = (__force unsigned int)csum_fold(csum); hash ^= hash >> 8; hash ^= type; return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD); } static void unix_table_double_lock(struct net *net, unsigned int hash1, unsigned int hash2) { if (hash1 == hash2) { spin_lock(&net->unx.table.locks[hash1]); return; } if (hash1 > hash2) swap(hash1, hash2); spin_lock(&net->unx.table.locks[hash1]); spin_lock(&net->unx.table.locks[hash2]); } static void unix_table_double_unlock(struct net *net, unsigned int hash1, unsigned int hash2) { if (hash1 == hash2) { spin_unlock(&net->unx.table.locks[hash1]); return; } spin_unlock(&net->unx.table.locks[hash1]); spin_unlock(&net->unx.table.locks[hash2]); } #ifdef CONFIG_SECURITY_NETWORK static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { UNIXCB(skb).secid = scm->secid; } static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { scm->secid = UNIXCB(skb).secid; } static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) { return (scm->secid == UNIXCB(skb).secid); } #else static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { } static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { } static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) { return true; } #endif /* CONFIG_SECURITY_NETWORK */ static inline int unix_may_send(struct sock *sk, struct sock *osk) { return !unix_peer(osk) || unix_peer(osk) == sk; } static inline int unix_recvq_full_lockless(const struct sock *sk) { return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } struct sock *unix_peer_get(struct sock *s) { struct sock *peer; unix_state_lock(s); peer = unix_peer(s); if (peer) sock_hold(peer); unix_state_unlock(s); return peer; } EXPORT_SYMBOL_GPL(unix_peer_get); static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr, int addr_len) { struct unix_address *addr; addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL); if (!addr) return NULL; refcount_set(&addr->refcnt, 1); addr->len = addr_len; memcpy(addr->name, sunaddr, addr_len); return addr; } static inline void unix_release_addr(struct unix_address *addr) { if (refcount_dec_and_test(&addr->refcnt)) kfree(addr); } /* * Check unix socket name: * - should be not zero length. 
* - if started by not zero, should be NULL terminated (FS object) * - if started by zero, it is abstract name. */ static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len) { if (addr_len <= offsetof(struct sockaddr_un, sun_path) || addr_len > sizeof(*sunaddr)) return -EINVAL; if (sunaddr->sun_family != AF_UNIX) return -EINVAL; return 0; } static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) { struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr; short offset = offsetof(struct sockaddr_storage, __data); BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path)); /* This may look like an off by one error but it is a bit more * subtle. 108 is the longest valid AF_UNIX path for a binding. * sun_path[108] doesn't as such exist. However in kernel space * we are guaranteed that it is a valid memory location in our * kernel address buffer because syscall functions always pass * a pointer of struct sockaddr_storage which has a bigger buffer * than 108. Also, we must terminate sun_path for strlen() in * getname_kernel(). */ addr->__data[addr_len - offset] = 0; /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen() * know the actual buffer. */ return strlen(addr->__data) + offset + 1; } static void __unix_remove_socket(struct sock *sk) { sk_del_node_init(sk); } static void __unix_insert_socket(struct net *net, struct sock *sk) { DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]); } static void __unix_set_addr_hash(struct net *net, struct sock *sk, struct unix_address *addr, unsigned int hash) { __unix_remove_socket(sk); smp_store_release(&unix_sk(sk)->addr, addr); sk->sk_hash = hash; __unix_insert_socket(net, sk); } static void unix_remove_socket(struct net *net, struct sock *sk) { spin_lock(&net->unx.table.locks[sk->sk_hash]); __unix_remove_socket(sk); spin_unlock(&net->unx.table.locks[sk->sk_hash]); } static void unix_insert_unbound_socket(struct net *net, struct sock *sk) { spin_lock(&net->unx.table.locks[sk->sk_hash]); __unix_insert_socket(net, sk); spin_unlock(&net->unx.table.locks[sk->sk_hash]); } static void unix_insert_bsd_socket(struct sock *sk) { spin_lock(&bsd_socket_locks[sk->sk_hash]); sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]); spin_unlock(&bsd_socket_locks[sk->sk_hash]); } static void unix_remove_bsd_socket(struct sock *sk) { if (!hlist_unhashed(&sk->sk_bind_node)) { spin_lock(&bsd_socket_locks[sk->sk_hash]); __sk_del_bind_node(sk); spin_unlock(&bsd_socket_locks[sk->sk_hash]); sk_node_init(&sk->sk_bind_node); } } static struct sock *__unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, unsigned int hash) { struct sock *s; sk_for_each(s, &net->unx.table.buckets[hash]) { struct unix_sock *u = unix_sk(s); if (u->addr->len == len && !memcmp(u->addr->name, sunname, len)) return s; } return NULL; } static inline struct sock *unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, unsigned int hash) { struct sock *s; spin_lock(&net->unx.table.locks[hash]); s = __unix_find_socket_byname(net, sunname, len, hash); if (s) sock_hold(s); spin_unlock(&net->unx.table.locks[hash]); return s; } static struct sock *unix_find_socket_byinode(struct inode *i) { unsigned int hash = unix_bsd_hash(i); struct sock *s; spin_lock(&bsd_socket_locks[hash]); sk_for_each_bound(s, &bsd_socket_buckets[hash]) { struct dentry *dentry = unix_sk(s)->path.dentry; if (dentry && 
d_backing_inode(dentry) == i) { sock_hold(s); spin_unlock(&bsd_socket_locks[hash]); return s; } } spin_unlock(&bsd_socket_locks[hash]); return NULL; } /* Support code for asymmetrically connected dgram sockets * * If a datagram socket is connected to a socket not itself connected * to the first socket (eg, /dev/log), clients may only enqueue more * messages if the present receive queue of the server socket is not * "too large". This means there's a second writeability condition * poll and sendmsg need to test. The dgram recv code will do a wake * up on the peer_wait wait queue of a socket upon reception of a * datagram which needs to be propagated to sleeping would-be writers * since these might not have sent anything so far. This can't be * accomplished via poll_wait because the lifetime of the server * socket might be less than that of its clients if these break their * association with it or if the server socket is closed while clients * are still connected to it and there's no way to inform "a polling * implementation" that it should let go of a certain wait queue * * In order to propagate a wake up, a wait_queue_entry_t of the client * socket is enqueued on the peer_wait queue of the server socket * whose wake function does a wake_up on the ordinary client socket * wait queue. This connection is established whenever a write (or * poll for write) hit the flow control condition and broken when the * association to the server socket is dissolved or after a wake up * was relayed. */ static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, void *key) { struct unix_sock *u; wait_queue_head_t *u_sleep; u = container_of(q, struct unix_sock, peer_wake); __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, q); u->peer_wake.private = NULL; /* relaying can only happen while the wq still exists */ u_sleep = sk_sleep(&u->sk); if (u_sleep) wake_up_interruptible_poll(u_sleep, key_to_poll(key)); return 0; } static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) { struct unix_sock *u, *u_other; int rc; u = unix_sk(sk); u_other = unix_sk(other); rc = 0; spin_lock(&u_other->peer_wait.lock); if (!u->peer_wake.private) { u->peer_wake.private = other; __add_wait_queue(&u_other->peer_wait, &u->peer_wake); rc = 1; } spin_unlock(&u_other->peer_wait.lock); return rc; } static void unix_dgram_peer_wake_disconnect(struct sock *sk, struct sock *other) { struct unix_sock *u, *u_other; u = unix_sk(sk); u_other = unix_sk(other); spin_lock(&u_other->peer_wait.lock); if (u->peer_wake.private == other) { __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); u->peer_wake.private = NULL; } spin_unlock(&u_other->peer_wait.lock); } static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, struct sock *other) { unix_dgram_peer_wake_disconnect(sk, other); wake_up_interruptible_poll(sk_sleep(sk), EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); } /* preconditions: * - unix_peer(sk) == other * - association is stable */ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) { int connected; connected = unix_dgram_peer_wake_connect(sk, other); /* If other is SOCK_DEAD, we want to make sure we signal * POLLOUT, such that a subsequent write() can get a * -ECONNREFUSED. Otherwise, if we haven't queued any skbs * to other and its full, we will hang waiting for POLLOUT. 
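 *
 * Summarizing the code below: a return of 1 tells the caller to keep
 * waiting for a POLLOUT relayed through other's peer_wait queue (the
 * peer_wake entry has been enqueued there); a return of 0 means a write
 * may be attempted now, or the peer is dead, and any relay entry set up
 * by this call is torn down again. Callers such as unix_dgram_sendmsg()
 * use the result to decide whether the non-blocking send path must bail
 * out with -EAGAIN.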
*/ if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) return 1; if (connected) unix_dgram_peer_wake_disconnect(sk, other); return 0; } static int unix_writable(const struct sock *sk, unsigned char state) { return state != TCP_LISTEN && (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf); } static void unix_write_space(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); if (unix_writable(sk, READ_ONCE(sk->sk_state))) { wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } /* When dgram socket disconnects (or changes its peer), we clear its receive * queue of packets arrived from previous peer. First, it allows to do * flow control based only on wmem_alloc; second, sk connected to peer * may receive messages only from that peer. */ static void unix_dgram_disconnected(struct sock *sk, struct sock *other) { if (!skb_queue_empty(&sk->sk_receive_queue)) { skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_UNIX_DISCONNECT); wake_up_interruptible_all(&unix_sk(sk)->peer_wait); /* If one link of bidirectional dgram pipe is disconnected, * we signal error. Messages are lost. Do not make this, * when peer was not connected to us. */ if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) { WRITE_ONCE(other->sk_err, ECONNRESET); sk_error_report(other); } } } static void unix_sock_destructor(struct sock *sk) { struct unix_sock *u = unix_sk(sk); skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE); DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc)); DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); DEBUG_NET_WARN_ON_ONCE(sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { pr_info("Attempt to release alive unix socket: %p\n", sk); return; } if (u->addr) unix_release_addr(u->addr); atomic_long_dec(&unix_nr_socks); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); #ifdef UNIX_REFCNT_DEBUG pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, atomic_long_read(&unix_nr_socks)); #endif } static void unix_release_sock(struct sock *sk, int embrion) { struct unix_sock *u = unix_sk(sk); struct sock *skpair; struct sk_buff *skb; struct path path; int state; unix_remove_socket(sock_net(sk), sk); unix_remove_bsd_socket(sk); /* Clear state */ unix_state_lock(sk); sock_orphan(sk); WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); path = u->path; u->path.dentry = NULL; u->path.mnt = NULL; state = sk->sk_state; WRITE_ONCE(sk->sk_state, TCP_CLOSE); skpair = unix_peer(sk); unix_peer(sk) = NULL; unix_state_unlock(sk); #if IS_ENABLED(CONFIG_AF_UNIX_OOB) u->oob_skb = NULL; #endif wake_up_interruptible_all(&u->peer_wait); if (skpair != NULL) { if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { unix_state_lock(skpair); /* No more writes */ WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion) WRITE_ONCE(skpair->sk_err, ECONNRESET); unix_state_unlock(skpair); skpair->sk_state_change(skpair); sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); } unix_dgram_peer_wake_disconnect(sk, skpair); sock_put(skpair); /* It may now die */ } /* Try to flush out this socket. 
Throw out buffers at least */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (state == TCP_LISTEN) unix_release_sock(skb->sk, 1); /* passed fds are erased in the kfree_skb hook */ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE); } if (path.dentry) path_put(&path); sock_put(sk); /* ---- Socket is dead now and most probably destroyed ---- */ /* * Fixme: BSD difference: In BSD all sockets connected to us get * ECONNRESET and we die on the spot. In Linux we behave * like files and pipes do and wait for the last * dereference. * * Can't we simply set sock->err? * * What the above comment does talk about? --ANK(980817) */ if (READ_ONCE(unix_tot_inflight)) unix_gc(); /* Garbage collect fds */ } static void init_peercred(struct sock *sk) { sk->sk_peer_pid = get_pid(task_tgid(current)); sk->sk_peer_cred = get_current_cred(); } static void update_peercred(struct sock *sk) { const struct cred *old_cred; struct pid *old_pid; spin_lock(&sk->sk_peer_lock); old_pid = sk->sk_peer_pid; old_cred = sk->sk_peer_cred; init_peercred(sk); spin_unlock(&sk->sk_peer_lock); put_pid(old_pid); put_cred(old_cred); } static void copy_peercred(struct sock *sk, struct sock *peersk) { lockdep_assert_held(&unix_sk(peersk)->lock); spin_lock(&sk->sk_peer_lock); sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); spin_unlock(&sk->sk_peer_lock); } static int unix_listen(struct socket *sock, int backlog) { int err; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); err = -EOPNOTSUPP; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) goto out; /* Only stream/seqpacket sockets accept */ err = -EINVAL; if (!READ_ONCE(u->addr)) goto out; /* No listens on an unbound socket */ unix_state_lock(sk); if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) goto out_unlock; if (backlog > sk->sk_max_ack_backlog) wake_up_interruptible_all(&u->peer_wait); sk->sk_max_ack_backlog = backlog; WRITE_ONCE(sk->sk_state, TCP_LISTEN); /* set credentials so connect can copy them */ update_peercred(sk); err = 0; out_unlock: unix_state_unlock(sk); out: return err; } static int unix_release(struct socket *); static int unix_bind(struct socket *, struct sockaddr *, int); static int unix_stream_connect(struct socket *, struct sockaddr *, int addr_len, int flags); static int unix_socketpair(struct socket *, struct socket *); static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg); static int unix_getname(struct socket *, struct sockaddr *, int); static __poll_t unix_poll(struct file *, struct socket *, poll_table *); static __poll_t unix_dgram_poll(struct file *, struct socket *, poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); #endif static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos, struct pipe_inode_info *, size_t size, unsigned int flags); static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int); static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor); static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor); static int 
unix_dgram_connect(struct socket *, struct sockaddr *, int, int); static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t, int); #ifdef CONFIG_PROC_FS static int unix_count_nr_fds(struct sock *sk) { struct sk_buff *skb; struct unix_sock *u; int nr_fds = 0; spin_lock(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); while (skb) { u = unix_sk(skb->sk); nr_fds += atomic_read(&u->scm_stat.nr_fds); skb = skb_peek_next(skb, &sk->sk_receive_queue); } spin_unlock(&sk->sk_receive_queue.lock); return nr_fds; } static void unix_show_fdinfo(struct seq_file *m, struct socket *sock) { struct sock *sk = sock->sk; unsigned char s_state; struct unix_sock *u; int nr_fds = 0; if (sk) { s_state = READ_ONCE(sk->sk_state); u = unix_sk(sk); /* SOCK_STREAM and SOCK_SEQPACKET sockets never change their * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN. * SOCK_DGRAM is ordinary. So, no lock is needed. */ if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED) nr_fds = atomic_read(&u->scm_stat.nr_fds); else if (s_state == TCP_LISTEN) nr_fds = unix_count_nr_fds(sk); seq_printf(m, "scm_fds: %u\n", nr_fds); } } #else #define unix_show_fdinfo NULL #endif static const struct proto_ops unix_stream_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = unix_release, .bind = unix_bind, .connect = unix_stream_connect, .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, .poll = unix_poll, .ioctl = unix_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = unix_compat_ioctl, #endif .listen = unix_listen, .shutdown = unix_shutdown, .sendmsg = unix_stream_sendmsg, .recvmsg = unix_stream_recvmsg, .read_skb = unix_stream_read_skb, .mmap = sock_no_mmap, .splice_read = unix_stream_splice_read, .set_peek_off = sk_set_peek_off, .show_fdinfo = unix_show_fdinfo, }; static const struct proto_ops unix_dgram_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = unix_release, .bind = unix_bind, .connect = unix_dgram_connect, .socketpair = unix_socketpair, .accept = sock_no_accept, .getname = unix_getname, .poll = unix_dgram_poll, .ioctl = unix_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = unix_compat_ioctl, #endif .listen = sock_no_listen, .shutdown = unix_shutdown, .sendmsg = unix_dgram_sendmsg, .read_skb = unix_read_skb, .recvmsg = unix_dgram_recvmsg, .mmap = sock_no_mmap, .set_peek_off = sk_set_peek_off, .show_fdinfo = unix_show_fdinfo, }; static const struct proto_ops unix_seqpacket_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = unix_release, .bind = unix_bind, .connect = unix_stream_connect, .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, .poll = unix_dgram_poll, .ioctl = unix_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = unix_compat_ioctl, #endif .listen = unix_listen, .shutdown = unix_shutdown, .sendmsg = unix_seqpacket_sendmsg, .recvmsg = unix_seqpacket_recvmsg, .mmap = sock_no_mmap, .set_peek_off = sk_set_peek_off, .show_fdinfo = unix_show_fdinfo, }; static void unix_close(struct sock *sk, long timeout) { /* Nothing to do here, unix socket does not need a ->close(). * This is merely for sockmap. */ } static void unix_unhash(struct sock *sk) { /* Nothing to do here, unix socket does not need a ->unhash(). * This is merely for sockmap. 
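 *
 * Both unix_close() above and this stub exist only so that sockmap,
 * which rewrites sk->sk_prot for sockets it manages, has valid
 * ->close() and ->unhash() callbacks to fall back on; AF_UNIX keeps
 * no state of its own that would need either hook.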
*/ } static bool unix_bpf_bypass_getsockopt(int level, int optname) { if (level == SOL_SOCKET) { switch (optname) { case SO_PEERPIDFD: return true; default: return false; } } return false; } struct proto unix_dgram_proto = { .name = "UNIX", .owner = THIS_MODULE, .obj_size = sizeof(struct unix_sock), .close = unix_close, .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = unix_dgram_bpf_update_proto, #endif }; struct proto unix_stream_proto = { .name = "UNIX-STREAM", .owner = THIS_MODULE, .obj_size = sizeof(struct unix_sock), .close = unix_close, .unhash = unix_unhash, .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = unix_stream_bpf_update_proto, #endif }; static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type) { struct unix_sock *u; struct sock *sk; int err; atomic_long_inc(&unix_nr_socks); if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) { err = -ENFILE; goto err; } if (type == SOCK_STREAM) sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern); else /*dgram and seqpacket */ sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern); if (!sk) { err = -ENOMEM; goto err; } sock_init_data(sock, sk); sk->sk_hash = unix_unbound_hash(sk); sk->sk_allocation = GFP_KERNEL_ACCOUNT; sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen); sk->sk_destruct = unix_sock_destructor; lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL); u = unix_sk(sk); u->listener = NULL; u->vertex = NULL; u->path.dentry = NULL; u->path.mnt = NULL; spin_lock_init(&u->lock); lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL); mutex_init(&u->iolock); /* single task reading lock */ mutex_init(&u->bindlock); /* single task binding lock */ init_waitqueue_head(&u->peer_wait); init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); memset(&u->scm_stat, 0, sizeof(struct scm_stat)); unix_insert_unbound_socket(net, sk); sock_prot_inuse_add(net, sk->sk_prot, 1); return sk; err: atomic_long_dec(&unix_nr_socks); return ERR_PTR(err); } static int unix_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (protocol && protocol != PF_UNIX) return -EPROTONOSUPPORT; sock->state = SS_UNCONNECTED; switch (sock->type) { case SOCK_STREAM: sock->ops = &unix_stream_ops; break; /* * Believe it or not BSD has AF_UNIX, SOCK_RAW though * nothing uses it. 
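 *
 * In practice socket(AF_UNIX, SOCK_RAW, 0) is therefore accepted but
 * silently downgraded: sock->type is rewritten to SOCK_DGRAM just
 * below, and the caller ends up with an ordinary datagram socket.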
*/ case SOCK_RAW: sock->type = SOCK_DGRAM; fallthrough; case SOCK_DGRAM: sock->ops = &unix_dgram_ops; break; case SOCK_SEQPACKET: sock->ops = &unix_seqpacket_ops; break; default: return -ESOCKTNOSUPPORT; } sk = unix_create1(net, sock, kern, sock->type); if (IS_ERR(sk)) return PTR_ERR(sk); return 0; } static int unix_release(struct socket *sock) { struct sock *sk = sock->sk; if (!sk) return 0; sk->sk_prot->close(sk, 0); unix_release_sock(sk, 0); sock->sk = NULL; return 0; } static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len, int type) { struct inode *inode; struct path path; struct sock *sk; int err; unix_mkname_bsd(sunaddr, addr_len); err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); if (err) goto fail; err = path_permission(&path, MAY_WRITE); if (err) goto path_put; err = -ECONNREFUSED; inode = d_backing_inode(path.dentry); if (!S_ISSOCK(inode->i_mode)) goto path_put; sk = unix_find_socket_byinode(inode); if (!sk) goto path_put; err = -EPROTOTYPE; if (sk->sk_type == type) touch_atime(&path); else goto sock_put; path_put(&path); return sk; sock_put: sock_put(sk); path_put: path_put(&path); fail: return ERR_PTR(err); } static struct sock *unix_find_abstract(struct net *net, struct sockaddr_un *sunaddr, int addr_len, int type) { unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type); struct dentry *dentry; struct sock *sk; sk = unix_find_socket_byname(net, sunaddr, addr_len, hash); if (!sk) return ERR_PTR(-ECONNREFUSED); dentry = unix_sk(sk)->path.dentry; if (dentry) touch_atime(&unix_sk(sk)->path); return sk; } static struct sock *unix_find_other(struct net *net, struct sockaddr_un *sunaddr, int addr_len, int type) { struct sock *sk; if (sunaddr->sun_path[0]) sk = unix_find_bsd(sunaddr, addr_len, type); else sk = unix_find_abstract(net, sunaddr, addr_len, type); return sk; } static int unix_autobind(struct sock *sk) { struct unix_sock *u = unix_sk(sk); unsigned int new_hash, old_hash; struct net *net = sock_net(sk); struct unix_address *addr; u32 lastnum, ordernum; int err; err = mutex_lock_interruptible(&u->bindlock); if (err) return err; if (u->addr) goto out; err = -ENOMEM; addr = kzalloc(sizeof(*addr) + offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL); if (!addr) goto out; addr->len = offsetof(struct sockaddr_un, sun_path) + 6; addr->name->sun_family = AF_UNIX; refcount_set(&addr->refcnt, 1); old_hash = sk->sk_hash; ordernum = get_random_u32(); lastnum = ordernum & 0xFFFFF; retry: ordernum = (ordernum + 1) & 0xFFFFF; sprintf(addr->name->sun_path + 1, "%05x", ordernum); new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); unix_table_double_lock(net, old_hash, new_hash); if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) { unix_table_double_unlock(net, old_hash, new_hash); /* __unix_find_socket_byname() may take long time if many names * are already in use. */ cond_resched(); if (ordernum == lastnum) { /* Give up if all names seems to be in use. 
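 *
 * The autobind namespace consists of the 2^20 abstract names generated
 * by the "%05x" format above (a leading NUL followed by five hex
 * digits). Once ordernum wraps around to lastnum, every candidate has
 * been probed and -ENOSPC is returned.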
*/ err = -ENOSPC; unix_release_addr(addr); goto out; } goto retry; } __unix_set_addr_hash(net, sk, addr, new_hash); unix_table_double_unlock(net, old_hash, new_hash); err = 0; out: mutex_unlock(&u->bindlock); return err; } static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr, int addr_len) { umode_t mode = S_IFSOCK | (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask()); struct unix_sock *u = unix_sk(sk); unsigned int new_hash, old_hash; struct net *net = sock_net(sk); struct mnt_idmap *idmap; struct unix_address *addr; struct dentry *dentry; struct path parent; int err; addr_len = unix_mkname_bsd(sunaddr, addr_len); addr = unix_create_addr(sunaddr, addr_len); if (!addr) return -ENOMEM; /* * Get the parent directory, calculate the hash for last * component. */ dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out; } /* * All right, let's create it. */ idmap = mnt_idmap(parent.mnt); err = security_path_mknod(&parent, dentry, mode, 0); if (!err) err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0); if (err) goto out_path; err = mutex_lock_interruptible(&u->bindlock); if (err) goto out_unlink; if (u->addr) goto out_unlock; old_hash = sk->sk_hash; new_hash = unix_bsd_hash(d_backing_inode(dentry)); unix_table_double_lock(net, old_hash, new_hash); u->path.mnt = mntget(parent.mnt); u->path.dentry = dget(dentry); __unix_set_addr_hash(net, sk, addr, new_hash); unix_table_double_unlock(net, old_hash, new_hash); unix_insert_bsd_socket(sk); mutex_unlock(&u->bindlock); done_path_create(&parent, dentry); return 0; out_unlock: mutex_unlock(&u->bindlock); err = -EINVAL; out_unlink: /* failed after successful mknod? unlink what we'd created... */ vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL); out_path: done_path_create(&parent, dentry); out: unix_release_addr(addr); return err == -EEXIST ? 
-EADDRINUSE : err; } static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr, int addr_len) { struct unix_sock *u = unix_sk(sk); unsigned int new_hash, old_hash; struct net *net = sock_net(sk); struct unix_address *addr; int err; addr = unix_create_addr(sunaddr, addr_len); if (!addr) return -ENOMEM; err = mutex_lock_interruptible(&u->bindlock); if (err) goto out; if (u->addr) { err = -EINVAL; goto out_mutex; } old_hash = sk->sk_hash; new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); unix_table_double_lock(net, old_hash, new_hash); if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) goto out_spin; __unix_set_addr_hash(net, sk, addr, new_hash); unix_table_double_unlock(net, old_hash, new_hash); mutex_unlock(&u->bindlock); return 0; out_spin: unix_table_double_unlock(net, old_hash, new_hash); err = -EADDRINUSE; out_mutex: mutex_unlock(&u->bindlock); out: unix_release_addr(addr); return err; } static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; struct sock *sk = sock->sk; int err; if (addr_len == offsetof(struct sockaddr_un, sun_path) && sunaddr->sun_family == AF_UNIX) return unix_autobind(sk); err = unix_validate_addr(sunaddr, addr_len); if (err) return err; if (sunaddr->sun_path[0]) err = unix_bind_bsd(sk, sunaddr, addr_len); else err = unix_bind_abstract(sk, sunaddr, addr_len); return err; } static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) { if (unlikely(sk1 == sk2) || !sk2) { unix_state_lock(sk1); return; } if (sk1 > sk2) swap(sk1, sk2); unix_state_lock(sk1); unix_state_lock(sk2); } static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) { if (unlikely(sk1 == sk2) || !sk2) { unix_state_unlock(sk1); return; } unix_state_unlock(sk1); unix_state_unlock(sk2); } static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; struct sock *sk = sock->sk; struct sock *other; int err; err = -EINVAL; if (alen < offsetofend(struct sockaddr, sa_family)) goto out; if (addr->sa_family != AF_UNSPEC) { err = unix_validate_addr(sunaddr, alen); if (err) goto out; err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen); if (err) goto out; if ((test_bit(SOCK_PASSCRED, &sock->flags) || test_bit(SOCK_PASSPIDFD, &sock->flags)) && !READ_ONCE(unix_sk(sk)->addr)) { err = unix_autobind(sk); if (err) goto out; } restart: other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type); if (IS_ERR(other)) { err = PTR_ERR(other); goto out; } unix_state_double_lock(sk, other); /* Apparently VFS overslept socket death. Retry. */ if (sock_flag(other, SOCK_DEAD)) { unix_state_double_unlock(sk, other); sock_put(other); goto restart; } err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED); WRITE_ONCE(other->sk_state, TCP_ESTABLISHED); } else { /* * 1003.1g breaking connected state with AF_UNSPEC */ other = NULL; unix_state_double_lock(sk, other); } /* * If it was connected, reconnect. 
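 *
 * Reconnecting to a different peer purges this socket's receive queue
 * of datagrams from the previous peer (unix_dgram_disconnected()), and
 * if such datagrams were still queued and the old peer was connected
 * back to us, it sees ECONNRESET. Connecting with AF_UNSPEC (other is
 * NULL here) simply dissolves the association and drops the socket
 * back to TCP_CLOSE.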
*/ if (unix_peer(sk)) { struct sock *old_peer = unix_peer(sk); unix_peer(sk) = other; if (!other) WRITE_ONCE(sk->sk_state, TCP_CLOSE); unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); unix_state_double_unlock(sk, other); if (other != old_peer) { unix_dgram_disconnected(sk, old_peer); unix_state_lock(old_peer); if (!unix_peer(old_peer)) WRITE_ONCE(old_peer->sk_state, TCP_CLOSE); unix_state_unlock(old_peer); } sock_put(old_peer); } else { unix_peer(sk) = other; unix_state_double_unlock(sk, other); } return 0; out_unlock: unix_state_double_unlock(sk, other); sock_put(other); out: return err; } static long unix_wait_for_peer(struct sock *other, long timeo) __releases(&unix_sk(other)->lock) { struct unix_sock *u = unix_sk(other); int sched; DEFINE_WAIT(wait); prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE); sched = !sock_flag(other, SOCK_DEAD) && !(other->sk_shutdown & RCV_SHUTDOWN) && unix_recvq_full_lockless(other); unix_state_unlock(other); if (sched) timeo = schedule_timeout(timeo); finish_wait(&u->peer_wait, &wait); return timeo; } static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; struct sock *sk = sock->sk, *newsk = NULL, *other = NULL; struct unix_sock *u = unix_sk(sk), *newu, *otheru; struct net *net = sock_net(sk); struct sk_buff *skb = NULL; unsigned char state; long timeo; int err; err = unix_validate_addr(sunaddr, addr_len); if (err) goto out; err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len); if (err) goto out; if ((test_bit(SOCK_PASSCRED, &sock->flags) || test_bit(SOCK_PASSPIDFD, &sock->flags)) && !READ_ONCE(u->addr)) { err = unix_autobind(sk); if (err) goto out; } timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); /* First of all allocate resources. * If we will make it after state is locked, * we will have to recheck all again in any case. */ /* create new sock for complete connection */ newsk = unix_create1(net, NULL, 0, sock->type); if (IS_ERR(newsk)) { err = PTR_ERR(newsk); goto out; } /* Allocate skb for sending to listening sock */ skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); if (!skb) { err = -ENOMEM; goto out_free_sk; } restart: /* Find listening sock. */ other = unix_find_other(net, sunaddr, addr_len, sk->sk_type); if (IS_ERR(other)) { err = PTR_ERR(other); goto out_free_skb; } unix_state_lock(other); /* Apparently VFS overslept socket death. Retry. */ if (sock_flag(other, SOCK_DEAD)) { unix_state_unlock(other); sock_put(other); goto restart; } if (other->sk_state != TCP_LISTEN || other->sk_shutdown & RCV_SHUTDOWN) { err = -ECONNREFUSED; goto out_unlock; } if (unix_recvq_full_lockless(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); sock_put(other); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free_skb; goto restart; } /* self connect and simultaneous connect are eliminated * by rejecting TCP_LISTEN socket to avoid deadlock. */ state = READ_ONCE(sk->sk_state); if (unlikely(state != TCP_CLOSE)) { err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; goto out_unlock; } unix_state_lock(sk); if (unlikely(sk->sk_state != TCP_CLOSE)) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; unix_state_unlock(sk); goto out_unlock; } err = security_unix_stream_connect(sk, other, newsk); if (err) { unix_state_unlock(sk); goto out_unlock; } /* The way is open! Fastly set all the necessary fields... 
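 *
 * The handshake completes by queueing the skb allocated above (owned
 * by newsk) on the listener's receive queue; unix_accept() dequeues it
 * and grafts skb->sk, i.e. newsk, onto the accepting socket, making sk
 * and newsk the two ends of the stream pair.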
*/ sock_hold(sk); unix_peer(newsk) = sk; newsk->sk_state = TCP_ESTABLISHED; newsk->sk_type = sk->sk_type; init_peercred(newsk); newu = unix_sk(newsk); newu->listener = other; RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); otheru = unix_sk(other); /* copy address information from listening to new sock * * The contents of *(otheru->addr) and otheru->path * are seen fully set up here, since we have found * otheru in hash under its lock. Insertion into the * hash chain we'd found it in had been done in an * earlier critical area protected by the chain's lock, * the same one where we'd set *(otheru->addr) contents, * as well as otheru->path and otheru->addr itself. * * Using smp_store_release() here to set newu->addr * is enough to make those stores, as well as stores * to newu->path visible to anyone who gets newu->addr * by smp_load_acquire(). IOW, the same warranties * as for unix_sock instances bound in unix_bind() or * in unix_autobind(). */ if (otheru->path.dentry) { path_get(&otheru->path); newu->path = otheru->path; } refcount_inc(&otheru->addr->refcnt); smp_store_release(&newu->addr, otheru->addr); /* Set credentials */ copy_peercred(sk, other); sock->state = SS_CONNECTED; WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED); sock_hold(newsk); smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ unix_peer(sk) = newsk; unix_state_unlock(sk); /* take ten and send info to listening sock */ spin_lock(&other->sk_receive_queue.lock); __skb_queue_tail(&other->sk_receive_queue, skb); spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); other->sk_data_ready(other); sock_put(other); return 0; out_unlock: unix_state_unlock(other); sock_put(other); out_free_skb: consume_skb(skb); out_free_sk: unix_release_sock(newsk, 0); out: return err; } static int unix_socketpair(struct socket *socka, struct socket *sockb) { struct sock *ska = socka->sk, *skb = sockb->sk; /* Join our sockets back to back */ sock_hold(ska); sock_hold(skb); unix_peer(ska) = skb; unix_peer(skb) = ska; init_peercred(ska); init_peercred(skb); ska->sk_state = TCP_ESTABLISHED; skb->sk_state = TCP_ESTABLISHED; socka->state = SS_CONNECTED; sockb->state = SS_CONNECTED; return 0; } static void unix_sock_inherit_flags(const struct socket *old, struct socket *new) { if (test_bit(SOCK_PASSCRED, &old->flags)) set_bit(SOCK_PASSCRED, &new->flags); if (test_bit(SOCK_PASSPIDFD, &old->flags)) set_bit(SOCK_PASSPIDFD, &new->flags); if (test_bit(SOCK_PASSSEC, &old->flags)) set_bit(SOCK_PASSSEC, &new->flags); } static int unix_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sock *sk = sock->sk; struct sk_buff *skb; struct sock *tsk; arg->err = -EOPNOTSUPP; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) goto out; arg->err = -EINVAL; if (READ_ONCE(sk->sk_state) != TCP_LISTEN) goto out; /* If socket state is TCP_LISTEN it cannot change (for now...), * so that no locks are necessary. */ skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0, &arg->err); if (!skb) { /* This means receive shutdown. 
*/ if (arg->err == 0) arg->err = -EINVAL; goto out; } tsk = skb->sk; skb_free_datagram(sk, skb); wake_up_interruptible(&unix_sk(sk)->peer_wait); /* attach accepted sock to socket */ unix_state_lock(tsk); unix_update_edges(unix_sk(tsk)); newsock->state = SS_CONNECTED; unix_sock_inherit_flags(sock, newsock); sock_graft(tsk, newsock); unix_state_unlock(tsk); return 0; out: return arg->err; } static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sock *sk = sock->sk; struct unix_address *addr; DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); int err = 0; if (peer) { sk = unix_peer_get(sk); err = -ENOTCONN; if (!sk) goto out; err = 0; } else { sock_hold(sk); } addr = smp_load_acquire(&unix_sk(sk)->addr); if (!addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; err = offsetof(struct sockaddr_un, sun_path); } else { err = addr->len; memcpy(sunaddr, addr->name, addr->len); if (peer) BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err, CGROUP_UNIX_GETPEERNAME); else BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err, CGROUP_UNIX_GETSOCKNAME); } sock_put(sk); out: return err; } /* The "user->unix_inflight" variable is protected by the garbage * collection lock, and we just read it locklessly here. If you go * over the limit, there might be a tiny race in actually noticing * it across threads. Tough. */ static inline bool too_many_unix_fds(struct task_struct *p) { struct user_struct *user = current_user(); if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); return false; } static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { if (too_many_unix_fds(current)) return -ETOOMANYREFS; UNIXCB(skb).fp = scm->fp; scm->fp = NULL; if (unix_prepare_fpl(UNIXCB(skb).fp)) return -ENOMEM; return 0; } static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) { scm->fp = UNIXCB(skb).fp; UNIXCB(skb).fp = NULL; unix_destroy_fpl(scm->fp); } static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) { scm->fp = scm_fp_dup(UNIXCB(skb).fp); } static void unix_destruct_scm(struct sk_buff *skb) { struct scm_cookie scm; memset(&scm, 0, sizeof(scm)); scm.pid = UNIXCB(skb).pid; if (UNIXCB(skb).fp) unix_detach_fds(&scm, skb); /* Alas, it calls VFS */ /* So fscking what? fput() had been SMP-safe since the last Summer */ scm_destroy(&scm); sock_wfree(skb); } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) { int err = 0; UNIXCB(skb).pid = get_pid(scm->pid); UNIXCB(skb).uid = scm->creds.uid; UNIXCB(skb).gid = scm->creds.gid; UNIXCB(skb).fp = NULL; unix_get_secdata(scm, skb); if (scm->fp && send_fds) err = unix_attach_fds(scm, skb); skb->destructor = unix_destruct_scm; return err; } static bool unix_passcred_enabled(const struct socket *sock, const struct sock *other) { return test_bit(SOCK_PASSCRED, &sock->flags) || test_bit(SOCK_PASSPIDFD, &sock->flags) || !other->sk_socket || test_bit(SOCK_PASSCRED, &other->sk_socket->flags) || test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags); } /* * Some apps rely on write() giving SCM_CREDENTIALS * We include credentials if source or destination socket * asserted SOCK_PASSCRED. 
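 *
 * SOCK_PASSPIDFD is treated the same way below. As a purely
 * illustrative userspace counterpart, a receiver typically opts in
 * with
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *
 * and then reads the sender's struct ucred from the SCM_CREDENTIALS
 * control message returned by recvmsg().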
*/ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, const struct sock *other) { if (UNIXCB(skb).pid) return; if (unix_passcred_enabled(sock, other)) { UNIXCB(skb).pid = get_pid(task_tgid(current)); current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); } } static bool unix_skb_scm_eq(struct sk_buff *skb, struct scm_cookie *scm) { return UNIXCB(skb).pid == scm->pid && uid_eq(UNIXCB(skb).uid, scm->creds.uid) && gid_eq(UNIXCB(skb).gid, scm->creds.gid) && unix_secdata_eq(scm, skb); } static void scm_stat_add(struct sock *sk, struct sk_buff *skb) { struct scm_fp_list *fp = UNIXCB(skb).fp; struct unix_sock *u = unix_sk(sk); if (unlikely(fp && fp->count)) { atomic_add(fp->count, &u->scm_stat.nr_fds); unix_add_edges(fp, u); } } static void scm_stat_del(struct sock *sk, struct sk_buff *skb) { struct scm_fp_list *fp = UNIXCB(skb).fp; struct unix_sock *u = unix_sk(sk); if (unlikely(fp && fp->count)) { atomic_sub(fp->count, &u->scm_stat.nr_fds); unix_del_edges(fp); } } /* * Send AF_UNIX data. */ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk, *other = NULL; struct unix_sock *u = unix_sk(sk); struct scm_cookie scm; struct sk_buff *skb; int data_len = 0; int sk_locked; long timeo; int err; err = scm_send(sock, msg, &scm, false); if (err < 0) return err; wait_for_unix_gc(scm.fp); if (msg->msg_flags & MSG_OOB) { err = -EOPNOTSUPP; goto out; } if (msg->msg_namelen) { err = unix_validate_addr(msg->msg_name, msg->msg_namelen); if (err) goto out; err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, msg->msg_name, &msg->msg_namelen, NULL); if (err) goto out; } if ((test_bit(SOCK_PASSCRED, &sock->flags) || test_bit(SOCK_PASSPIDFD, &sock->flags)) && !READ_ONCE(u->addr)) { err = unix_autobind(sk); if (err) goto out; } if (len > READ_ONCE(sk->sk_sndbuf) - 32) { err = -EMSGSIZE; goto out; } if (len > SKB_MAX_ALLOC) { data_len = min_t(size_t, len - SKB_MAX_ALLOC, MAX_SKB_FRAGS * PAGE_SIZE); data_len = PAGE_ALIGN(data_len); BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE); } skb = sock_alloc_send_pskb(sk, len - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) goto out; err = unix_scm_to_skb(&scm, skb, true); if (err < 0) goto out_free; skb_put(skb, len - data_len); skb->data_len = data_len; skb->len = len; err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (msg->msg_namelen) { lookup: other = unix_find_other(sock_net(sk), msg->msg_name, msg->msg_namelen, sk->sk_type); if (IS_ERR(other)) { err = PTR_ERR(other); goto out_free; } } else { other = unix_peer_get(sk); if (!other) { err = -ENOTCONN; goto out_free; } } if (sk_filter(other, skb) < 0) { /* Toss the packet but do not return any error to the sender */ err = len; goto out_sock_put; } restart: sk_locked = 0; unix_state_lock(other); restart_locked: if (!unix_may_send(sk, other)) { err = -EPERM; goto out_unlock; } if (unlikely(sock_flag(other, SOCK_DEAD))) { /* Check with 1003.1g - what should datagram error */ unix_state_unlock(other); if (sk->sk_type == SOCK_SEQPACKET) { /* We are here only when racing with unix_release_sock() * is clearing @other. Never change state to TCP_CLOSE * unlike SOCK_DGRAM wants. 
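 *
 * For SOCK_SEQPACKET the connection-oriented semantics apply: report
 * -EPIPE and leave our state untouched, since the peer's release path
 * performs the teardown. The SOCK_DGRAM code below instead unhooks the
 * dead peer (moving this socket to TCP_CLOSE) or, for a send with an
 * explicit destination, simply retries the lookup.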
*/ err = -EPIPE; goto out_sock_put; } if (!sk_locked) unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk) = NULL; unix_dgram_peer_wake_disconnect_wakeup(sk, other); WRITE_ONCE(sk->sk_state, TCP_CLOSE); unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; goto out_sock_put; } unix_state_unlock(sk); if (!msg->msg_namelen) { err = -ECONNRESET; goto out_sock_put; } goto lookup; } if (other->sk_shutdown & RCV_SHUTDOWN) { err = -EPIPE; goto out_unlock; } if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } /* other == sk && unix_peer(other) != sk if * - unix_peer(sk) == NULL, destination address bound to sk * - unix_peer(sk) == sk by time of get but disconnected before lock */ if (other != sk && unlikely(unix_peer(other) != sk && unix_recvq_full_lockless(other))) { if (timeo) { timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_sock_put; goto restart; } if (!sk_locked) { unix_state_unlock(other); unix_state_double_lock(sk, other); } if (unix_peer(sk) != other || unix_dgram_peer_wake_me(sk, other)) { err = -EAGAIN; sk_locked = 1; goto out_unlock; } if (!sk_locked) { sk_locked = 1; goto restart_locked; } } if (unlikely(sk_locked)) unix_state_unlock(sk); if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); maybe_add_creds(skb, sock, other); scm_stat_add(other, skb); skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other); sock_put(other); scm_destroy(&scm); return len; out_unlock: if (sk_locked) unix_state_unlock(sk); unix_state_unlock(other); out_sock_put: sock_put(other); out_free: consume_skb(skb); out: scm_destroy(&scm); return err; } /* We use paged skbs for stream sockets, and limit occupancy to 32768 * bytes, and a minimum of a full page. */ #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) #if IS_ENABLED(CONFIG_AF_UNIX_OOB) static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other, struct scm_cookie *scm, bool fds_sent) { struct unix_sock *ousk = unix_sk(other); struct sk_buff *skb; int err; skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; err = unix_scm_to_skb(scm, skb, !fds_sent); if (err < 0) goto out; skb_put(skb, 1); err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1); if (err) goto out; unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) { unix_state_unlock(other); err = -EPIPE; goto out; } maybe_add_creds(skb, sock, other); scm_stat_add(other, skb); spin_lock(&other->sk_receive_queue.lock); WRITE_ONCE(ousk->oob_skb, skb); __skb_queue_tail(&other->sk_receive_queue, skb); spin_unlock(&other->sk_receive_queue.lock); sk_send_sigurg(other); unix_state_unlock(other); other->sk_data_ready(other); return 0; out: consume_skb(skb); return err; } #endif static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb = NULL; struct sock *other = NULL; struct scm_cookie scm; bool fds_sent = false; int err, sent = 0; err = scm_send(sock, msg, &scm, false); if (err < 0) return err; wait_for_unix_gc(scm.fp); if (msg->msg_flags & MSG_OOB) { err = -EOPNOTSUPP; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (len) len--; else #endif goto out_err; } if (msg->msg_namelen) { err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? 
-EISCONN : -EOPNOTSUPP; goto out_err; } else { other = unix_peer(sk); if (!other) { err = -ENOTCONN; goto out_err; } } if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) goto out_pipe; while (sent < len) { int size = len - sent; int data_len; if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { skb = sock_alloc_send_pskb(sk, 0, 0, msg->msg_flags & MSG_DONTWAIT, &err, 0); } else { /* Keep two messages in the pipe so it schedules better */ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64); /* allow fallback to order-0 allocations */ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); skb = sock_alloc_send_pskb(sk, size - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err, get_order(UNIX_SKB_FRAGS_SZ)); } if (!skb) goto out_err; /* Only send the fds in the first buffer */ err = unix_scm_to_skb(&scm, skb, !fds_sent); if (err < 0) goto out_free; fds_sent = true; if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { skb->ip_summed = CHECKSUM_UNNECESSARY; err = skb_splice_from_iter(skb, &msg->msg_iter, size, sk->sk_allocation); if (err < 0) goto out_free; size = err; refcount_add(size, &sk->sk_wmem_alloc); } else { skb_put(skb, size - data_len); skb->data_len = data_len; skb->len = size; err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) goto out_free; } unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) goto out_pipe_unlock; maybe_add_creds(skb, sock, other); scm_stat_add(other, skb); skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other); sent += size; } #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (msg->msg_flags & MSG_OOB) { err = queue_oob(sock, msg, other, &scm, fds_sent); if (err) goto out_err; sent++; } #endif scm_destroy(&scm); return sent; out_pipe_unlock: unix_state_unlock(other); out_pipe: if (!sent && !(msg->msg_flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); err = -EPIPE; out_free: consume_skb(skb); out_err: scm_destroy(&scm); return sent ? 
: err; } static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { int err; struct sock *sk = sock->sk; err = sock_error(sk); if (err) return err; if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED) return -ENOTCONN; if (msg->msg_namelen) msg->msg_namelen = 0; return unix_dgram_sendmsg(sock, msg, len); } static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED) return -ENOTCONN; return unix_dgram_recvmsg(sock, msg, size, flags); } static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); if (addr) { msg->msg_namelen = addr->len; memcpy(msg->msg_name, addr->name, addr->len); } } int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, int flags) { struct scm_cookie scm; struct socket *sock = sk->sk_socket; struct unix_sock *u = unix_sk(sk); struct sk_buff *skb, *last; long timeo; int skip; int err; err = -EOPNOTSUPP; if (flags&MSG_OOB) goto out; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { mutex_lock(&u->iolock); skip = sk_peek_offset(sk, flags); skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags, &skip, &err, &last); if (skb) { if (!(flags & MSG_PEEK)) scm_stat_del(sk, skb); break; } mutex_unlock(&u->iolock); if (err != -EAGAIN) break; } while (timeo && !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, &err, &timeo, last)); if (!skb) { /* implies iolock unlocked */ unix_state_lock(sk); /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN)) err = 0; unix_state_unlock(sk); goto out; } if (wq_has_sleeper(&u->peer_wait)) wake_up_interruptible_sync_poll(&u->peer_wait, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); if (msg->msg_name) { unix_copy_addr(msg, skb->sk); BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, msg->msg_name, &msg->msg_namelen); } if (size > skb->len - skip) size = skb->len - skip; else if (size < skb->len - skip) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_msg(skb, skip, msg, size); if (err) goto out_free; if (sock_flag(sk, SOCK_RCVTSTAMP)) __sock_recv_timestamp(msg, sk, skb); memset(&scm, 0, sizeof(scm)); scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(&scm, skb); if (!(flags & MSG_PEEK)) { if (UNIXCB(skb).fp) unix_detach_fds(&scm, skb); sk_peek_offset_bwd(sk, skb->len); } else { /* It is questionable: on PEEK we could: - do not return fds - good, but too simple 8) - return fds, and do not return them on read (old strategy, apparently wrong) - clone fds (I chose it for now, it is the most universal solution) POSIX 1003.1g does not actually define this clearly at all. POSIX 1003.1g doesn't define a lot of things clearly however! */ sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) unix_peek_fds(&scm, skb); } err = (flags & MSG_TRUNC) ? 
skb->len - skip : size; scm_recv_unix(sock, msg, &scm, flags); out_free: skb_free_datagram(sk, skb); mutex_unlock(&u->iolock); out: return err; } static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; #ifdef CONFIG_BPF_SYSCALL const struct proto *prot = READ_ONCE(sk->sk_prot); if (prot != &unix_dgram_proto) return prot->recvmsg(sk, msg, size, flags, NULL); #endif return __unix_dgram_recvmsg(sk, msg, size, flags); } static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct unix_sock *u = unix_sk(sk); struct sk_buff *skb; int err; mutex_lock(&u->iolock); skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); mutex_unlock(&u->iolock); if (!skb) return err; return recv_actor(sk, skb); } /* * Sleep until more data has arrived. But check for races.. */ static long unix_stream_data_wait(struct sock *sk, long timeo, struct sk_buff *last, unsigned int last_len, bool freezable) { unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE; struct sk_buff *tail; DEFINE_WAIT(wait); unix_state_lock(sk); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, state); tail = skb_peek_tail(&sk->sk_receive_queue); if (tail != last || (tail && tail->len != last_len) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) || signal_pending(current) || !timeo) break; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); unix_state_unlock(sk); timeo = schedule_timeout(timeo); unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) break; sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } finish_wait(sk_sleep(sk), &wait); unix_state_unlock(sk); return timeo; } static unsigned int unix_skb_len(const struct sk_buff *skb) { return skb->len - UNIXCB(skb).consumed; } struct unix_stream_read_state { int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); struct socket *socket; struct msghdr *msg; struct pipe_inode_info *pipe; size_t size; int flags; unsigned int splice_flags; }; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) static int unix_stream_recv_urg(struct unix_stream_read_state *state) { struct socket *sock = state->socket; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); int chunk = 1; struct sk_buff *oob_skb; mutex_lock(&u->iolock); unix_state_lock(sk); spin_lock(&sk->sk_receive_queue.lock); if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) { spin_unlock(&sk->sk_receive_queue.lock); unix_state_unlock(sk); mutex_unlock(&u->iolock); return -EINVAL; } oob_skb = u->oob_skb; if (!(state->flags & MSG_PEEK)) WRITE_ONCE(u->oob_skb, NULL); spin_unlock(&sk->sk_receive_queue.lock); unix_state_unlock(sk); chunk = state->recv_actor(oob_skb, 0, chunk, state); if (!(state->flags & MSG_PEEK)) UNIXCB(oob_skb).consumed += 1; mutex_unlock(&u->iolock); if (chunk < 0) return -EFAULT; state->msg->msg_flags |= MSG_OOB; return 1; } static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, int flags, int copied) { struct sk_buff *read_skb = NULL, *unread_skb = NULL; struct unix_sock *u = unix_sk(sk); if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb))) return skb; spin_lock(&sk->sk_receive_queue.lock); if (!unix_skb_len(skb)) { if (copied && (!u->oob_skb || skb == u->oob_skb)) { skb = NULL; } else if (flags & MSG_PEEK) { skb = skb_peek_next(skb, &sk->sk_receive_queue); } else { read_skb = skb; skb = skb_peek_next(skb, &sk->sk_receive_queue); __skb_unlink(read_skb, &sk->sk_receive_queue); } if (!skb) goto unlock; } if (skb != u->oob_skb) goto unlock; if (copied) { skb = NULL; } else if (!(flags & MSG_PEEK)) { WRITE_ONCE(u->oob_skb, 
NULL); if (!sock_flag(sk, SOCK_URGINLINE)) { __skb_unlink(skb, &sk->sk_receive_queue); unread_skb = skb; skb = skb_peek(&sk->sk_receive_queue); } } else if (!sock_flag(sk, SOCK_URGINLINE)) { skb = skb_peek_next(skb, &sk->sk_receive_queue); } unlock: spin_unlock(&sk->sk_receive_queue.lock); consume_skb(read_skb); kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB); return skb; } #endif static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct unix_sock *u = unix_sk(sk); struct sk_buff *skb; int err; if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) return -ENOTCONN; mutex_lock(&u->iolock); skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); mutex_unlock(&u->iolock); if (!skb) return err; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (unlikely(skb == READ_ONCE(u->oob_skb))) { bool drop = false; unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) { unix_state_unlock(sk); kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE); return -ECONNRESET; } spin_lock(&sk->sk_receive_queue.lock); if (likely(skb == u->oob_skb)) { WRITE_ONCE(u->oob_skb, NULL); drop = true; } spin_unlock(&sk->sk_receive_queue.lock); unix_state_unlock(sk); if (drop) { kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB); return -EAGAIN; } } #endif return recv_actor(sk, skb); } static int unix_stream_read_generic(struct unix_stream_read_state *state, bool freezable) { struct scm_cookie scm; struct socket *sock = state->socket; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); int copied = 0; int flags = state->flags; int noblock = flags & MSG_DONTWAIT; bool check_creds = false; int target; int err = 0; long timeo; int skip; size_t size = state->size; unsigned int last_len; if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) { err = -EINVAL; goto out; } if (unlikely(flags & MSG_OOB)) { err = -EOPNOTSUPP; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) err = unix_stream_recv_urg(state); #endif goto out; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, noblock); memset(&scm, 0, sizeof(scm)); /* Lock the socket to prevent queue disordering * while sleeps in memcpy_tomsg */ mutex_lock(&u->iolock); skip = max(sk_peek_offset(sk, flags), 0); do { struct sk_buff *skb, *last; int chunk; redo: unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) { err = -ECONNRESET; goto unlock; } last = skb = skb_peek(&sk->sk_receive_queue); last_len = last ? last->len : 0; again: #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (skb) { skb = manage_oob(skb, sk, flags, copied); if (!skb && copied) { unix_state_unlock(sk); break; } } #endif if (skb == NULL) { if (copied >= target) goto unlock; /* * POSIX 1003.1g mandates this order. 
*/ err = sock_error(sk); if (err) goto unlock; if (sk->sk_shutdown & RCV_SHUTDOWN) goto unlock; unix_state_unlock(sk); if (!timeo) { err = -EAGAIN; break; } mutex_unlock(&u->iolock); timeo = unix_stream_data_wait(sk, timeo, last, last_len, freezable); if (signal_pending(current)) { err = sock_intr_errno(timeo); scm_destroy(&scm); goto out; } mutex_lock(&u->iolock); goto redo; unlock: unix_state_unlock(sk); break; } while (skip >= unix_skb_len(skb)) { skip -= unix_skb_len(skb); last = skb; last_len = skb->len; skb = skb_peek_next(skb, &sk->sk_receive_queue); if (!skb) goto again; } unix_state_unlock(sk); if (check_creds) { /* Never glue messages from different writers */ if (!unix_skb_scm_eq(skb, &scm)) break; } else if (test_bit(SOCK_PASSCRED, &sock->flags) || test_bit(SOCK_PASSPIDFD, &sock->flags)) { /* Copy credentials */ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(&scm, skb); check_creds = true; } /* Copy address just once */ if (state->msg && state->msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, state->msg->msg_name); unix_copy_addr(state->msg, skb->sk); BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, state->msg->msg_name, &state->msg->msg_namelen); sunaddr = NULL; } chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); chunk = state->recv_actor(skb, skip, chunk, state); if (chunk < 0) { if (copied == 0) copied = -EFAULT; break; } copied += chunk; size -= chunk; /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { UNIXCB(skb).consumed += chunk; sk_peek_offset_bwd(sk, chunk); if (UNIXCB(skb).fp) { scm_stat_del(sk, skb); unix_detach_fds(&scm, skb); } if (unix_skb_len(skb)) break; skb_unlink(skb, &sk->sk_receive_queue); consume_skb(skb); if (scm.fp) break; } else { /* It is questionable, see note in unix_dgram_recvmsg. */ if (UNIXCB(skb).fp) unix_peek_fds(&scm, skb); sk_peek_offset_fwd(sk, chunk); if (UNIXCB(skb).fp) break; skip = 0; last = skb; last_len = skb->len; unix_state_lock(sk); skb = skb_peek_next(skb, &sk->sk_receive_queue); if (skb) goto again; unix_state_unlock(sk); break; } } while (size); mutex_unlock(&u->iolock); if (state->msg) scm_recv_unix(sock, state->msg, &scm, flags); else scm_destroy(&scm); out: return copied ? 
: err; } static int unix_stream_read_actor(struct sk_buff *skb, int skip, int chunk, struct unix_stream_read_state *state) { int ret; ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, state->msg, chunk); return ret ?: chunk; } int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, int flags) { struct unix_stream_read_state state = { .recv_actor = unix_stream_read_actor, .socket = sk->sk_socket, .msg = msg, .size = size, .flags = flags }; return unix_stream_read_generic(&state, true); } static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct unix_stream_read_state state = { .recv_actor = unix_stream_read_actor, .socket = sock, .msg = msg, .size = size, .flags = flags }; #ifdef CONFIG_BPF_SYSCALL struct sock *sk = sock->sk; const struct proto *prot = READ_ONCE(sk->sk_prot); if (prot != &unix_stream_proto) return prot->recvmsg(sk, msg, size, flags, NULL); #endif return unix_stream_read_generic(&state, true); } static int unix_stream_splice_actor(struct sk_buff *skb, int skip, int chunk, struct unix_stream_read_state *state) { return skb_splice_bits(skb, state->socket->sk, UNIXCB(skb).consumed + skip, state->pipe, chunk, state->splice_flags); } static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t size, unsigned int flags) { struct unix_stream_read_state state = { .recv_actor = unix_stream_splice_actor, .socket = sock, .pipe = pipe, .size = size, .splice_flags = flags, }; if (unlikely(*ppos)) return -ESPIPE; if (sock->file->f_flags & O_NONBLOCK || flags & SPLICE_F_NONBLOCK) state.flags = MSG_DONTWAIT; return unix_stream_read_generic(&state, false); } static int unix_shutdown(struct socket *sock, int mode) { struct sock *sk = sock->sk; struct sock *other; if (mode < SHUT_RD || mode > SHUT_RDWR) return -EINVAL; /* This maps: * SHUT_RD (0) -> RCV_SHUTDOWN (1) * SHUT_WR (1) -> SEND_SHUTDOWN (2) * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) */ ++mode; unix_state_lock(sk); WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode); other = unix_peer(sk); if (other) sock_hold(other); unix_state_unlock(sk); sk->sk_state_change(sk); if (other && (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { int peer_mode = 0; const struct proto *prot = READ_ONCE(other->sk_prot); if (prot->unhash) prot->unhash(other); if (mode&RCV_SHUTDOWN) peer_mode |= SEND_SHUTDOWN; if (mode&SEND_SHUTDOWN) peer_mode |= RCV_SHUTDOWN; unix_state_lock(other); WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode); unix_state_unlock(other); other->sk_state_change(other); if (peer_mode == SHUTDOWN_MASK) sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); else if (peer_mode & RCV_SHUTDOWN) sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); } if (other) sock_put(other); return 0; } long unix_inq_len(struct sock *sk) { struct sk_buff *skb; long amount = 0; if (READ_ONCE(sk->sk_state) == TCP_LISTEN) return -EINVAL; spin_lock(&sk->sk_receive_queue.lock); if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { skb_queue_walk(&sk->sk_receive_queue, skb) amount += unix_skb_len(skb); } else { skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; } spin_unlock(&sk->sk_receive_queue.lock); return amount; } EXPORT_SYMBOL_GPL(unix_inq_len); long unix_outq_len(struct sock *sk) { return sk_wmem_alloc_get(sk); } EXPORT_SYMBOL_GPL(unix_outq_len); static int unix_open_file(struct sock *sk) { struct path path; struct file *f; int fd; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 
return -EPERM; if (!smp_load_acquire(&unix_sk(sk)->addr)) return -ENOENT; path = unix_sk(sk)->path; if (!path.dentry) return -ENOENT; path_get(&path); fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out; f = dentry_open(&path, O_PATH, current_cred()); if (IS_ERR(f)) { put_unused_fd(fd); fd = PTR_ERR(f); goto out; } fd_install(fd, f); out: path_put(&path); return fd; } static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; long amount = 0; int err; switch (cmd) { case SIOCOUTQ: amount = unix_outq_len(sk); err = put_user(amount, (int __user *)arg); break; case SIOCINQ: amount = unix_inq_len(sk); if (amount < 0) err = amount; else err = put_user(amount, (int __user *)arg); break; case SIOCUNIXFILE: err = unix_open_file(sk); break; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) case SIOCATMARK: { struct unix_sock *u = unix_sk(sk); struct sk_buff *skb; int answ = 0; mutex_lock(&u->iolock); skb = skb_peek(&sk->sk_receive_queue); if (skb) { struct sk_buff *oob_skb = READ_ONCE(u->oob_skb); struct sk_buff *next_skb; next_skb = skb_peek_next(skb, &sk->sk_receive_queue); if (skb == oob_skb || (!unix_skb_len(skb) && (!oob_skb || next_skb == oob_skb))) answ = 1; } mutex_unlock(&u->iolock); err = put_user(answ, (int __user *)arg); } break; #endif default: err = -ENOIOCTLCMD; break; } return err; } #ifdef CONFIG_COMPAT static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); } #endif static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned char state; __poll_t mask; u8 shutdown; sock_poll_wait(file, sock, wait); mask = 0; shutdown = READ_ONCE(sk->sk_shutdown); state = READ_ONCE(sk->sk_state); /* exceptional events? */ if (READ_ONCE(sk->sk_err)) mask |= EPOLLERR; if (shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; if (shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; /* readable? */ if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk_is_readable(sk)) mask |= EPOLLIN | EPOLLRDNORM; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (READ_ONCE(unix_sk(sk)->oob_skb)) mask |= EPOLLPRI; #endif /* Connection-based need to check for termination and startup */ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && state == TCP_CLOSE) mask |= EPOLLHUP; /* * we set writable also when the other side has shut down the * connection. This prevents stuck sockets. */ if (unix_writable(sk, state)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk, *other; unsigned int writable; unsigned char state; __poll_t mask; u8 shutdown; sock_poll_wait(file, sock, wait); mask = 0; shutdown = READ_ONCE(sk->sk_shutdown); state = READ_ONCE(sk->sk_state); /* exceptional events? */ if (READ_ONCE(sk->sk_err) || !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; /* readable? 
*/ if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk_is_readable(sk)) mask |= EPOLLIN | EPOLLRDNORM; /* Connection-based need to check for termination and startup */ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE) mask |= EPOLLHUP; /* No write status requested, avoid expensive OUT tests. */ if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) return mask; writable = unix_writable(sk, state); if (writable) { unix_state_lock(sk); other = unix_peer(sk); if (other && unix_peer(other) != sk && unix_recvq_full_lockless(other) && unix_dgram_peer_wake_me(sk, other)) writable = 0; unix_state_unlock(sk); } if (writable) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } #ifdef CONFIG_PROC_FS #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) { unsigned long offset = get_offset(*pos); unsigned long bucket = get_bucket(*pos); unsigned long count = 0; struct sock *sk; for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]); sk; sk = sk_next(sk)) { if (++count == offset) break; } return sk; } static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos) { unsigned long bucket = get_bucket(*pos); struct net *net = seq_file_net(seq); struct sock *sk; while (bucket < UNIX_HASH_SIZE) { spin_lock(&net->unx.table.locks[bucket]); sk = unix_from_bucket(seq, pos); if (sk) return sk; spin_unlock(&net->unx.table.locks[bucket]); *pos = set_bucket_offset(++bucket, 1); } return NULL; } static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk, loff_t *pos) { unsigned long bucket = get_bucket(*pos); sk = sk_next(sk); if (sk) return sk; spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]); *pos = set_bucket_offset(++bucket, 1); return unix_get_first(seq, pos); } static void *unix_seq_start(struct seq_file *seq, loff_t *pos) { if (!*pos) return SEQ_START_TOKEN; return unix_get_first(seq, pos); } static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; if (v == SEQ_START_TOKEN) return unix_get_first(seq, pos); return unix_get_next(seq, v, pos); } static void unix_seq_stop(struct seq_file *seq, void *v) { struct sock *sk = v; if (sk) spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]); } static int unix_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Num RefCount Protocol Flags Type St " "Inode Path\n"); else { struct sock *s = v; struct unix_sock *u = unix_sk(s); unix_state_lock(s); seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", s, refcount_read(&s->sk_refcnt), 0, s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0, s->sk_type, s->sk_socket ? (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) : (s->sk_state == TCP_ESTABLISHED ? 
SS_CONNECTING : SS_DISCONNECTING), sock_i_ino(s)); if (u->addr) { // under a hash table lock here int i, len; seq_putc(seq, ' '); i = 0; len = u->addr->len - offsetof(struct sockaddr_un, sun_path); if (u->addr->name->sun_path[0]) { len--; } else { seq_putc(seq, '@'); i++; } for ( ; i < len; i++) seq_putc(seq, u->addr->name->sun_path[i] ?: '@'); } unix_state_unlock(s); seq_putc(seq, '\n'); } return 0; } static const struct seq_operations unix_seq_ops = { .start = unix_seq_start, .next = unix_seq_next, .stop = unix_seq_stop, .show = unix_seq_show, }; #ifdef CONFIG_BPF_SYSCALL struct bpf_unix_iter_state { struct seq_net_private p; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; struct sock **batch; bool st_bucket_done; }; struct bpf_iter__unix { __bpf_md_ptr(struct bpf_iter_meta *, meta); __bpf_md_ptr(struct unix_sock *, unix_sk); uid_t uid __aligned(8); }; static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, struct unix_sock *unix_sk, uid_t uid) { struct bpf_iter__unix ctx; meta->seq_num--; /* skip SEQ_START_TOKEN */ ctx.meta = meta; ctx.unix_sk = unix_sk; ctx.uid = uid; return bpf_iter_run_prog(prog, &ctx); } static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk) { struct bpf_unix_iter_state *iter = seq->private; unsigned int expected = 1; struct sock *sk; sock_hold(start_sk); iter->batch[iter->end_sk++] = start_sk; for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); iter->batch[iter->end_sk++] = sk; } expected++; } spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]); return expected; } static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter) { while (iter->cur_sk < iter->end_sk) sock_put(iter->batch[iter->cur_sk++]); } static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter, unsigned int new_batch_sz) { struct sock **new_batch; new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, GFP_USER | __GFP_NOWARN); if (!new_batch) return -ENOMEM; bpf_iter_unix_put_batch(iter); kvfree(iter->batch); iter->batch = new_batch; iter->max_sk = new_batch_sz; return 0; } static struct sock *bpf_iter_unix_batch(struct seq_file *seq, loff_t *pos) { struct bpf_unix_iter_state *iter = seq->private; unsigned int expected; bool resized = false; struct sock *sk; if (iter->st_bucket_done) *pos = set_bucket_offset(get_bucket(*pos) + 1, 1); again: /* Get a new batch */ iter->cur_sk = 0; iter->end_sk = 0; sk = unix_get_first(seq, pos); if (!sk) return NULL; /* Done */ expected = bpf_iter_unix_hold_batch(seq, sk); if (iter->end_sk == expected) { iter->st_bucket_done = true; return sk; } if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) { resized = true; goto again; } return sk; } static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos) { if (!*pos) return SEQ_START_TOKEN; /* bpf iter does not support lseek, so it always * continue from where it was stop()-ped. */ return bpf_iter_unix_batch(seq, pos); } static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bpf_unix_iter_state *iter = seq->private; struct sock *sk; /* Whenever seq_next() is called, the iter->cur_sk is * done with seq_show(), so advance to the next sk in * the batch. 
*/ if (iter->cur_sk < iter->end_sk) sock_put(iter->batch[iter->cur_sk++]); ++*pos; if (iter->cur_sk < iter->end_sk) sk = iter->batch[iter->cur_sk]; else sk = bpf_iter_unix_batch(seq, pos); return sk; } static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v) { struct bpf_iter_meta meta; struct bpf_prog *prog; struct sock *sk = v; uid_t uid; bool slow; int ret; if (v == SEQ_START_TOKEN) return 0; slow = lock_sock_fast(sk); if (unlikely(sk_unhashed(sk))) { ret = SEQ_SKIP; goto unlock; } uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); meta.seq = seq; prog = bpf_iter_get_info(&meta, false); ret = unix_prog_seq_show(prog, &meta, v, uid); unlock: unlock_sock_fast(sk, slow); return ret; } static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v) { struct bpf_unix_iter_state *iter = seq->private; struct bpf_iter_meta meta; struct bpf_prog *prog; if (!v) { meta.seq = seq; prog = bpf_iter_get_info(&meta, true); if (prog) (void)unix_prog_seq_show(prog, &meta, v, 0); } if (iter->cur_sk < iter->end_sk) bpf_iter_unix_put_batch(iter); } static const struct seq_operations bpf_iter_unix_seq_ops = { .start = bpf_iter_unix_seq_start, .next = bpf_iter_unix_seq_next, .stop = bpf_iter_unix_seq_stop, .show = bpf_iter_unix_seq_show, }; #endif #endif static const struct net_proto_family unix_family_ops = { .family = PF_UNIX, .create = unix_create, .owner = THIS_MODULE, }; static int __net_init unix_net_init(struct net *net) { int i; net->unx.sysctl_max_dgram_qlen = 10; if (unix_sysctl_register(net)) goto out; #ifdef CONFIG_PROC_FS if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops, sizeof(struct seq_net_private))) goto err_sysctl; #endif net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE, sizeof(spinlock_t), GFP_KERNEL); if (!net->unx.table.locks) goto err_proc; net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE, sizeof(struct hlist_head), GFP_KERNEL); if (!net->unx.table.buckets) goto free_locks; for (i = 0; i < UNIX_HASH_SIZE; i++) { spin_lock_init(&net->unx.table.locks[i]); lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL); INIT_HLIST_HEAD(&net->unx.table.buckets[i]); } return 0; free_locks: kvfree(net->unx.table.locks); err_proc: #ifdef CONFIG_PROC_FS remove_proc_entry("unix", net->proc_net); err_sysctl: #endif unix_sysctl_unregister(net); out: return -ENOMEM; } static void __net_exit unix_net_exit(struct net *net) { kvfree(net->unx.table.buckets); kvfree(net->unx.table.locks); unix_sysctl_unregister(net); remove_proc_entry("unix", net->proc_net); } static struct pernet_operations unix_net_ops = { .init = unix_net_init, .exit = unix_net_exit, }; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta, struct unix_sock *unix_sk, uid_t uid) #define INIT_BATCH_SZ 16 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux) { struct bpf_unix_iter_state *iter = priv_data; int err; err = bpf_iter_init_seq_net(priv_data, aux); if (err) return err; err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ); if (err) { bpf_iter_fini_seq_net(priv_data); return err; } return 0; } static void bpf_iter_fini_unix(void *priv_data) { struct bpf_unix_iter_state *iter = priv_data; bpf_iter_fini_seq_net(priv_data); kvfree(iter->batch); } static const struct bpf_iter_seq_info unix_seq_info = { .seq_ops = &bpf_iter_unix_seq_ops, .init_seq_private = bpf_iter_init_unix, .fini_seq_private = bpf_iter_fini_unix, .seq_priv_size = sizeof(struct bpf_unix_iter_state), }; static const struct bpf_func_proto 
* bpf_iter_unix_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_setsockopt: return &bpf_sk_setsockopt_proto; case BPF_FUNC_getsockopt: return &bpf_sk_getsockopt_proto; default: return NULL; } } static struct bpf_iter_reg unix_reg_info = { .target = "unix", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__unix, unix_sk), PTR_TO_BTF_ID_OR_NULL }, }, .get_func_proto = bpf_iter_unix_get_func_proto, .seq_info = &unix_seq_info, }; static void __init bpf_iter_register(void) { unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX]; if (bpf_iter_reg_target(&unix_reg_info)) pr_warn("Warning: could not register bpf iterator unix\n"); } #endif static int __init af_unix_init(void) { int i, rc = -1; BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); for (i = 0; i < UNIX_HASH_SIZE / 2; i++) { spin_lock_init(&bsd_socket_locks[i]); INIT_HLIST_HEAD(&bsd_socket_buckets[i]); } rc = proto_register(&unix_dgram_proto, 1); if (rc != 0) { pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); goto out; } rc = proto_register(&unix_stream_proto, 1); if (rc != 0) { pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); proto_unregister(&unix_dgram_proto); goto out; } sock_register(&unix_family_ops); register_pernet_subsys(&unix_net_ops); unix_bpf_build_proto(); #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) bpf_iter_register(); #endif out: return rc; } /* Later than subsys_initcall() because we depend on stuff initialised there */ fs_initcall(af_unix_init);
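The shutdown and ioctl paths above are easy to exercise from userspace. What follows is a minimal illustrative sketch (an editorial addition, not part of af_unix.c): it creates an AF_UNIX stream socketpair, reads back the queued byte count that unix_inq_len() reports through SIOCINQ, and then calls shutdown(SHUT_WR) so the peer observes end-of-file, matching the SHUT_WR -> SEND_SHUTDOWN / peer RCV_SHUTDOWN mapping in unix_shutdown(). Error handling is trimmed for brevity.

/* Userspace sketch, not kernel code. */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>      /* SIOCINQ, SIOCOUTQ */

int main(void)
{
        int sv[2], queued = 0;
        char buf[16];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                return 1;

        /* Queue five bytes on sv[1]'s receive queue. */
        write(sv[0], "hello", 5);

        /* SIOCINQ reports the unread bytes counted by unix_inq_len(). */
        ioctl(sv[1], SIOCINQ, &queued);
        printf("queued: %d bytes\n", queued);   /* expected: 5 */

        /* SHUT_WR sets SEND_SHUTDOWN on the writer and RCV_SHUTDOWN on
         * the peer, so the peer sees EOF once the queue is drained. */
        shutdown(sv[0], SHUT_WR);

        printf("read: %zd\n", read(sv[1], buf, sizeof(buf)));   /* 5 */
        printf("read: %zd\n", read(sv[1], buf, sizeof(buf)));   /* 0 == EOF */

        close(sv[0]);
        close(sv[1]);
        return 0;
}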
/* * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #ifndef __DRM_FOURCC_H__ #define __DRM_FOURCC_H__ #include <linux/math.h> #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> /** * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have */ #define DRM_FORMAT_MAX_PLANES 4u /* * DRM formats are little endian. Define host endian variants for the * most common formats here, to reduce the #ifdefs needed in drivers. * * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in * case the format can't be specified otherwise, so we don't end up * with two values describing the same format.
*/ #ifdef __BIG_ENDIAN # define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \ DRM_FORMAT_BIG_ENDIAN) # define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \ DRM_FORMAT_BIG_ENDIAN) # define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888 # define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888 #else # define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555 # define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565 # define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888 # define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888 #endif struct drm_device; struct drm_mode_fb_cmd2; /** * struct drm_format_info - information about a DRM format */ struct drm_format_info { /** @format: 4CC format identifier (DRM_FORMAT_*) */ u32 format; /** * @depth: * * Color depth (number of bits per pixel excluding padding bits), * valid for a subset of RGB formats only. This is a legacy field, do * not use in new code and set to 0 for new formats. */ u8 depth; /** @num_planes: Number of color planes (1 to 3) */ u8 num_planes; union { /** * @cpp: * * Number of bytes per pixel (per plane), this is aliased with * @char_per_block. It is deprecated in favour of using the * triplet @char_per_block, @block_w, @block_h for better * describing the pixel format. */ u8 cpp[DRM_FORMAT_MAX_PLANES]; /** * @char_per_block: * * Number of bytes per block (per plane), where blocks are * defined as a rectangle of pixels which are stored next to * each other in a byte aligned memory region. Together with * @block_w and @block_h this is used to properly describe tiles * in tiled formats or to describe groups of pixels in packed * formats for which the memory needed for a single pixel is not * byte aligned. * * @cpp has been kept for historical reasons because there are * a lot of places in drivers where it's used. In drm core for * generic code paths the preferred way is to use * @char_per_block, drm_format_info_block_width() and * drm_format_info_block_height() which allows handling both * block and non-block formats in the same way. * * For formats that are intended to be used only with non-linear * modifiers both @cpp and @char_per_block must be 0 in the * generic format table. Drivers could supply accurate * information from their drm_mode_config.get_format_info hook * if they want the core to be validating the pitch. */ u8 char_per_block[DRM_FORMAT_MAX_PLANES]; }; /** * @block_w: * * Block width in pixels, this is intended to be accessed through * drm_format_info_block_width() */ u8 block_w[DRM_FORMAT_MAX_PLANES]; /** * @block_h: * * Block height in pixels, this is intended to be accessed through * drm_format_info_block_height() */ u8 block_h[DRM_FORMAT_MAX_PLANES]; /** @hsub: Horizontal chroma subsampling factor */ u8 hsub; /** @vsub: Vertical chroma subsampling factor */ u8 vsub; /** @has_alpha: Does the format embeds an alpha component? */ bool has_alpha; /** @is_yuv: Is it a YUV format? */ bool is_yuv; /** @is_color_indexed: Is it a color-indexed format? */ bool is_color_indexed; }; /** * drm_format_info_is_yuv_packed - check that the format info matches a YUV * format with data laid in a single plane * @info: format info * * Returns: * A boolean indicating whether the format info matches a packed YUV format. 
*/ static inline bool drm_format_info_is_yuv_packed(const struct drm_format_info *info) { return info->is_yuv && info->num_planes == 1; } /** * drm_format_info_is_yuv_semiplanar - check that the format info matches a YUV * format with data laid in two planes (luminance and chrominance) * @info: format info * * Returns: * A boolean indicating whether the format info matches a semiplanar YUV format. */ static inline bool drm_format_info_is_yuv_semiplanar(const struct drm_format_info *info) { return info->is_yuv && info->num_planes == 2; } /** * drm_format_info_is_yuv_planar - check that the format info matches a YUV * format with data laid in three planes (one for each YUV component) * @info: format info * * Returns: * A boolean indicating whether the format info matches a planar YUV format. */ static inline bool drm_format_info_is_yuv_planar(const struct drm_format_info *info) { return info->is_yuv && info->num_planes == 3; } /** * drm_format_info_is_yuv_sampling_410 - check that the format info matches a * YUV format with 4:1:0 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:1:0 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_410(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 4 && info->vsub == 4; } /** * drm_format_info_is_yuv_sampling_411 - check that the format info matches a * YUV format with 4:1:1 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:1:1 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_411(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 4 && info->vsub == 1; } /** * drm_format_info_is_yuv_sampling_420 - check that the format info matches a * YUV format with 4:2:0 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:2:0 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_420(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 2 && info->vsub == 2; } /** * drm_format_info_is_yuv_sampling_422 - check that the format info matches a * YUV format with 4:2:2 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:2:2 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_422(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 2 && info->vsub == 1; } /** * drm_format_info_is_yuv_sampling_444 - check that the format info matches a * YUV format with 4:4:4 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:4:4 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 1 && info->vsub == 1; } /** * drm_format_info_plane_width - width of the plane given the first plane * @info: pixel format info * @width: width of the first plane * @plane: plane index * * Returns: * The width of @plane, given that the width of the first plane is @width. 
*/ static inline int drm_format_info_plane_width(const struct drm_format_info *info, int width, int plane) { if (!info || plane >= info->num_planes) return 0; if (plane == 0) return width; return DIV_ROUND_UP(width, info->hsub); } /** * drm_format_info_plane_height - height of the plane given the first plane * @info: pixel format info * @height: height of the first plane * @plane: plane index * * Returns: * The height of @plane, given that the height of the first plane is @height. */ static inline int drm_format_info_plane_height(const struct drm_format_info *info, int height, int plane) { if (!info || plane >= info->num_planes) return 0; if (plane == 0) return height; return DIV_ROUND_UP(height, info->vsub); } const struct drm_format_info *__drm_format_info(u32 format); const struct drm_format_info *drm_format_info(u32 format); const struct drm_format_info * drm_get_format_info(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd); uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth); uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode); unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane); unsigned int drm_format_info_block_height(const struct drm_format_info *info, int plane); unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane); uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, int plane, unsigned int buffer_width); #endif /* __DRM_FOURCC_H__ */
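The helpers declared above reduce plane geometry to simple arithmetic: plane 0 keeps the full width and height, the remaining planes are divided (rounding up) by the hsub/vsub subsampling factors, and for simple non-block formats the per-pixel byte size comes from cpp. The standalone sketch below (an illustrative addition, not kernel code) mirrors that math with a trimmed stand-in struct; the NV12 parameters assumed here (two planes, cpp {1, 2}, hsub = vsub = 2) follow the conventional DRM description of that format.

/* Standalone sketch, not kernel code. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct fmt_info {               /* trimmed stand-in for struct drm_format_info */
        unsigned char num_planes;
        unsigned char cpp[4];
        unsigned char hsub, vsub;
};

static int plane_width(const struct fmt_info *info, int width, int plane)
{
        return plane ? DIV_ROUND_UP(width, info->hsub) : width;
}

static int plane_height(const struct fmt_info *info, int height, int plane)
{
        return plane ? DIV_ROUND_UP(height, info->vsub) : height;
}

int main(void)
{
        const struct fmt_info nv12 = { .num_planes = 2, .cpp = { 1, 2 },
                                       .hsub = 2, .vsub = 2 };
        int p;

        for (p = 0; p < nv12.num_planes; p++) {
                int w = plane_width(&nv12, 1920, p);
                int h = plane_height(&nv12, 1080, p);

                /* For non-block formats the minimum pitch is width * cpp. */
                printf("plane %d: %dx%d, min pitch %d bytes\n",
                       p, w, h, w * nv12.cpp[p]);
        }
        return 0;
}

For a 1920x1080 NV12 frame this prints 1920x1080 with a 1920-byte pitch for the luma plane and 960x540 with a 1920-byte pitch for the interleaved chroma plane.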
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/fs.h> #include <linux/gfs2_ondisk.h> #include <linux/prefetch.h> #include <linux/blkdev.h> #include <linux/rbtree.h> #include <linux/random.h> #include "gfs2.h" #include "incore.h" #include "glock.h" #include "glops.h" #include "lops.h" #include "meta_io.h" #include "quota.h" #include "rgrp.h" #include "super.h" #include "trans.h" #include "util.h" #include "log.h" #include "inode.h" #include "trace_gfs2.h" #include "dir.h" #define BFITNOENT ((u32)~0) #define NO_BLOCK ((u64)~0) struct gfs2_rbm { struct gfs2_rgrpd *rgd; u32 offset; /* The offset is bitmap relative */ int bii; /* Bitmap index */ }; static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm) { return rbm->rgd->rd_bits + rbm->bii; } static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm) { BUG_ON(rbm->offset >= rbm->rgd->rd_data); return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) + rbm->offset; } /* * These routines are used by the resource group routines (rgrp.c) * to keep track of block allocation.
Each block is represented by two * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks. * * 0 = Free * 1 = Used (not metadata) * 2 = Unlinked (still in use) inode * 3 = Used (metadata) */ struct gfs2_extent { struct gfs2_rbm rbm; u32 len; }; static const char valid_change[16] = { /* current */ /* n */ 0, 1, 1, 1, /* e */ 1, 0, 0, 0, /* w */ 0, 0, 0, 1, 1, 0, 0, 0 }; static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, struct gfs2_blkreserv *rs, bool nowrap); /** * gfs2_setbit - Set a bit in the bitmaps * @rbm: The position of the bit to set * @do_clone: Also set the clone bitmap, if it exists * @new_state: the new state of the block * */ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone, unsigned char new_state) { unsigned char *byte1, *byte2, *end, cur_state; struct gfs2_bitmap *bi = rbm_bi(rbm); unsigned int buflen = bi->bi_bytes; const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY); end = bi->bi_bh->b_data + bi->bi_offset + buflen; BUG_ON(byte1 >= end); cur_state = (*byte1 >> bit) & GFS2_BIT_MASK; if (unlikely(!valid_change[new_state * 4 + cur_state])) { struct gfs2_sbd *sdp = rbm->rgd->rd_sbd; fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n", rbm->offset, cur_state, new_state); fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n", (unsigned long long)rbm->rgd->rd_addr, bi->bi_start, (unsigned long long)bi->bi_bh->b_blocknr); fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n", bi->bi_offset, bi->bi_bytes, (unsigned long long)gfs2_rbm_to_block(rbm)); dump_stack(); gfs2_consist_rgrpd(rbm->rgd); return; } *byte1 ^= (cur_state ^ new_state) << bit; if (do_clone && bi->bi_clone) { byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY); cur_state = (*byte2 >> bit) & GFS2_BIT_MASK; *byte2 ^= (cur_state ^ new_state) << bit; } } /** * gfs2_testbit - test a bit in the bitmaps * @rbm: The bit to test * @use_clone: If true, test the clone bitmap, not the official bitmap. * * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps, * not the "real" bitmaps, to avoid allocating recently freed blocks. * * Returns: The two bit block state of the requested bit */ static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone) { struct gfs2_bitmap *bi = rbm_bi(rbm); const u8 *buffer; const u8 *byte; unsigned int bit; if (use_clone && bi->bi_clone) buffer = bi->bi_clone; else buffer = bi->bi_bh->b_data; buffer += bi->bi_offset; byte = buffer + (rbm->offset / GFS2_NBBY); bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; return (*byte >> bit) & GFS2_BIT_MASK; } /** * gfs2_bit_search - search bitmap for a state * @ptr: Pointer to bitmap data * @mask: Mask to use (normally 0x55555.... but adjusted for search start) * @state: The state we are searching for * * We xor the bitmap data with a pattern which is the bitwise opposite * of what we are looking for. This gives rise to a pattern of ones * wherever there is a match. Since we have two bits per entry, we * take this pattern, shift it down by one place and then and it with * the original. All the even bit positions (0,2,4, etc) then represent * successful matches, so we mask with 0x55555..... to remove the unwanted * odd bit positions. * * This allows searching of a whole u64 at once (32 blocks) with a * single test (on 64 bit arches). 
*/ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) { u64 tmp; static const u64 search[] = { [0] = 0xffffffffffffffffULL, [1] = 0xaaaaaaaaaaaaaaaaULL, [2] = 0x5555555555555555ULL, [3] = 0x0000000000000000ULL, }; tmp = le64_to_cpu(*ptr) ^ search[state]; tmp &= (tmp >> 1); tmp &= mask; return tmp; } /** * rs_cmp - multi-block reservation range compare * @start: start of the new reservation * @len: number of blocks in the new reservation * @rs: existing reservation to compare against * * returns: 1 if the block range is beyond the reach of the reservation * -1 if the block range is before the start of the reservation * 0 if the block range overlaps with the reservation */ static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs) { if (start >= rs->rs_start + rs->rs_requested) return 1; if (rs->rs_start >= start + len) return -1; return 0; } /** * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing * a block in a given allocation state. * @buf: the buffer that holds the bitmaps * @len: the length (in bytes) of the buffer * @goal: start search at this block's bit-pair (within @buffer) * @state: GFS2_BLKST_XXX the state of the block we're looking for. * * Scope of @goal and returned block number is only within this bitmap buffer, * not entire rgrp or filesystem. @buffer will be offset from the actual * beginning of a bitmap block buffer, skipping any header structures, but * headers are always a multiple of 64 bits long so that the buffer is * always aligned to a 64 bit boundary. * * The size of the buffer is in bytes, but is it assumed that it is * always ok to read a complete multiple of 64 bits at the end * of the block in case the end is no aligned to a natural boundary. * * Return: the block number (bitmap buffer scope) that was found */ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, u32 goal, u8 state) { u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1); const __le64 *ptr = ((__le64 *)buf) + (goal >> 5); const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64))); u64 tmp; u64 mask = 0x5555555555555555ULL; u32 bit; /* Mask off bits we don't care about at the start of the search */ mask <<= spoint; tmp = gfs2_bit_search(ptr, mask, state); ptr++; while(tmp == 0 && ptr < end) { tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state); ptr++; } /* Mask off any bits which are more than len bytes from the start */ if (ptr == end && (len & (sizeof(u64) - 1))) tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1)))); /* Didn't find anything, so return */ if (tmp == 0) return BFITNOENT; ptr--; bit = __ffs64(tmp); bit /= 2; /* two bits per entry in the bitmap */ return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit; } /** * gfs2_rbm_from_block - Set the rbm based upon rgd and block number * @rbm: The rbm with rgd already set correctly * @block: The block number (filesystem relative) * * This sets the bi and offset members of an rbm based on a * resource group and a filesystem relative block number. The * resource group must be set in the rbm on entry, the bi and * offset members will be set by this function. 
* * Returns: 0 on success, or an error code */ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block) { if (!rgrp_contains_block(rbm->rgd, block)) return -E2BIG; rbm->bii = 0; rbm->offset = block - rbm->rgd->rd_data0; /* Check if the block is within the first block */ if (rbm->offset < rbm_bi(rbm)->bi_blocks) return 0; /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */ rbm->offset += (sizeof(struct gfs2_rgrp) - sizeof(struct gfs2_meta_header)) * GFS2_NBBY; rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap; rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap; return 0; } /** * gfs2_rbm_add - add a number of blocks to an rbm * @rbm: The rbm with rgd already set correctly * @blocks: The number of blocks to add to rpm * * This function takes an existing rbm structure and adds a number of blocks to * it. * * Returns: True if the new rbm would point past the end of the rgrp. */ static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks) { struct gfs2_rgrpd *rgd = rbm->rgd; struct gfs2_bitmap *bi = rgd->rd_bits + rbm->bii; if (rbm->offset + blocks < bi->bi_blocks) { rbm->offset += blocks; return false; } blocks -= bi->bi_blocks - rbm->offset; for(;;) { bi++; if (bi == rgd->rd_bits + rgd->rd_length) return true; if (blocks < bi->bi_blocks) { rbm->offset = blocks; rbm->bii = bi - rgd->rd_bits; return false; } blocks -= bi->bi_blocks; } } /** * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned * @rbm: Position to search (value/result) * @n_unaligned: Number of unaligned blocks to check * @len: Decremented for each block found (terminate on zero) * * Returns: true if a non-free block is encountered or the end of the resource * group is reached. */ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len) { u32 n; u8 res; for (n = 0; n < n_unaligned; n++) { res = gfs2_testbit(rbm, true); if (res != GFS2_BLKST_FREE) return true; (*len)--; if (*len == 0) return true; if (gfs2_rbm_add(rbm, 1)) return true; } return false; } /** * gfs2_free_extlen - Return extent length of free blocks * @rrbm: Starting position * @len: Max length to check * * Starting at the block specified by the rbm, see how many free blocks * there are, not reading more than len blocks ahead. This can be done * using memchr_inv when the blocks are byte aligned, but has to be done * on a block by block basis in case of unaligned blocks. Also this * function can cope with bitmap boundaries (although it must stop on * a resource group boundary) * * Returns: Number of free blocks in the extent */ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len) { struct gfs2_rbm rbm = *rrbm; u32 n_unaligned = rbm.offset & 3; u32 size = len; u32 bytes; u32 chunk_size; u8 *ptr, *start, *end; u64 block; struct gfs2_bitmap *bi; if (n_unaligned && gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len)) goto out; n_unaligned = len & 3; /* Start is now byte aligned */ while (len > 3) { bi = rbm_bi(&rbm); start = bi->bi_bh->b_data; if (bi->bi_clone) start = bi->bi_clone; start += bi->bi_offset; end = start + bi->bi_bytes; BUG_ON(rbm.offset & 3); start += (rbm.offset / GFS2_NBBY); bytes = min_t(u32, len / GFS2_NBBY, (end - start)); ptr = memchr_inv(start, 0, bytes); chunk_size = ((ptr == NULL) ? 
bytes : (ptr - start)); chunk_size *= GFS2_NBBY; BUG_ON(len < chunk_size); len -= chunk_size; block = gfs2_rbm_to_block(&rbm); if (gfs2_rbm_from_block(&rbm, block + chunk_size)) { n_unaligned = 0; break; } if (ptr) { n_unaligned = 3; break; } n_unaligned = len & 3; } /* Deal with any bits left over at the end */ if (n_unaligned) gfs2_unaligned_extlen(&rbm, n_unaligned, &len); out: return size - len; } /** * gfs2_bitcount - count the number of bits in a certain state * @rgd: the resource group descriptor * @buffer: the buffer that holds the bitmaps * @buflen: the length (in bytes) of the buffer * @state: the state of the block we're looking for * * Returns: The number of bits */ static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer, unsigned int buflen, u8 state) { const u8 *byte = buffer; const u8 *end = buffer + buflen; const u8 state1 = state << 2; const u8 state2 = state << 4; const u8 state3 = state << 6; u32 count = 0; for (; byte < end; byte++) { if (((*byte) & 0x03) == state) count++; if (((*byte) & 0x0C) == state1) count++; if (((*byte) & 0x30) == state2) count++; if (((*byte) & 0xC0) == state3) count++; } return count; } /** * gfs2_rgrp_verify - Verify that a resource group is consistent * @rgd: the rgrp * */ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_bitmap *bi = NULL; u32 length = rgd->rd_length; u32 count[4], tmp; int buf, x; memset(count, 0, 4 * sizeof(u32)); /* Count # blocks in each of 4 possible allocation states */ for (buf = 0; buf < length; buf++) { bi = rgd->rd_bits + buf; for (x = 0; x < 4; x++) count[x] += gfs2_bitcount(rgd, bi->bi_bh->b_data + bi->bi_offset, bi->bi_bytes, x); } if (count[0] != rgd->rd_free) { gfs2_lm(sdp, "free data mismatch: %u != %u\n", count[0], rgd->rd_free); gfs2_consist_rgrpd(rgd); return; } tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes; if (count[1] != tmp) { gfs2_lm(sdp, "used data mismatch: %u != %u\n", count[1], tmp); gfs2_consist_rgrpd(rgd); return; } if (count[2] + count[3] != rgd->rd_dinodes) { gfs2_lm(sdp, "used metadata mismatch: %u != %u\n", count[2] + count[3], rgd->rd_dinodes); gfs2_consist_rgrpd(rgd); return; } } /** * gfs2_blk2rgrpd - Find resource group for a given data/meta block number * @sdp: The GFS2 superblock * @blk: The data block number * @exact: True if this needs to be an exact match * * The @exact argument should be set to true by most callers. The exception * is when we need to match blocks which are not represented by the rgrp * bitmap, but which are part of the rgrp (i.e. padding blocks) which are * there for alignment purposes. Another way of looking at it is that @exact * matches only valid data/metadata blocks, but with @exact false, it will * match any block within the extent of the rgrp. 
* * Returns: The resource group, or NULL if not found */ struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) { struct rb_node *n, *next; struct gfs2_rgrpd *cur; spin_lock(&sdp->sd_rindex_spin); n = sdp->sd_rindex_tree.rb_node; while (n) { cur = rb_entry(n, struct gfs2_rgrpd, rd_node); next = NULL; if (blk < cur->rd_addr) next = n->rb_left; else if (blk >= cur->rd_data0 + cur->rd_data) next = n->rb_right; if (next == NULL) { spin_unlock(&sdp->sd_rindex_spin); if (exact) { if (blk < cur->rd_addr) return NULL; if (blk >= cur->rd_data0 + cur->rd_data) return NULL; } return cur; } n = next; } spin_unlock(&sdp->sd_rindex_spin); return NULL; } /** * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem * @sdp: The GFS2 superblock * * Returns: The first rgrp in the filesystem */ struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) { const struct rb_node *n; struct gfs2_rgrpd *rgd; spin_lock(&sdp->sd_rindex_spin); n = rb_first(&sdp->sd_rindex_tree); rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); spin_unlock(&sdp->sd_rindex_spin); return rgd; } /** * gfs2_rgrpd_get_next - get the next RG * @rgd: the resource group descriptor * * Returns: The next rgrp */ struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; const struct rb_node *n; spin_lock(&sdp->sd_rindex_spin); n = rb_next(&rgd->rd_node); if (n == NULL) n = rb_first(&sdp->sd_rindex_tree); if (unlikely(&rgd->rd_node == n)) { spin_unlock(&sdp->sd_rindex_spin); return NULL; } rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); spin_unlock(&sdp->sd_rindex_spin); return rgd; } void check_and_update_goal(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL) ip->i_goal = ip->i_no_addr; } void gfs2_free_clones(struct gfs2_rgrpd *rgd) { int x; for (x = 0; x < rgd->rd_length; x++) { struct gfs2_bitmap *bi = rgd->rd_bits + x; kfree(bi->bi_clone); bi->bi_clone = NULL; } } static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs, const char *fs_id_buf) { struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res); gfs2_print_dbg(seq, "%s B: n:%llu s:%llu f:%u\n", fs_id_buf, (unsigned long long)ip->i_no_addr, (unsigned long long)rs->rs_start, rs->rs_requested); } /** * __rs_deltree - remove a multi-block reservation from the rgd tree * @rs: The reservation to remove * */ static void __rs_deltree(struct gfs2_blkreserv *rs) { struct gfs2_rgrpd *rgd; if (!gfs2_rs_active(rs)) return; rgd = rs->rs_rgd; trace_gfs2_rs(rs, TRACE_RS_TREEDEL); rb_erase(&rs->rs_node, &rgd->rd_rstree); RB_CLEAR_NODE(&rs->rs_node); if (rs->rs_requested) { /* return requested blocks to the rgrp */ BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested); rs->rs_rgd->rd_requested -= rs->rs_requested; /* The rgrp extent failure point is likely not to increase; it will only do so if the freed blocks are somehow contiguous with a span of free blocks that follows. Still, it will force the number to be recalculated later. 
*/ rgd->rd_extfail_pt += rs->rs_requested; rs->rs_requested = 0; } } /** * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree * @rs: The reservation to remove * */ void gfs2_rs_deltree(struct gfs2_blkreserv *rs) { struct gfs2_rgrpd *rgd; rgd = rs->rs_rgd; if (rgd) { spin_lock(&rgd->rd_rsspin); __rs_deltree(rs); BUG_ON(rs->rs_requested); spin_unlock(&rgd->rd_rsspin); } } /** * gfs2_rs_delete - delete a multi-block reservation * @ip: The inode for this reservation * */ void gfs2_rs_delete(struct gfs2_inode *ip) { struct inode *inode = &ip->i_inode; down_write(&ip->i_rw_mutex); if (atomic_read(&inode->i_writecount) <= 1) gfs2_rs_deltree(&ip->i_res); up_write(&ip->i_rw_mutex); } /** * return_all_reservations - return all reserved blocks back to the rgrp. * @rgd: the rgrp that needs its space back * * We previously reserved a bunch of blocks for allocation. Now we need to * give them back. This leaves the reservation structures intact, but removes * all of their corresponding "no-fly zones". */ static void return_all_reservations(struct gfs2_rgrpd *rgd) { struct rb_node *n; struct gfs2_blkreserv *rs; spin_lock(&rgd->rd_rsspin); while ((n = rb_first(&rgd->rd_rstree))) { rs = rb_entry(n, struct gfs2_blkreserv, rs_node); __rs_deltree(rs); } spin_unlock(&rgd->rd_rsspin); } void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) { struct rb_node *n; struct gfs2_rgrpd *rgd; struct gfs2_glock *gl; while ((n = rb_first(&sdp->sd_rindex_tree))) { rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); gl = rgd->rd_gl; rb_erase(n, &sdp->sd_rindex_tree); if (gl) { if (gl->gl_state != LM_ST_UNLOCKED) { gfs2_glock_cb(gl, LM_ST_UNLOCKED); flush_delayed_work(&gl->gl_work); } gfs2_rgrp_brelse(rgd); glock_clear_object(gl, rgd); gfs2_glock_put(gl); } gfs2_free_clones(rgd); return_all_reservations(rgd); kfree(rgd->rd_bits); rgd->rd_bits = NULL; kmem_cache_free(gfs2_rgrpd_cachep, rgd); } } /** * compute_bitstructs - Compute the bitmap sizes * @rgd: The resource group descriptor * * Calculates bitmap descriptors, one for each block that contains bitmap data * * Returns: errno */ static int compute_bitstructs(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_bitmap *bi; u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */ u32 bytes_left, bytes; int x; if (!length) return -EINVAL; rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS); if (!rgd->rd_bits) return -ENOMEM; bytes_left = rgd->rd_bitbytes; for (x = 0; x < length; x++) { bi = rgd->rd_bits + x; bi->bi_flags = 0; /* small rgrp; bitmap stored completely in header block */ if (length == 1) { bytes = bytes_left; bi->bi_offset = sizeof(struct gfs2_rgrp); bi->bi_start = 0; bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; /* header block */ } else if (x == 0) { bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp); bi->bi_offset = sizeof(struct gfs2_rgrp); bi->bi_start = 0; bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; /* last block */ } else if (x + 1 == length) { bytes = bytes_left; bi->bi_offset = sizeof(struct gfs2_meta_header); bi->bi_start = rgd->rd_bitbytes - bytes_left; bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; /* other blocks */ } else { bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header); bi->bi_offset = sizeof(struct gfs2_meta_header); bi->bi_start = rgd->rd_bitbytes - bytes_left; bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; } bytes_left -= bytes; } if (bytes_left) { gfs2_consist_rgrpd(rgd); return -EIO; } bi = rgd->rd_bits + (length - 1); if ((bi->bi_start
+ bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) { gfs2_lm(sdp, "ri_addr=%llu " "ri_length=%u " "ri_data0=%llu " "ri_data=%u " "ri_bitbytes=%u " "start=%u len=%u offset=%u\n", (unsigned long long)rgd->rd_addr, rgd->rd_length, (unsigned long long)rgd->rd_data0, rgd->rd_data, rgd->rd_bitbytes, bi->bi_start, bi->bi_bytes, bi->bi_offset); gfs2_consist_rgrpd(rgd); return -EIO; } return 0; } /** * gfs2_ri_total - Total up the file system space, according to the rindex. * @sdp: the filesystem * */ u64 gfs2_ri_total(struct gfs2_sbd *sdp) { u64 total_data = 0; struct inode *inode = sdp->sd_rindex; struct gfs2_inode *ip = GFS2_I(inode); char buf[sizeof(struct gfs2_rindex)]; int error, rgrps; for (rgrps = 0;; rgrps++) { loff_t pos = rgrps * sizeof(struct gfs2_rindex); if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode)) break; error = gfs2_internal_read(ip, buf, &pos, sizeof(struct gfs2_rindex)); if (error != sizeof(struct gfs2_rindex)) break; total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data); } return total_data; } static int rgd_insert(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL; /* Figure out where to put new node */ while (*newn) { struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd, rd_node); parent = *newn; if (rgd->rd_addr < cur->rd_addr) newn = &((*newn)->rb_left); else if (rgd->rd_addr > cur->rd_addr) newn = &((*newn)->rb_right); else return -EEXIST; } rb_link_node(&rgd->rd_node, parent, newn); rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree); sdp->sd_rgrps++; return 0; } /** * read_rindex_entry - Pull in a new resource index entry from the disk * @ip: Pointer to the rindex inode * * Returns: 0 on success, > 0 on EOF, error code otherwise */ static int read_rindex_entry(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex); struct gfs2_rindex buf; int error; struct gfs2_rgrpd *rgd; if (pos >= i_size_read(&ip->i_inode)) return 1; error = gfs2_internal_read(ip, (char *)&buf, &pos, sizeof(struct gfs2_rindex)); if (error != sizeof(struct gfs2_rindex)) return (error == 0) ? 1 : error; rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS); error = -ENOMEM; if (!rgd) return error; rgd->rd_sbd = sdp; rgd->rd_addr = be64_to_cpu(buf.ri_addr); rgd->rd_length = be32_to_cpu(buf.ri_length); rgd->rd_data0 = be64_to_cpu(buf.ri_data0); rgd->rd_data = be32_to_cpu(buf.ri_data); rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes); spin_lock_init(&rgd->rd_rsspin); mutex_init(&rgd->rd_mutex); error = gfs2_glock_get(sdp, rgd->rd_addr, &gfs2_rgrp_glops, CREATE, &rgd->rd_gl); if (error) goto fail; error = compute_bitstructs(rgd); if (error) goto fail_glock; rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; rgd->rd_flags &= ~GFS2_RDF_PREFERRED; if (rgd->rd_data > sdp->sd_max_rg_data) sdp->sd_max_rg_data = rgd->rd_data; spin_lock(&sdp->sd_rindex_spin); error = rgd_insert(rgd); spin_unlock(&sdp->sd_rindex_spin); if (!error) { glock_set_object(rgd->rd_gl, rgd); return 0; } error = 0; /* someone else read in the rgrp; free it and ignore it */ fail_glock: gfs2_glock_put(rgd->rd_gl); fail: kfree(rgd->rd_bits); rgd->rd_bits = NULL; kmem_cache_free(gfs2_rgrpd_cachep, rgd); return error; } /** * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use * @sdp: the GFS2 superblock * * The purpose of this function is to select a subset of the resource groups * and mark them as PREFERRED. 
We do it in such a way that each node prefers * to use a unique set of rgrps to minimize glock contention. */ static void set_rgrp_preferences(struct gfs2_sbd *sdp) { struct gfs2_rgrpd *rgd, *first; int i; /* Skip an initial number of rgrps, based on this node's journal ID. That should start each node out on its own set. */ rgd = gfs2_rgrpd_get_first(sdp); for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++) rgd = gfs2_rgrpd_get_next(rgd); first = rgd; do { rgd->rd_flags |= GFS2_RDF_PREFERRED; for (i = 0; i < sdp->sd_journals; i++) { rgd = gfs2_rgrpd_get_next(rgd); if (!rgd || rgd == first) break; } } while (rgd && rgd != first); } /** * gfs2_ri_update - Pull in a new resource index from the disk * @ip: pointer to the rindex inode * * Returns: 0 on successful update, error code otherwise */ static int gfs2_ri_update(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); int error; do { error = read_rindex_entry(ip); } while (error == 0); if (error < 0) return error; if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) { fs_err(sdp, "no resource groups found in the file system.\n"); return -ENOENT; } set_rgrp_preferences(sdp); sdp->sd_rindex_uptodate = 1; return 0; } /** * gfs2_rindex_update - Update the rindex if required * @sdp: The GFS2 superblock * * We grab a lock on the rindex inode to make sure that it doesn't * change whilst we are performing an operation. We keep this lock * for quite long periods of time compared to other locks. This * doesn't matter, since it is shared and it is very, very rarely * accessed in the exclusive mode (i.e. only when expanding the filesystem). * * This makes sure that we're using the latest copy of the resource index * special file, which might have been updated if someone expanded the * filesystem (via gfs2_grow utility), which adds new resource groups. 
* * Returns: 0 on success, error code otherwise */ int gfs2_rindex_update(struct gfs2_sbd *sdp) { struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); struct gfs2_glock *gl = ip->i_gl; struct gfs2_holder ri_gh; int error = 0; int unlock_required = 0; /* Read new copy from disk if we don't have the latest */ if (!sdp->sd_rindex_uptodate) { if (!gfs2_glock_is_locked_by_me(gl)) { error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); if (error) return error; unlock_required = 1; } if (!sdp->sd_rindex_uptodate) error = gfs2_ri_update(ip); if (unlock_required) gfs2_glock_dq_uninit(&ri_gh); } return error; } static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) { const struct gfs2_rgrp *str = buf; u32 rg_flags; rg_flags = be32_to_cpu(str->rg_flags); rg_flags &= ~GFS2_RDF_MASK; rgd->rd_flags &= GFS2_RDF_MASK; rgd->rd_flags |= rg_flags; rgd->rd_free = be32_to_cpu(str->rg_free); rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes); rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); /* rd_data0, rd_data and rd_bitbytes already set from rindex */ } static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf) { const struct gfs2_rgrp *str = buf; rgl->rl_magic = cpu_to_be32(GFS2_MAGIC); rgl->rl_flags = str->rg_flags; rgl->rl_free = str->rg_free; rgl->rl_dinodes = str->rg_dinodes; rgl->rl_igeneration = str->rg_igeneration; rgl->__pad = 0UL; } static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) { struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd); struct gfs2_rgrp *str = buf; u32 crc; str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK); str->rg_free = cpu_to_be32(rgd->rd_free); str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes); if (next == NULL) str->rg_skip = 0; else if (next->rd_addr > rgd->rd_addr) str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr); str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration); str->rg_data0 = cpu_to_be64(rgd->rd_data0); str->rg_data = cpu_to_be32(rgd->rd_data); str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes); str->rg_crc = 0; crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp)); str->rg_crc = cpu_to_be32(crc); memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf); } static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd) { struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl; struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data; struct gfs2_sbd *sdp = rgd->rd_sbd; int valid = 1; if (rgl->rl_flags != str->rg_flags) { fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u", (unsigned long long)rgd->rd_addr, be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags)); valid = 0; } if (rgl->rl_free != str->rg_free) { fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u", (unsigned long long)rgd->rd_addr, be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free)); valid = 0; } if (rgl->rl_dinodes != str->rg_dinodes) { fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u", (unsigned long long)rgd->rd_addr, be32_to_cpu(rgl->rl_dinodes), be32_to_cpu(str->rg_dinodes)); valid = 0; } if (rgl->rl_igeneration != str->rg_igeneration) { fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu", (unsigned long long)rgd->rd_addr, (unsigned long long)be64_to_cpu(rgl->rl_igeneration), (unsigned long long)be64_to_cpu(str->rg_igeneration)); valid = 0; } return valid; } static u32 count_unlinked(struct gfs2_rgrpd *rgd) { struct gfs2_bitmap *bi; const u32 length = rgd->rd_length; const u8 *buffer = NULL; u32 i, goal, count = 0; for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) { goal = 0; buffer =
bi->bi_bh->b_data + bi->bi_offset; WARN_ON(!buffer_uptodate(bi->bi_bh)); while (goal < bi->bi_blocks) { goal = gfs2_bitfit(buffer, bi->bi_bytes, goal, GFS2_BLKST_UNLINKED); if (goal == BFITNOENT) break; count++; goal++; } } return count; } static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd) { struct gfs2_bitmap *bi; int x; if (rgd->rd_free) { for (x = 0; x < rgd->rd_length; x++) { bi = rgd->rd_bits + x; clear_bit(GBF_FULL, &bi->bi_flags); } } else { for (x = 0; x < rgd->rd_length; x++) { bi = rgd->rd_bits + x; set_bit(GBF_FULL, &bi->bi_flags); } } } /** * gfs2_rgrp_go_instantiate - Read in a RG's header and bitmaps * @gl: the glock representing the rgrpd to read in * * Read in all of a Resource Group's header and bitmap blocks. * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps. * * Returns: errno */ int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl) { struct gfs2_rgrpd *rgd = gl->gl_object; struct gfs2_sbd *sdp = rgd->rd_sbd; unsigned int length = rgd->rd_length; struct gfs2_bitmap *bi; unsigned int x, y; int error; if (rgd->rd_bits[0].bi_bh != NULL) return 0; for (x = 0; x < length; x++) { bi = rgd->rd_bits + x; error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh); if (error) goto fail; } for (y = length; y--;) { bi = rgd->rd_bits + y; error = gfs2_meta_wait(sdp, bi->bi_bh); if (error) goto fail; if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB : GFS2_METATYPE_RG)) { error = -EIO; goto fail; } } gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data); rgrp_set_bitmap_flags(rgd); rgd->rd_flags |= GFS2_RDF_CHECK; rgd->rd_free_clone = rgd->rd_free; GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved); /* max out the rgrp allocation failure point */ rgd->rd_extfail_pt = rgd->rd_free; if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) { rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd)); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); } else if (sdp->sd_args.ar_rgrplvb) { if (!gfs2_rgrp_lvb_valid(rgd)){ gfs2_consist_rgrpd(rgd); error = -EIO; goto fail; } if (rgd->rd_rgl->rl_unlinked == 0) rgd->rd_flags &= ~GFS2_RDF_CHECK; } return 0; fail: while (x--) { bi = rgd->rd_bits + x; brelse(bi->bi_bh); bi->bi_bh = NULL; gfs2_assert_warn(sdp, !bi->bi_clone); } return error; } static int update_rgrp_lvb(struct gfs2_rgrpd *rgd, struct gfs2_holder *gh) { u32 rl_flags; if (!test_bit(GLF_INSTANTIATE_NEEDED, &gh->gh_gl->gl_flags)) return 0; if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) return gfs2_instantiate(gh); rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags); rl_flags &= ~GFS2_RDF_MASK; rgd->rd_flags &= GFS2_RDF_MASK; rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK); if (rgd->rd_rgl->rl_unlinked == 0) rgd->rd_flags &= ~GFS2_RDF_CHECK; rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free); rgrp_set_bitmap_flags(rgd); rgd->rd_free_clone = rgd->rd_free; GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved); /* max out the rgrp allocation failure point */ rgd->rd_extfail_pt = rgd->rd_free; rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes); rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration); return 0; } /** * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get() * @rgd: The resource group * */ void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd) { int x, length = rgd->rd_length; for (x = 0; x < length; x++) { struct gfs2_bitmap *bi = rgd->rd_bits + x; if (bi->bi_bh) { brelse(bi->bi_bh); bi->bi_bh = NULL; } } set_bit(GLF_INSTANTIATE_NEEDED, &rgd->rd_gl->gl_flags); } int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, struct 
buffer_head *bh, const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) { struct super_block *sb = sdp->sd_vfs; u64 blk; sector_t start = 0; sector_t nr_blks = 0; int rv = -EIO; unsigned int x; u32 trimmed = 0; u8 diff; for (x = 0; x < bi->bi_bytes; x++) { const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data; clone += bi->bi_offset; clone += x; if (bh) { const u8 *orig = bh->b_data + bi->bi_offset + x; diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1)); } else { diff = ~(*clone | (*clone >> 1)); } diff &= 0x55; if (diff == 0) continue; blk = offset + ((bi->bi_start + x) * GFS2_NBBY); while(diff) { if (diff & 1) { if (nr_blks == 0) goto start_new_extent; if ((start + nr_blks) != blk) { if (nr_blks >= minlen) { rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); if (rv) goto fail; trimmed += nr_blks; } nr_blks = 0; start_new_extent: start = blk; } nr_blks++; } diff >>= 2; blk++; } } if (nr_blks >= minlen) { rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); if (rv) goto fail; trimmed += nr_blks; } if (ptrimmed) *ptrimmed = trimmed; return 0; fail: if (sdp->sd_args.ar_discard) fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv); sdp->sd_args.ar_discard = 0; return rv; } /** * gfs2_fitrim - Generate discard requests for unused bits of the filesystem * @filp: Any file on the filesystem * @argp: Pointer to the arguments (also used to pass result) * * Returns: 0 on success, otherwise error code */ int gfs2_fitrim(struct file *filp, void __user *argp) { struct inode *inode = file_inode(filp); struct gfs2_sbd *sdp = GFS2_SB(inode); struct block_device *bdev = sdp->sd_vfs->s_bdev; struct buffer_head *bh; struct gfs2_rgrpd *rgd; struct gfs2_rgrpd *rgd_end; struct gfs2_holder gh; struct fstrim_range r; int ret = 0; u64 amt; u64 trimmed = 0; u64 start, end, minlen; unsigned int x; unsigned bs_shift = sdp->sd_sb.sb_bsize_shift; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) return -EROFS; if (!bdev_max_discard_sectors(bdev)) return -EOPNOTSUPP; if (copy_from_user(&r, argp, sizeof(r))) return -EFAULT; ret = gfs2_rindex_update(sdp); if (ret) return ret; start = r.start >> bs_shift; end = start + (r.len >> bs_shift); minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize); minlen = max_t(u64, minlen, bdev_discard_granularity(bdev)) >> bs_shift; if (end <= start || minlen > sdp->sd_max_rg_data) return -EINVAL; rgd = gfs2_blk2rgrpd(sdp, start, 0); rgd_end = gfs2_blk2rgrpd(sdp, end, 0); if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end)) && (start > rgd_end->rd_data0 + rgd_end->rd_data)) return -EINVAL; /* start is beyond the end of the fs */ while (1) { ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, &gh); if (ret) goto out; if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) { /* Trim each bitmap in the rgrp */ for (x = 0; x < rgd->rd_length; x++) { struct gfs2_bitmap *bi = rgd->rd_bits + x; rgrp_lock_local(rgd); ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, minlen, &amt); rgrp_unlock_local(rgd); if (ret) { gfs2_glock_dq_uninit(&gh); goto out; } trimmed += amt; } /* Mark rgrp as having been trimmed */ ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0); if (ret == 0) { bh = rgd->rd_bits[0].bi_bh; rgrp_lock_local(rgd); rgd->rd_flags |= GFS2_RGF_TRIMMED; gfs2_trans_add_meta(rgd->rd_gl, bh); gfs2_rgrp_out(rgd, bh->b_data); rgrp_unlock_local(rgd); gfs2_trans_end(sdp); } } gfs2_glock_dq_uninit(&gh); if (rgd == rgd_end) break; rgd = gfs2_rgrpd_get_next(rgd); } out: 
r.len = trimmed << bs_shift; if (copy_to_user(argp, &r, sizeof(r))) return -EFAULT; return ret; } /** * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree * @ip: the inode structure * */ static void rs_insert(struct gfs2_inode *ip) { struct rb_node **newn, *parent = NULL; int rc; struct gfs2_blkreserv *rs = &ip->i_res; struct gfs2_rgrpd *rgd = rs->rs_rgd; BUG_ON(gfs2_rs_active(rs)); spin_lock(&rgd->rd_rsspin); newn = &rgd->rd_rstree.rb_node; while (*newn) { struct gfs2_blkreserv *cur = rb_entry(*newn, struct gfs2_blkreserv, rs_node); parent = *newn; rc = rs_cmp(rs->rs_start, rs->rs_requested, cur); if (rc > 0) newn = &((*newn)->rb_right); else if (rc < 0) newn = &((*newn)->rb_left); else { spin_unlock(&rgd->rd_rsspin); WARN_ON(1); return; } } rb_link_node(&rs->rs_node, parent, newn); rb_insert_color(&rs->rs_node, &rgd->rd_rstree); /* Do our rgrp accounting for the reservation */ rgd->rd_requested += rs->rs_requested; /* blocks requested */ spin_unlock(&rgd->rd_rsspin); trace_gfs2_rs(rs, TRACE_RS_INSERT); } /** * rgd_free - return the number of free blocks we can allocate * @rgd: the resource group * @rs: The reservation to free * * This function returns the number of free blocks for an rgrp. * That's the clone-free blocks (blocks that are free, not including those * still being used for unlinked files that haven't been deleted.) * * It also subtracts any blocks reserved by someone else, but does not * include free blocks that are still part of our current reservation, * because obviously we can (and will) allocate them. */ static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs) { u32 tot_reserved, tot_free; if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested)) return 0; tot_reserved = rgd->rd_requested - rs->rs_requested; if (rgd->rd_free_clone < tot_reserved) tot_reserved = 0; tot_free = rgd->rd_free_clone - tot_reserved; return tot_free; } /** * rg_mblk_search - find a group of multiple free blocks to form a reservation * @rgd: the resource group descriptor * @ip: pointer to the inode for which we're reserving blocks * @ap: the allocation parameters * */ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap) { struct gfs2_rbm rbm = { .rgd = rgd, }; u64 goal; struct gfs2_blkreserv *rs = &ip->i_res; u32 extlen; u32 free_blocks, blocks_available; int ret; struct inode *inode = &ip->i_inode; spin_lock(&rgd->rd_rsspin); free_blocks = rgd_free(rgd, rs); if (rgd->rd_free_clone < rgd->rd_requested) free_blocks = 0; blocks_available = rgd->rd_free_clone - rgd->rd_reserved; if (rgd == rs->rs_rgd) blocks_available += rs->rs_reserved; spin_unlock(&rgd->rd_rsspin); if (S_ISDIR(inode->i_mode)) extlen = 1; else { extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target); extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks); } if (free_blocks < extlen || blocks_available < extlen) return; /* Find bitmap block that contains bits for goal block */ if (rgrp_contains_block(rgd, ip->i_goal)) goal = ip->i_goal; else goal = rgd->rd_last_alloc + rgd->rd_data0; if (WARN_ON(gfs2_rbm_from_block(&rbm, goal))) return; ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true); if (ret == 0) { rs->rs_start = gfs2_rbm_to_block(&rbm); rs->rs_requested = extlen; rs_insert(ip); } else { if (goal == rgd->rd_last_alloc + rgd->rd_data0) rgd->rd_last_alloc = 0; } } /** * gfs2_next_unreserved_block - Return next block that is not reserved * @rgd: The resource group * @block: The starting block * @length: 
The required length * @ignore_rs: Reservation to ignore * * If the block does not appear in any reservation, then return the * block number unchanged. If it does appear in the reservation, then * keep looking through the tree of reservations in order to find the * first block number which is not reserved. */ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block, u32 length, struct gfs2_blkreserv *ignore_rs) { struct gfs2_blkreserv *rs; struct rb_node *n; int rc; spin_lock(&rgd->rd_rsspin); n = rgd->rd_rstree.rb_node; while (n) { rs = rb_entry(n, struct gfs2_blkreserv, rs_node); rc = rs_cmp(block, length, rs); if (rc < 0) n = n->rb_left; else if (rc > 0) n = n->rb_right; else break; } if (n) { while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) { block = rs->rs_start + rs->rs_requested; n = n->rb_right; if (n == NULL) break; rs = rb_entry(n, struct gfs2_blkreserv, rs_node); } } spin_unlock(&rgd->rd_rsspin); return block; } /** * gfs2_reservation_check_and_update - Check for reservations during block alloc * @rbm: The current position in the resource group * @rs: Our own reservation * @minext: The minimum extent length * @maxext: A pointer to the maximum extent structure * * This checks the current position in the rgrp to see whether there is * a reservation covering this block. If not then this function is a * no-op. If there is, then the position is moved to the end of the * contiguous reservation(s) so that we are pointing at the first * non-reserved block. * * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error */ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm, struct gfs2_blkreserv *rs, u32 minext, struct gfs2_extent *maxext) { u64 block = gfs2_rbm_to_block(rbm); u32 extlen = 1; u64 nblock; /* * If we have a minimum extent length, then skip over any extent * which is less than the min extent length in size. */ if (minext > 1) { extlen = gfs2_free_extlen(rbm, minext); if (extlen <= maxext->len) goto fail; } /* * Check the extent which has been found against the reservations * and skip if parts of it are already reserved */ nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs); if (nblock == block) { if (!minext || extlen >= minext) return 0; if (extlen > maxext->len) { maxext->len = extlen; maxext->rbm = *rbm; } } else { u64 len = nblock - block; if (len >= (u64)1 << 32) return -E2BIG; extlen = len; } fail: if (gfs2_rbm_add(rbm, extlen)) return -E2BIG; return 1; } /** * gfs2_rbm_find - Look for blocks of a particular state * @rbm: Value/result starting position and final position * @state: The state which we want to find * @minext: Pointer to the requested extent length * This is updated to be the actual reservation size. * @rs: Our own reservation (NULL to skip checking for reservations) * @nowrap: Stop looking at the end of the rgrp, rather than wrapping * around until we've reached the starting point. * * Side effects: * - If looking for free blocks, we set GBF_FULL on each bitmap which * has no free blocks in it. * - If looking for free blocks, we set rd_extfail_pt on each rgrp which * has come up short on a free block search. 
* * Returns: 0 on success, -ENOSPC if there is no block of the requested state */ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, struct gfs2_blkreserv *rs, bool nowrap) { bool scan_from_start = rbm->bii == 0 && rbm->offset == 0; struct buffer_head *bh; int last_bii; u32 offset; u8 *buffer; bool wrapped = false; int ret; struct gfs2_bitmap *bi; struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, }; /* * Determine the last bitmap to search. If we're not starting at the * beginning of a bitmap, we need to search that bitmap twice to scan * the entire resource group. */ last_bii = rbm->bii - (rbm->offset == 0); while(1) { bi = rbm_bi(rbm); if (test_bit(GBF_FULL, &bi->bi_flags) && (state == GFS2_BLKST_FREE)) goto next_bitmap; bh = bi->bi_bh; buffer = bh->b_data + bi->bi_offset; WARN_ON(!buffer_uptodate(bh)); if (state != GFS2_BLKST_UNLINKED && bi->bi_clone) buffer = bi->bi_clone + bi->bi_offset; offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state); if (offset == BFITNOENT) { if (state == GFS2_BLKST_FREE && rbm->offset == 0) set_bit(GBF_FULL, &bi->bi_flags); goto next_bitmap; } rbm->offset = offset; if (!rs || !minext) return 0; ret = gfs2_reservation_check_and_update(rbm, rs, *minext, &maxext); if (ret == 0) return 0; if (ret > 0) goto next_iter; if (ret == -E2BIG) { rbm->bii = 0; rbm->offset = 0; goto res_covered_end_of_rgrp; } return ret; next_bitmap: /* Find next bitmap in the rgrp */ rbm->offset = 0; rbm->bii++; if (rbm->bii == rbm->rgd->rd_length) rbm->bii = 0; res_covered_end_of_rgrp: if (rbm->bii == 0) { if (wrapped) break; wrapped = true; if (nowrap) break; } next_iter: /* Have we scanned the entire resource group? */ if (wrapped && rbm->bii > last_bii) break; } if (state != GFS2_BLKST_FREE) return -ENOSPC; /* If the extent was too small, and it's smaller than the smallest to have failed before, remember for future reference that it's useless to search this rgrp again for this amount or more. */ if (wrapped && (scan_from_start || rbm->bii > last_bii) && *minext < rbm->rgd->rd_extfail_pt) rbm->rgd->rd_extfail_pt = *minext - 1; /* If the maximum extent we found is big enough to fulfill the minimum requirements, use it anyway. */ if (maxext.len) { *rbm = maxext.rbm; *minext = maxext.len; return 0; } return -ENOSPC; } /** * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes * @rgd: The rgrp * @last_unlinked: block address of the last dinode we unlinked * @skip: block address we should explicitly not unlink * * Returns: 0 if no error * The inode, if one has been found, in inode. */ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip) { u64 block; struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_glock *gl; struct gfs2_inode *ip; int error; int found = 0; struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 }; while (1) { error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL, true); if (error == -ENOSPC) break; if (WARN_ON_ONCE(error)) break; block = gfs2_rbm_to_block(&rbm); if (gfs2_rbm_from_block(&rbm, block + 1)) break; if (*last_unlinked != NO_BLOCK && block <= *last_unlinked) continue; if (block == skip) continue; *last_unlinked = block; error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl); if (error) continue; /* If the inode is already in cache, we can ignore it here * because the existing inode disposal code will deal with * it when all refs have gone away. Accessing gl_object like * this is not safe in general. 
Here it is ok because we do * not dereference the pointer, and we only need an approx * answer to whether it is NULL or not. */ ip = gl->gl_object; if (ip || !gfs2_queue_verify_delete(gl, false)) gfs2_glock_put(gl); else found++; /* Limit reclaim to sensible number of tasks */ if (found > NR_CPUS) return; } rgd->rd_flags &= ~GFS2_RDF_CHECK; return; } /** * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested * @rgd: The rgrp in question * @loops: An indication of how picky we can be (0=very, 1=less so) * * This function uses the recently added glock statistics in order to * figure out whether a particular resource group is suffering from * contention from multiple nodes. This is done purely on the basis * of timings, since this is the only data we have to work with and * our aim here is to reject a resource group which is highly contended * but (very important) not to do this too often in order to ensure that * we do not end up introducing fragmentation by changing resource * groups when not actually required. * * The calculation is fairly simple, we want to know whether the SRTTB * (i.e. smoothed round trip time for blocking operations) to acquire * the lock for this rgrp's glock is significantly greater than the * time taken for resource groups on average. We introduce a margin in * the form of the variable @var which is computed as the sum of the two * respective variances, and multiplied by a factor depending on @loops * and whether we have a lot of data to base the decision on. This is * then tested against the square difference of the means in order to * decide whether the result is statistically significant or not. * * Returns: A boolean verdict on the congestion status */ static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops) { const struct gfs2_glock *gl = rgd->rd_gl; const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_lkstats *st; u64 r_dcount, l_dcount; u64 l_srttb, a_srttb = 0; s64 srttb_diff; u64 sqr_diff; u64 var; int cpu, nonzero = 0; preempt_disable(); for_each_present_cpu(cpu) { st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP]; if (st->stats[GFS2_LKS_SRTTB]) { a_srttb += st->stats[GFS2_LKS_SRTTB]; nonzero++; } } st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP]; if (nonzero) do_div(a_srttb, nonzero); r_dcount = st->stats[GFS2_LKS_DCOUNT]; var = st->stats[GFS2_LKS_SRTTVARB] + gl->gl_stats.stats[GFS2_LKS_SRTTVARB]; preempt_enable(); l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB]; l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT]; if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0)) return false; srttb_diff = a_srttb - l_srttb; sqr_diff = srttb_diff * srttb_diff; var *= 2; if (l_dcount < 8 || r_dcount < 8) var *= 2; if (loops == 1) var *= 2; return ((srttb_diff < 0) && (sqr_diff > var)); } /** * gfs2_rgrp_used_recently - test if an rgrp has been used recently * @rs: The block reservation with the rgrp to test * @msecs: The time limit in milliseconds * * Returns: True if the rgrp glock has been used within the time limit */ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs, u64 msecs) { u64 tdiff; tdiff = ktime_to_ns(ktime_sub(ktime_get_real(), rs->rs_rgd->rd_gl->gl_dstamp)); return tdiff > (msecs * 1000 * 1000); } static u32 gfs2_orlov_skip(const struct gfs2_inode *ip) { const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); return get_random_u32() % sdp->sd_rgrps; } static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin) { struct gfs2_rgrpd *rgd = *pos; struct
gfs2_sbd *sdp = rgd->rd_sbd; rgd = gfs2_rgrpd_get_next(rgd); if (rgd == NULL) rgd = gfs2_rgrpd_get_first(sdp); *pos = rgd; if (rgd != begin) /* If we didn't wrap */ return true; return false; } /** * fast_to_acquire - determine if a resource group will be fast to acquire * @rgd: The rgrp * * If this is one of our preferred rgrps, it should be quicker to acquire, * because we tried to set ourselves up as dlm lock master. */ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd) { struct gfs2_glock *gl = rgd->rd_gl; if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && !test_bit(GLF_DEMOTE, &gl->gl_flags)) return 1; if (rgd->rd_flags & GFS2_RDF_PREFERRED) return 1; return 0; } /** * gfs2_inplace_reserve - Reserve space in the filesystem * @ip: the inode to reserve space for * @ap: the allocation parameters * * We try our best to find an rgrp that has at least ap->target blocks * available. After a couple of passes (loops == 2), the prospects of finding * such an rgrp diminish. At this stage, we return the first rgrp that has * at least ap->min_target blocks available. * * Returns: 0 on success, * -ENOMEM if a suitable rgrp can't be found * errno otherwise */ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *begin = NULL; struct gfs2_blkreserv *rs = &ip->i_res; int error = 0, flags = LM_FLAG_NODE_SCOPE; bool rg_locked; u64 last_unlinked = NO_BLOCK; u32 target = ap->target; int loops = 0; u32 free_blocks, blocks_available, skip = 0; BUG_ON(rs->rs_reserved); if (sdp->sd_args.ar_rgrplvb) flags |= GL_SKIP; if (gfs2_assert_warn(sdp, target)) return -EINVAL; if (gfs2_rs_active(rs)) { begin = rs->rs_rgd; } else if (rs->rs_rgd && rgrp_contains_block(rs->rs_rgd, ip->i_goal)) { begin = rs->rs_rgd; } else { check_and_update_goal(ip); rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); } if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV)) skip = gfs2_orlov_skip(ip); if (rs->rs_rgd == NULL) return -EBADSLT; while (loops < 3) { struct gfs2_rgrpd *rgd; rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl); if (rg_locked) { rgrp_lock_local(rs->rs_rgd); } else { if (skip && skip--) goto next_rgrp; if (!gfs2_rs_active(rs)) { if (loops == 0 && !fast_to_acquire(rs->rs_rgd)) goto next_rgrp; if ((loops < 2) && gfs2_rgrp_used_recently(rs, 1000) && gfs2_rgrp_congested(rs->rs_rgd, loops)) goto next_rgrp; } error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl, LM_ST_EXCLUSIVE, flags, &ip->i_rgd_gh); if (unlikely(error)) return error; rgrp_lock_local(rs->rs_rgd); if (!gfs2_rs_active(rs) && (loops < 2) && gfs2_rgrp_congested(rs->rs_rgd, loops)) goto skip_rgrp; if (sdp->sd_args.ar_rgrplvb) { error = update_rgrp_lvb(rs->rs_rgd, &ip->i_rgd_gh); if (unlikely(error)) { rgrp_unlock_local(rs->rs_rgd); gfs2_glock_dq_uninit(&ip->i_rgd_gh); return error; } } } /* Skip unusable resource groups */ if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) || (loops == 0 && target > rs->rs_rgd->rd_extfail_pt)) goto skip_rgrp; if (sdp->sd_args.ar_rgrplvb) { error = gfs2_instantiate(&ip->i_rgd_gh); if (error) goto skip_rgrp; } /* Get a reservation if we don't already have one */ if (!gfs2_rs_active(rs)) rg_mblk_search(rs->rs_rgd, ip, ap); /* Skip rgrps when we can't get a reservation on first pass */ if (!gfs2_rs_active(rs) && (loops < 1)) goto check_rgrp; /* If rgrp has enough free space, use it */ rgd = rs->rs_rgd; spin_lock(&rgd->rd_rsspin); 
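/* With rd_rsspin held, recheck how many blocks this rgrp can really provide once other inodes' outstanding reservations are taken into account. */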
free_blocks = rgd_free(rgd, rs); blocks_available = rgd->rd_free_clone - rgd->rd_reserved; if (free_blocks < target || blocks_available < target) { spin_unlock(&rgd->rd_rsspin); goto check_rgrp; } rs->rs_reserved = ap->target; if (rs->rs_reserved > blocks_available) rs->rs_reserved = blocks_available; rgd->rd_reserved += rs->rs_reserved; spin_unlock(&rgd->rd_rsspin); rgrp_unlock_local(rs->rs_rgd); return 0; check_rgrp: /* Check for unlinked inodes which can be reclaimed */ if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK) try_rgrp_unlink(rs->rs_rgd, &last_unlinked, ip->i_no_addr); skip_rgrp: rgrp_unlock_local(rs->rs_rgd); /* Drop reservation, if we couldn't use reserved rgrp */ if (gfs2_rs_active(rs)) gfs2_rs_deltree(rs); /* Unlock rgrp if required */ if (!rg_locked) gfs2_glock_dq_uninit(&ip->i_rgd_gh); next_rgrp: /* Find the next rgrp, and continue looking */ if (gfs2_select_rgrp(&rs->rs_rgd, begin)) continue; if (skip) continue; /* If we've scanned all the rgrps, but found no free blocks * then this checks for some less likely conditions before * trying again. */ loops++; /* Check that fs hasn't grown if writing to rindex */ if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) { error = gfs2_ri_update(ip); if (error) return error; } /* Flushing the log may release space */ if (loops == 2) { if (ap->min_target) target = ap->min_target; gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_INPLACE_RESERVE); } } return -ENOSPC; } /** * gfs2_inplace_release - release an inplace reservation * @ip: the inode the reservation was taken out on * * Release a reservation made by gfs2_inplace_reserve(). */ void gfs2_inplace_release(struct gfs2_inode *ip) { struct gfs2_blkreserv *rs = &ip->i_res; if (rs->rs_reserved) { struct gfs2_rgrpd *rgd = rs->rs_rgd; spin_lock(&rgd->rd_rsspin); GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved < rs->rs_reserved); rgd->rd_reserved -= rs->rs_reserved; spin_unlock(&rgd->rd_rsspin); rs->rs_reserved = 0; } if (gfs2_holder_initialized(&ip->i_rgd_gh)) gfs2_glock_dq_uninit(&ip->i_rgd_gh); } /** * gfs2_alloc_extent - allocate an extent from a given bitmap * @rbm: the resource group information * @dinode: TRUE if the first block we allocate is for a dinode * @n: The extent length (value/result) * * Add the bitmap buffer to the transaction. * Set the found bits to @new_state to change block's allocation state. */ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode, unsigned int *n) { struct gfs2_rbm pos = { .rgd = rbm->rgd, }; const unsigned int elen = *n; u64 block; int ret; *n = 1; block = gfs2_rbm_to_block(rbm); gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh); gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); block++; while (*n < elen) { ret = gfs2_rbm_from_block(&pos, block); if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE) break; gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh); gfs2_setbit(&pos, true, GFS2_BLKST_USED); (*n)++; block++; } } /** * rgblk_free - Change alloc state of given block(s) * @sdp: the filesystem * @rgd: the resource group the blocks are in * @bstart: the start of a run of blocks to free * @blen: the length of the block run (all must lie within ONE RG!) 
* @new_state: GFS2_BLKST_XXX the after-allocation block state */ static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd, u64 bstart, u32 blen, unsigned char new_state) { struct gfs2_rbm rbm; struct gfs2_bitmap *bi, *bi_prev = NULL; rbm.rgd = rgd; if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart))) return; while (blen--) { bi = rbm_bi(&rbm); if (bi != bi_prev) { if (!bi->bi_clone) { bi->bi_clone = kmalloc(bi->bi_bh->b_size, GFP_NOFS | __GFP_NOFAIL); memcpy(bi->bi_clone + bi->bi_offset, bi->bi_bh->b_data + bi->bi_offset, bi->bi_bytes); } gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh); bi_prev = bi; } gfs2_setbit(&rbm, false, new_state); gfs2_rbm_add(&rbm, 1); } } /** * gfs2_rgrp_dump - print out an rgrp * @seq: The iterator * @rgd: The rgrp in question * @fs_id_buf: pointer to file system id (if requested) * */ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd, const char *fs_id_buf) { struct gfs2_blkreserv *trs; const struct rb_node *n; spin_lock(&rgd->rd_rsspin); gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u q:%u r:%u e:%u\n", fs_id_buf, (unsigned long long)rgd->rd_addr, rgd->rd_flags, rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes, rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt); if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) { struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl; gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf, be32_to_cpu(rgl->rl_flags), be32_to_cpu(rgl->rl_free), be32_to_cpu(rgl->rl_dinodes)); } for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) { trs = rb_entry(n, struct gfs2_blkreserv, rs_node); dump_rs(seq, trs, fs_id_buf); } spin_unlock(&rgd->rd_rsspin); } static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd) { struct gfs2_sbd *sdp = rgd->rd_sbd; char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n", (unsigned long long)rgd->rd_addr); fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n"); sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); gfs2_rgrp_dump(NULL, rgd, fs_id_buf); rgd->rd_flags |= GFS2_RDF_ERROR; } /** * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation * @ip: The inode we have just allocated blocks for * @rbm: The start of the allocated blocks * @len: The extent length * * Adjusts a reservation after an allocation has taken place. If the * reservation does not match the allocation, or if it is now empty * then it is removed. */ static void gfs2_adjust_reservation(struct gfs2_inode *ip, const struct gfs2_rbm *rbm, unsigned len) { struct gfs2_blkreserv *rs = &ip->i_res; struct gfs2_rgrpd *rgd = rbm->rgd; BUG_ON(rs->rs_reserved < len); rs->rs_reserved -= len; if (gfs2_rs_active(rs)) { u64 start = gfs2_rbm_to_block(rbm); if (rs->rs_start == start) { unsigned int rlen; rs->rs_start += len; rlen = min(rs->rs_requested, len); rs->rs_requested -= rlen; rgd->rd_requested -= rlen; trace_gfs2_rs(rs, TRACE_RS_CLAIM); if (rs->rs_start < rgd->rd_data0 + rgd->rd_data && rs->rs_requested) return; /* We used up our block reservation, so we should reserve more blocks next time. 
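Bumping i_sizehint by RGRP_RSRV_ADDBLKS below means the next call to rg_mblk_search() will ask for a larger extent.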
*/ atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint); } __rs_deltree(rs); } } /** * gfs2_set_alloc_start - Set starting point for block allocation * @rbm: The rbm which will be set to the required location * @ip: The gfs2 inode * @dinode: Flag to say if allocation includes a new inode * * This sets the starting point from the reservation if one is active * otherwise it falls back to guessing a start point based on the * inode's goal block or the last allocation point in the rgrp. */ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm, const struct gfs2_inode *ip, bool dinode) { u64 goal; if (gfs2_rs_active(&ip->i_res)) { goal = ip->i_res.rs_start; } else { if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal)) goal = ip->i_goal; else goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0; } if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) { rbm->bii = 0; rbm->offset = 0; } } /** * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode * @ip: the inode to allocate the block for * @bn: Used to return the starting block number * @nblocks: requested number of blocks/extent length (value/result) * @dinode: 1 if we're allocating a dinode block, else 0 * * Returns: 0 or error */ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, bool dinode) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *dibh; struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rgd, }; u64 block; /* block, within the file system scope */ u32 minext = 1; int error = -ENOSPC; BUG_ON(ip->i_res.rs_reserved < *nblocks); rgrp_lock_local(rbm.rgd); if (gfs2_rs_active(&ip->i_res)) { gfs2_set_alloc_start(&rbm, ip, dinode); error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, &ip->i_res, false); } if (error == -ENOSPC) { gfs2_set_alloc_start(&rbm, ip, dinode); error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, NULL, false); } /* Since all blocks are reserved in advance, this shouldn't happen */ if (error) { fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n", (unsigned long long)ip->i_no_addr, error, *nblocks, test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags), rbm.rgd->rd_extfail_pt); goto rgrp_error; } gfs2_alloc_extent(&rbm, dinode, nblocks); block = gfs2_rbm_to_block(&rbm); rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0; if (!dinode) { ip->i_goal = block + *nblocks - 1; error = gfs2_meta_inode_buffer(ip, &dibh); if (error == 0) { struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; gfs2_trans_add_meta(ip->i_gl, dibh); di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal); brelse(dibh); } } spin_lock(&rbm.rgd->rd_rsspin); gfs2_adjust_reservation(ip, &rbm, *nblocks); if (rbm.rgd->rd_free < *nblocks || rbm.rgd->rd_reserved < *nblocks) { fs_warn(sdp, "nblocks=%u\n", *nblocks); spin_unlock(&rbm.rgd->rd_rsspin); goto rgrp_error; } GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_reserved < *nblocks); GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free_clone < *nblocks); GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free < *nblocks); rbm.rgd->rd_reserved -= *nblocks; rbm.rgd->rd_free_clone -= *nblocks; rbm.rgd->rd_free -= *nblocks; spin_unlock(&rbm.rgd->rd_rsspin); if (dinode) { u64 generation; rbm.rgd->rd_dinodes++; generation = rbm.rgd->rd_igeneration++; if (generation == 0) generation = rbm.rgd->rd_igeneration++; ip->i_generation = generation; } gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data); rgrp_unlock_local(rbm.rgd); gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 
1 : 0); if (dinode) gfs2_trans_remove_revoke(sdp, block, *nblocks); gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid); trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); *bn = block; return 0; rgrp_error: rgrp_unlock_local(rbm.rgd); gfs2_rgrp_error(rbm.rgd); return -EIO; } /** * __gfs2_free_blocks - free a contiguous run of block(s) * @ip: the inode these blocks are being freed from * @rgd: the resource group the blocks are in * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * @meta: 1 if the blocks represent metadata * */ void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, u64 bstart, u32 blen, int meta) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); rgrp_lock_local(rgd); rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE); trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); rgd->rd_free += blen; rgd->rd_flags &= ~GFS2_RGF_TRIMMED; gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); rgrp_unlock_local(rgd); /* Directories keep their data in the metadata address space */ if (meta || ip->i_depth || gfs2_is_jdata(ip)) gfs2_journal_wipe(ip, bstart, blen); } /** * gfs2_free_meta - free a contiguous run of data block(s) * @ip: the inode these blocks are being freed from * @rgd: the resource group the blocks are in * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * */ void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); __gfs2_free_blocks(ip, rgd, bstart, blen, 1); gfs2_statfs_change(sdp, 0, +blen, 0); gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); } void gfs2_unlink_di(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_rgrpd *rgd; u64 blkno = ip->i_no_addr; rgd = gfs2_blk2rgrpd(sdp, blkno, true); if (!rgd) return; rgrp_lock_local(rgd); rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED); trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1); rgrp_unlock_local(rgd); } void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) { struct gfs2_sbd *sdp = rgd->rd_sbd; rgrp_lock_local(rgd); rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); if (!rgd->rd_dinodes) gfs2_consist_rgrpd(rgd); rgd->rd_dinodes--; rgd->rd_free++; gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1); rgrp_unlock_local(rgd); gfs2_statfs_change(sdp, 0, +1, -1); trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_journal_wipe(ip, ip->i_no_addr, 1); } /** * gfs2_check_blk_type - Check the type of a block * @sdp: The superblock * @no_addr: The block number to check * @type: The block type we are looking for * * The inode glock of @no_addr must be held. The @type to check for is either * GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED; checking for type GFS2_BLKST_FREE * or GFS2_BLKST_USED would make no sense. 
* * Returns: 0 if the block type matches the expected type * -ESTALE if it doesn't match * or -ve errno if something went wrong while checking */ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) { struct gfs2_rgrpd *rgd; struct gfs2_holder rgd_gh; struct gfs2_rbm rbm; int error = -EINVAL; rgd = gfs2_blk2rgrpd(sdp, no_addr, 1); if (!rgd) goto fail; error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh); if (error) goto fail; rbm.rgd = rgd; error = gfs2_rbm_from_block(&rbm, no_addr); if (!WARN_ON_ONCE(error)) { /* * No need to take the local resource group lock here; the * inode glock of @no_addr provides the necessary * synchronization in case the block is an inode. (In case * the block is not an inode, the block type will not match * the @type we are looking for.) */ if (gfs2_testbit(&rbm, false) != type) error = -ESTALE; } gfs2_glock_dq_uninit(&rgd_gh); fail: return error; } /** * gfs2_rlist_add - add a RG to a list of RGs * @ip: the inode * @rlist: the list of resource groups * @block: the block * * Figure out what RG a block belongs to and add that RG to the list * * FIXME: Don't use NOFAIL * */ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, u64 block) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; struct gfs2_rgrpd **tmp; unsigned int new_space; unsigned int x; if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) return; /* * The resource group last accessed is kept in the last position. */ if (rlist->rl_rgrps) { rgd = rlist->rl_rgd[rlist->rl_rgrps - 1]; if (rgrp_contains_block(rgd, block)) return; rgd = gfs2_blk2rgrpd(sdp, block, 1); } else { rgd = ip->i_res.rs_rgd; if (!rgd || !rgrp_contains_block(rgd, block)) rgd = gfs2_blk2rgrpd(sdp, block, 1); } if (!rgd) { fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block); return; } for (x = 0; x < rlist->rl_rgrps; x++) { if (rlist->rl_rgd[x] == rgd) { swap(rlist->rl_rgd[x], rlist->rl_rgd[rlist->rl_rgrps - 1]); return; } } if (rlist->rl_rgrps == rlist->rl_space) { new_space = rlist->rl_space + 10; tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *), GFP_NOFS | __GFP_NOFAIL); if (rlist->rl_rgd) { memcpy(tmp, rlist->rl_rgd, rlist->rl_space * sizeof(struct gfs2_rgrpd *)); kfree(rlist->rl_rgd); } rlist->rl_space = new_space; rlist->rl_rgd = tmp; } rlist->rl_rgd[rlist->rl_rgrps++] = rgd; } /** * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate * and initialize an array of glock holders for them * @rlist: the list of resource groups * @state: the state we're requesting * @flags: the modifier flags * * FIXME: Don't use NOFAIL * */ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state, u16 flags) { unsigned int x; rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps, sizeof(struct gfs2_holder), GFP_NOFS | __GFP_NOFAIL); for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, flags, &rlist->rl_ghs[x]); } /** * gfs2_rlist_free - free a resource group list * @rlist: the list of resource groups * */ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist) { unsigned int x; kfree(rlist->rl_rgd); if (rlist->rl_ghs) { for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_uninit(&rlist->rl_ghs[x]); kfree(rlist->rl_ghs); rlist->rl_ghs = NULL; } } void rgrp_lock_local(struct gfs2_rgrpd *rgd) { mutex_lock(&rgd->rd_mutex); } void rgrp_unlock_local(struct gfs2_rgrpd *rgd) { mutex_unlock(&rgd->rd_mutex); }
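The allocation bitmaps above pack four blocks into every byte, two bits per block, and gfs2_bit_search()/gfs2_bitfit() scan them a 64-bit word at a time: XOR the word with the complement of the wanted state pattern so matching pairs become binary 11, AND the word with itself shifted right by one, then mask with 0x5555... so only the low bit of each matching pair survives. What follows is a minimal, self-contained userspace sketch of that trick, illustrative only: the helper names are invented, the state values simply mirror GFS2_BLKST_FREE/USED/UNLINKED/DINODE as 0..3, __builtin_ctzll stands in for the kernel's __ffs64(), and the goal offset and end-of-buffer masking of the real gfs2_bitfit() are left out.

#include <stdint.h>
#include <stdio.h>

enum { BLKST_FREE = 0, BLKST_USED = 1, BLKST_UNLINKED = 2, BLKST_DINODE = 3 };

/* XOR with the complemented state pattern turns every matching two-bit pair
 * into binary 11; tmp & (tmp >> 1) keeps the low bit of a pair only when both
 * bits were set; the final mask drops the high bit of every pair. */
static uint64_t word_search(uint64_t word, unsigned int state)
{
	static const uint64_t pattern[] = {
		0xffffffffffffffffULL,	/* ~00 repeated: look for FREE */
		0xaaaaaaaaaaaaaaaaULL,	/* ~01 repeated: look for USED */
		0x5555555555555555ULL,	/* ~10 repeated: look for UNLINKED */
		0x0000000000000000ULL,	/* ~11 repeated: look for DINODE */
	};
	uint64_t tmp = word ^ pattern[state];

	tmp &= (tmp >> 1);
	return tmp & 0x5555555555555555ULL;
}

/* Return the index of the first entry in the given state, or -1 if none.
 * Each 64-bit word holds 32 two-bit entries. */
static int find_first(const uint64_t *words, unsigned int nwords,
		      unsigned int state)
{
	unsigned int i;

	for (i = 0; i < nwords; i++) {
		uint64_t hits = word_search(words[i], state);

		if (hits)
			return i * 32 + __builtin_ctzll(hits) / 2;
	}
	return -1;
}

int main(void)
{
	/* 64 entries, all USED (01 pairs) ... */
	uint64_t bitmap[2] = { 0x5555555555555555ULL, 0x5555555555555555ULL };

	/* ... except entry 37, which we clear back to FREE (00). */
	bitmap[1] &= ~(3ULL << ((37 - 32) * 2));

	printf("first free: %d\n", find_first(bitmap, 2, BLKST_FREE));		/* prints 37 */
	printf("first unlinked: %d\n", find_first(bitmap, 2, BLKST_UNLINKED));	/* prints -1 */
	return 0;
}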
// SPDX-License-Identifier: GPL-2.0-or-later /* * lwtunnel Infrastructure for light weight tunnels like mpls * * Authors: Roopa Prabhu, <roopa@cumulusnetworks.com> */ #include <linux/capability.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/lwtunnel.h> #include <linux/in.h> #include <linux/init.h> #include <linux/err.h> #include <net/lwtunnel.h> #include <net/rtnetlink.h> #include <net/ip6_fib.h> #include <net/rtnh.h> DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled); EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled); #ifdef CONFIG_MODULES static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type) { /* Only lwt encaps implemented without using an interface for * the encap need to return a string here.
*/ switch (encap_type) { case LWTUNNEL_ENCAP_MPLS: return "MPLS"; case LWTUNNEL_ENCAP_ILA: return "ILA"; case LWTUNNEL_ENCAP_SEG6: return "SEG6"; case LWTUNNEL_ENCAP_BPF: return "BPF"; case LWTUNNEL_ENCAP_SEG6_LOCAL: return "SEG6LOCAL"; case LWTUNNEL_ENCAP_RPL: return "RPL"; case LWTUNNEL_ENCAP_IOAM6: return "IOAM6"; case LWTUNNEL_ENCAP_XFRM: /* module autoload not supported for encap type */ return NULL; case LWTUNNEL_ENCAP_IP6: case LWTUNNEL_ENCAP_IP: case LWTUNNEL_ENCAP_NONE: case __LWTUNNEL_ENCAP_MAX: /* should not have got here */ WARN_ON(1); break; } return NULL; } #endif /* CONFIG_MODULES */ struct lwtunnel_state *lwtunnel_state_alloc(int encap_len) { struct lwtunnel_state *lws; lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC); return lws; } EXPORT_SYMBOL_GPL(lwtunnel_state_alloc); static const struct lwtunnel_encap_ops __rcu * lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly; int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops, unsigned int num) { if (num > LWTUNNEL_ENCAP_MAX) return -ERANGE; return !cmpxchg((const struct lwtunnel_encap_ops **) &lwtun_encaps[num], NULL, ops) ? 0 : -1; } EXPORT_SYMBOL_GPL(lwtunnel_encap_add_ops); int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops, unsigned int encap_type) { int ret; if (encap_type == LWTUNNEL_ENCAP_NONE || encap_type > LWTUNNEL_ENCAP_MAX) return -ERANGE; ret = (cmpxchg((const struct lwtunnel_encap_ops **) &lwtun_encaps[encap_type], ops, NULL) == ops) ? 0 : -1; synchronize_net(); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_encap_del_ops); int lwtunnel_build_state(struct net *net, u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws, struct netlink_ext_ack *extack) { const struct lwtunnel_encap_ops *ops; bool found = false; int ret = -EINVAL; if (encap_type == LWTUNNEL_ENCAP_NONE || encap_type > LWTUNNEL_ENCAP_MAX) { NL_SET_ERR_MSG_ATTR(extack, encap, "Unknown LWT encapsulation type"); return ret; } ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[encap_type]); if (likely(ops && ops->build_state && try_module_get(ops->owner))) found = true; rcu_read_unlock(); if (found) { ret = ops->build_state(net, encap, family, cfg, lws, extack); if (ret) module_put(ops->owner); } else { /* don't rely on -EOPNOTSUPP to detect match as build_state * handlers could return it */ NL_SET_ERR_MSG_ATTR(extack, encap, "LWT encapsulation type not supported"); } return ret; } EXPORT_SYMBOL_GPL(lwtunnel_build_state); int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) { const struct lwtunnel_encap_ops *ops; int ret = -EINVAL; if (encap_type == LWTUNNEL_ENCAP_NONE || encap_type > LWTUNNEL_ENCAP_MAX) { NL_SET_ERR_MSG(extack, "Unknown lwt encapsulation type"); return ret; } rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[encap_type]); rcu_read_unlock(); #ifdef CONFIG_MODULES if (!ops) { const char *encap_type_str = lwtunnel_encap_str(encap_type); if (encap_type_str) { __rtnl_unlock(); request_module("rtnl-lwt-%s", encap_type_str); rtnl_lock(); rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[encap_type]); rcu_read_unlock(); } } #endif ret = ops ? 
0 : -EOPNOTSUPP; if (ret < 0) NL_SET_ERR_MSG(extack, "lwt encapsulation type not supported"); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, struct netlink_ext_ack *extack) { struct rtnexthop *rtnh = (struct rtnexthop *)attr; struct nlattr *nla_entype; struct nlattr *attrs; u16 encap_type; int attrlen; while (rtnh_ok(rtnh, remaining)) { attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { attrs = rtnh_attrs(rtnh); nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (nla_entype) { if (nla_len(nla_entype) < sizeof(u16)) { NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE"); return -EINVAL; } encap_type = nla_get_u16(nla_entype); if (lwtunnel_valid_encap_type(encap_type, extack) != 0) return -EOPNOTSUPP; } } rtnh = rtnh_next(rtnh, &remaining); } return 0; } EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type_attr); void lwtstate_free(struct lwtunnel_state *lws) { const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type]; if (ops->destroy_state) { ops->destroy_state(lws); kfree_rcu(lws, rcu); } else { kfree(lws); } module_put(ops->owner); } EXPORT_SYMBOL_GPL(lwtstate_free); int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate, int encap_attr, int encap_type_attr) { const struct lwtunnel_encap_ops *ops; struct nlattr *nest; int ret; if (!lwtstate) return 0; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || lwtstate->type > LWTUNNEL_ENCAP_MAX) return 0; nest = nla_nest_start_noflag(skb, encap_attr); if (!nest) return -EMSGSIZE; ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->fill_encap)) ret = ops->fill_encap(skb, lwtstate); rcu_read_unlock(); if (ret) goto nla_put_failure; nla_nest_end(skb, nest); ret = nla_put_u16(skb, encap_type_attr, lwtstate->type); if (ret) goto nla_put_failure; return 0; nla_put_failure: nla_nest_cancel(skb, nest); return (ret == -EOPNOTSUPP ? 
0 : ret); } EXPORT_SYMBOL_GPL(lwtunnel_fill_encap); int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate) { const struct lwtunnel_encap_ops *ops; int ret = 0; if (!lwtstate) return 0; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || lwtstate->type > LWTUNNEL_ENCAP_MAX) return 0; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->get_encap_size)) ret = nla_total_size(ops->get_encap_size(lwtstate)); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_get_encap_size); int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b) { const struct lwtunnel_encap_ops *ops; int ret = 0; if (!a && !b) return 0; if (!a || !b) return 1; if (a->type != b->type) return 1; if (a->type == LWTUNNEL_ENCAP_NONE || a->type > LWTUNNEL_ENCAP_MAX) return 0; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[a->type]); if (likely(ops && ops->cmp_encap)) ret = ops->cmp_encap(a, b); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap); int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; int ret = -EINVAL; if (!dst) goto drop; lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || lwtstate->type > LWTUNNEL_ENCAP_MAX) return 0; ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->output)) ret = ops->output(net, sk, skb); rcu_read_unlock(); if (ret == -EOPNOTSUPP) goto drop; return ret; drop: kfree_skb(skb); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_output); int lwtunnel_xmit(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; int ret = -EINVAL; if (!dst) goto drop; lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || lwtstate->type > LWTUNNEL_ENCAP_MAX) return 0; ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->xmit)) ret = ops->xmit(skb); rcu_read_unlock(); if (ret == -EOPNOTSUPP) goto drop; return ret; drop: kfree_skb(skb); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_xmit); int lwtunnel_input(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; int ret = -EINVAL; if (!dst) goto drop; lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || lwtstate->type > LWTUNNEL_ENCAP_MAX) return 0; ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->input)) ret = ops->input(skb); rcu_read_unlock(); if (ret == -EOPNOTSUPP) goto drop; return ret; drop: kfree_skb(skb); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_input);
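/*
 * Editor's sketch (not part of the original file): a hedged illustration of
 * how an encap provider plugs into the lwtun_encaps[] registry above.  The
 * "example_*" names are placeholders; a real provider would use its own
 * LWTUNNEL_ENCAP_* value and fill in the remaining lwtunnel_encap_ops
 * callbacks (input/output/xmit, fill_encap, get_encap_size, cmp_encap,
 * destroy_state).  LWTUNNEL_ENCAP_MPLS is used here only because it has an
 * autoload string in lwtunnel_encap_str().
 */
static int example_build_state(struct net *net, struct nlattr *encap,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts,
			       struct netlink_ext_ack *extack)
{
	/* Parse @encap, allocate state via lwtunnel_state_alloc(), ... */
	return -EOPNOTSUPP;
}

static const struct lwtunnel_encap_ops example_encap_ops = {
	.build_state	= example_build_state,
	.owner		= THIS_MODULE,
};

static int __init example_encap_init(void)
{
	/* Claims the slot; returns -1 if another provider got there first. */
	return lwtunnel_encap_add_ops(&example_encap_ops, LWTUNNEL_ENCAP_MPLS);
}
module_init(example_encap_init);

static void __exit example_encap_exit(void)
{
	lwtunnel_encap_del_ops(&example_encap_ops, LWTUNNEL_ENCAP_MPLS);
}
module_exit(example_encap_exit);

/* Lets lwtunnel_valid_encap_type() autoload the module via "rtnl-lwt-MPLS". */
MODULE_ALIAS_RTNL_LWT(MPLS);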
// SPDX-License-Identifier: GPL-2.0-or-later /* * Host Side support for RNDIS Networking Links * Copyright (C) 2005 by David Brownell */ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/usb/rndis_host.h> /* * RNDIS is NDIS remoted over USB. It's a MSFT variant of CDC ACM ... of * course ACM was intended for modems, not Ethernet links! USB's standard * for Ethernet links is "CDC Ethernet", which is significantly simpler. * * NOTE that Microsoft's "RNDIS 1.0" specification is incomplete.
Issues * include: * - Power management in particular relies on information that's scattered * through other documentation, and which is incomplete or incorrect even * there. * - There are various undocumented protocol requirements, such as the * need to send unused garbage in control-OUT messages. * - In some cases, MS-Windows will emit undocumented requests; this * matters more to peripheral implementations than host ones. * * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync". * * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and * currently rare) "Ethernet Emulation Model" (EEM). */ /* * RNDIS notifications from device: command completion; "reverse" * keepalives; etc */ void rndis_status(struct usbnet *dev, struct urb *urb) { netdev_dbg(dev->net, "rndis status urb, len %d stat %d\n", urb->actual_length, urb->status); // FIXME for keepalives, respond immediately (asynchronously) // if not an RNDIS status, do like cdc_status(dev,urb) does } EXPORT_SYMBOL_GPL(rndis_status); /* * RNDIS indicate messages. */ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg, int buflen) { struct cdc_state *info = (void *)&dev->data; struct device *udev = &info->control->dev; if (dev->driver_info->indication) { dev->driver_info->indication(dev, msg, buflen); } else { u32 status = le32_to_cpu(msg->status); switch (status) { case RNDIS_STATUS_MEDIA_CONNECT: dev_info(udev, "rndis media connect\n"); break; case RNDIS_STATUS_MEDIA_DISCONNECT: dev_info(udev, "rndis media disconnect\n"); break; default: dev_info(udev, "rndis indication: 0x%08x\n", status); } } } /* * RPC done RNDIS-style. Caller guarantees: * - message is properly byteswapped * - there's no other request pending * - buf can hold up to 1KB response (required by RNDIS spec) * On return, the first few entries are already byteswapped. * * Call context is likely probe(), before interface name is known, * which is why we won't try to use it in the diagnostics. */ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) { struct cdc_state *info = (void *) &dev->data; struct usb_cdc_notification notification; int master_ifnum; int retval; int partial; unsigned count; u32 xid = 0, msg_len, request_id, msg_type, rsp, status; /* REVISIT when this gets called from contexts other than probe() or * disconnect(): either serialize, or dispatch responses on xid */ msg_type = le32_to_cpu(buf->msg_type); /* Issue the request; xid is unique, don't bother byteswapping it */ if (likely(msg_type != RNDIS_MSG_HALT && msg_type != RNDIS_MSG_RESET)) { xid = dev->xid++; if (!xid) xid = dev->xid++; buf->request_id = (__force __le32) xid; } master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber; retval = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_CDC_SEND_ENCAPSULATED_COMMAND, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, master_ifnum, buf, le32_to_cpu(buf->msg_len), RNDIS_CONTROL_TIMEOUT_MS); if (unlikely(retval < 0 || xid == 0)) return retval; /* Some devices don't respond on the control channel until * polled on the status channel, so do that first. 
*/ if (dev->driver_info->data & RNDIS_DRIVER_DATA_POLL_STATUS) { retval = usb_interrupt_msg( dev->udev, usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress), &notification, sizeof(notification), &partial, RNDIS_CONTROL_TIMEOUT_MS); if (unlikely(retval < 0)) return retval; } /* Poll the control channel; the request probably completed immediately */ rsp = le32_to_cpu(buf->msg_type) | RNDIS_MSG_COMPLETION; for (count = 0; count < 10; count++) { memset(buf, 0, CONTROL_BUFFER_SIZE); retval = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_CDC_GET_ENCAPSULATED_RESPONSE, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, master_ifnum, buf, buflen, RNDIS_CONTROL_TIMEOUT_MS); if (likely(retval >= 8)) { msg_type = le32_to_cpu(buf->msg_type); msg_len = le32_to_cpu(buf->msg_len); status = le32_to_cpu(buf->status); request_id = (__force u32) buf->request_id; if (likely(msg_type == rsp)) { if (likely(request_id == xid)) { if (unlikely(rsp == RNDIS_MSG_RESET_C)) return 0; if (likely(RNDIS_STATUS_SUCCESS == status)) return 0; dev_dbg(&info->control->dev, "rndis reply status %08x\n", status); return -EL3RST; } dev_dbg(&info->control->dev, "rndis reply id %d expected %d\n", request_id, xid); /* then likely retry */ } else switch (msg_type) { case RNDIS_MSG_INDICATE: /* fault/event */ rndis_msg_indicate(dev, (void *)buf, buflen); break; case RNDIS_MSG_KEEPALIVE: { /* ping */ struct rndis_keepalive_c *msg = (void *)buf; msg->msg_type = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C); msg->msg_len = cpu_to_le32(sizeof *msg); msg->status = cpu_to_le32(RNDIS_STATUS_SUCCESS); retval = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_CDC_SEND_ENCAPSULATED_COMMAND, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, master_ifnum, msg, sizeof *msg, RNDIS_CONTROL_TIMEOUT_MS); if (unlikely(retval < 0)) dev_dbg(&info->control->dev, "rndis keepalive err %d\n", retval); } break; default: dev_dbg(&info->control->dev, "unexpected rndis msg %08x len %d\n", le32_to_cpu(buf->msg_type), msg_len); } } else { /* device probably issued a protocol stall; ignore */ dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } msleep(40); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; } EXPORT_SYMBOL_GPL(rndis_command); /* * rndis_query: * * Performs a query for @oid along with 0 or more bytes of payload as * specified by @in_len. If @reply_len is not set to -1 then the reply * length is checked against this value, resulting in an error if it * doesn't match. * * NOTE: Adding a payload exactly or greater than the size of the expected * response payload is an evident requirement MSFT added for ActiveSync. * * The only exception is for OIDs that return a variably sized response, * in which case no payload should be added. This undocumented (and * nonsensical!) issue was found by sniffing protocol requests from the * ActiveSync 4.1 Windows driver. 
*/ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, void *buf, u32 oid, u32 in_len, void **reply, int *reply_len) { int retval; union { void *buf; struct rndis_msg_hdr *header; struct rndis_query *get; struct rndis_query_c *get_c; } u; u32 off, len; u.buf = buf; memset(u.get, 0, sizeof *u.get + in_len); u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY); u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len); u.get->oid = cpu_to_le32(oid); u.get->len = cpu_to_le32(in_len); u.get->offset = cpu_to_le32(20); retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n", oid, retval); return retval; } off = le32_to_cpu(u.get_c->offset); len = le32_to_cpu(u.get_c->len); if (unlikely((off > CONTROL_BUFFER_SIZE - 8) || (len > CONTROL_BUFFER_SIZE - 8 - off))) goto response_error; if (*reply_len != -1 && len != *reply_len) goto response_error; *reply = (unsigned char *) &u.get_c->request_id + off; *reply_len = len; return retval; response_error: dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) " "invalid response - off %d len %d\n", oid, off, len); return -EDOM; } /* same as usbnet_netdev_ops but MTU change not allowed */ static const struct net_device_ops rndis_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; int generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) { int retval; struct net_device *net = dev->net; struct cdc_state *info = (void *) &dev->data; union { void *buf; struct rndis_msg_hdr *header; struct rndis_init *init; struct rndis_init_c *init_c; struct rndis_query *get; struct rndis_query_c *get_c; struct rndis_set *set; struct rndis_set_c *set_c; struct rndis_halt *halt; } u; u32 tmp; __le32 phym_unspec, *phym; int reply_len; unsigned char *bp; /* we can't rely on i/o from stack working, or stack allocation */ u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (!u.buf) return -ENOMEM; retval = usbnet_generic_cdc_bind(dev, intf); if (retval < 0) goto fail; u.init->msg_type = cpu_to_le32(RNDIS_MSG_INIT); u.init->msg_len = cpu_to_le32(sizeof *u.init); u.init->major_version = cpu_to_le32(1); u.init->minor_version = cpu_to_le32(0); /* max transfer (in spec) is 0x4000 at full speed, but for * TX we'll stick to one Ethernet packet plus RNDIS framing. * For RX we handle drivers that zero-pad to end-of-packet. * Don't let userspace change these settings. * * NOTE: there still seems to be weirdness here, as if we need * to do some more things to make sure WinCE targets accept this. * They default to jumbograms of 8KB or 16KB, which is absurd * for such low data rates and which is also more than Linux * can usually expect to allocate for SKB data... */ net->hard_header_len += sizeof (struct rndis_data_hdr); dev->hard_mtu = net->mtu + net->hard_header_len; dev->maxpacket = usb_maxpacket(dev->udev, dev->out); if (dev->maxpacket == 0) { netif_dbg(dev, probe, dev->net, "dev->maxpacket can't be 0\n"); retval = -EINVAL; goto fail_and_release; } dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1); dev->rx_urb_size &= ~(dev->maxpacket - 1); u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size); net->netdev_ops = &rndis_netdev_ops; retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { /* it might not even be an RNDIS device!! 
*/ dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); goto fail_and_release; } tmp = le32_to_cpu(u.init_c->max_transfer_size); if (tmp < dev->hard_mtu) { if (tmp <= net->hard_header_len) { dev_err(&intf->dev, "dev can't take %u byte packets (max %u)\n", dev->hard_mtu, tmp); retval = -EINVAL; goto halt_fail_and_release; } dev_warn(&intf->dev, "dev can't take %u byte packets (max %u), " "adjusting MTU to %u\n", dev->hard_mtu, tmp, tmp - net->hard_header_len); dev->hard_mtu = tmp; net->mtu = dev->hard_mtu - net->hard_header_len; } /* REVISIT: peripheral "alignment" request is ignored ... */ dev_dbg(&intf->dev, "hard mtu %u (%u from dev), rx buflen %zu, align %d\n", dev->hard_mtu, tmp, dev->rx_urb_size, 1 << le32_to_cpu(u.init_c->packet_alignment)); /* module has some device initialization code needs to be done right * after RNDIS_INIT */ if (dev->driver_info->early_init && dev->driver_info->early_init(dev) != 0) goto halt_fail_and_release; /* Check physical medium */ phym = NULL; reply_len = sizeof *phym; retval = rndis_query(dev, intf, u.buf, RNDIS_OID_GEN_PHYSICAL_MEDIUM, reply_len, (void **)&phym, &reply_len); if (retval != 0 || !phym) { /* OID is optional so don't fail here. */ phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED); phym = &phym_unspec; } if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && le32_to_cpup(phym) != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { netif_dbg(dev, probe, dev->net, "driver requires wireless physical medium, but device is not\n"); retval = -ENODEV; goto halt_fail_and_release; } if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && le32_to_cpup(phym) == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { netif_dbg(dev, probe, dev->net, "driver requires non-wireless physical medium, but device is wireless.\n"); retval = -ENODEV; goto halt_fail_and_release; } /* Get designated host ethernet address */ reply_len = ETH_ALEN; retval = rndis_query(dev, intf, u.buf, RNDIS_OID_802_3_PERMANENT_ADDRESS, 48, (void **) &bp, &reply_len); if (unlikely(retval< 0)) { dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); goto halt_fail_and_release; } eth_hw_addr_set(net, bp); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET); u.set->msg_len = cpu_to_le32(4 + sizeof *u.set); u.set->oid = cpu_to_le32(RNDIS_OID_GEN_CURRENT_PACKET_FILTER); u.set->len = cpu_to_le32(4); u.set->offset = cpu_to_le32((sizeof *u.set) - 8); *(__le32 *)(u.buf + sizeof *u.set) = cpu_to_le32(RNDIS_DEFAULT_FILTER); retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { dev_err(&intf->dev, "rndis set packet filter, %d\n", retval); goto halt_fail_and_release; } retval = 0; kfree(u.buf); return retval; halt_fail_and_release: memset(u.halt, 0, sizeof *u.halt); u.halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT); u.halt->msg_len = cpu_to_le32(sizeof *u.halt); (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); fail_and_release: usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver_of(intf), info->data); info->data = NULL; fail: kfree(u.buf); return retval; } EXPORT_SYMBOL_GPL(generic_rndis_bind); static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) { return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS); } static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf) { int status = rndis_bind(dev, intf); if (!status && (dev->net->dev_addr[0] & 0x02)) eth_hw_addr_random(dev->net); return status; } void rndis_unbind(struct usbnet *dev, struct usb_interface 
*intf) { struct rndis_halt *halt; /* try to clear any rndis state/activity (no i/o from stack!) */ halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (halt) { halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT); halt->msg_len = cpu_to_le32(sizeof *halt); (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE); kfree(halt); } usbnet_cdc_unbind(dev, intf); } EXPORT_SYMBOL_GPL(rndis_unbind); /* * DATA -- host must not write zlps */ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { bool dst_mac_fixup; /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) return 0; dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP); /* peripheral may have batched packets to us... */ while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; struct sk_buff *skb2; u32 msg_type, msg_len, data_offset, data_len; msg_type = le32_to_cpu(hdr->msg_type); msg_len = le32_to_cpu(hdr->msg_len); data_offset = le32_to_cpu(hdr->data_offset); data_len = le32_to_cpu(hdr->data_len); /* don't choke if we see oob, per-packet data, etc */ if (unlikely(msg_type != RNDIS_MSG_PACKET || skb->len < msg_len || (data_offset + data_len + 8) > msg_len)) { dev->net->stats.rx_frame_errors++; netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n", le32_to_cpu(hdr->msg_type), msg_len, data_offset, data_len, skb->len); return 0; } skb_pull(skb, 8 + data_offset); /* at most one packet left? */ if (likely((data_len - skb->len) <= sizeof *hdr)) { skb_trim(skb, data_len); break; } /* try to return all the packets in the batch */ skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) break; skb_pull(skb, msg_len - sizeof *hdr); skb_trim(skb2, data_len); if (unlikely(dst_mac_fixup)) usbnet_cdc_zte_rx_fixup(dev, skb2); usbnet_skb_return(dev, skb2); } /* caller will usbnet_skb_return the remaining packet */ if (unlikely(dst_mac_fixup)) usbnet_cdc_zte_rx_fixup(dev, skb); return 1; } EXPORT_SYMBOL_GPL(rndis_rx_fixup); struct sk_buff * rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct rndis_data_hdr *hdr; struct sk_buff *skb2; unsigned len = skb->len; if (likely(!skb_cloned(skb))) { int room = skb_headroom(skb); /* enough head room as-is? */ if (unlikely((sizeof *hdr) <= room)) goto fill; /* enough room, but needs to be readjusted? */ room += skb_tailroom(skb); if (likely((sizeof *hdr) <= room)) { skb->data = memmove(skb->head + sizeof *hdr, skb->data, len); skb_set_tail_pointer(skb, len); goto fill; } } /* create a new skb, with the correct size (and tailpad) */ skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags); dev_kfree_skb_any(skb); if (unlikely(!skb2)) return skb2; skb = skb2; /* fill out the RNDIS header. we won't bother trying to batch * packets; Linux minimizes wasted bandwidth through tx queues. */ fill: hdr = __skb_push(skb, sizeof *hdr); memset(hdr, 0, sizeof *hdr); hdr->msg_type = cpu_to_le32(RNDIS_MSG_PACKET); hdr->msg_len = cpu_to_le32(skb->len); hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8); hdr->data_len = cpu_to_le32(len); /* FIXME make the last packet always be short ... 
*/ return skb; } EXPORT_SYMBOL_GPL(rndis_tx_fixup); static const struct driver_info rndis_info = { .description = "RNDIS device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, .bind = rndis_bind, .unbind = rndis_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, }; static const struct driver_info rndis_poll_status_info = { .description = "RNDIS device (poll status before control)", .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, .data = RNDIS_DRIVER_DATA_POLL_STATUS, .bind = rndis_bind, .unbind = rndis_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, }; static const struct driver_info zte_rndis_info = { .description = "ZTE RNDIS device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP, .bind = zte_rndis_bind, .unbind = rndis_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, }; /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { { /* 2Wire HomePortal 1000SW */ USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042, USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_poll_status_info, }, { /* Hytera Communications DMR radios' "Radio to PC Network" */ USB_VENDOR_AND_INTERFACE_INFO(0x238b, USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long)&rndis_info, }, { /* ZTE WWAN modules */ USB_VENDOR_AND_INTERFACE_INFO(0x19d2, USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long)&zte_rndis_info, }, { /* ZTE WWAN modules, ACM flavour */ USB_VENDOR_AND_INTERFACE_INFO(0x19d2, USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long)&zte_rndis_info, }, { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_info, }, { /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), .driver_info = (unsigned long) &rndis_poll_status_info, }, { /* RNDIS for tethering */ USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { /* Novatel Verizon USB730L */ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), .driver_info = (unsigned long) &rndis_info, }, { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver rndis_driver = { .name = "rndis_host", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rndis_driver); MODULE_AUTHOR("David Brownell"); MODULE_DESCRIPTION("USB Host side RNDIS driver"); MODULE_LICENSE("GPL");
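/*
 * Editor's sketch (not part of the original file): a hedged example of how a
 * sub-driver layered on this module (rndis_wlan is the in-tree case) would
 * typically drive the exported rndis_command() RPC to set an OID: build a
 * byteswapped RNDIS_MSG_SET in a buffer large enough for the full
 * CONTROL_BUFFER_SIZE response and let rndis_command() match the completion
 * and check its status.  example_set_oid() and its parameters are
 * illustrative, not an existing API.
 */
static int example_set_oid(struct usbnet *dev, u32 oid, const void *data,
			   int len)
{
	union {
		void			*buf;
		struct rndis_msg_hdr	*header;
		struct rndis_set	*set;
	} u;
	int ret;

	if (len < 0 || len > CONTROL_BUFFER_SIZE - (int)sizeof(*u.set))
		return -EINVAL;

	u.buf = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (!u.buf)
		return -ENOMEM;

	u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
	u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len);
	u.set->oid = cpu_to_le32(oid);
	u.set->len = cpu_to_le32(len);
	/* payload follows the header; offset is relative to request_id */
	u.set->offset = cpu_to_le32(sizeof(*u.set) - 8);
	memcpy(u.buf + sizeof(*u.set), data, len);

	/* rndis_command() already verifies the completion status */
	ret = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);

	kfree(u.buf);
	return ret;
}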
5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 
6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 Address [auto]configuration
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 */

/*
 *	Changes:
 *
 *	Janos Farkas			:	delete timer on ifdown
 *	<chexum@bankinf.banki.hu>
 *	Andi Kleen			:	kill double kfree on module
 *						unload.
 *	Maciej W. Rozycki		:	FDDI support
 *	sekiya@USAGI			:	Don't send too many RS
 *						packets.
 *	yoshfuji@USAGI			:	Fixed interval between DAD
 *						packets.
* YOSHIFUJI Hideaki @USAGI : improved accuracy of * address validation timer. * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041) * support. * Yuji SEKIYA @USAGI : Don't assign a same IPv6 * address on a same interface. * YOSHIFUJI Hideaki @USAGI : ARCnet support * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to * seq_file. * YOSHIFUJI Hideaki @USAGI : improved source address * selection; consider scope, * status etc. */ #define pr_fmt(fmt) "IPv6: " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_addr.h> #include <linux/if_arp.h> #include <linux/if_arcnet.h> #include <linux/if_infiniband.h> #include <linux/route.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/capability.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/string.h> #include <linux/hash.h> #include <net/ip_tunnels.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/snmp.h> #include <net/6lowpan.h> #include <net/firewire.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/tcp.h> #include <net/ip.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/l3mdev.h> #include <linux/if_tunnel.h> #include <linux/rtnetlink.h> #include <linux/netconf.h> #include <linux/random.h> #include <linux/uaccess.h> #include <linux/unaligned.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/export.h> #include <linux/ioam6.h> #define IPV6_MAX_STRLEN \ sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255") static inline u32 cstamp_delta(unsigned long cstamp) { return (cstamp - INITIAL_JIFFIES) * 100UL / HZ; } static inline s32 rfc3315_s14_backoff_init(s32 irt) { /* multiply 'initial retransmission time' by 0.9 .. 1.1 */ u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt; do_div(tmp, 1000000); return (s32)tmp; } static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt) { /* multiply 'retransmission timeout' by 1.9 .. 2.1 */ u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt; do_div(tmp, 1000000); if ((s32)tmp > mrt) { /* multiply 'maximum retransmission time' by 0.9 .. 
1.1 */ tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt; do_div(tmp, 1000000); } return (s32)tmp; } #ifdef CONFIG_SYSCTL static int addrconf_sysctl_register(struct inet6_dev *idev); static void addrconf_sysctl_unregister(struct inet6_dev *idev); #else static inline int addrconf_sysctl_register(struct inet6_dev *idev) { return 0; } static inline void addrconf_sysctl_unregister(struct inet6_dev *idev) { } #endif static void ipv6_gen_rnd_iid(struct in6_addr *addr); static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); static int ipv6_count_addresses(const struct inet6_dev *idev); static int ipv6_generate_stable_address(struct in6_addr *addr, u8 dad_count, const struct inet6_dev *idev); #define IN6_ADDR_HSIZE_SHIFT 8 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT) static void addrconf_verify(struct net *net); static void addrconf_verify_rtnl(struct net *net); static struct workqueue_struct *addrconf_wq; static void addrconf_join_anycast(struct inet6_ifaddr *ifp); static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); static void addrconf_type_change(struct net_device *dev, unsigned long event); static int addrconf_ifdown(struct net_device *dev, bool unregister); static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, int plen, const struct net_device *dev, u32 flags, u32 noflags, bool no_gw); static void addrconf_dad_start(struct inet6_ifaddr *ifp); static void addrconf_dad_work(struct work_struct *w); static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, bool send_na); static void addrconf_dad_run(struct inet6_dev *idev, bool restart); static void addrconf_rs_timer(struct timer_list *t); static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); static void inet6_prefix_notify(int event, struct inet6_dev *idev, struct prefix_info *pinfo); static struct ipv6_devconf ipv6_devconf __read_mostly = { .forwarding = 0, .hop_limit = IPV6_DEFAULT_HOPLIMIT, .mtu6 = IPV6_MIN_MTU, .accept_ra = 1, .accept_redirects = 1, .autoconf = 1, .force_mld_version = 0, .mldv1_unsolicited_report_interval = 10 * HZ, .mldv2_unsolicited_report_interval = HZ, .dad_transmits = 1, .rtr_solicits = MAX_RTR_SOLICITATIONS, .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL, .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, .use_tempaddr = 0, .temp_valid_lft = TEMP_VALID_LIFETIME, .temp_prefered_lft = TEMP_PREFERRED_LIFETIME, .regen_min_advance = REGEN_MIN_ADVANCE, .regen_max_retry = REGEN_MAX_RETRY, .max_desync_factor = MAX_DESYNC_FACTOR, .max_addresses = IPV6_MAX_ADDRESSES, .accept_ra_defrtr = 1, .ra_defrtr_metric = IP6_RT_PRIO_USER, .accept_ra_from_local = 0, .accept_ra_min_hop_limit= 1, .accept_ra_min_lft = 0, .accept_ra_pinfo = 1, #ifdef CONFIG_IPV6_ROUTER_PREF .accept_ra_rtr_pref = 1, .rtr_probe_interval = 60 * HZ, #ifdef CONFIG_IPV6_ROUTE_INFO .accept_ra_rt_info_min_plen = 0, .accept_ra_rt_info_max_plen = 0, #endif #endif .proxy_ndp = 0, .accept_source_route = 0, /* we do not accept RH0 by default. 
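 * (Type 0 Routing Headers were deprecated by RFC 5095, which is why
 * accept_source_route defaults to 0.)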
*/ .disable_ipv6 = 0, .accept_dad = 0, .suppress_frag_ndisc = 1, .accept_ra_mtu = 1, .stable_secret = { .initialized = false, }, .use_oif_addrs_only = 0, .ignore_routes_with_linkdown = 0, .keep_addr_on_down = 0, .seg6_enabled = 0, #ifdef CONFIG_IPV6_SEG6_HMAC .seg6_require_hmac = 0, #endif .enhanced_dad = 1, .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, .disable_policy = 0, .rpl_seg_enabled = 0, .ioam6_enabled = 0, .ioam6_id = IOAM6_DEFAULT_IF_ID, .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, .ndisc_evict_nocarrier = 1, .ra_honor_pio_life = 0, .ra_honor_pio_pflag = 0, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .forwarding = 0, .hop_limit = IPV6_DEFAULT_HOPLIMIT, .mtu6 = IPV6_MIN_MTU, .accept_ra = 1, .accept_redirects = 1, .autoconf = 1, .force_mld_version = 0, .mldv1_unsolicited_report_interval = 10 * HZ, .mldv2_unsolicited_report_interval = HZ, .dad_transmits = 1, .rtr_solicits = MAX_RTR_SOLICITATIONS, .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL, .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, .use_tempaddr = 0, .temp_valid_lft = TEMP_VALID_LIFETIME, .temp_prefered_lft = TEMP_PREFERRED_LIFETIME, .regen_min_advance = REGEN_MIN_ADVANCE, .regen_max_retry = REGEN_MAX_RETRY, .max_desync_factor = MAX_DESYNC_FACTOR, .max_addresses = IPV6_MAX_ADDRESSES, .accept_ra_defrtr = 1, .ra_defrtr_metric = IP6_RT_PRIO_USER, .accept_ra_from_local = 0, .accept_ra_min_hop_limit= 1, .accept_ra_min_lft = 0, .accept_ra_pinfo = 1, #ifdef CONFIG_IPV6_ROUTER_PREF .accept_ra_rtr_pref = 1, .rtr_probe_interval = 60 * HZ, #ifdef CONFIG_IPV6_ROUTE_INFO .accept_ra_rt_info_min_plen = 0, .accept_ra_rt_info_max_plen = 0, #endif #endif .proxy_ndp = 0, .accept_source_route = 0, /* we do not accept RH0 by default. 
*/ .disable_ipv6 = 0, .accept_dad = 1, .suppress_frag_ndisc = 1, .accept_ra_mtu = 1, .stable_secret = { .initialized = false, }, .use_oif_addrs_only = 0, .ignore_routes_with_linkdown = 0, .keep_addr_on_down = 0, .seg6_enabled = 0, #ifdef CONFIG_IPV6_SEG6_HMAC .seg6_require_hmac = 0, #endif .enhanced_dad = 1, .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, .disable_policy = 0, .rpl_seg_enabled = 0, .ioam6_enabled = 0, .ioam6_id = IOAM6_DEFAULT_IF_ID, .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, .ndisc_evict_nocarrier = 1, .ra_honor_pio_life = 0, .ra_honor_pio_pflag = 0, }; /* Check if link is ready: is it up and is a valid qdisc available */ static inline bool addrconf_link_ready(const struct net_device *dev) { return netif_oper_up(dev) && !qdisc_tx_is_noop(dev); } static void addrconf_del_rs_timer(struct inet6_dev *idev) { if (del_timer(&idev->rs_timer)) __in6_dev_put(idev); } static void addrconf_del_dad_work(struct inet6_ifaddr *ifp) { if (cancel_delayed_work(&ifp->dad_work)) __in6_ifa_put(ifp); } static void addrconf_mod_rs_timer(struct inet6_dev *idev, unsigned long when) { if (!mod_timer(&idev->rs_timer, jiffies + when)) in6_dev_hold(idev); } static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, unsigned long delay) { in6_ifa_hold(ifp); if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) in6_ifa_put(ifp); } static int snmp6_alloc_dev(struct inet6_dev *idev) { int i; idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT); if (!idev->stats.ipv6) goto err_ip; for_each_possible_cpu(i) { struct ipstats_mib *addrconf_stats; addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i); u64_stats_init(&addrconf_stats->syncp); } idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device), GFP_KERNEL); if (!idev->stats.icmpv6dev) goto err_icmp; idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device), GFP_KERNEL_ACCOUNT); if (!idev->stats.icmpv6msgdev) goto err_icmpmsg; return 0; err_icmpmsg: kfree(idev->stats.icmpv6dev); err_icmp: free_percpu(idev->stats.ipv6); err_ip: return -ENOMEM; } static struct inet6_dev *ipv6_add_dev(struct net_device *dev) { struct inet6_dev *ndev; int err = -ENOMEM; ASSERT_RTNL(); if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev) return ERR_PTR(-EINVAL); ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT); if (!ndev) return ERR_PTR(err); rwlock_init(&ndev->lock); ndev->dev = dev; INIT_LIST_HEAD(&ndev->addr_list); timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0); memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); if (ndev->cnf.stable_secret.initialized) ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; ndev->cnf.mtu6 = dev->mtu; ndev->ra_mtu = 0; ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); if (!ndev->nd_parms) { kfree(ndev); return ERR_PTR(err); } if (ndev->cnf.forwarding) dev_disable_lro(dev); /* We refer to the device */ netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL); if (snmp6_alloc_dev(ndev) < 0) { netdev_dbg(dev, "%s: cannot allocate memory for statistics\n", __func__); neigh_parms_release(&nd_tbl, ndev->nd_parms); netdev_put(dev, &ndev->dev_tracker); kfree(ndev); return ERR_PTR(err); } if (dev != blackhole_netdev) { if (snmp6_register_dev(ndev) < 0) { netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n", __func__, dev->name); goto err_release; } } /* One reference from device. 
*/ refcount_set(&ndev->refcnt, 1); if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) ndev->cnf.accept_dad = -1; #if IS_ENABLED(CONFIG_IPV6_SIT) if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { pr_info("%s: Disabled Multicast RS\n", dev->name); ndev->cnf.rtr_solicits = 0; } #endif INIT_LIST_HEAD(&ndev->tempaddr_list); ndev->desync_factor = U32_MAX; if ((dev->flags&IFF_LOOPBACK) || dev->type == ARPHRD_TUNNEL || dev->type == ARPHRD_TUNNEL6 || dev->type == ARPHRD_SIT || dev->type == ARPHRD_NONE) { ndev->cnf.use_tempaddr = -1; } ndev->token = in6addr_any; if (netif_running(dev) && addrconf_link_ready(dev)) ndev->if_flags |= IF_READY; ipv6_mc_init_dev(ndev); ndev->tstamp = jiffies; if (dev != blackhole_netdev) { err = addrconf_sysctl_register(ndev); if (err) { ipv6_mc_destroy_dev(ndev); snmp6_unregister_dev(ndev); goto err_release; } } /* protected by rtnl_lock */ rcu_assign_pointer(dev->ip6_ptr, ndev); if (dev != blackhole_netdev) { /* Join interface-local all-node multicast group */ ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes); /* Join all-node multicast group */ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); /* Join all-router multicast group if forwarding is set */ if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST)) ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); } return ndev; err_release: neigh_parms_release(&nd_tbl, ndev->nd_parms); ndev->dead = 1; in6_dev_finish_destroy(ndev); return ERR_PTR(err); } static struct inet6_dev *ipv6_find_idev(struct net_device *dev) { struct inet6_dev *idev; ASSERT_RTNL(); idev = __in6_dev_get(dev); if (!idev) { idev = ipv6_add_dev(dev); if (IS_ERR(idev)) return idev; } if (dev->flags&IFF_UP) ipv6_mc_up(idev); return idev; } static int inet6_netconf_msgsize_devconf(int type) { int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) + nla_total_size(4); /* NETCONFA_IFINDEX */ bool all = false; if (type == NETCONFA_ALL) all = true; if (all || type == NETCONFA_FORWARDING) size += nla_total_size(4); #ifdef CONFIG_IPV6_MROUTE if (all || type == NETCONFA_MC_FORWARDING) size += nla_total_size(4); #endif if (all || type == NETCONFA_PROXY_NEIGH) size += nla_total_size(4); if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) size += nla_total_size(4); return size; } static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, struct ipv6_devconf *devconf, u32 portid, u32 seq, int event, unsigned int flags, int type) { struct nlmsghdr *nlh; struct netconfmsg *ncm; bool all = false; nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), flags); if (!nlh) return -EMSGSIZE; if (type == NETCONFA_ALL) all = true; ncm = nlmsg_data(nlh); ncm->ncm_family = AF_INET6; if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0) goto nla_put_failure; if (!devconf) goto out; if ((all || type == NETCONFA_FORWARDING) && nla_put_s32(skb, NETCONFA_FORWARDING, READ_ONCE(devconf->forwarding)) < 0) goto nla_put_failure; #ifdef CONFIG_IPV6_MROUTE if ((all || type == NETCONFA_MC_FORWARDING) && nla_put_s32(skb, NETCONFA_MC_FORWARDING, atomic_read(&devconf->mc_forwarding)) < 0) goto nla_put_failure; #endif if ((all || type == NETCONFA_PROXY_NEIGH) && nla_put_s32(skb, NETCONFA_PROXY_NEIGH, READ_ONCE(devconf->proxy_ndp)) < 0) goto nla_put_failure; if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0) goto nla_put_failure; out: nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } void 
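/* inet6_netconf_notify_devconf() below allocates an rtnetlink message for a
 * single netconf attribute change and broadcasts it to RTNLGRP_IPV6_NETCONF
 * listeners; any allocation or fill error is reported via rtnl_set_sk_err(). */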
inet6_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv6_devconf *devconf) { struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL); if (!skb) goto errout; err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, event, 0, type); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err); } static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { [NETCONFA_IFINDEX] = { .len = sizeof(int) }, [NETCONFA_FORWARDING] = { .len = sizeof(int) }, [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) }, }; static int inet6_netconf_valid_get_req(struct sk_buff *skb, const struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) { int i, err; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request"); return -EINVAL; } if (!netlink_strict_get_check(skb)) return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg), tb, NETCONFA_MAX, devconf_ipv6_policy, extack); err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg), tb, NETCONFA_MAX, devconf_ipv6_policy, extack); if (err) return err; for (i = 0; i <= NETCONFA_MAX; i++) { if (!tb[i]) continue; switch (i) { case NETCONFA_IFINDEX: break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request"); return -EINVAL; } } return 0; } static int inet6_netconf_get_devconf(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX+1]; struct inet6_dev *in6_dev = NULL; struct net_device *dev = NULL; struct sk_buff *skb; struct ipv6_devconf *devconf; int ifindex; int err; err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack); if (err < 0) return err; if (!tb[NETCONFA_IFINDEX]) return -EINVAL; err = -EINVAL; ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); switch (ifindex) { case NETCONFA_IFINDEX_ALL: devconf = net->ipv6.devconf_all; break; case NETCONFA_IFINDEX_DEFAULT: devconf = net->ipv6.devconf_dflt; break; default: dev = dev_get_by_index(net, ifindex); if (!dev) return -EINVAL; in6_dev = in6_dev_get(dev); if (!in6_dev) goto errout; devconf = &in6_dev->cnf; break; } err = -ENOBUFS; skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL); if (!skb) goto errout; err = inet6_netconf_fill_devconf(skb, ifindex, devconf, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, 0, NETCONFA_ALL); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: if (in6_dev) in6_dev_put(in6_dev); dev_put(dev); return err; } /* Combine dev_addr_genid and dev_base_seq to detect changes. */ static u32 inet6_base_seq(const struct net *net) { u32 res = atomic_read(&net->ipv6.dev_addr_genid) + READ_ONCE(net->dev_base_seq); /* Must not return 0 (see nl_dump_check_consistent()). * Chose a value far away from 0. 
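 * (If the sum happens to be 0, it is remapped to 0x80000000 below so that
 * netlink dump consistency checks remain meaningful.)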
*/ if (!res) res = 0x80000000; return res; } static int inet6_netconf_dump_devconf(struct sk_buff *skb, struct netlink_callback *cb) { const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct { unsigned long ifindex; unsigned int all_default; } *ctx = (void *)cb->ctx; struct net_device *dev; struct inet6_dev *idev; int err = 0; if (cb->strict_check) { struct netlink_ext_ack *extack = cb->extack; struct netconfmsg *ncm; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request"); return -EINVAL; } if (nlmsg_attrlen(nlh, sizeof(*ncm))) { NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request"); return -EINVAL; } } rcu_read_lock(); for_each_netdev_dump(net, dev, ctx->ifindex) { idev = __in6_dev_get(dev); if (!idev) continue; err = inet6_netconf_fill_devconf(skb, dev->ifindex, &idev->cnf, NETLINK_CB(cb->skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL); if (err < 0) goto done; } if (ctx->all_default == 0) { err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all, NETLINK_CB(cb->skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL); if (err < 0) goto done; ctx->all_default++; } if (ctx->all_default == 1) { err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt, NETLINK_CB(cb->skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL); if (err < 0) goto done; ctx->all_default++; } done: rcu_read_unlock(); return err; } #ifdef CONFIG_SYSCTL static void dev_forward_change(struct inet6_dev *idev) { struct net_device *dev; struct inet6_ifaddr *ifa; LIST_HEAD(tmp_addr_list); if (!idev) return; dev = idev->dev; if (idev->cnf.forwarding) dev_disable_lro(dev); if (dev->flags & IFF_MULTICAST) { if (idev->cnf.forwarding) { ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters); ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters); } else { ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters); ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters); } } read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { if (ifa->flags&IFA_F_TENTATIVE) continue; list_add_tail(&ifa->if_list_aux, &tmp_addr_list); } read_unlock_bh(&idev->lock); while (!list_empty(&tmp_addr_list)) { ifa = list_first_entry(&tmp_addr_list, struct inet6_ifaddr, if_list_aux); list_del(&ifa->if_list_aux); if (idev->cnf.forwarding) addrconf_join_anycast(ifa); else addrconf_leave_anycast(ifa); } inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_FORWARDING, dev->ifindex, &idev->cnf); } static void addrconf_forward_change(struct net *net, __s32 newf) { struct net_device *dev; struct inet6_dev *idev; for_each_netdev(net, dev) { idev = __in6_dev_get_rtnl_net(dev); if (idev) { int changed = (!idev->cnf.forwarding) ^ (!newf); WRITE_ONCE(idev->cnf.forwarding, newf); if (changed) dev_forward_change(idev); } } } static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf) { struct net *net = (struct net *)table->extra2; int old; if (!rtnl_net_trylock(net)) return restart_syscall(); old = *p; WRITE_ONCE(*p, newf); if (p == &net->ipv6.devconf_dflt->forwarding) { if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); rtnl_net_unlock(net); return 0; } if (p == 
&net->ipv6.devconf_all->forwarding) { int old_dflt = net->ipv6.devconf_dflt->forwarding; WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf); if ((!newf) ^ (!old_dflt)) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); addrconf_forward_change(net, newf); if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); } else if ((!newf) ^ (!old)) dev_forward_change((struct inet6_dev *)table->extra1); rtnl_net_unlock(net); if (newf) rt6_purge_dflt_routers(net); return 1; } static void addrconf_linkdown_change(struct net *net, __s32 newf) { struct net_device *dev; struct inet6_dev *idev; for_each_netdev(net, dev) { idev = __in6_dev_get_rtnl_net(dev); if (idev) { int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf); WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf); if (changed) inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, dev->ifindex, &idev->cnf); } } } static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf) { struct net *net = (struct net *)table->extra2; int old; if (!rtnl_net_trylock(net)) return restart_syscall(); old = *p; WRITE_ONCE(*p, newf); if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) { if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); rtnl_net_unlock(net); return 0; } if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) { WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf); addrconf_linkdown_change(net, newf); if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); } rtnl_net_unlock(net); return 1; } #endif /* Nobody refers to this ifaddr, destroy it */ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) { WARN_ON(!hlist_unhashed(&ifp->addr_lst)); #ifdef NET_REFCNT_DEBUG pr_debug("%s\n", __func__); #endif in6_dev_put(ifp->idev); if (cancel_delayed_work(&ifp->dad_work)) pr_notice("delayed DAD work was pending while freeing ifa=%p\n", ifp); if (ifp->state != INET6_IFADDR_STATE_DEAD) { pr_warn("Freeing alive inet6 address %p\n", ifp); return; } kfree_rcu(ifp, rcu); } static void ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) { struct list_head *p; int ifp_scope = ipv6_addr_src_scope(&ifp->addr); /* * Each device address list is sorted in order of scope - * global before linklocal. 
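 * A new address is therefore linked in just before the first existing entry
 * whose source scope is not larger than its own.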
*/ list_for_each(p, &idev->addr_list) { struct inet6_ifaddr *ifa = list_entry(p, struct inet6_ifaddr, if_list); if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) break; } list_add_tail_rcu(&ifp->if_list, p); } static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr) { u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net)); return hash_32(val, IN6_ADDR_HSIZE_SHIFT); } static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, struct net_device *dev, unsigned int hash) { struct inet6_ifaddr *ifp; hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { if (ipv6_addr_equal(&ifp->addr, addr)) { if (!dev || ifp->idev->dev == dev) return true; } } return false; } static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa) { struct net *net = dev_net(dev); unsigned int hash = inet6_addr_hash(net, &ifa->addr); int err = 0; spin_lock_bh(&net->ipv6.addrconf_hash_lock); /* Ignore adding duplicate addresses on an interface */ if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) { netdev_dbg(dev, "ipv6_add_addr: already assigned\n"); err = -EEXIST; } else { hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]); } spin_unlock_bh(&net->ipv6.addrconf_hash_lock); return err; } /* On success it returns ifp with increased reference count */ static struct inet6_ifaddr * ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, bool can_block, struct netlink_ext_ack *extack) { gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC; int addr_type = ipv6_addr_type(cfg->pfx); struct net *net = dev_net(idev->dev); struct inet6_ifaddr *ifa = NULL; struct fib6_info *f6i = NULL; int err = 0; if (addr_type == IPV6_ADDR_ANY) { NL_SET_ERR_MSG_MOD(extack, "Invalid address"); return ERR_PTR(-EADDRNOTAVAIL); } else if (addr_type & IPV6_ADDR_MULTICAST && !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) { NL_SET_ERR_MSG_MOD(extack, "Cannot assign multicast address without \"IFA_F_MCAUTOJOIN\" flag"); return ERR_PTR(-EADDRNOTAVAIL); } else if (!(idev->dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(idev->dev) && addr_type & IPV6_ADDR_LOOPBACK) { NL_SET_ERR_MSG_MOD(extack, "Cannot assign loopback address on this device"); return ERR_PTR(-EADDRNOTAVAIL); } if (idev->dead) { NL_SET_ERR_MSG_MOD(extack, "device is going away"); err = -ENODEV; goto out; } if (idev->cnf.disable_ipv6) { NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device"); err = -EACCES; goto out; } /* validator notifier needs to be blocking; * do not call in atomic context */ if (can_block) { struct in6_validator_info i6vi = { .i6vi_addr = *cfg->pfx, .i6vi_dev = idev, .extack = extack, }; err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi); err = notifier_to_errno(err); if (err < 0) goto out; } ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT); if (!ifa) { err = -ENOBUFS; goto out; } f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags, extack); if (IS_ERR(f6i)) { err = PTR_ERR(f6i); f6i = NULL; goto out; } neigh_parms_data_state_setall(idev->nd_parms); ifa->addr = *cfg->pfx; if (cfg->peer_pfx) ifa->peer_addr = *cfg->peer_pfx; spin_lock_init(&ifa->lock); INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work); INIT_HLIST_NODE(&ifa->addr_lst); ifa->scope = cfg->scope; ifa->prefix_len = cfg->plen; ifa->rt_priority = cfg->rt_priority; ifa->flags = cfg->ifa_flags; ifa->ifa_proto = cfg->ifa_proto; /* No need to add the TENTATIVE flag for addresses with NODAD */ if (!(cfg->ifa_flags & IFA_F_NODAD)) ifa->flags |= IFA_F_TENTATIVE; ifa->valid_lft = cfg->valid_lft; 
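/* The preferred lifetime is likewise taken from the caller-supplied
 * ifa6_config, and the creation/update timestamps start at the current
 * jiffies. */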
ifa->prefered_lft = cfg->preferred_lft; ifa->cstamp = ifa->tstamp = jiffies; ifa->tokenized = false; ifa->rt = f6i; ifa->idev = idev; in6_dev_hold(idev); /* For caller */ refcount_set(&ifa->refcnt, 1); rcu_read_lock(); err = ipv6_add_addr_hash(idev->dev, ifa); if (err < 0) { rcu_read_unlock(); goto out; } write_lock_bh(&idev->lock); /* Add to inet6_dev unicast addr list. */ ipv6_link_dev_addr(idev, ifa); if (ifa->flags&IFA_F_TEMPORARY) { list_add(&ifa->tmp_list, &idev->tempaddr_list); in6_ifa_hold(ifa); } in6_ifa_hold(ifa); write_unlock_bh(&idev->lock); rcu_read_unlock(); inet6addr_notifier_call_chain(NETDEV_UP, ifa); out: if (unlikely(err < 0)) { fib6_info_release(f6i); if (ifa) { if (ifa->idev) in6_dev_put(ifa->idev); kfree(ifa); } ifa = ERR_PTR(err); } return ifa; } enum cleanup_prefix_rt_t { CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */ CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */ CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */ }; /* * Check, whether the prefix for ifp would still need a prefix route * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_* * constants. * * 1) we don't purge prefix if address was not permanent. * prefix is managed by its own lifetime. * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE. * 3) if there are no addresses, delete prefix. * 4) if there are still other permanent address(es), * corresponding prefix is still permanent. * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE, * don't purge the prefix, assume user space is managing it. * 6) otherwise, update prefix lifetime to the * longest valid lifetime among the corresponding * addresses on the device. * Note: subsequent RA will update lifetime. **/ static enum cleanup_prefix_rt_t check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) { struct inet6_ifaddr *ifa; struct inet6_dev *idev = ifp->idev; unsigned long lifetime; enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL; *expires = jiffies; list_for_each_entry(ifa, &idev->addr_list, if_list) { if (ifa == ifp) continue; if (ifa->prefix_len != ifp->prefix_len || !ipv6_prefix_equal(&ifa->addr, &ifp->addr, ifp->prefix_len)) continue; if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) return CLEANUP_PREFIX_RT_NOP; action = CLEANUP_PREFIX_RT_EXPIRE; spin_lock(&ifa->lock); lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ); /* * Note: Because this address is * not permanent, lifetime < * LONG_MAX / HZ here. */ if (time_before(*expires, ifa->tstamp + lifetime * HZ)) *expires = ifa->tstamp + lifetime * HZ; spin_unlock(&ifa->lock); } return action; } static void cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt, bool del_peer) { struct fib6_table *table; struct fib6_info *f6i; f6i = addrconf_get_prefix_route(del_peer ? 
&ifp->peer_addr : &ifp->addr, ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (f6i) { if (del_rt) ip6_del_rt(dev_net(ifp->idev->dev), f6i, false); else { if (!(f6i->fib6_flags & RTF_EXPIRES)) { table = f6i->fib6_table; spin_lock_bh(&table->tb6_lock); fib6_set_expires(f6i, expires); fib6_add_gc_list(f6i); spin_unlock_bh(&table->tb6_lock); } fib6_info_release(f6i); } } } /* This function wants to get referenced ifp and releases it before return */ static void ipv6_del_addr(struct inet6_ifaddr *ifp) { enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; struct net *net = dev_net(ifp->idev->dev); unsigned long expires; int state; ASSERT_RTNL(); spin_lock_bh(&ifp->lock); state = ifp->state; ifp->state = INET6_IFADDR_STATE_DEAD; spin_unlock_bh(&ifp->lock); if (state == INET6_IFADDR_STATE_DEAD) goto out; spin_lock_bh(&net->ipv6.addrconf_hash_lock); hlist_del_init_rcu(&ifp->addr_lst); spin_unlock_bh(&net->ipv6.addrconf_hash_lock); write_lock_bh(&ifp->idev->lock); if (ifp->flags&IFA_F_TEMPORARY) { list_del(&ifp->tmp_list); if (ifp->ifpub) { in6_ifa_put(ifp->ifpub); ifp->ifpub = NULL; } __in6_ifa_put(ifp); } if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE)) action = check_cleanup_prefix_route(ifp, &expires); list_del_rcu(&ifp->if_list); __in6_ifa_put(ifp); write_unlock_bh(&ifp->idev->lock); addrconf_del_dad_work(ifp); ipv6_ifa_notify(RTM_DELADDR, ifp); inet6addr_notifier_call_chain(NETDEV_DOWN, ifp); if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, expires, action == CLEANUP_PREFIX_RT_DEL, false); } /* clean up prefsrc entries */ rt6_remove_prefsrc(ifp); out: in6_ifa_put(ifp); } static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev) { return READ_ONCE(idev->cnf.regen_min_advance) + READ_ONCE(idev->cnf.regen_max_retry) * READ_ONCE(idev->cnf.dad_transmits) * max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ; } static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block) { struct inet6_dev *idev = ifp->idev; unsigned long tmp_tstamp, age; unsigned long regen_advance; unsigned long now = jiffies; u32 if_public_preferred_lft; s32 cnf_temp_preferred_lft; struct inet6_ifaddr *ift; struct ifa6_config cfg; long max_desync_factor; struct in6_addr addr; int ret = 0; write_lock_bh(&idev->lock); retry: in6_dev_hold(idev); if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) { write_unlock_bh(&idev->lock); pr_info("%s: use_tempaddr is disabled\n", __func__); in6_dev_put(idev); ret = -1; goto out; } spin_lock_bh(&ifp->lock); if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) { WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/ spin_unlock_bh(&ifp->lock); write_unlock_bh(&idev->lock); pr_warn("%s: regeneration time exceeded - disabled temporary address support\n", __func__); in6_dev_put(idev); ret = -1; goto out; } in6_ifa_hold(ifp); memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); ipv6_gen_rnd_iid(&addr); age = (now - ifp->tstamp) / HZ; regen_advance = ipv6_get_regen_advance(idev); /* recalculate max_desync_factor each time and update * idev->desync_factor if it's larger */ cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft); max_desync_factor = min_t(long, READ_ONCE(idev->cnf.max_desync_factor), cnf_temp_preferred_lft - regen_advance); if (unlikely(idev->desync_factor > max_desync_factor)) { if (max_desync_factor > 0) { get_random_bytes(&idev->desync_factor, sizeof(idev->desync_factor)); idev->desync_factor %= max_desync_factor; } else { idev->desync_factor = 0; } } if_public_preferred_lft = ifp->prefered_lft; 
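/* Build the configuration for the new temporary address: its valid and
 * preferred lifetimes are clamped against both the public address' lifetimes
 * and the temp_valid_lft/temp_prefered_lft settings (less the per-device
 * desync factor). */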
memset(&cfg, 0, sizeof(cfg)); cfg.valid_lft = min_t(__u32, ifp->valid_lft, READ_ONCE(idev->cnf.temp_valid_lft) + age); cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor; cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft); cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft); cfg.plen = ifp->prefix_len; tmp_tstamp = ifp->tstamp; spin_unlock_bh(&ifp->lock); write_unlock_bh(&idev->lock); /* From RFC 4941: * * A temporary address is created only if this calculated Preferred * Lifetime is greater than REGEN_ADVANCE time units. In * particular, an implementation must not create a temporary address * with a zero Preferred Lifetime. * * ... * * When creating a temporary address, the lifetime values MUST be * derived from the corresponding prefix as follows: * * ... * * * Its Preferred Lifetime is the lower of the Preferred Lifetime * of the public address or TEMP_PREFERRED_LIFETIME - * DESYNC_FACTOR. * * To comply with the RFC's requirements, clamp the preferred lifetime * to a minimum of regen_advance, unless that would exceed valid_lft or * ifp->prefered_lft. * * Use age calculation as in addrconf_verify to avoid unnecessary * temporary addresses being generated. */ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; if (cfg.preferred_lft <= regen_advance + age) { cfg.preferred_lft = regen_advance + age + 1; if (cfg.preferred_lft > cfg.valid_lft || cfg.preferred_lft > if_public_preferred_lft) { in6_ifa_put(ifp); in6_dev_put(idev); ret = -1; goto out; } } cfg.ifa_flags = IFA_F_TEMPORARY; /* set in addrconf_prefix_rcv() */ if (ifp->flags & IFA_F_OPTIMISTIC) cfg.ifa_flags |= IFA_F_OPTIMISTIC; cfg.pfx = &addr; cfg.scope = ipv6_addr_scope(cfg.pfx); ift = ipv6_add_addr(idev, &cfg, block, NULL); if (IS_ERR(ift)) { in6_ifa_put(ifp); in6_dev_put(idev); pr_info("%s: retry temporary address regeneration\n", __func__); write_lock_bh(&idev->lock); goto retry; } spin_lock_bh(&ift->lock); ift->ifpub = ifp; ift->cstamp = now; ift->tstamp = tmp_tstamp; spin_unlock_bh(&ift->lock); addrconf_dad_start(ift); in6_ifa_put(ift); in6_dev_put(idev); out: return ret; } /* * Choose an appropriate source address (RFC3484) */ enum { IPV6_SADDR_RULE_INIT = 0, IPV6_SADDR_RULE_LOCAL, IPV6_SADDR_RULE_SCOPE, IPV6_SADDR_RULE_PREFERRED, #ifdef CONFIG_IPV6_MIP6 IPV6_SADDR_RULE_HOA, #endif IPV6_SADDR_RULE_OIF, IPV6_SADDR_RULE_LABEL, IPV6_SADDR_RULE_PRIVACY, IPV6_SADDR_RULE_ORCHID, IPV6_SADDR_RULE_PREFIX, #ifdef CONFIG_IPV6_OPTIMISTIC_DAD IPV6_SADDR_RULE_NOT_OPTIMISTIC, #endif IPV6_SADDR_RULE_MAX }; struct ipv6_saddr_score { int rule; int addr_type; struct inet6_ifaddr *ifa; DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX); int scopedist; int matchlen; }; struct ipv6_saddr_dst { const struct in6_addr *addr; int ifindex; int scope; int label; unsigned int prefs; }; static inline int ipv6_saddr_preferred(int type) { if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK)) return 1; return 0; } static bool ipv6_use_optimistic_addr(const struct net *net, const struct inet6_dev *idev) { #ifdef CONFIG_IPV6_OPTIMISTIC_DAD if (!idev) return false; if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) && !READ_ONCE(idev->cnf.optimistic_dad)) return false; if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) && !READ_ONCE(idev->cnf.use_optimistic)) return false; return true; #else return false; #endif } static bool ipv6_allow_optimistic_dad(const struct net *net, const struct inet6_dev *idev) { #ifdef CONFIG_IPV6_OPTIMISTIC_DAD if (!idev) return false; if 
(!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) && !READ_ONCE(idev->cnf.optimistic_dad)) return false; return true; #else return false; #endif } static int ipv6_get_saddr_eval(struct net *net, struct ipv6_saddr_score *score, struct ipv6_saddr_dst *dst, int i) { int ret; if (i <= score->rule) { switch (i) { case IPV6_SADDR_RULE_SCOPE: ret = score->scopedist; break; case IPV6_SADDR_RULE_PREFIX: ret = score->matchlen; break; default: ret = !!test_bit(i, score->scorebits); } goto out; } switch (i) { case IPV6_SADDR_RULE_INIT: /* Rule 0: remember if hiscore is not ready yet */ ret = !!score->ifa; break; case IPV6_SADDR_RULE_LOCAL: /* Rule 1: Prefer same address */ ret = ipv6_addr_equal(&score->ifa->addr, dst->addr); break; case IPV6_SADDR_RULE_SCOPE: /* Rule 2: Prefer appropriate scope * * ret * ^ * -1 | d 15 * ---+--+-+---> scope * | * | d is scope of the destination. * B-d | \ * | \ <- smaller scope is better if * B-15 | \ if scope is enough for destination. * | ret = B - scope (-1 <= scope >= d <= 15). * d-C-1 | / * |/ <- greater is better * -C / if scope is not enough for destination. * /| ret = scope - C (-1 <= d < scope <= 15). * * d - C - 1 < B -15 (for all -1 <= d <= 15). * C > d + 14 - B >= 15 + 14 - B = 29 - B. * Assume B = 0 and we get C > 29. */ ret = __ipv6_addr_src_scope(score->addr_type); if (ret >= dst->scope) ret = -ret; else ret -= 128; /* 30 is enough */ score->scopedist = ret; break; case IPV6_SADDR_RULE_PREFERRED: { /* Rule 3: Avoid deprecated and optimistic addresses */ u8 avoid = IFA_F_DEPRECATED; if (!ipv6_use_optimistic_addr(net, score->ifa->idev)) avoid |= IFA_F_OPTIMISTIC; ret = ipv6_saddr_preferred(score->addr_type) || !(score->ifa->flags & avoid); break; } #ifdef CONFIG_IPV6_MIP6 case IPV6_SADDR_RULE_HOA: { /* Rule 4: Prefer home address */ int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA); ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome; break; } #endif case IPV6_SADDR_RULE_OIF: /* Rule 5: Prefer outgoing interface */ ret = (!dst->ifindex || dst->ifindex == score->ifa->idev->dev->ifindex); break; case IPV6_SADDR_RULE_LABEL: /* Rule 6: Prefer matching label */ ret = ipv6_addr_label(net, &score->ifa->addr, score->addr_type, score->ifa->idev->dev->ifindex) == dst->label; break; case IPV6_SADDR_RULE_PRIVACY: { /* Rule 7: Prefer public address * Note: prefer temporary address if use_tempaddr >= 2 */ int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ? !!(dst->prefs & IPV6_PREFER_SRC_TMP) : READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2; ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp; break; } case IPV6_SADDR_RULE_ORCHID: /* Rule 8-: Prefer ORCHID vs ORCHID or * non-ORCHID vs non-ORCHID */ ret = !(ipv6_addr_orchid(&score->ifa->addr) ^ ipv6_addr_orchid(dst->addr)); break; case IPV6_SADDR_RULE_PREFIX: /* Rule 8: Use longest matching prefix */ ret = ipv6_addr_diff(&score->ifa->addr, dst->addr); if (ret > score->ifa->prefix_len) ret = score->ifa->prefix_len; score->matchlen = ret; break; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD case IPV6_SADDR_RULE_NOT_OPTIMISTIC: /* Optimistic addresses still have lower precedence than other * preferred addresses. 
*/ ret = !(score->ifa->flags & IFA_F_OPTIMISTIC); break; #endif default: ret = 0; } if (ret) __set_bit(i, score->scorebits); score->rule = i; out: return ret; } static int __ipv6_dev_get_saddr(struct net *net, struct ipv6_saddr_dst *dst, struct inet6_dev *idev, struct ipv6_saddr_score *scores, int hiscore_idx) { struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx]; list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) { int i; /* * - Tentative Address (RFC2462 section 5.4) * - A tentative address is not considered * "assigned to an interface" in the traditional * sense, unless it is also flagged as optimistic. * - Candidate Source Address (section 4) * - In any case, anycast addresses, multicast * addresses, and the unspecified address MUST * NOT be included in a candidate set. */ if ((score->ifa->flags & IFA_F_TENTATIVE) && (!(score->ifa->flags & IFA_F_OPTIMISTIC))) continue; score->addr_type = __ipv6_addr_type(&score->ifa->addr); if (unlikely(score->addr_type == IPV6_ADDR_ANY || score->addr_type & IPV6_ADDR_MULTICAST)) { net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s", idev->dev->name); continue; } score->rule = -1; bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX); for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { int minihiscore, miniscore; minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i); miniscore = ipv6_get_saddr_eval(net, score, dst, i); if (minihiscore > miniscore) { if (i == IPV6_SADDR_RULE_SCOPE && score->scopedist > 0) { /* * special case: * each remaining entry * has too small (not enough) * scope, because ifa entries * are sorted by their scope * values. */ goto out; } break; } else if (minihiscore < miniscore) { swap(hiscore, score); hiscore_idx = 1 - hiscore_idx; /* restore our iterator */ score->ifa = hiscore->ifa; break; } } } out: return hiscore_idx; } static int ipv6_get_saddr_master(struct net *net, const struct net_device *dst_dev, const struct net_device *master, struct ipv6_saddr_dst *dst, struct ipv6_saddr_score *scores, int hiscore_idx) { struct inet6_dev *idev; idev = __in6_dev_get(dst_dev); if (idev) hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev, scores, hiscore_idx); idev = __in6_dev_get(master); if (idev) hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev, scores, hiscore_idx); return hiscore_idx; } int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, const struct in6_addr *daddr, unsigned int prefs, struct in6_addr *saddr) { struct ipv6_saddr_score scores[2], *hiscore; struct ipv6_saddr_dst dst; struct inet6_dev *idev; struct net_device *dev; int dst_type; bool use_oif_addr = false; int hiscore_idx = 0; int ret = 0; dst_type = __ipv6_addr_type(daddr); dst.addr = daddr; dst.ifindex = dst_dev ? dst_dev->ifindex : 0; dst.scope = __ipv6_addr_src_scope(dst_type); dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex); dst.prefs = prefs; scores[hiscore_idx].rule = -1; scores[hiscore_idx].ifa = NULL; rcu_read_lock(); /* Candidate Source Address (section 4) * - multicast and link-local destination address, * the set of candidate source address MUST only * include addresses assigned to interfaces * belonging to the same link as the outgoing * interface. * (- For site-local destination addresses, the * set of candidate source addresses MUST only * include addresses assigned to interfaces * belonging to the same site as the outgoing * interface.) 
* - "It is RECOMMENDED that the candidate source addresses * be the set of unicast addresses assigned to the * interface that will be used to send to the destination * (the 'outgoing' interface)." (RFC 6724) */ if (dst_dev) { idev = __in6_dev_get(dst_dev); if ((dst_type & IPV6_ADDR_MULTICAST) || dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL || (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) { use_oif_addr = true; } } if (use_oif_addr) { if (idev) hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx); } else { const struct net_device *master; int master_idx = 0; /* if dst_dev exists and is enslaved to an L3 device, then * prefer addresses from dst_dev and then the master over * any other enslaved devices in the L3 domain. */ master = l3mdev_master_dev_rcu(dst_dev); if (master) { master_idx = master->ifindex; hiscore_idx = ipv6_get_saddr_master(net, dst_dev, master, &dst, scores, hiscore_idx); if (scores[hiscore_idx].ifa && scores[hiscore_idx].scopedist >= 0) goto out; } for_each_netdev_rcu(net, dev) { /* only consider addresses on devices in the * same L3 domain */ if (l3mdev_master_ifindex_rcu(dev) != master_idx) continue; idev = __in6_dev_get(dev); if (!idev) continue; hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx); } } out: hiscore = &scores[hiscore_idx]; if (!hiscore->ifa) ret = -EADDRNOTAVAIL; else *saddr = hiscore->ifa->addr; rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ipv6_dev_get_saddr); static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, u32 banned_flags) { struct inet6_ifaddr *ifp; int err = -EADDRNOTAVAIL; list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) { if (ifp->scope > IFA_LINK) break; if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { *addr = ifp->addr; err = 0; break; } } return err; } int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags) { struct inet6_dev *idev; int err = -EADDRNOTAVAIL; rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); err = __ipv6_get_lladdr(idev, addr, banned_flags); read_unlock_bh(&idev->lock); } rcu_read_unlock(); return err; } static int ipv6_count_addresses(const struct inet6_dev *idev) { const struct inet6_ifaddr *ifp; int cnt = 0; rcu_read_lock(); list_for_each_entry_rcu(ifp, &idev->addr_list, if_list) cnt++; rcu_read_unlock(); return cnt; } int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict) { return ipv6_chk_addr_and_flags(net, addr, dev, !dev, strict, IFA_F_TENTATIVE); } EXPORT_SYMBOL(ipv6_chk_addr); /* device argument is used to find the L3 domain of interest. If * skip_dev_check is set, then the ifp device is not checked against * the passed in dev argument. So the 2 cases for addresses checks are: * 1. does the address exist in the L3 domain that dev is part of * (skip_dev_check = true), or * * 2. 
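/*
 * Illustrative aside, not part of addrconf.c: the selection done by
 * ipv6_dev_get_saddr() above can be observed from userspace by connect()ing
 * an unbound UDP socket to the destination (no packet is sent) and reading
 * the chosen source back with getsockname().  Hypothetical helper, error
 * handling trimmed; port 9 (discard) is arbitrary.
 */
#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int print_saddr_for(const char *dst_text)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6, .sin6_port = htons(9) };
	struct sockaddr_in6 src;
	socklen_t len = sizeof(src);
	char buf[INET6_ADDRSTRLEN];
	int fd;

	if (inet_pton(AF_INET6, dst_text, &dst.sin6_addr) != 1)
		return -1;
	fd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 ||
	    getsockname(fd, (struct sockaddr *)&src, &len) < 0) {
		close(fd);
		return -1;
	}
	printf("source for %s: %s\n", dst_text,
	       inet_ntop(AF_INET6, &src.sin6_addr, buf, sizeof(buf)));
	close(fd);
	return 0;
}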
does the address exist on the specific device * (skip_dev_check = false) */ static struct net_device * __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, const struct net_device *dev, bool skip_dev_check, int strict, u32 banned_flags) { unsigned int hash = inet6_addr_hash(net, addr); struct net_device *l3mdev, *ndev; struct inet6_ifaddr *ifp; u32 ifp_flags; rcu_read_lock(); l3mdev = l3mdev_master_dev_rcu(dev); if (skip_dev_check) dev = NULL; hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { ndev = ifp->idev->dev; if (l3mdev_master_dev_rcu(ndev) != l3mdev) continue; /* Decouple optimistic from tentative for evaluation here. * Ban optimistic addresses explicitly, when required. */ ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC) ? (ifp->flags&~IFA_F_TENTATIVE) : ifp->flags; if (ipv6_addr_equal(&ifp->addr, addr) && !(ifp_flags&banned_flags) && (!dev || ndev == dev || !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) { rcu_read_unlock(); return ndev; } } rcu_read_unlock(); return NULL; } int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, const struct net_device *dev, bool skip_dev_check, int strict, u32 banned_flags) { return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check, strict, banned_flags) ? 1 : 0; } EXPORT_SYMBOL(ipv6_chk_addr_and_flags); /* Compares an address/prefix_len with addresses on device @dev. * If one is found it returns true. */ bool ipv6_chk_custom_prefix(const struct in6_addr *addr, const unsigned int prefix_len, struct net_device *dev) { const struct inet6_ifaddr *ifa; const struct inet6_dev *idev; bool ret = false; rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) { ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len); if (ret) break; } } rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ipv6_chk_custom_prefix); int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) { const struct inet6_ifaddr *ifa; const struct inet6_dev *idev; int onlink; onlink = 0; rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) { onlink = ipv6_prefix_equal(addr, &ifa->addr, ifa->prefix_len); if (onlink) break; } } rcu_read_unlock(); return onlink; } EXPORT_SYMBOL(ipv6_chk_prefix); /** * ipv6_dev_find - find the first device with a given source address. * @net: the net namespace * @addr: the source address * @dev: used to find the L3 domain of interest * * The caller should be protected by RCU, or RTNL. 
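/*
 * Illustrative aside, not part of addrconf.c: a rough userspace analogue of
 * ipv6_chk_addr() above - walk the configured addresses with getifaddrs()
 * and look for an exact match.  It cannot reproduce the tentative/optimistic
 * flag handling or the L3-domain (VRF) scoping that only the kernel sees;
 * addr_configured_locally() is a hypothetical helper name.
 */
#include <stdbool.h>
#include <string.h>
#include <ifaddrs.h>
#include <sys/socket.h>
#include <netinet/in.h>

static bool addr_configured_locally(const struct in6_addr *addr)
{
	struct ifaddrs *list, *ifa;
	bool found = false;

	if (getifaddrs(&list) < 0)
		return false;
	for (ifa = list; ifa; ifa = ifa->ifa_next) {
		if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_INET6)
			continue;
		if (!memcmp(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr,
			    addr, sizeof(*addr))) {
			found = true;
			break;
		}
	}
	freeifaddrs(list);
	return found;
}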
*/ struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr, struct net_device *dev) { return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1, IFA_F_TENTATIVE); } EXPORT_SYMBOL(ipv6_dev_find); struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, struct net_device *dev, int strict) { unsigned int hash = inet6_addr_hash(net, addr); struct inet6_ifaddr *ifp, *result = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { if (ipv6_addr_equal(&ifp->addr, addr)) { if (!dev || ifp->idev->dev == dev || !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { if (in6_ifa_hold_safe(ifp)) { result = ifp; break; } } } } rcu_read_unlock(); return result; } /* Gets referenced address, destroys ifaddr */ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) { if (dad_failed) ifp->flags |= IFA_F_DADFAILED; if (ifp->flags&IFA_F_TEMPORARY) { struct inet6_ifaddr *ifpub; spin_lock_bh(&ifp->lock); ifpub = ifp->ifpub; if (ifpub) { in6_ifa_hold(ifpub); spin_unlock_bh(&ifp->lock); ipv6_create_tempaddr(ifpub, true); in6_ifa_put(ifpub); } else { spin_unlock_bh(&ifp->lock); } ipv6_del_addr(ifp); } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) { spin_lock_bh(&ifp->lock); addrconf_del_dad_work(ifp); ifp->flags |= IFA_F_TENTATIVE; if (dad_failed) ifp->flags &= ~IFA_F_OPTIMISTIC; spin_unlock_bh(&ifp->lock); if (dad_failed) ipv6_ifa_notify(0, ifp); in6_ifa_put(ifp); } else { ipv6_del_addr(ifp); } } static int addrconf_dad_end(struct inet6_ifaddr *ifp) { int err = -ENOENT; spin_lock_bh(&ifp->lock); if (ifp->state == INET6_IFADDR_STATE_DAD) { ifp->state = INET6_IFADDR_STATE_POSTDAD; err = 0; } spin_unlock_bh(&ifp->lock); return err; } void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; struct net *net = dev_net(idev->dev); int max_addresses; if (addrconf_dad_end(ifp)) { in6_ifa_put(ifp); return; } net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n", ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source); spin_lock_bh(&ifp->lock); if (ifp->flags & IFA_F_STABLE_PRIVACY) { struct in6_addr new_addr; struct inet6_ifaddr *ifp2; int retries = ifp->stable_privacy_retry + 1; struct ifa6_config cfg = { .pfx = &new_addr, .plen = ifp->prefix_len, .ifa_flags = ifp->flags, .valid_lft = ifp->valid_lft, .preferred_lft = ifp->prefered_lft, .scope = ifp->scope, }; if (retries > net->ipv6.sysctl.idgen_retries) { net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n", ifp->idev->dev->name); goto errdad; } new_addr = ifp->addr; if (ipv6_generate_stable_address(&new_addr, retries, idev)) goto errdad; spin_unlock_bh(&ifp->lock); max_addresses = READ_ONCE(idev->cnf.max_addresses); if (max_addresses && ipv6_count_addresses(idev) >= max_addresses) goto lock_errdad; net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n", ifp->idev->dev->name); ifp2 = ipv6_add_addr(idev, &cfg, false, NULL); if (IS_ERR(ifp2)) goto lock_errdad; spin_lock_bh(&ifp2->lock); ifp2->stable_privacy_retry = retries; ifp2->state = INET6_IFADDR_STATE_PREDAD; spin_unlock_bh(&ifp2->lock); addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay); in6_ifa_put(ifp2); lock_errdad: spin_lock_bh(&ifp->lock); } errdad: /* transition from _POSTDAD to _ERRDAD */ ifp->state = INET6_IFADDR_STATE_ERRDAD; spin_unlock_bh(&ifp->lock); addrconf_mod_dad_work(ifp, 0); in6_ifa_put(ifp); } /* Join to solicited addr multicast 
 group.
 * caller must hold RTNL */
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
	struct in6_addr maddr;

	if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
		return;

	addrconf_addr_solict_mult(addr, &maddr);
	ipv6_dev_mc_inc(dev, &maddr);
}

/* caller must hold RTNL */
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct in6_addr maddr;

	if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
		return;

	addrconf_addr_solict_mult(addr, &maddr);
	__ipv6_dev_mc_dec(idev, &maddr);
}

/* caller must hold RTNL */
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
	struct in6_addr addr;

	if (ifp->prefix_len >= 127) /* RFC 6164 */
		return;
	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
	if (ipv6_addr_any(&addr))
		return;
	__ipv6_dev_ac_inc(ifp->idev, &addr);
}

/* caller must hold RTNL */
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
	struct in6_addr addr;

	if (ifp->prefix_len >= 127) /* RFC 6164 */
		return;
	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
	if (ipv6_addr_any(&addr))
		return;
	__ipv6_dev_ac_dec(ifp->idev, &addr);
}

static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
{
	switch (dev->addr_len) {
	case ETH_ALEN:
		memcpy(eui, dev->dev_addr, 3);
		eui[3] = 0xFF;
		eui[4] = 0xFE;
		memcpy(eui + 5, dev->dev_addr + 3, 3);
		break;
	case EUI64_ADDR_LEN:
		memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
		eui[0] ^= 2;
		break;
	default:
		return -1;
	}

	return 0;
}

static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
{
	const union fwnet_hwaddr *ha;

	if (dev->addr_len != FWNET_ALEN)
		return -1;

	ha = (const union fwnet_hwaddr *)dev->dev_addr;
	memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
	eui[0] ^= 2;
	return 0;
}

static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
{
	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
	if (dev->addr_len != ARCNET_ALEN)
		return -1;
	memset(eui, 0, 7);
	eui[7] = *(u8 *)dev->dev_addr;
	return 0;
}

static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
{
	if (dev->addr_len != INFINIBAND_ALEN)
		return -1;
	memcpy(eui, dev->dev_addr + 12, 8);
	eui[0] |= 2;
	return 0;
}

static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
{
	if (addr == 0)
		return -1;
	eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
		  ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
		  ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
		  ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
		  ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
		  ipv4_is_lbcast(addr)) ?
0x00 : 0x02; eui[1] = 0; eui[2] = 0x5E; eui[3] = 0xFE; memcpy(eui + 4, &addr, 4); return 0; } static int addrconf_ifid_sit(u8 *eui, struct net_device *dev) { if (dev->priv_flags & IFF_ISATAP) return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr); return -1; } static int addrconf_ifid_gre(u8 *eui, struct net_device *dev) { return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr); } static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev) { memcpy(eui, dev->perm_addr, 3); memcpy(eui + 5, dev->perm_addr + 3, 3); eui[3] = 0xFF; eui[4] = 0xFE; eui[0] ^= 2; return 0; } static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) { switch (dev->type) { case ARPHRD_ETHER: case ARPHRD_FDDI: return addrconf_ifid_eui48(eui, dev); case ARPHRD_ARCNET: return addrconf_ifid_arcnet(eui, dev); case ARPHRD_INFINIBAND: return addrconf_ifid_infiniband(eui, dev); case ARPHRD_SIT: return addrconf_ifid_sit(eui, dev); case ARPHRD_IPGRE: case ARPHRD_TUNNEL: return addrconf_ifid_gre(eui, dev); case ARPHRD_6LOWPAN: return addrconf_ifid_6lowpan(eui, dev); case ARPHRD_IEEE1394: return addrconf_ifid_ieee1394(eui, dev); case ARPHRD_TUNNEL6: case ARPHRD_IP6GRE: case ARPHRD_RAWIP: return addrconf_ifid_ip6tnl(eui, dev); } return -1; } static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) { int err = -1; struct inet6_ifaddr *ifp; read_lock_bh(&idev->lock); list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) { if (ifp->scope > IFA_LINK) break; if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { memcpy(eui, ifp->addr.s6_addr+8, 8); err = 0; break; } } read_unlock_bh(&idev->lock); return err; } /* Generation of a randomized Interface Identifier * draft-ietf-6man-rfc4941bis, Section 3.3.1 */ static void ipv6_gen_rnd_iid(struct in6_addr *addr) { regen: get_random_bytes(&addr->s6_addr[8], 8); /* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1: * check if generated address is not inappropriate: * * - Reserved IPv6 Interface Identifiers * - XXX: already assigned to an address on the device */ /* Subnet-router anycast: 0000:0000:0000:0000 */ if (!(addr->s6_addr32[2] | addr->s6_addr32[3])) goto regen; /* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212 * Proxy Mobile IPv6: 0200:5EFF:FE00:5213 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF */ if (ntohl(addr->s6_addr32[2]) == 0x02005eff && (ntohl(addr->s6_addr32[3]) & 0Xff000000) == 0xfe000000) goto regen; /* Reserved subnet anycast addresses */ if (ntohl(addr->s6_addr32[2]) == 0xfdffffff && ntohl(addr->s6_addr32[3]) >= 0Xffffff80) goto regen; } /* * Add prefix route. */ static void addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric, struct net_device *dev, unsigned long expires, u32 flags, gfp_t gfp_flags) { struct fib6_config cfg = { .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX, .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF, .fc_ifindex = dev->ifindex, .fc_expires = expires, .fc_dst_len = plen, .fc_flags = RTF_UP | flags, .fc_nlinfo.nl_net = dev_net(dev), .fc_protocol = RTPROT_KERNEL, .fc_type = RTN_UNICAST, }; cfg.fc_dst = *pfx; /* Prevent useless cloning on PtP SIT. This thing is done here expecting that the whole class of non-broadcast devices need not cloning. 
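/*
 * Illustrative aside, not part of addrconf.c: the addrconf_ifid_eui48() path
 * referenced above for ARPHRD_ETHER builds a modified EUI-64 identifier from
 * the 48-bit MAC (RFC 4291 Appendix A): split the MAC, insert ff:fe and flip
 * the universal/local bit.  Combined with fe80::/64 this yields the classic
 * link-local address, e.g. 52:54:00:12:34:56 -> fe80::5054:ff:fe12:3456.
 * mac_to_linklocal() is a hypothetical userspace helper.
 */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

static void mac_to_linklocal(const uint8_t mac[6], struct in6_addr *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->s6_addr[0] = 0xfe;			/* fe80::/64 prefix */
	addr->s6_addr[1] = 0x80;

	memcpy(&addr->s6_addr[8], mac, 3);		/* OUI half of the MAC */
	addr->s6_addr[11] = 0xff;
	addr->s6_addr[12] = 0xfe;
	memcpy(&addr->s6_addr[13], mac + 3, 3);		/* NIC half of the MAC */
	addr->s6_addr[8] ^= 0x02;			/* flip universal/local bit */
}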
*/ #if IS_ENABLED(CONFIG_IPV6_SIT) if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) cfg.fc_flags |= RTF_NONEXTHOP; #endif ip6_route_add(&cfg, gfp_flags, NULL); } static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, int plen, const struct net_device *dev, u32 flags, u32 noflags, bool no_gw) { struct fib6_node *fn; struct fib6_info *rt = NULL; struct fib6_table *table; u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX; table = fib6_get_table(dev_net(dev), tb_id); if (!table) return NULL; rcu_read_lock(); fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true); if (!fn) goto out; for_each_fib6_node_rt_rcu(fn) { /* prefix routes only use builtin fib6_nh */ if (rt->nh) continue; if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex) continue; if (no_gw && rt->fib6_nh->fib_nh_gw_family) continue; if ((rt->fib6_flags & flags) != flags) continue; if ((rt->fib6_flags & noflags) != 0) continue; if (!fib6_info_hold_safe(rt)) continue; break; } out: rcu_read_unlock(); return rt; } /* Create "default" multicast route to the interface */ static void addrconf_add_mroute(struct net_device *dev) { struct fib6_config cfg = { .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL, .fc_metric = IP6_RT_PRIO_ADDRCONF, .fc_ifindex = dev->ifindex, .fc_dst_len = 8, .fc_flags = RTF_UP, .fc_type = RTN_MULTICAST, .fc_nlinfo.nl_net = dev_net(dev), .fc_protocol = RTPROT_KERNEL, }; ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); ip6_route_add(&cfg, GFP_KERNEL, NULL); } static struct inet6_dev *addrconf_add_dev(struct net_device *dev) { struct inet6_dev *idev; ASSERT_RTNL(); idev = ipv6_find_idev(dev); if (IS_ERR(idev)) return idev; if (idev->cnf.disable_ipv6) return ERR_PTR(-EACCES); /* Add default multicast route */ if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev)) addrconf_add_mroute(dev); return idev; } static void delete_tempaddrs(struct inet6_dev *idev, struct inet6_ifaddr *ifp) { struct inet6_ifaddr *ift, *tmp; write_lock_bh(&idev->lock); list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) { if (ift->ifpub != ifp) continue; in6_ifa_hold(ift); write_unlock_bh(&idev->lock); ipv6_del_addr(ift); write_lock_bh(&idev->lock); } write_unlock_bh(&idev->lock); } static void manage_tempaddrs(struct inet6_dev *idev, struct inet6_ifaddr *ifp, __u32 valid_lft, __u32 prefered_lft, bool create, unsigned long now) { u32 flags; struct inet6_ifaddr *ift; read_lock_bh(&idev->lock); /* update all temporary addresses in the list */ list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) { int age, max_valid, max_prefered; if (ifp != ift->ifpub) continue; /* RFC 4941 section 3.3: * If a received option will extend the lifetime of a public * address, the lifetimes of temporary addresses should * be extended, subject to the overall constraint that no * temporary addresses should ever remain "valid" or "preferred" * for a time longer than (TEMP_VALID_LIFETIME) or * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively. 
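/*
 * Illustrative aside, not part of addrconf.c: the RFC 4941 clamping described
 * in the comment above (and applied just below) as plain arithmetic.  "age"
 * is how long the temporary address has existed; advertised lifetimes may
 * never push it past temp_valid_lft, respectively temp_prefered_lft minus the
 * desync factor, counted from its creation.  Hypothetical userspace helper,
 * all values in seconds.
 */
#include <stdint.h>

struct tempaddr_lft {
	uint32_t valid;
	uint32_t preferred;
};

static struct tempaddr_lft clamp_temp_lifetimes(uint32_t valid_lft,
						uint32_t prefered_lft,
						uint32_t age,
						uint32_t temp_valid_lft,
						uint32_t temp_prefered_lft,
						uint32_t desync_factor)
{
	uint32_t max_valid = temp_valid_lft > age ? temp_valid_lft - age : 0;
	uint32_t pref_budget = temp_prefered_lft > desync_factor ?
			       temp_prefered_lft - desync_factor : 0;
	uint32_t max_pref = pref_budget > age ? pref_budget - age : 0;

	return (struct tempaddr_lft){
		.valid = valid_lft < max_valid ? valid_lft : max_valid,
		.preferred = prefered_lft < max_pref ? prefered_lft : max_pref,
	};
}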
*/ age = (now - ift->cstamp) / HZ; max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age; if (max_valid < 0) max_valid = 0; max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) - idev->desync_factor - age; if (max_prefered < 0) max_prefered = 0; if (valid_lft > max_valid) valid_lft = max_valid; if (prefered_lft > max_prefered) prefered_lft = max_prefered; spin_lock(&ift->lock); flags = ift->flags; ift->valid_lft = valid_lft; ift->prefered_lft = prefered_lft; ift->tstamp = now; if (prefered_lft > 0) ift->flags &= ~IFA_F_DEPRECATED; spin_unlock(&ift->lock); if (!(flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ift); } /* Also create a temporary address if it's enabled but no temporary * address currently exists. * However, we get called with valid_lft == 0, prefered_lft == 0, create == false * as part of cleanup (ie. deleting the mngtmpaddr). * We don't want that to result in creating a new temporary ip address. */ if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft)) create = true; if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) { /* When a new public address is created as described * in [ADDRCONF], also create a new temporary address. */ read_unlock_bh(&idev->lock); ipv6_create_tempaddr(ifp, false); } else { read_unlock_bh(&idev->lock); } } static bool is_addr_mode_generate_stable(struct inet6_dev *idev) { return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY || idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM; } int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, const struct prefix_info *pinfo, struct inet6_dev *in6_dev, const struct in6_addr *addr, int addr_type, u32 addr_flags, bool sllao, bool tokenized, __u32 valid_lft, u32 prefered_lft) { struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1); int create = 0, update_lft = 0; if (!ifp && valid_lft) { int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses); struct ifa6_config cfg = { .pfx = addr, .plen = pinfo->prefix_len, .ifa_flags = addr_flags, .valid_lft = valid_lft, .preferred_lft = prefered_lft, .scope = addr_type & IPV6_ADDR_SCOPE_MASK, .ifa_proto = IFAPROT_KERNEL_RA }; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) || READ_ONCE(in6_dev->cnf.optimistic_dad)) && !net->ipv6.devconf_all->forwarding && sllao) cfg.ifa_flags |= IFA_F_OPTIMISTIC; #endif /* Do not allow to create too much of autoconfigured * addresses; this would be too easy way to crash kernel. */ if (!max_addresses || ipv6_count_addresses(in6_dev) < max_addresses) ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL); if (IS_ERR_OR_NULL(ifp)) return -1; create = 1; spin_lock_bh(&ifp->lock); ifp->flags |= IFA_F_MANAGETEMPADDR; ifp->cstamp = jiffies; ifp->tokenized = tokenized; spin_unlock_bh(&ifp->lock); addrconf_dad_start(ifp); } if (ifp) { u32 flags; unsigned long now; u32 stored_lft; /* update lifetime (RFC2462 5.5.3 e) */ spin_lock_bh(&ifp->lock); now = jiffies; if (ifp->valid_lft > (now - ifp->tstamp) / HZ) stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ; else stored_lft = 0; /* RFC4862 Section 5.5.3e: * "Note that the preferred lifetime of the * corresponding address is always reset to * the Preferred Lifetime in the received * Prefix Information option, regardless of * whether the valid lifetime is also reset or * ignored." * * So we should always update prefered_lft here. 
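/*
 * Illustrative aside, not part of addrconf.c: the guard applied just below,
 * in isolation.  Unless ra_honor_pio_life is enabled, a Router Advertisement
 * may not cut the remaining valid lifetime below min(remaining,
 * MIN_VALID_LIFETIME) - two hours, per RFC 4862 section 5.5.3e - which blunts
 * lifetime-zero denial-of-service attempts from spoofed RAs.  Hypothetical
 * helper; values in seconds.
 */
#include <stdint.h>

#define MIN_VALID_LIFETIME_SECS	(2 * 60 * 60)

static uint32_t guarded_valid_lft(uint32_t advertised, uint32_t remaining)
{
	uint32_t floor_lft = remaining < MIN_VALID_LIFETIME_SECS ?
			     remaining : MIN_VALID_LIFETIME_SECS;

	return advertised > floor_lft ? advertised : floor_lft;
}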
*/ update_lft = !create && stored_lft; if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) { const u32 minimum_lft = min_t(u32, stored_lft, MIN_VALID_LIFETIME); valid_lft = max(valid_lft, minimum_lft); } if (update_lft) { ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; WRITE_ONCE(ifp->tstamp, now); flags = ifp->flags; ifp->flags &= ~IFA_F_DEPRECATED; spin_unlock_bh(&ifp->lock); if (!(flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); } else spin_unlock_bh(&ifp->lock); manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft, create, now); in6_ifa_put(ifp); addrconf_verify(net); } return 0; } EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr); void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) { struct prefix_info *pinfo; struct fib6_table *table; __u32 valid_lft; __u32 prefered_lft; int addr_type, err; u32 addr_flags = 0; struct inet6_dev *in6_dev; struct net *net = dev_net(dev); bool ignore_autoconf = false; pinfo = (struct prefix_info *) opt; if (len < sizeof(struct prefix_info)) { netdev_dbg(dev, "addrconf: prefix option too short\n"); return; } /* * Validation checks ([ADDRCONF], page 19) */ addr_type = ipv6_addr_type(&pinfo->prefix); if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)) return; valid_lft = ntohl(pinfo->valid); prefered_lft = ntohl(pinfo->prefered); if (prefered_lft > valid_lft) { net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n"); return; } in6_dev = in6_dev_get(dev); if (!in6_dev) { net_dbg_ratelimited("addrconf: device %s not configured\n", dev->name); return; } if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft) goto put; /* * Two things going on here: * 1) Add routes for on-link prefixes * 2) Configure prefixes with the auto flag set */ if (pinfo->onlink) { struct fib6_info *rt; unsigned long rt_expires; /* Avoid arithmetic overflow. Really, we could * save rt_expires in seconds, likely valid_lft, * but it would require division in fib gc, that it * not good. 
*/ if (HZ > USER_HZ) rt_expires = addrconf_timeout_fixup(valid_lft, HZ); else rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ); if (addrconf_finite_timeout(rt_expires)) rt_expires *= HZ; rt = addrconf_get_prefix_route(&pinfo->prefix, pinfo->prefix_len, dev, RTF_ADDRCONF | RTF_PREFIX_RT, RTF_DEFAULT, true); if (rt) { /* Autoconf prefix route */ if (valid_lft == 0) { ip6_del_rt(net, rt, false); rt = NULL; } else { table = rt->fib6_table; spin_lock_bh(&table->tb6_lock); if (addrconf_finite_timeout(rt_expires)) { /* not infinity */ fib6_set_expires(rt, jiffies + rt_expires); fib6_add_gc_list(rt); } else { fib6_clean_expires(rt); fib6_remove_gc_list(rt); } spin_unlock_bh(&table->tb6_lock); } } else if (valid_lft) { clock_t expires = 0; int flags = RTF_ADDRCONF | RTF_PREFIX_RT; if (addrconf_finite_timeout(rt_expires)) { /* not infinity */ flags |= RTF_EXPIRES; expires = jiffies_to_clock_t(rt_expires); } addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, 0, dev, expires, flags, GFP_ATOMIC); } fib6_info_release(rt); } /* Try to figure out our local address for this prefix */ ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd; if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) { struct in6_addr addr; bool tokenized = false, dev_addr_generated = false; if (pinfo->prefix_len == 64) { memcpy(&addr, &pinfo->prefix, 8); if (!ipv6_addr_any(&in6_dev->token)) { read_lock_bh(&in6_dev->lock); memcpy(addr.s6_addr + 8, in6_dev->token.s6_addr + 8, 8); read_unlock_bh(&in6_dev->lock); tokenized = true; } else if (is_addr_mode_generate_stable(in6_dev) && !ipv6_generate_stable_address(&addr, 0, in6_dev)) { addr_flags |= IFA_F_STABLE_PRIVACY; goto ok; } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) && ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) { goto put; } else { dev_addr_generated = true; } goto ok; } net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n", pinfo->prefix_len); goto put; ok: err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr, addr_type, addr_flags, sllao, tokenized, valid_lft, prefered_lft); if (err) goto put; /* Ignore error case here because previous prefix add addr was * successful which will be notified. */ ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr, addr_type, addr_flags, sllao, tokenized, valid_lft, prefered_lft, dev_addr_generated); } inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); put: in6_dev_put(in6_dev); } static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev, struct in6_ifreq *ireq) { struct ip_tunnel_parm_kern p = { }; int err; if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4)) return -EADDRNOTAVAIL; p.iph.daddr = ireq->ifr6_addr.s6_addr32[3]; p.iph.version = 4; p.iph.ihl = 5; p.iph.protocol = IPPROTO_IPV6; p.iph.ttl = 64; if (!dev->netdev_ops->ndo_tunnel_ctl) return -EOPNOTSUPP; err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL); if (err) return err; dev = __dev_get_by_name(net, p.name); if (!dev) return -ENOBUFS; return dev_open(dev, NULL); } /* * Set destination address. * Special case for SIT interfaces where we create a new "virtual" * device. 
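/*
 * Illustrative aside, not part of addrconf.c: the SLAAC address assembled in
 * addrconf_prefix_rcv() above is simply the advertised /64 prefix in the top
 * half and a 64-bit interface identifier in the bottom half, e.g.
 * 2001:db8:1:2::/64 + IID 5054:ff:fe12:3456 -> 2001:db8:1:2:5054:ff:fe12:3456.
 * slaac_make_address() is a hypothetical helper.
 */
#include <string.h>
#include <netinet/in.h>

static void slaac_make_address(const struct in6_addr *prefix, /* a /64 */
			       const unsigned char iid[8],
			       struct in6_addr *out)
{
	memcpy(out->s6_addr, prefix->s6_addr, 8);	/* network prefix */
	memcpy(out->s6_addr + 8, iid, 8);		/* interface identifier */
}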
*/ int addrconf_set_dstaddr(struct net *net, void __user *arg) { struct net_device *dev; struct in6_ifreq ireq; int err = -ENODEV; if (!IS_ENABLED(CONFIG_IPV6_SIT)) return -ENODEV; if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) return -EFAULT; rtnl_net_lock(net); dev = __dev_get_by_index(net, ireq.ifr6_ifindex); if (dev && dev->type == ARPHRD_SIT) err = addrconf_set_sit_dstaddr(net, dev, &ireq); rtnl_net_unlock(net); return err; } static int ipv6_mc_config(struct sock *sk, bool join, const struct in6_addr *addr, int ifindex) { int ret; ASSERT_RTNL(); lock_sock(sk); if (join) ret = ipv6_sock_mc_join(sk, ifindex, addr); else ret = ipv6_sock_mc_drop(sk, ifindex, addr); release_sock(sk); return ret; } /* * Manual configuration of address on an interface */ static int inet6_addr_add(struct net *net, struct net_device *dev, struct ifa6_config *cfg, clock_t expires, u32 flags, struct netlink_ext_ack *extack) { struct inet6_ifaddr *ifp; struct inet6_dev *idev; ASSERT_RTNL_NET(net); if (cfg->plen > 128) { NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length"); return -EINVAL; } if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64) { NL_SET_ERR_MSG_MOD(extack, "address with \"mngtmpaddr\" flag must have a prefix length of 64"); return -EINVAL; } idev = addrconf_add_dev(dev); if (IS_ERR(idev)) { NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device"); return PTR_ERR(idev); } if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) { int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk, true, cfg->pfx, dev->ifindex); if (ret < 0) { NL_SET_ERR_MSG_MOD(extack, "Multicast auto join failed"); return ret; } } cfg->scope = ipv6_addr_scope(cfg->pfx); ifp = ipv6_add_addr(idev, cfg, true, extack); if (!IS_ERR(ifp)) { if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->rt_priority, dev, expires, flags, GFP_KERNEL); } /* Send a netlink notification if DAD is enabled and * optimistic flag is not set */ if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD))) ipv6_ifa_notify(0, ifp); /* * Note that section 3.1 of RFC 4429 indicates * that the Optimistic flag should not be set for * manually configured addresses */ addrconf_dad_start(ifp); if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR) manage_tempaddrs(idev, ifp, cfg->valid_lft, cfg->preferred_lft, true, jiffies); in6_ifa_put(ifp); addrconf_verify_rtnl(net); return 0; } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) { ipv6_mc_config(net->ipv6.mc_autojoin_sk, false, cfg->pfx, dev->ifindex); } return PTR_ERR(ifp); } static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags, const struct in6_addr *pfx, unsigned int plen, struct netlink_ext_ack *extack) { struct inet6_ifaddr *ifp; struct inet6_dev *idev; struct net_device *dev; if (plen > 128) { NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length"); return -EINVAL; } dev = __dev_get_by_index(net, ifindex); if (!dev) { NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface"); return -ENODEV; } idev = __in6_dev_get_rtnl_net(dev); if (!idev) { NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device"); return -ENXIO; } read_lock_bh(&idev->lock); list_for_each_entry(ifp, &idev->addr_list, if_list) { if (ifp->prefix_len == plen && ipv6_addr_equal(pfx, &ifp->addr)) { in6_ifa_hold(ifp); read_unlock_bh(&idev->lock); ipv6_del_addr(ifp); if (!(ifp->flags & IFA_F_TEMPORARY) && (ifp->flags & IFA_F_MANAGETEMPADDR)) delete_tempaddrs(idev, ifp); addrconf_verify_rtnl(net); if (ipv6_addr_is_multicast(pfx)) { ipv6_mc_config(net->ipv6.mc_autojoin_sk, false, pfx, dev->ifindex); } 
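/*
 * Illustrative aside, not part of addrconf.c: addrconf_add_ifaddr() and
 * addrconf_del_ifaddr() just below are what the legacy SIOCSIFADDR /
 * SIOCDIFADDR ioctls on an AF_INET6 socket end up calling (the same
 * struct in6_ifreq is used by addrconf_set_dstaddr() above).  A hedged
 * userspace sketch of that old ifconfig-style path - modern tools use
 * rtnetlink instead, and header interplay between glibc and the kernel
 * uapi headers can vary by distribution.  Requires CAP_NET_ADMIN.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ipv6.h>		/* struct in6_ifreq */

static int add_ipv6_addr(const char *ifname, const char *addr_text, int plen)
{
	struct in6_ifreq ifr6 = { .ifr6_prefixlen = plen };
	int fd, ret;

	ifr6.ifr6_ifindex = if_nametoindex(ifname);
	if (!ifr6.ifr6_ifindex ||
	    inet_pton(AF_INET6, addr_text, &ifr6.ifr6_addr) != 1)
		return -1;

	fd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, SIOCSIFADDR, &ifr6);	/* SIOCDIFADDR removes it */
	close(fd);
	return ret;
}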
return 0; } } read_unlock_bh(&idev->lock); NL_SET_ERR_MSG_MOD(extack, "address not found"); return -EADDRNOTAVAIL; } int addrconf_add_ifaddr(struct net *net, void __user *arg) { struct ifa6_config cfg = { .ifa_flags = IFA_F_PERMANENT, .preferred_lft = INFINITY_LIFE_TIME, .valid_lft = INFINITY_LIFE_TIME, }; struct net_device *dev; struct in6_ifreq ireq; int err; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) return -EFAULT; cfg.pfx = &ireq.ifr6_addr; cfg.plen = ireq.ifr6_prefixlen; rtnl_net_lock(net); dev = __dev_get_by_index(net, ireq.ifr6_ifindex); if (dev) err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL); else err = -ENODEV; rtnl_net_unlock(net); return err; } int addrconf_del_ifaddr(struct net *net, void __user *arg) { struct in6_ifreq ireq; int err; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) return -EFAULT; rtnl_net_lock(net); err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr, ireq.ifr6_prefixlen, NULL); rtnl_net_unlock(net); return err; } static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int plen, int scope, u8 proto) { struct inet6_ifaddr *ifp; struct ifa6_config cfg = { .pfx = addr, .plen = plen, .ifa_flags = IFA_F_PERMANENT, .valid_lft = INFINITY_LIFE_TIME, .preferred_lft = INFINITY_LIFE_TIME, .scope = scope, .ifa_proto = proto }; ifp = ipv6_add_addr(idev, &cfg, true, NULL); if (!IS_ERR(ifp)) { spin_lock_bh(&ifp->lock); ifp->flags &= ~IFA_F_TENTATIVE; spin_unlock_bh(&ifp->lock); rt_genid_bump_ipv6(dev_net(idev->dev)); ipv6_ifa_notify(RTM_NEWADDR, ifp); in6_ifa_put(ifp); } } #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) static void add_v4_addrs(struct inet6_dev *idev) { struct in6_addr addr; struct net_device *dev; struct net *net = dev_net(idev->dev); int scope, plen, offset = 0; u32 pflags = 0; ASSERT_RTNL(); memset(&addr, 0, sizeof(struct in6_addr)); /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */ if (idev->dev->addr_len == sizeof(struct in6_addr)) offset = sizeof(struct in6_addr) - 4; memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) { scope = IPV6_ADDR_COMPATv4; plen = 96; pflags |= RTF_NONEXTHOP; } else { if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE) return; addr.s6_addr32[0] = htonl(0xfe800000); scope = IFA_LINK; plen = 64; } if (addr.s6_addr32[3]) { add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, GFP_KERNEL); return; } for_each_netdev(net, dev) { struct in_device *in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { struct in_ifaddr *ifa; int flag = scope; in_dev_for_each_ifa_rtnl(ifa, in_dev) { addr.s6_addr32[3] = ifa->ifa_local; if (ifa->ifa_scope == RT_SCOPE_LINK) continue; if (ifa->ifa_scope >= RT_SCOPE_HOST) { if (idev->dev->flags&IFF_POINTOPOINT) continue; flag |= IFA_HOST; } add_addr(idev, &addr, plen, flag, IFAPROT_UNSPEC); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, GFP_KERNEL); } } } } #endif static void init_loopback(struct net_device *dev) { struct inet6_dev *idev; /* ::1 */ ASSERT_RTNL(); idev = ipv6_find_idev(dev); if (IS_ERR(idev)) { pr_debug("%s: add_dev failed\n", __func__); return; } add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO); } void addrconf_add_linklocal(struct 
inet6_dev *idev, const struct in6_addr *addr, u32 flags) { struct ifa6_config cfg = { .pfx = addr, .plen = 64, .ifa_flags = flags | IFA_F_PERMANENT, .valid_lft = INFINITY_LIFE_TIME, .preferred_lft = INFINITY_LIFE_TIME, .scope = IFA_LINK, .ifa_proto = IFAPROT_KERNEL_LL }; struct inet6_ifaddr *ifp; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) || READ_ONCE(idev->cnf.optimistic_dad)) && !dev_net(idev->dev)->ipv6.devconf_all->forwarding) cfg.ifa_flags |= IFA_F_OPTIMISTIC; #endif ifp = ipv6_add_addr(idev, &cfg, true, NULL); if (!IS_ERR(ifp)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev, 0, 0, GFP_ATOMIC); addrconf_dad_start(ifp); in6_ifa_put(ifp); } } EXPORT_SYMBOL_GPL(addrconf_add_linklocal); static bool ipv6_reserved_interfaceid(struct in6_addr address) { if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0) return true; if (address.s6_addr32[2] == htonl(0x02005eff) && ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000))) return true; if (address.s6_addr32[2] == htonl(0xfdffffff) && ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80))) return true; return false; } static int ipv6_generate_stable_address(struct in6_addr *address, u8 dad_count, const struct inet6_dev *idev) { static DEFINE_SPINLOCK(lock); static __u32 digest[SHA1_DIGEST_WORDS]; static __u32 workspace[SHA1_WORKSPACE_WORDS]; static union { char __data[SHA1_BLOCK_SIZE]; struct { struct in6_addr secret; __be32 prefix[2]; unsigned char hwaddr[MAX_ADDR_LEN]; u8 dad_count; } __packed; } data; struct in6_addr secret; struct in6_addr temp; struct net *net = dev_net(idev->dev); BUILD_BUG_ON(sizeof(data.__data) != sizeof(data)); if (idev->cnf.stable_secret.initialized) secret = idev->cnf.stable_secret.secret; else if (net->ipv6.devconf_dflt->stable_secret.initialized) secret = net->ipv6.devconf_dflt->stable_secret.secret; else return -1; retry: spin_lock_bh(&lock); sha1_init(digest); memset(&data, 0, sizeof(data)); memset(workspace, 0, sizeof(workspace)); memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len); data.prefix[0] = address->s6_addr32[0]; data.prefix[1] = address->s6_addr32[1]; data.secret = secret; data.dad_count = dad_count; sha1_transform(digest, data.__data, workspace); temp = *address; temp.s6_addr32[2] = (__force __be32)digest[0]; temp.s6_addr32[3] = (__force __be32)digest[1]; spin_unlock_bh(&lock); if (ipv6_reserved_interfaceid(temp)) { dad_count++; if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries) return -1; goto retry; } *address = temp; return 0; } static void ipv6_gen_mode_random_init(struct inet6_dev *idev) { struct ipv6_stable_secret *s = &idev->cnf.stable_secret; if (s->initialized) return; s = &idev->cnf.stable_secret; get_random_bytes(&s->secret, sizeof(s->secret)); s->initialized = true; } static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) { struct in6_addr addr; /* no link local addresses on L3 master devices */ if (netif_is_l3_master(idev->dev)) return; /* no link local addresses on devices flagged as slaves */ if (idev->dev->priv_flags & IFF_NO_ADDRCONF) return; ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); switch (idev->cnf.addr_gen_mode) { case IN6_ADDR_GEN_MODE_RANDOM: ipv6_gen_mode_random_init(idev); fallthrough; case IN6_ADDR_GEN_MODE_STABLE_PRIVACY: if (!ipv6_generate_stable_address(&addr, 0, idev)) addrconf_add_linklocal(idev, &addr, IFA_F_STABLE_PRIVACY); else if (prefix_route) addrconf_prefix_route(&addr, 64, 0, idev->dev, 0, 0, GFP_KERNEL); break; 
case IN6_ADDR_GEN_MODE_EUI64: /* addrconf_add_linklocal also adds a prefix_route and we * only need to care about prefix routes if ipv6_generate_eui64 * couldn't generate one. */ if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0) addrconf_add_linklocal(idev, &addr, 0); else if (prefix_route) addrconf_prefix_route(&addr, 64, 0, idev->dev, 0, 0, GFP_KERNEL); break; case IN6_ADDR_GEN_MODE_NONE: default: /* will not add any link local address */ break; } } static void addrconf_dev_config(struct net_device *dev) { struct inet6_dev *idev; ASSERT_RTNL(); if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_FDDI) && (dev->type != ARPHRD_ARCNET) && (dev->type != ARPHRD_INFINIBAND) && (dev->type != ARPHRD_IEEE1394) && (dev->type != ARPHRD_TUNNEL6) && (dev->type != ARPHRD_6LOWPAN) && (dev->type != ARPHRD_TUNNEL) && (dev->type != ARPHRD_NONE) && (dev->type != ARPHRD_RAWIP)) { /* Alas, we support only Ethernet autoconfiguration. */ idev = __in6_dev_get(dev); if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP && dev->flags & IFF_MULTICAST) ipv6_mc_up(idev); return; } idev = addrconf_add_dev(dev); if (IS_ERR(idev)) return; /* this device type has no EUI support */ if (dev->type == ARPHRD_NONE && idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) WRITE_ONCE(idev->cnf.addr_gen_mode, IN6_ADDR_GEN_MODE_RANDOM); addrconf_addr_gen(idev, false); } #if IS_ENABLED(CONFIG_IPV6_SIT) static void addrconf_sit_config(struct net_device *dev) { struct inet6_dev *idev; ASSERT_RTNL(); /* * Configure the tunnel with one of our IPv4 * addresses... we should configure all of * our v4 addrs in the tunnel */ idev = ipv6_find_idev(dev); if (IS_ERR(idev)) { pr_debug("%s: add_dev failed\n", __func__); return; } if (dev->priv_flags & IFF_ISATAP) { addrconf_addr_gen(idev, false); return; } add_v4_addrs(idev); if (dev->flags&IFF_POINTOPOINT) addrconf_add_mroute(dev); } #endif #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) static void addrconf_gre_config(struct net_device *dev) { struct inet6_dev *idev; ASSERT_RTNL(); idev = ipv6_find_idev(dev); if (IS_ERR(idev)) { pr_debug("%s: add_dev failed\n", __func__); return; } if (dev->type == ARPHRD_ETHER) { addrconf_addr_gen(idev, true); return; } add_v4_addrs(idev); if (dev->flags & IFF_POINTOPOINT) addrconf_add_mroute(dev); } #endif static void addrconf_init_auto_addrs(struct net_device *dev) { switch (dev->type) { #if IS_ENABLED(CONFIG_IPV6_SIT) case ARPHRD_SIT: addrconf_sit_config(dev); break; #endif #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) case ARPHRD_IP6GRE: case ARPHRD_IPGRE: addrconf_gre_config(dev); break; #endif case ARPHRD_LOOPBACK: init_loopback(dev); break; default: addrconf_dev_config(dev); break; } } static int fixup_permanent_addr(struct net *net, struct inet6_dev *idev, struct inet6_ifaddr *ifp) { /* !fib6_node means the host route was removed from the * FIB, for example, if 'lo' device is taken down. In that * case regenerate the host route. 
*/ if (!ifp->rt || !ifp->rt->fib6_node) { struct fib6_info *f6i, *prev; f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false, GFP_ATOMIC, NULL); if (IS_ERR(f6i)) return PTR_ERR(f6i); /* ifp->rt can be accessed outside of rtnl */ spin_lock(&ifp->lock); prev = ifp->rt; ifp->rt = f6i; spin_unlock(&ifp->lock); fib6_info_release(prev); } if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->rt_priority, idev->dev, 0, 0, GFP_ATOMIC); } if (ifp->state == INET6_IFADDR_STATE_PREDAD) addrconf_dad_start(ifp); return 0; } static void addrconf_permanent_addr(struct net *net, struct net_device *dev) { struct inet6_ifaddr *ifp, *tmp; struct inet6_dev *idev; idev = __in6_dev_get(dev); if (!idev) return; write_lock_bh(&idev->lock); list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) { if ((ifp->flags & IFA_F_PERMANENT) && fixup_permanent_addr(net, idev, ifp) < 0) { write_unlock_bh(&idev->lock); in6_ifa_hold(ifp); ipv6_del_addr(ifp); write_lock_bh(&idev->lock); net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n", idev->dev->name, &ifp->addr); } } write_unlock_bh(&idev->lock); } static int addrconf_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_change_info *change_info; struct netdev_notifier_changeupper_info *info; struct inet6_dev *idev = __in6_dev_get(dev); struct net *net = dev_net(dev); int run_pending = 0; int err; switch (event) { case NETDEV_REGISTER: if (!idev && dev->mtu >= IPV6_MIN_MTU) { idev = ipv6_add_dev(dev); if (IS_ERR(idev)) return notifier_from_errno(PTR_ERR(idev)); } break; case NETDEV_CHANGEMTU: /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */ if (dev->mtu < IPV6_MIN_MTU) { addrconf_ifdown(dev, dev != net->loopback_dev); break; } if (idev) { rt6_mtu_change(dev, dev->mtu); WRITE_ONCE(idev->cnf.mtu6, dev->mtu); break; } /* allocate new idev */ idev = ipv6_add_dev(dev); if (IS_ERR(idev)) break; /* device is still not ready */ if (!(idev->if_flags & IF_READY)) break; run_pending = 1; fallthrough; case NETDEV_UP: case NETDEV_CHANGE: if (idev && idev->cnf.disable_ipv6) break; if (dev->priv_flags & IFF_NO_ADDRCONF) { if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP && dev->flags & IFF_MULTICAST) ipv6_mc_up(idev); break; } if (event == NETDEV_UP) { /* restore routes for permanent addresses */ addrconf_permanent_addr(net, dev); if (!addrconf_link_ready(dev)) { /* device is not ready yet. */ pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n", dev->name); break; } if (!idev && dev->mtu >= IPV6_MIN_MTU) idev = ipv6_add_dev(dev); if (!IS_ERR_OR_NULL(idev)) { idev->if_flags |= IF_READY; run_pending = 1; } } else if (event == NETDEV_CHANGE) { if (!addrconf_link_ready(dev)) { /* device is still not ready. 
*/ rt6_sync_down_dev(dev, event); break; } if (!IS_ERR_OR_NULL(idev)) { if (idev->if_flags & IF_READY) { /* device is already configured - * but resend MLD reports, we might * have roamed and need to update * multicast snooping switches */ ipv6_mc_up(idev); change_info = ptr; if (change_info->flags_changed & IFF_NOARP) addrconf_dad_run(idev, true); rt6_sync_up(dev, RTNH_F_LINKDOWN); break; } idev->if_flags |= IF_READY; } pr_debug("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n", dev->name); run_pending = 1; } addrconf_init_auto_addrs(dev); if (!IS_ERR_OR_NULL(idev)) { if (run_pending) addrconf_dad_run(idev, false); /* Device has an address by now */ rt6_sync_up(dev, RTNH_F_DEAD); /* * If the MTU changed during the interface down, * when the interface up, the changed MTU must be * reflected in the idev as well as routers. */ if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) { rt6_mtu_change(dev, dev->mtu); WRITE_ONCE(idev->cnf.mtu6, dev->mtu); } WRITE_ONCE(idev->tstamp, jiffies); inet6_ifinfo_notify(RTM_NEWLINK, idev); /* * If the changed mtu during down is lower than * IPV6_MIN_MTU stop IPv6 on this interface. */ if (dev->mtu < IPV6_MIN_MTU) addrconf_ifdown(dev, dev != net->loopback_dev); } break; case NETDEV_DOWN: case NETDEV_UNREGISTER: /* * Remove all addresses from this interface. */ addrconf_ifdown(dev, event != NETDEV_DOWN); break; case NETDEV_CHANGENAME: if (idev) { snmp6_unregister_dev(idev); addrconf_sysctl_unregister(idev); err = addrconf_sysctl_register(idev); if (err) return notifier_from_errno(err); err = snmp6_register_dev(idev); if (err) { addrconf_sysctl_unregister(idev); return notifier_from_errno(err); } } break; case NETDEV_PRE_TYPE_CHANGE: case NETDEV_POST_TYPE_CHANGE: if (idev) addrconf_type_change(dev, event); break; case NETDEV_CHANGEUPPER: info = ptr; /* flush all routes if dev is linked to or unlinked from * an L3 master device (e.g., VRF) */ if (info->upper_dev && netif_is_l3_master(info->upper_dev)) addrconf_ifdown(dev, false); } return NOTIFY_OK; } /* * addrconf module should be notified of a device going up */ static struct notifier_block ipv6_dev_notf = { .notifier_call = addrconf_notify, .priority = ADDRCONF_NOTIFY_PRIORITY, }; static void addrconf_type_change(struct net_device *dev, unsigned long event) { struct inet6_dev *idev; ASSERT_RTNL(); idev = __in6_dev_get(dev); if (event == NETDEV_POST_TYPE_CHANGE) ipv6_mc_remap(idev); else if (event == NETDEV_PRE_TYPE_CHANGE) ipv6_mc_unmap(idev); } static bool addr_is_local(const struct in6_addr *addr) { return ipv6_addr_type(addr) & (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } static int addrconf_ifdown(struct net_device *dev, bool unregister) { unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN; struct net *net = dev_net(dev); struct inet6_dev *idev; struct inet6_ifaddr *ifa; LIST_HEAD(tmp_addr_list); bool keep_addr = false; bool was_ready; int state, i; ASSERT_RTNL(); rt6_disable_ip(dev, event); idev = __in6_dev_get(dev); if (!idev) return -ENODEV; /* * Step 1: remove reference to ipv6 device from parent device. * Do not dev_put! 
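/*
 * Illustrative aside, not part of addrconf.c: ipv6_dev_notf above is a plain
 * netdevice notifier.  A minimal out-of-tree module using the same mechanism
 * (hypothetical example, built against kernel headers) - it just logs
 * NETDEV_UP events the way addrconf_notify() reacts to them:
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("demo: %s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");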
*/ if (unregister) { idev->dead = 1; /* protected by rtnl_lock */ RCU_INIT_POINTER(dev->ip6_ptr, NULL); /* Step 1.5: remove snmp6 entry */ snmp6_unregister_dev(idev); } /* combine the user config with event to determine if permanent * addresses are to be removed from address hash table */ if (!unregister && !idev->cnf.disable_ipv6) { /* aggregate the system setting and interface setting */ int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down); if (!_keep_addr) _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down); keep_addr = (_keep_addr > 0); } /* Step 2: clear hash table */ for (i = 0; i < IN6_ADDR_HSIZE; i++) { struct hlist_head *h = &net->ipv6.inet6_addr_lst[i]; spin_lock_bh(&net->ipv6.addrconf_hash_lock); restart: hlist_for_each_entry_rcu(ifa, h, addr_lst) { if (ifa->idev == idev) { addrconf_del_dad_work(ifa); /* combined flag + permanent flag decide if * address is retained on a down event */ if (!keep_addr || !(ifa->flags & IFA_F_PERMANENT) || addr_is_local(&ifa->addr)) { hlist_del_init_rcu(&ifa->addr_lst); goto restart; } } } spin_unlock_bh(&net->ipv6.addrconf_hash_lock); } write_lock_bh(&idev->lock); addrconf_del_rs_timer(idev); /* Step 2: clear flags for stateless addrconf, repeated down * detection */ was_ready = idev->if_flags & IF_READY; if (!unregister) idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); /* Step 3: clear tempaddr list */ while (!list_empty(&idev->tempaddr_list)) { ifa = list_first_entry(&idev->tempaddr_list, struct inet6_ifaddr, tmp_list); list_del(&ifa->tmp_list); write_unlock_bh(&idev->lock); spin_lock_bh(&ifa->lock); if (ifa->ifpub) { in6_ifa_put(ifa->ifpub); ifa->ifpub = NULL; } spin_unlock_bh(&ifa->lock); in6_ifa_put(ifa); write_lock_bh(&idev->lock); } list_for_each_entry(ifa, &idev->addr_list, if_list) list_add_tail(&ifa->if_list_aux, &tmp_addr_list); write_unlock_bh(&idev->lock); while (!list_empty(&tmp_addr_list)) { struct fib6_info *rt = NULL; bool keep; ifa = list_first_entry(&tmp_addr_list, struct inet6_ifaddr, if_list_aux); list_del(&ifa->if_list_aux); addrconf_del_dad_work(ifa); keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) && !addr_is_local(&ifa->addr); spin_lock_bh(&ifa->lock); if (keep) { /* set state to skip the notifier below */ state = INET6_IFADDR_STATE_DEAD; ifa->state = INET6_IFADDR_STATE_PREDAD; if (!(ifa->flags & IFA_F_NODAD)) ifa->flags |= IFA_F_TENTATIVE; rt = ifa->rt; ifa->rt = NULL; } else { state = ifa->state; ifa->state = INET6_IFADDR_STATE_DEAD; } spin_unlock_bh(&ifa->lock); if (rt) ip6_del_rt(net, rt, false); if (state != INET6_IFADDR_STATE_DEAD) { __ipv6_ifa_notify(RTM_DELADDR, ifa); inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); } else { if (idev->cnf.forwarding) addrconf_leave_anycast(ifa); addrconf_leave_solict(ifa->idev, &ifa->addr); } if (!keep) { write_lock_bh(&idev->lock); list_del_rcu(&ifa->if_list); write_unlock_bh(&idev->lock); in6_ifa_put(ifa); } } /* Step 5: Discard anycast and multicast list */ if (unregister) { ipv6_ac_destroy_dev(idev); ipv6_mc_destroy_dev(idev); } else if (was_ready) { ipv6_mc_down(idev); } WRITE_ONCE(idev->tstamp, jiffies); idev->ra_mtu = 0; /* Last: Shot the device (if unregistered) */ if (unregister) { addrconf_sysctl_unregister(idev); neigh_parms_release(&nd_tbl, idev->nd_parms); neigh_ifdown(&nd_tbl, dev); in6_dev_put(idev); } return 0; } static void addrconf_rs_timer(struct timer_list *t) { struct inet6_dev *idev = from_timer(idev, t, rs_timer); struct net_device *dev = idev->dev; struct in6_addr lladdr; int rtr_solicits; write_lock(&idev->lock); if (idev->dead || 
!(idev->if_flags & IF_READY)) goto out; if (!ipv6_accept_ra(idev)) goto out; /* Announcement received after solicitation was sent */ if (idev->if_flags & IF_RA_RCVD) goto out; rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits); if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) { write_unlock(&idev->lock); if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters); else goto put; write_lock(&idev->lock); idev->rs_interval = rfc3315_s14_backoff_update( idev->rs_interval, READ_ONCE(idev->cnf.rtr_solicit_max_interval)); /* The wait after the last probe can be shorter */ addrconf_mod_rs_timer(idev, (idev->rs_probes == READ_ONCE(idev->cnf.rtr_solicits)) ? READ_ONCE(idev->cnf.rtr_solicit_delay) : idev->rs_interval); } else { /* * Note: we do not support deprecated "all on-link" * assumption any longer. */ pr_debug("%s: no IPv6 routers present\n", idev->dev->name); } out: write_unlock(&idev->lock); put: in6_dev_put(idev); } /* * Duplicate Address Detection */ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; unsigned long rand_num; u64 nonce; if (ifp->flags & IFA_F_OPTIMISTIC) rand_num = 0; else rand_num = get_random_u32_below( READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1); nonce = 0; if (READ_ONCE(idev->cnf.enhanced_dad) || READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) { do get_random_bytes(&nonce, 6); while (nonce == 0); } ifp->dad_nonce = nonce; ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits); addrconf_mod_dad_work(ifp, rand_num); } static void addrconf_dad_begin(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; struct net_device *dev = idev->dev; bool bump_id, notify = false; struct net *net; addrconf_join_solict(dev, &ifp->addr); read_lock_bh(&idev->lock); spin_lock(&ifp->lock); if (ifp->state == INET6_IFADDR_STATE_DEAD) goto out; net = dev_net(dev); if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 && READ_ONCE(idev->cnf.accept_dad) < 1) || !(ifp->flags&IFA_F_TENTATIVE) || ifp->flags & IFA_F_NODAD) { bool send_na = false; if (ifp->flags & IFA_F_TENTATIVE && !(ifp->flags & IFA_F_OPTIMISTIC)) send_na = true; bump_id = ifp->flags & IFA_F_TENTATIVE; ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); addrconf_dad_completed(ifp, bump_id, send_na); return; } if (!(idev->if_flags & IF_READY)) { spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); /* * If the device is not ready: * - keep it tentative if it is a permanent address. * - otherwise, kill it. */ in6_ifa_hold(ifp); addrconf_dad_stop(ifp, 0); return; } /* * Optimistic nodes can start receiving * Frames right away */ if (ifp->flags & IFA_F_OPTIMISTIC) { ip6_ins_rt(net, ifp->rt); if (ipv6_use_optimistic_addr(net, idev)) { /* Because optimistic nodes can use this address, * notify listeners. If DAD fails, RTM_DELADDR is sent. 
*/ notify = true; } } addrconf_dad_kick(ifp); out: spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); if (notify) ipv6_ifa_notify(RTM_NEWADDR, ifp); } static void addrconf_dad_start(struct inet6_ifaddr *ifp) { bool begin_dad = false; spin_lock_bh(&ifp->lock); if (ifp->state != INET6_IFADDR_STATE_DEAD) { ifp->state = INET6_IFADDR_STATE_PREDAD; begin_dad = true; } spin_unlock_bh(&ifp->lock); if (begin_dad) addrconf_mod_dad_work(ifp, 0); } static void addrconf_dad_work(struct work_struct *w) { struct inet6_ifaddr *ifp = container_of(to_delayed_work(w), struct inet6_ifaddr, dad_work); struct inet6_dev *idev = ifp->idev; bool bump_id, disable_ipv6 = false; struct in6_addr mcaddr; struct net *net; enum { DAD_PROCESS, DAD_BEGIN, DAD_ABORT, } action = DAD_PROCESS; net = dev_net(idev->dev); rtnl_net_lock(net); spin_lock_bh(&ifp->lock); if (ifp->state == INET6_IFADDR_STATE_PREDAD) { action = DAD_BEGIN; ifp->state = INET6_IFADDR_STATE_DAD; } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { action = DAD_ABORT; ifp->state = INET6_IFADDR_STATE_POSTDAD; if ((READ_ONCE(net->ipv6.devconf_all->accept_dad) > 1 || READ_ONCE(idev->cnf.accept_dad) > 1) && !idev->cnf.disable_ipv6 && !(ifp->flags & IFA_F_STABLE_PRIVACY)) { struct in6_addr addr; addr.s6_addr32[0] = htonl(0xfe800000); addr.s6_addr32[1] = 0; if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && ipv6_addr_equal(&ifp->addr, &addr)) { /* DAD failed for link-local based on MAC */ WRITE_ONCE(idev->cnf.disable_ipv6, 1); pr_info("%s: IPv6 being disabled!\n", ifp->idev->dev->name); disable_ipv6 = true; } } } spin_unlock_bh(&ifp->lock); if (action == DAD_BEGIN) { addrconf_dad_begin(ifp); goto out; } else if (action == DAD_ABORT) { in6_ifa_hold(ifp); addrconf_dad_stop(ifp, 1); if (disable_ipv6) addrconf_ifdown(idev->dev, false); goto out; } if (!ifp->dad_probes && addrconf_dad_end(ifp)) goto out; write_lock_bh(&idev->lock); if (idev->dead || !(idev->if_flags & IF_READY)) { write_unlock_bh(&idev->lock); goto out; } spin_lock(&ifp->lock); if (ifp->state == INET6_IFADDR_STATE_DEAD) { spin_unlock(&ifp->lock); write_unlock_bh(&idev->lock); goto out; } if (ifp->dad_probes == 0) { bool send_na = false; /* * DAD was successful */ if (ifp->flags & IFA_F_TENTATIVE && !(ifp->flags & IFA_F_OPTIMISTIC)) send_na = true; bump_id = ifp->flags & IFA_F_TENTATIVE; ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); write_unlock_bh(&idev->lock); addrconf_dad_completed(ifp, bump_id, send_na); goto out; } ifp->dad_probes--; addrconf_mod_dad_work(ifp, max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100)); spin_unlock(&ifp->lock); write_unlock_bh(&idev->lock); /* send a neighbour solicitation for our addr */ addrconf_addr_solict_mult(&ifp->addr, &mcaddr); ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, ifp->dad_nonce); out: in6_ifa_put(ifp); rtnl_net_unlock(net); } /* ifp->idev must be at least read locked */ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp) { struct inet6_ifaddr *ifpiter; struct inet6_dev *idev = ifp->idev; list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) { if (ifpiter->scope > IFA_LINK) break; if (ifp != ifpiter && ifpiter->scope == IFA_LINK && (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE| IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) == IFA_F_PERMANENT) return false; } return true; } static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, bool send_na) { struct net_device *dev = ifp->idev->dev; struct in6_addr lladdr; bool send_rs, send_mld; 
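/*
 * Illustrative aside, not part of addrconf.c: the DAD probes sent above by
 * addrconf_dad_work() go to the target's solicited-node multicast group,
 * ff02::1:ffXX:XXXX, formed from the low 24 bits of the address
 * (RFC 4291 section 2.7.1) - this is what addrconf_addr_solict_mult()
 * computes.  Hypothetical userspace helper:
 */
#include <string.h>
#include <netinet/in.h>

static void solicited_node_mcast(const struct in6_addr *addr,
				 struct in6_addr *mcast)
{
	static const unsigned char pfx[13] = {
		0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff
	};

	memcpy(mcast->s6_addr, pfx, sizeof(pfx));		/* ff02::1:ff00:0/104 */
	memcpy(mcast->s6_addr + 13, addr->s6_addr + 13, 3);	/* low 24 bits */
}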
addrconf_del_dad_work(ifp); /* * Configure the address for reception. Now it is valid. */ ipv6_ifa_notify(RTM_NEWADDR, ifp); /* If added prefix is link local and we are prepared to process router advertisements, start sending router solicitations. */ read_lock_bh(&ifp->idev->lock); send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp); send_rs = send_mld && ipv6_accept_ra(ifp->idev) && READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 && (dev->flags & IFF_LOOPBACK) == 0 && (dev->type != ARPHRD_TUNNEL) && !netif_is_team_port(dev); read_unlock_bh(&ifp->idev->lock); /* While dad is in progress mld report's source address is in6_addrany. * Resend with proper ll now. */ if (send_mld) ipv6_mc_dad_complete(ifp->idev); /* send unsolicited NA if enabled */ if (send_na && (READ_ONCE(ifp->idev->cnf.ndisc_notify) || READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) { ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr, /*router=*/ !!ifp->idev->cnf.forwarding, /*solicited=*/ false, /*override=*/ true, /*inc_opt=*/ true); } if (send_rs) { /* * If a host as already performed a random delay * [...] as part of DAD [...] there is no need * to delay again before sending the first RS */ if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) return; ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters); write_lock_bh(&ifp->idev->lock); spin_lock(&ifp->lock); ifp->idev->rs_interval = rfc3315_s14_backoff_init( READ_ONCE(ifp->idev->cnf.rtr_solicit_interval)); ifp->idev->rs_probes = 1; ifp->idev->if_flags |= IF_RS_SENT; addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval); spin_unlock(&ifp->lock); write_unlock_bh(&ifp->idev->lock); } if (bump_id) rt_genid_bump_ipv6(dev_net(dev)); /* Make sure that a new temporary address will be created * before this temporary address becomes deprecated. 
*/ if (ifp->flags & IFA_F_TEMPORARY) addrconf_verify_rtnl(dev_net(dev)); } static void addrconf_dad_run(struct inet6_dev *idev, bool restart) { struct inet6_ifaddr *ifp; read_lock_bh(&idev->lock); list_for_each_entry(ifp, &idev->addr_list, if_list) { spin_lock(&ifp->lock); if ((ifp->flags & IFA_F_TENTATIVE && ifp->state == INET6_IFADDR_STATE_DAD) || restart) { if (restart) ifp->state = INET6_IFADDR_STATE_PREDAD; addrconf_dad_kick(ifp); } spin_unlock(&ifp->lock); } read_unlock_bh(&idev->lock); } #ifdef CONFIG_PROC_FS struct if6_iter_state { struct seq_net_private p; int bucket; int offset; }; static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) { struct if6_iter_state *state = seq->private; struct net *net = seq_file_net(seq); struct inet6_ifaddr *ifa = NULL; int p = 0; /* initial bucket if pos is 0 */ if (pos == 0) { state->bucket = 0; state->offset = 0; } for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) { /* sync with offset */ if (p < state->offset) { p++; continue; } return ifa; } /* prepare for next bucket */ state->offset = 0; p = 0; } return NULL; } static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) { struct if6_iter_state *state = seq->private; struct net *net = seq_file_net(seq); hlist_for_each_entry_continue_rcu(ifa, addr_lst) { state->offset++; return ifa; } state->offset = 0; while (++state->bucket < IN6_ADDR_HSIZE) { hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) { return ifa; } } return NULL; } static void *if6_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { rcu_read_lock(); return if6_get_first(seq, *pos); } static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct inet6_ifaddr *ifa; ifa = if6_get_next(seq, v); ++*pos; return ifa; } static void if6_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { rcu_read_unlock(); } static int if6_seq_show(struct seq_file *seq, void *v) { struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", &ifp->addr, ifp->idev->dev->ifindex, ifp->prefix_len, ifp->scope, (u8) ifp->flags, ifp->idev->dev->name); return 0; } static const struct seq_operations if6_seq_ops = { .start = if6_seq_start, .next = if6_seq_next, .show = if6_seq_show, .stop = if6_seq_stop, }; static int __net_init if6_proc_net_init(struct net *net) { if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops, sizeof(struct if6_iter_state))) return -ENOMEM; return 0; } static void __net_exit if6_proc_net_exit(struct net *net) { remove_proc_entry("if_inet6", net->proc_net); } static struct pernet_operations if6_proc_net_ops = { .init = if6_proc_net_init, .exit = if6_proc_net_exit, }; int __init if6_proc_init(void) { return register_pernet_subsys(&if6_proc_net_ops); } void if6_proc_exit(void) { unregister_pernet_subsys(&if6_proc_net_ops); } #endif /* CONFIG_PROC_FS */ #if IS_ENABLED(CONFIG_IPV6_MIP6) /* Check if address is a home address configured on any interface. 
*/ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) { unsigned int hash = inet6_addr_hash(net, addr); struct inet6_ifaddr *ifp = NULL; int ret = 0; rcu_read_lock(); hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { if (ipv6_addr_equal(&ifp->addr, addr) && (ifp->flags & IFA_F_HOMEADDRESS)) { ret = 1; break; } } rcu_read_unlock(); return ret; } #endif /* RFC 6554 describes an algorithm for avoiding loops in segment routing: * check whether the segment list contains any address assigned to a local * interface. * * Quote: * * To detect loops in the SRH, a router MUST determine if the SRH * includes multiple addresses assigned to any interface on that router. * If such addresses appear more than once and are separated by at least * one address not assigned to that router. */ int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs, unsigned char nsegs) { const struct in6_addr *addr; int i, ret = 0, found = 0; struct inet6_ifaddr *ifp; bool separated = false; unsigned int hash; bool hash_found; rcu_read_lock(); for (i = 0; i < nsegs; i++) { addr = &segs[i]; hash = inet6_addr_hash(net, addr); hash_found = false; hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { if (ipv6_addr_equal(&ifp->addr, addr)) { hash_found = true; break; } } if (hash_found) { if (found > 1 && separated) { ret = 1; break; } separated = false; found++; } else { separated = true; } } rcu_read_unlock(); return ret; } /* * Periodic address status verification */ static void addrconf_verify_rtnl(struct net *net) { unsigned long now, next, next_sec, next_sched; struct inet6_ifaddr *ifp; int i; ASSERT_RTNL(); rcu_read_lock_bh(); now = jiffies; next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); cancel_delayed_work(&net->ipv6.addr_chk_work); for (i = 0; i < IN6_ADDR_HSIZE; i++) { restart: hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) { unsigned long age; /* An address with IFA_F_PERMANENT can still have a finite lifetime: * that happens when preferred_lft is set to a value other than zero * or infinity while valid_lft is infinity. Only skip addresses whose * preferred lifetime really is infinite. */ if ((ifp->flags & IFA_F_PERMANENT) && (ifp->prefered_lft == INFINITY_LIFE_TIME)) continue; spin_lock(&ifp->lock); /* We try to batch several events at once. */ age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; if ((ifp->flags&IFA_F_TEMPORARY) && !(ifp->flags&IFA_F_TENTATIVE) && ifp->prefered_lft != INFINITY_LIFE_TIME && !ifp->regen_count && ifp->ifpub) { /* This is a non-regenerated temporary addr.
*/ unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev); if (age + regen_advance >= ifp->prefered_lft) { struct inet6_ifaddr *ifpub = ifp->ifpub; if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) next = ifp->tstamp + ifp->prefered_lft * HZ; ifp->regen_count++; in6_ifa_hold(ifp); in6_ifa_hold(ifpub); spin_unlock(&ifp->lock); spin_lock(&ifpub->lock); ifpub->regen_count = 0; spin_unlock(&ifpub->lock); rcu_read_unlock_bh(); ipv6_create_tempaddr(ifpub, true); in6_ifa_put(ifpub); in6_ifa_put(ifp); rcu_read_lock_bh(); goto restart; } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next)) next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ; } if (ifp->valid_lft != INFINITY_LIFE_TIME && age >= ifp->valid_lft) { spin_unlock(&ifp->lock); in6_ifa_hold(ifp); rcu_read_unlock_bh(); ipv6_del_addr(ifp); rcu_read_lock_bh(); goto restart; } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { spin_unlock(&ifp->lock); continue; } else if (age >= ifp->prefered_lft) { /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */ int deprecate = 0; if (!(ifp->flags&IFA_F_DEPRECATED)) { deprecate = 1; ifp->flags |= IFA_F_DEPRECATED; } if ((ifp->valid_lft != INFINITY_LIFE_TIME) && (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))) next = ifp->tstamp + ifp->valid_lft * HZ; spin_unlock(&ifp->lock); if (deprecate) { in6_ifa_hold(ifp); ipv6_ifa_notify(0, ifp); in6_ifa_put(ifp); goto restart; } } else { /* ifp->prefered_lft <= ifp->valid_lft */ if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) next = ifp->tstamp + ifp->prefered_lft * HZ; spin_unlock(&ifp->lock); } } } next_sec = round_jiffies_up(next); next_sched = next; /* If rounded timeout is accurate enough, accept it. */ if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ)) next_sched = next_sec; /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. 
*/ if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX)) next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX; pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", now, next, next_sec, next_sched); mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now); rcu_read_unlock_bh(); } static void addrconf_verify_work(struct work_struct *w) { struct net *net = container_of(to_delayed_work(w), struct net, ipv6.addr_chk_work); rtnl_net_lock(net); addrconf_verify_rtnl(net); rtnl_net_unlock(net); } static void addrconf_verify(struct net *net) { mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0); } static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, struct in6_addr **peer_pfx) { struct in6_addr *pfx = NULL; *peer_pfx = NULL; if (addr) pfx = nla_data(addr); if (local) { if (pfx && nla_memcmp(local, pfx, sizeof(*pfx))) *peer_pfx = pfx; pfx = nla_data(local); } return pfx; } static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = { [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) }, [IFA_LOCAL] = { .len = sizeof(struct in6_addr) }, [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, [IFA_FLAGS] = { .len = sizeof(u32) }, [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, [IFA_PROTO] = { .type = NLA_U8 }, }; static int inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; struct in6_addr *pfx, *peer_pfx; u32 ifa_flags; int err; err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, extack); if (err < 0) return err; ifm = nlmsg_data(nlh); pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx); if (!pfx) return -EINVAL; ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags); /* We ignore other flags so far. */ ifa_flags &= IFA_F_MANAGETEMPADDR; rtnl_net_lock(net); err = inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx, ifm->ifa_prefixlen, extack); rtnl_net_unlock(net); return err; } static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp, unsigned long expires, u32 flags, bool modify_peer) { struct fib6_table *table; struct fib6_info *f6i; u32 prio; f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, ifp->prefix_len, ifp->idev->dev, 0, RTF_DEFAULT, true); if (!f6i) return -ENOENT; prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF; if (f6i->fib6_metric != prio) { /* delete old one */ ip6_del_rt(dev_net(ifp->idev->dev), f6i, false); /* add new one */ addrconf_prefix_route(modify_peer ? 
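/*
 * Editor's note -- illustrative addition, not part of the original source.
 * extract_addr() above gives IFA_LOCAL precedence: when both IFA_LOCAL
 * and IFA_ADDRESS are present and differ, IFA_LOCAL becomes the local
 * address and IFA_ADDRESS is treated as the peer. A minimal userspace
 * mirror of that precedence, operating on already-parsed payloads
 * (function name is mine):
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *
 *	static const struct in6_addr *
 *	pick_local_and_peer(const struct in6_addr *ifa_address,
 *			    const struct in6_addr *ifa_local,
 *			    const struct in6_addr **peer)
 *	{
 *		const struct in6_addr *pfx = NULL;
 *
 *		*peer = NULL;
 *		if (ifa_address)
 *			pfx = ifa_address;
 *		if (ifa_local) {
 *			if (pfx && memcmp(ifa_local, pfx, sizeof(*pfx)))
 *				*peer = pfx;	// differing IFA_ADDRESS is the peer
 *			pfx = ifa_local;	// IFA_LOCAL wins as the local address
 *		}
 *		return pfx;
 *	}
 */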
&ifp->peer_addr : &ifp->addr, ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); return 0; } if (f6i != net->ipv6.fib6_null_entry) { table = f6i->fib6_table; spin_lock_bh(&table->tb6_lock); if (!(flags & RTF_EXPIRES)) { fib6_clean_expires(f6i); fib6_remove_gc_list(f6i); } else { fib6_set_expires(f6i, expires); fib6_add_gc_list(f6i); } spin_unlock_bh(&table->tb6_lock); } fib6_info_release(f6i); return 0; } static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, struct ifa6_config *cfg, clock_t expires, u32 flags) { bool was_managetempaddr; bool new_peer = false; bool had_prefixroute; ASSERT_RTNL_NET(net); if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64)) return -EINVAL; if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED) cfg->ifa_flags &= ~IFA_F_OPTIMISTIC; if (cfg->peer_pfx && memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) { if (!ipv6_addr_any(&ifp->peer_addr)) cleanup_prefix_route(ifp, expires, true, true); new_peer = true; } spin_lock_bh(&ifp->lock); was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; had_prefixroute = ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE); ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE); ifp->flags |= cfg->ifa_flags; WRITE_ONCE(ifp->tstamp, jiffies); WRITE_ONCE(ifp->valid_lft, cfg->valid_lft); WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft); WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto); if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) WRITE_ONCE(ifp->rt_priority, cfg->rt_priority); if (new_peer) ifp->peer_addr = *cfg->peer_pfx; spin_unlock_bh(&ifp->lock); if (!(ifp->flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) { int rc = -ENOENT; if (had_prefixroute) rc = modify_prefix_route(net, ifp, expires, flags, false); /* prefix route could have been deleted; if so restore it */ if (rc == -ENOENT) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr)) rc = modify_prefix_route(net, ifp, expires, flags, true); if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) { addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); } } else if (had_prefixroute) { enum cleanup_prefix_rt_t action; unsigned long rt_expires; write_lock_bh(&ifp->idev->lock); action = check_cleanup_prefix_route(ifp, &rt_expires); write_unlock_bh(&ifp->idev->lock); if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, rt_expires, action == CLEANUP_PREFIX_RT_DEL, false); } } if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) { if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR)) delete_tempaddrs(ifp->idev, ifp); else manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft, cfg->preferred_lft, !was_managetempaddr, jiffies); } addrconf_verify_rtnl(net); return 0; } static int inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct in6_addr *peer_pfx; struct inet6_ifaddr *ifa; struct net_device *dev; struct inet6_dev *idev; struct ifa6_config cfg; struct ifaddrmsg *ifm; unsigned long timeout; clock_t expires; u32 flags; int err; err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, 
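/*
 * Editor's note -- illustrative addition, not part of the original source.
 * inet6_addr_modify() above refuses IFA_F_MANAGETEMPADDR on temporary
 * addresses and on prefixes other than /64. A tiny userspace mirror of
 * that precondition (function name is mine):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *	#include <linux/if_addr.h>	// IFA_F_MANAGETEMPADDR, IFA_F_TEMPORARY
 *
 *	static bool manage_tempaddr_allowed(uint32_t new_flags,
 *					    uint32_t cur_flags,
 *					    uint8_t prefix_len)
 *	{
 *		if (!(new_flags & IFA_F_MANAGETEMPADDR))
 *			return true;	// nothing to enforce
 *		return !(cur_flags & IFA_F_TEMPORARY) && prefix_len == 64;
 *	}
 */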
ifa_ipv6_policy, extack); if (err < 0) return err; memset(&cfg, 0, sizeof(cfg)); ifm = nlmsg_data(nlh); cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx); if (!cfg.pfx) return -EINVAL; cfg.peer_pfx = peer_pfx; cfg.plen = ifm->ifa_prefixlen; if (tb[IFA_RT_PRIORITY]) cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); if (tb[IFA_PROTO]) cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]); cfg.ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags); /* We ignore other flags so far. */ cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC; cfg.ifa_flags |= IFA_F_PERMANENT; cfg.valid_lft = INFINITY_LIFE_TIME; cfg.preferred_lft = INFINITY_LIFE_TIME; expires = 0; flags = 0; if (tb[IFA_CACHEINFO]) { struct ifa_cacheinfo *ci; ci = nla_data(tb[IFA_CACHEINFO]); cfg.valid_lft = ci->ifa_valid; cfg.preferred_lft = ci->ifa_prefered; if (!cfg.valid_lft || cfg.preferred_lft > cfg.valid_lft) { NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid"); return -EINVAL; } timeout = addrconf_timeout_fixup(cfg.valid_lft, HZ); if (addrconf_finite_timeout(timeout)) { cfg.ifa_flags &= ~IFA_F_PERMANENT; cfg.valid_lft = timeout; expires = jiffies_to_clock_t(timeout * HZ); flags = RTF_EXPIRES; } timeout = addrconf_timeout_fixup(cfg.preferred_lft, HZ); if (addrconf_finite_timeout(timeout)) { if (timeout == 0) cfg.ifa_flags |= IFA_F_DEPRECATED; cfg.preferred_lft = timeout; } } rtnl_net_lock(net); dev = __dev_get_by_index(net, ifm->ifa_index); if (!dev) { NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface"); err = -ENODEV; goto unlock; } idev = ipv6_find_idev(dev); if (IS_ERR(idev)) { err = PTR_ERR(idev); goto unlock; } if (!ipv6_allow_optimistic_dad(net, idev)) cfg.ifa_flags &= ~IFA_F_OPTIMISTIC; if (cfg.ifa_flags & IFA_F_NODAD && cfg.ifa_flags & IFA_F_OPTIMISTIC) { NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive"); err = -EINVAL; goto unlock; } ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1); if (!ifa) { /* * It would be best to check for !NLM_F_CREATE here but * userspace already relies on not having to provide this. 
*/ err = inet6_addr_add(net, dev, &cfg, expires, flags, extack); goto unlock; } if (nlh->nlmsg_flags & NLM_F_EXCL || !(nlh->nlmsg_flags & NLM_F_REPLACE)) { NL_SET_ERR_MSG_MOD(extack, "address already assigned"); err = -EEXIST; } else { err = inet6_addr_modify(net, ifa, &cfg, expires, flags); } in6_ifa_put(ifa); unlock: rtnl_net_unlock(net); return err; } static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags, u8 scope, int ifindex) { struct ifaddrmsg *ifm; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_INET6; ifm->ifa_prefixlen = prefixlen; ifm->ifa_flags = flags; ifm->ifa_scope = scope; ifm->ifa_index = ifindex; } static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp, unsigned long tstamp, u32 preferred, u32 valid) { struct ifa_cacheinfo ci; ci.cstamp = cstamp_delta(cstamp); ci.tstamp = cstamp_delta(tstamp); ci.ifa_prefered = preferred; ci.ifa_valid = valid; return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci); } static inline int rt_scope(int ifa_scope) { if (ifa_scope & IFA_HOST) return RT_SCOPE_HOST; else if (ifa_scope & IFA_LINK) return RT_SCOPE_LINK; else if (ifa_scope & IFA_SITE) return RT_SCOPE_SITE; else return RT_SCOPE_UNIVERSE; } static inline int inet6_ifaddr_msgsize(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(16) /* IFA_LOCAL */ + nla_total_size(16) /* IFA_ADDRESS */ + nla_total_size(sizeof(struct ifa_cacheinfo)) + nla_total_size(4) /* IFA_FLAGS */ + nla_total_size(1) /* IFA_PROTO */ + nla_total_size(4) /* IFA_RT_PRIORITY */; } static int inet6_fill_ifaddr(struct sk_buff *skb, const struct inet6_ifaddr *ifa, struct inet6_fill_args *args) { struct nlmsghdr *nlh; u32 preferred, valid; u32 flags, priority; u8 proto; nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; flags = READ_ONCE(ifa->flags); put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), ifa->idev->dev->ifindex); if (args->netnsid >= 0 && nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) goto error; preferred = READ_ONCE(ifa->prefered_lft); valid = READ_ONCE(ifa->valid_lft); if (!((flags & IFA_F_PERMANENT) && (preferred == INFINITY_LIFE_TIME))) { if (preferred != INFINITY_LIFE_TIME) { long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ; if (preferred > tval) preferred -= tval; else preferred = 0; if (valid != INFINITY_LIFE_TIME) { if (valid > tval) valid -= tval; else valid = 0; } } } else { preferred = INFINITY_LIFE_TIME; valid = INFINITY_LIFE_TIME; } if (!ipv6_addr_any(&ifa->peer_addr)) { if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 || nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0) goto error; } else { if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0) goto error; } priority = READ_ONCE(ifa->rt_priority); if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority)) goto error; if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp), preferred, valid) < 0) goto error; if (nla_put_u32(skb, IFA_FLAGS, flags) < 0) goto error; proto = READ_ONCE(ifa->ifa_proto); if (proto && nla_put_u8(skb, IFA_PROTO, proto)) goto error; nlmsg_end(skb, nlh); return 0; error: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } int inet6_fill_ifmcaddr(struct sk_buff *skb, const struct ifmcaddr6 *ifmca, struct inet6_fill_args *args) { int ifindex = ifmca->idev->dev->ifindex; u8 scope = RT_SCOPE_UNIVERSE; struct nlmsghdr *nlh; if (!args->force_rt_scope_universe && ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) scope = RT_SCOPE_SITE; nlh = nlmsg_put(skb, args->portid, args->seq, 
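/*
 * Editor's note -- illustrative addition, not part of the original source.
 * inet6_fill_ifaddr() above reports remaining lifetimes: unless the
 * address is permanent with an infinite preferred lifetime, the seconds
 * elapsed since tstamp are subtracted and the result is clamped at zero,
 * while INFINITY_LIFE_TIME passes through untouched (valid_lft is only
 * adjusted when preferred_lft is finite). A simplified per-field sketch
 * of that arithmetic:
 *
 *	#include <stdint.h>
 *
 *	#define INFINITY_LIFE_TIME 0xFFFFFFFFU	// same value the kernel uses
 *
 *	static uint32_t remaining_lft(uint32_t configured, uint32_t elapsed)
 *	{
 *		if (configured == INFINITY_LIFE_TIME)
 *			return INFINITY_LIFE_TIME;
 *		return configured > elapsed ? configured - elapsed : 0;
 *	}
 */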
args->event, sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; if (args->netnsid >= 0 && nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 || put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp), INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } nlmsg_end(skb, nlh); return 0; } int inet6_fill_ifacaddr(struct sk_buff *skb, const struct ifacaddr6 *ifaca, struct inet6_fill_args *args) { struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt); int ifindex = dev ? dev->ifindex : 1; u8 scope = RT_SCOPE_UNIVERSE; struct nlmsghdr *nlh; if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE) scope = RT_SCOPE_SITE; nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; if (args->netnsid >= 0 && nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 || put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp), INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } nlmsg_end(skb, nlh); return 0; } /* called with rcu_read_lock() */ static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb, struct netlink_callback *cb, int *s_ip_idx, struct inet6_fill_args *fillargs) { const struct ifmcaddr6 *ifmca; const struct ifacaddr6 *ifaca; int ip_idx = 0; int err = 0; switch (fillargs->type) { case UNICAST_ADDR: { const struct inet6_ifaddr *ifa; fillargs->event = RTM_NEWADDR; /* unicast address incl. temp addr */ list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) { if (ip_idx < *s_ip_idx) goto next; err = inet6_fill_ifaddr(skb, ifa, fillargs); if (err < 0) break; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); next: ip_idx++; } break; } case MULTICAST_ADDR: fillargs->event = RTM_GETMULTICAST; /* multicast address */ for (ifmca = rcu_dereference(idev->mc_list); ifmca; ifmca = rcu_dereference(ifmca->next), ip_idx++) { if (ip_idx < *s_ip_idx) continue; err = inet6_fill_ifmcaddr(skb, ifmca, fillargs); if (err < 0) break; } break; case ANYCAST_ADDR: fillargs->event = RTM_GETANYCAST; /* anycast address */ for (ifaca = rcu_dereference(idev->ac_list); ifaca; ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) { if (ip_idx < *s_ip_idx) continue; err = inet6_fill_ifacaddr(skb, ifaca, fillargs); if (err < 0) break; } break; default: break; } *s_ip_idx = err ? 
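/*
 * Editor's note -- illustrative addition, not part of the original source.
 * in6_dump_addrs() above makes the dump resumable: entries are counted,
 * anything below the saved *s_ip_idx is skipped, and on a partial fill
 * the current position is stored while a completed pass resets it to
 * zero. A minimal model of that pattern (names are mine):
 *
 *	#include <stdio.h>
 *
 *	static int dump_some(const int *items, int n, int *s_idx, int budget)
 *	{
 *		int idx, emitted = 0;
 *
 *		for (idx = 0; idx < n; idx++) {
 *			if (idx < *s_idx)
 *				continue;	// already delivered earlier
 *			if (emitted == budget) {
 *				*s_idx = idx;	// resume here next time
 *				return 1;	// more to do
 *			}
 *			printf("item %d\n", items[idx]);
 *			emitted++;
 *		}
 *		*s_idx = 0;	// finished: reset for the next dump
 *		return 0;
 *	}
 */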
ip_idx : 0; return err; } static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh, struct inet6_fill_args *fillargs, struct net **tgt_net, struct sock *sk, struct netlink_callback *cb) { struct netlink_ext_ack *extack = cb->extack; struct nlattr *tb[IFA_MAX+1]; struct ifaddrmsg *ifm; int err, i; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request"); return -EINVAL; } ifm = nlmsg_data(nlh); if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request"); return -EINVAL; } fillargs->ifindex = ifm->ifa_index; if (fillargs->ifindex) { cb->answer_flags |= NLM_F_DUMP_FILTERED; fillargs->flags |= NLM_F_DUMP_FILTERED; } err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, extack); if (err < 0) return err; for (i = 0; i <= IFA_MAX; ++i) { if (!tb[i]) continue; if (i == IFA_TARGET_NETNSID) { struct net *net; fillargs->netnsid = nla_get_s32(tb[i]); net = rtnl_get_net_ns_capable(sk, fillargs->netnsid); if (IS_ERR(net)) { fillargs->netnsid = -1; NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id"); return PTR_ERR(net); } *tgt_net = net; } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request"); return -EINVAL; } } return 0; } static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, enum addr_type_t type) { struct net *tgt_net = sock_net(skb->sk); const struct nlmsghdr *nlh = cb->nlh; struct inet6_fill_args fillargs = { .portid = NETLINK_CB(cb->skb).portid, .seq = cb->nlh->nlmsg_seq, .flags = NLM_F_MULTI, .netnsid = -1, .type = type, .force_rt_scope_universe = false, }; struct { unsigned long ifindex; int ip_idx; } *ctx = (void *)cb->ctx; struct net_device *dev; struct inet6_dev *idev; int err = 0; rcu_read_lock(); if (cb->strict_check) { err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net, skb->sk, cb); if (err < 0) goto done; err = 0; if (fillargs.ifindex) { dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex); if (!dev) { err = -ENODEV; goto done; } idev = __in6_dev_get(dev); if (idev) err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx, &fillargs); goto done; } } cb->seq = inet6_base_seq(tgt_net); for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { idev = __in6_dev_get(dev); if (!idev) continue; err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx, &fillargs); if (err < 0) goto done; } done: rcu_read_unlock(); if (fillargs.netnsid >= 0) put_net(tgt_net); return err; } static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { enum addr_type_t type = UNICAST_ADDR; return inet6_dump_addr(skb, cb, type); } static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) { enum addr_type_t type = MULTICAST_ADDR; return inet6_dump_addr(skb, cb, type); } static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) { enum addr_type_t type = ANYCAST_ADDR; return inet6_dump_addr(skb, cb, type); } static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb, const struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) { struct ifaddrmsg *ifm; int i, err; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request"); return -EINVAL; } if (!netlink_strict_get_check(skb)) return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, extack); ifm = nlmsg_data(nlh); if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { 
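/*
 * Editor's note -- illustrative addition, not part of the original source.
 * The inet6_dump_addr()/inet6_dump_ifaddr() path above answers
 * RTM_GETADDR dump requests. A minimal self-contained userspace client
 * that requests such a dump over NETLINK_ROUTE and prints every
 * IFA_ADDRESS attribute (error handling kept to a minimum):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *	#include <linux/if_addr.h>
 *
 *	int main(void)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct ifaddrmsg ifa;
 *		} req = {
 *			.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
 *			.nlh.nlmsg_type  = RTM_GETADDR,
 *			.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *			.nlh.nlmsg_seq   = 1,
 *			.ifa.ifa_family  = AF_INET6,
 *		};
 *		char buf[16384], str[INET6_ADDRSTRLEN];
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
 *			return 1;
 *		for (;;) {
 *			int len = recv(fd, buf, sizeof(buf), 0);
 *			struct nlmsghdr *nh;
 *
 *			if (len <= 0)
 *				break;
 *			for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
 *			     nh = NLMSG_NEXT(nh, len)) {
 *				struct ifaddrmsg *ifa;
 *				struct rtattr *rta;
 *				int alen;
 *
 *				if (nh->nlmsg_type == NLMSG_DONE)
 *					goto out;
 *				if (nh->nlmsg_type != RTM_NEWADDR)
 *					continue;
 *				ifa = NLMSG_DATA(nh);
 *				for (rta = IFA_RTA(ifa), alen = IFA_PAYLOAD(nh);
 *				     RTA_OK(rta, alen); rta = RTA_NEXT(rta, alen)) {
 *					if (rta->rta_type != IFA_ADDRESS)
 *						continue;
 *					inet_ntop(AF_INET6, RTA_DATA(rta), str, sizeof(str));
 *					printf("ifindex %d: %s/%d\n",
 *					       (int)ifa->ifa_index, str, ifa->ifa_prefixlen);
 *				}
 *			}
 *		}
 *	out:
 *		close(fd);
 *		return 0;
 *	}
 */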
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request"); return -EINVAL; } err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, extack); if (err) return err; for (i = 0; i <= IFA_MAX; i++) { if (!tb[i]) continue; switch (i) { case IFA_TARGET_NETNSID: case IFA_ADDRESS: case IFA_LOCAL: break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request"); return -EINVAL; } } return 0; } static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *tgt_net = sock_net(in_skb->sk); struct inet6_fill_args fillargs = { .portid = NETLINK_CB(in_skb).portid, .seq = nlh->nlmsg_seq, .event = RTM_NEWADDR, .flags = 0, .netnsid = -1, .force_rt_scope_universe = false, }; struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; struct in6_addr *addr = NULL, *peer; struct net_device *dev = NULL; struct inet6_ifaddr *ifa; struct sk_buff *skb; int err; err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack); if (err < 0) return err; if (tb[IFA_TARGET_NETNSID]) { fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk, fillargs.netnsid); if (IS_ERR(tgt_net)) return PTR_ERR(tgt_net); } addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); if (!addr) { err = -EINVAL; goto errout; } ifm = nlmsg_data(nlh); if (ifm->ifa_index) dev = dev_get_by_index(tgt_net, ifm->ifa_index); ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1); if (!ifa) { err = -EADDRNOTAVAIL; goto errout; } skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL); if (!skb) { err = -ENOBUFS; goto errout_ifa; } err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout_ifa; } err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid); errout_ifa: in6_ifa_put(ifa); errout: dev_put(dev); if (fillargs.netnsid >= 0) put_net(tgt_net); return err; } static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) { struct sk_buff *skb; struct net *net = dev_net(ifa->idev->dev); struct inet6_fill_args fillargs = { .portid = 0, .seq = 0, .event = event, .flags = 0, .netnsid = -1, .force_rt_scope_universe = false, }; int err = -ENOBUFS; skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); if (!skb) goto errout; err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); } static void ipv6_store_devconf(const struct ipv6_devconf *cnf, __s32 *array, int bytes) { BUG_ON(bytes < (DEVCONF_MAX * 4)); memset(array, 0, bytes); array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding); array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit); array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6); array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra); array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects); array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf); array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits); array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits); array[DEVCONF_RTR_SOLICIT_INTERVAL] = jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval)); array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] = jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval)); array[DEVCONF_RTR_SOLICIT_DELAY] = 
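/*
 * Editor's note -- illustrative addition, not part of the original source.
 * inet6_ifa_notify() above multicasts RTM_NEWADDR/RTM_DELADDR messages to
 * the RTNLGRP_IPV6_IFADDR group. A minimal userspace listener that joins
 * the corresponding legacy bitmask group and reports the events:
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_nl sa = {
 *			.nl_family = AF_NETLINK,
 *			.nl_groups = RTMGRP_IPV6_IFADDR,
 *		};
 *		char buf[8192];
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *			return 1;
 *		for (;;) {
 *			int len = recv(fd, buf, sizeof(buf), 0);
 *			struct nlmsghdr *nh;
 *
 *			if (len <= 0)
 *				break;
 *			for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
 *			     nh = NLMSG_NEXT(nh, len)) {
 *				if (nh->nlmsg_type == RTM_NEWADDR)
 *					printf("IPv6 address added or updated\n");
 *				else if (nh->nlmsg_type == RTM_DELADDR)
 *					printf("IPv6 address removed\n");
 *			}
 *		}
 *		return 0;
 *	}
 */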
jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay)); array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version); array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] = jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval)); array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] = jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval)); array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr); array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft); array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft); array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry); array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor); array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses); array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr); array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric); array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = READ_ONCE(cnf->accept_ra_min_hop_limit); array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo); #ifdef CONFIG_IPV6_ROUTER_PREF array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref); array[DEVCONF_RTR_PROBE_INTERVAL] = jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval)); #ifdef CONFIG_IPV6_ROUTE_INFO array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = READ_ONCE(cnf->accept_ra_rt_info_min_plen); array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = READ_ONCE(cnf->accept_ra_rt_info_max_plen); #endif #endif array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp); array[DEVCONF_ACCEPT_SOURCE_ROUTE] = READ_ONCE(cnf->accept_source_route); #ifdef CONFIG_IPV6_OPTIMISTIC_DAD array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad); array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic); #endif #ifdef CONFIG_IPV6_MROUTE array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding); #endif array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6); array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad); array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao); array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify); array[DEVCONF_SUPPRESS_FRAG_NDISC] = READ_ONCE(cnf->suppress_frag_ndisc); array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = READ_ONCE(cnf->accept_ra_from_local); array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu); array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = READ_ONCE(cnf->ignore_routes_with_linkdown); /* we omit DEVCONF_STABLE_SECRET for now */ array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only); array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = READ_ONCE(cnf->drop_unicast_in_l2_multicast); array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na); array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down); array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled); #ifdef CONFIG_IPV6_SEG6_HMAC array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac); #endif array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad); array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode); array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy); array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass); array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled); array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled); array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id); array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide); array[DEVCONF_NDISC_EVICT_NOCARRIER] = READ_ONCE(cnf->ndisc_evict_nocarrier); array[DEVCONF_ACCEPT_UNTRACKED_NA] = READ_ONCE(cnf->accept_untracked_na); 
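/*
 * Editor's note -- illustrative addition, not part of the original source.
 * ipv6_store_devconf() flattens struct ipv6_devconf into an array of
 * 32-bit values indexed by the DEVCONF_* constants exported in the UAPI;
 * userspace receives it as the IFLA_INET6_CONF attribute. A sketch that
 * prints a few entries from an already-extracted payload (function name
 * is mine):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <linux/ipv6.h>	// DEVCONF_* indices
 *
 *	static void print_devconf(const int32_t *conf, int count)
 *	{
 *		if (count > DEVCONF_FORWARDING)
 *			printf("forwarding   = %d\n", conf[DEVCONF_FORWARDING]);
 *		if (count > DEVCONF_HOPLIMIT)
 *			printf("hop_limit    = %d\n", conf[DEVCONF_HOPLIMIT]);
 *		if (count > DEVCONF_DISABLE_IPV6)
 *			printf("disable_ipv6 = %d\n", conf[DEVCONF_DISABLE_IPV6]);
 *	}
 */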
array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft); } static inline size_t inet6_ifla6_size(void) { return nla_total_size(4) /* IFLA_INET6_FLAGS */ + nla_total_size(sizeof(struct ifla_cacheinfo)) + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */ + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */ + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */ + nla_total_size(4) /* IFLA_INET6_RA_MTU */ + 0; } static inline size_t inet6_if_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */ } static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib, int bytes) { int i; int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX; BUG_ON(pad < 0); /* Use put_unaligned() because stats may not be aligned for u64. */ put_unaligned(ICMP6_MIB_MAX, &stats[0]); for (i = 1; i < ICMP6_MIB_MAX; i++) put_unaligned(atomic_long_read(&mib[i]), &stats[i]); memset(&stats[ICMP6_MIB_MAX], 0, pad); } static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib, int bytes, size_t syncpoff) { int i, c; u64 buff[IPSTATS_MIB_MAX]; int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX; BUG_ON(pad < 0); memset(buff, 0, sizeof(buff)); buff[0] = IPSTATS_MIB_MAX; for_each_possible_cpu(c) { for (i = 1; i < IPSTATS_MIB_MAX; i++) buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff); } memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64)); memset(&stats[IPSTATS_MIB_MAX], 0, pad); } static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) { switch (attrtype) { case IFLA_INET6_STATS: __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes, offsetof(struct ipstats_mib, syncp)); break; case IFLA_INET6_ICMP6STATS: __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes); break; } } static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, u32 ext_filter_mask) { struct ifla_cacheinfo ci; struct nlattr *nla; u32 ra_mtu; if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags))) goto nla_put_failure; ci.max_reasm_len = IPV6_MAXPLEN; ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp)); ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME)); if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci)) goto nla_put_failure; nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); if (!nla) goto nla_put_failure; ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla)); /* XXX - MC not implemented */ if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS) return 0; nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); if (!nla) goto nla_put_failure; snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); if (!nla) goto nla_put_failure; snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); if (!nla) goto nla_put_failure; read_lock_bh(&idev->lock); memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla)); read_unlock_bh(&idev->lock); if (nla_put_u8(skb, 
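/*
 * Editor's note -- illustrative addition, not part of the original source.
 * __snmp6_fill_stats64()/__snmp6_fill_statsdev() above emit a u64 array
 * whose element 0 holds the number of counters (IPSTATS_MIB_MAX or
 * ICMP6_MIB_MAX) followed by the counters themselves; the destination may
 * be unaligned, hence put_unaligned(). A userspace decode sketch using
 * memcpy() for alignment-safe reads (function name is mine):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void print_mib(const unsigned char *payload, size_t len)
 *	{
 *		uint64_t nr, val;
 *		size_t i;
 *
 *		if (len < sizeof(nr))
 *			return;
 *		memcpy(&nr, payload, sizeof(nr));
 *		for (i = 1; i < nr && (i + 1) * sizeof(val) <= len; i++) {
 *			memcpy(&val, payload + i * sizeof(val), sizeof(val));
 *			printf("counter[%zu] = %llu\n", i, (unsigned long long)val);
 *		}
 *	}
 */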
IFLA_INET6_ADDR_GEN_MODE, READ_ONCE(idev->cnf.addr_gen_mode))) goto nla_put_failure; ra_mtu = READ_ONCE(idev->ra_mtu); if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static size_t inet6_get_link_af_size(const struct net_device *dev, u32 ext_filter_mask) { if (!__in6_dev_get(dev)) return 0; return inet6_ifla6_size(); } static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev, u32 ext_filter_mask) { struct inet6_dev *idev = __in6_dev_get(dev); if (!idev) return -ENODATA; if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0) return -EMSGSIZE; return 0; } static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token, struct netlink_ext_ack *extack) { struct inet6_ifaddr *ifp; struct net_device *dev = idev->dev; bool clear_token, update_rs = false; struct in6_addr ll_addr; ASSERT_RTNL(); if (!token) return -EINVAL; if (dev->flags & IFF_LOOPBACK) { NL_SET_ERR_MSG_MOD(extack, "Device is loopback"); return -EINVAL; } if (dev->flags & IFF_NOARP) { NL_SET_ERR_MSG_MOD(extack, "Device does not do neighbour discovery"); return -EINVAL; } if (!ipv6_accept_ra(idev)) { NL_SET_ERR_MSG_MOD(extack, "Router advertisement is disabled on device"); return -EINVAL; } if (READ_ONCE(idev->cnf.rtr_solicits) == 0) { NL_SET_ERR_MSG(extack, "Router solicitation is disabled on device"); return -EINVAL; } write_lock_bh(&idev->lock); BUILD_BUG_ON(sizeof(token->s6_addr) != 16); memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8); write_unlock_bh(&idev->lock); clear_token = ipv6_addr_any(token); if (clear_token) goto update_lft; if (!idev->dead && (idev->if_flags & IF_READY) && !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE | IFA_F_OPTIMISTIC)) { /* If we're not ready, then normal ifup will take care * of this. Otherwise, we need to request our rs here. */ ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters); update_rs = true; } update_lft: write_lock_bh(&idev->lock); if (update_rs) { idev->if_flags |= IF_RS_SENT; idev->rs_interval = rfc3315_s14_backoff_init( READ_ONCE(idev->cnf.rtr_solicit_interval)); idev->rs_probes = 1; addrconf_mod_rs_timer(idev, idev->rs_interval); } /* Well, that's kinda nasty ... 
*/ list_for_each_entry(ifp, &idev->addr_list, if_list) { spin_lock(&ifp->lock); if (ifp->tokenized) { ifp->valid_lft = 0; ifp->prefered_lft = 0; } spin_unlock(&ifp->lock); } write_unlock_bh(&idev->lock); inet6_ifinfo_notify(RTM_NEWLINK, idev); addrconf_verify_rtnl(dev_net(dev)); return 0; } static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = { [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 }, [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) }, [IFLA_INET6_RA_MTU] = { .type = NLA_REJECT, .reject_message = "IFLA_INET6_RA_MTU can not be set" }, }; static int check_addr_gen_mode(int mode) { if (mode != IN6_ADDR_GEN_MODE_EUI64 && mode != IN6_ADDR_GEN_MODE_NONE && mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY && mode != IN6_ADDR_GEN_MODE_RANDOM) return -EINVAL; return 1; } static int check_stable_privacy(struct inet6_dev *idev, struct net *net, int mode) { if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY && !idev->cnf.stable_secret.initialized && !net->ipv6.devconf_dflt->stable_secret.initialized) return -EINVAL; return 1; } static int inet6_validate_link_af(const struct net_device *dev, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct nlattr *tb[IFLA_INET6_MAX + 1]; struct inet6_dev *idev = NULL; int err; if (dev) { idev = __in6_dev_get(dev); if (!idev) return -EAFNOSUPPORT; } err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, inet6_af_policy, extack); if (err) return err; if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE]) return -EINVAL; if (tb[IFLA_INET6_ADDR_GEN_MODE]) { u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]); if (check_addr_gen_mode(mode) < 0) return -EINVAL; if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0) return -EINVAL; } return 0; } static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct inet6_dev *idev = __in6_dev_get(dev); struct nlattr *tb[IFLA_INET6_MAX + 1]; int err; if (!idev) return -EAFNOSUPPORT; if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) return -EINVAL; if (tb[IFLA_INET6_TOKEN]) { err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]), extack); if (err) return err; } if (tb[IFLA_INET6_ADDR_GEN_MODE]) { u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]); WRITE_ONCE(idev->cnf.addr_gen_mode, mode); } return 0; } static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, u32 portid, u32 seq, int event, unsigned int flags) { struct net_device *dev = idev->dev; struct ifinfomsg *hdr; struct nlmsghdr *nlh; int ifindex, iflink; void *protoinfo; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags); if (!nlh) return -EMSGSIZE; hdr = nlmsg_data(nlh); hdr->ifi_family = AF_INET6; hdr->__ifi_pad = 0; hdr->ifi_type = dev->type; ifindex = READ_ONCE(dev->ifindex); hdr->ifi_index = ifindex; hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; iflink = dev_get_iflink(dev); if (nla_put_string(skb, IFLA_IFNAME, dev->name) || (dev->addr_len && nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) || (ifindex != iflink && nla_put_u32(skb, IFLA_LINK, iflink)) || nla_put_u8(skb, IFLA_OPERSTATE, netif_running(dev) ? 
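/*
 * Editor's note -- illustrative addition, not part of the original source.
 * check_addr_gen_mode() above accepts exactly the four address generation
 * modes exported in the UAPI. A one-line userspace mirror (function name
 * is mine):
 *
 *	#include <stdbool.h>
 *	#include <linux/if_link.h>	// IN6_ADDR_GEN_MODE_*
 *
 *	static bool addr_gen_mode_valid(unsigned int mode)
 *	{
 *		return mode == IN6_ADDR_GEN_MODE_EUI64 ||
 *		       mode == IN6_ADDR_GEN_MODE_NONE ||
 *		       mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
 *		       mode == IN6_ADDR_GEN_MODE_RANDOM;
 *	}
 */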
READ_ONCE(dev->operstate) : IF_OPER_DOWN)) goto nla_put_failure; protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO); if (!protoinfo) goto nla_put_failure; if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0) goto nla_put_failure; nla_nest_end(skb, protoinfo); nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct ifinfomsg *ifm; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request"); return -EINVAL; } if (nlmsg_attrlen(nlh, sizeof(*ifm))) { NL_SET_ERR_MSG_MOD(extack, "Invalid data after header"); return -EINVAL; } ifm = nlmsg_data(nlh); if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || ifm->ifi_change || ifm->ifi_index) { NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request"); return -EINVAL; } return 0; } static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct { unsigned long ifindex; } *ctx = (void *)cb->ctx; struct net_device *dev; struct inet6_dev *idev; int err; /* only requests using strict checking can pass data to * influence the dump */ if (cb->strict_check) { err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack); if (err < 0) return err; } err = 0; rcu_read_lock(); for_each_netdev_dump(net, dev, ctx->ifindex) { idev = __in6_dev_get(dev); if (!idev) continue; err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); if (err < 0) break; } rcu_read_unlock(); return err; } void inet6_ifinfo_notify(int event, struct inet6_dev *idev) { struct sk_buff *skb; struct net *net = dev_net(idev->dev); int err = -ENOBUFS; skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); if (!skb) goto errout; err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err); } static inline size_t inet6_prefix_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct prefixmsg)) + nla_total_size(sizeof(struct in6_addr)) + nla_total_size(sizeof(struct prefix_cacheinfo)); } static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, struct prefix_info *pinfo, u32 portid, u32 seq, int event, unsigned int flags) { struct prefixmsg *pmsg; struct nlmsghdr *nlh; struct prefix_cacheinfo ci; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags); if (!nlh) return -EMSGSIZE; pmsg = nlmsg_data(nlh); pmsg->prefix_family = AF_INET6; pmsg->prefix_pad1 = 0; pmsg->prefix_pad2 = 0; pmsg->prefix_ifindex = idev->dev->ifindex; pmsg->prefix_len = pinfo->prefix_len; pmsg->prefix_type = pinfo->type; pmsg->prefix_pad3 = 0; pmsg->prefix_flags = pinfo->flags; if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix)) goto nla_put_failure; ci.preferred_time = ntohl(pinfo->prefered); ci.valid_time = ntohl(pinfo->valid); if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci)) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static void inet6_prefix_notify(int event, struct inet6_dev *idev, struct prefix_info *pinfo) { struct sk_buff *skb; struct net *net = dev_net(idev->dev); int err = -ENOBUFS; skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); if (!skb) goto errout; 
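/*
 * Editor's note -- illustrative addition, not part of the original source.
 * inet6_fill_ifinfo() above nests the AF_INET6 payload under
 * IFLA_PROTINFO; decoding it in userspace is just another rtattr walk
 * over the nested data (IFLA_INET6_FLAGS, IFLA_INET6_CONF,
 * IFLA_INET6_STATS, ...). Sketch, with a caller-supplied callback:
 *
 *	#include <linux/rtnetlink.h>
 *
 *	static void walk_protinfo(const struct rtattr *protinfo,
 *				  void (*cb)(int type, const void *data, int len))
 *	{
 *		const struct rtattr *rta = RTA_DATA(protinfo);
 *		int len = RTA_PAYLOAD(protinfo);
 *
 *		for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len))
 *			cb(rta->rta_type, RTA_DATA(rta), RTA_PAYLOAD(rta));
 *	}
 */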
err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err); } static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) { struct net *net = dev_net(ifp->idev->dev); if (event) ASSERT_RTNL(); inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); switch (event) { case RTM_NEWADDR: /* * If the address was optimistic we inserted the route at the * start of our DAD process, so we don't need to do it again. * If the device was taken down in the middle of the DAD * cycle there is a race where we could get here without a * host route, so nothing to insert. That will be fixed when * the device is brought up. */ if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) { ip6_ins_rt(net, ifp->rt); } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) { pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n", &ifp->addr, ifp->idev->dev->name); } if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); if (!ipv6_addr_any(&ifp->peer_addr)) addrconf_prefix_route(&ifp->peer_addr, 128, ifp->rt_priority, ifp->idev->dev, 0, 0, GFP_ATOMIC); break; case RTM_DELADDR: if (ifp->idev->cnf.forwarding) addrconf_leave_anycast(ifp); addrconf_leave_solict(ifp->idev, &ifp->addr); if (!ipv6_addr_any(&ifp->peer_addr)) { struct fib6_info *rt; rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, ifp->idev->dev, 0, 0, false); if (rt) ip6_del_rt(net, rt, false); } if (ifp->rt) { ip6_del_rt(net, ifp->rt, false); ifp->rt = NULL; } rt_genid_bump_ipv6(net); break; } atomic_inc(&net->ipv6.dev_addr_genid); } static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) { if (likely(ifp->idev->dead == 0)) __ipv6_ifa_notify(event, ifp); } #ifdef CONFIG_SYSCTL static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; struct ctl_table lctl; int ret; /* * ctl->data points to idev->cnf.forwarding, we should * not modify it until we get the rtnl lock. */ lctl = *ctl; lctl.data = &val; ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); if (write) ret = addrconf_fixup_forwarding(ctl, valp, val); if (ret) *ppos = pos; return ret; } static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct inet6_dev *idev = ctl->extra1; int min_mtu = IPV6_MIN_MTU; struct ctl_table lctl; lctl = *ctl; lctl.extra1 = &min_mtu; lctl.extra2 = idev ? 
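/*
 * Editor's note -- illustrative addition, not part of the original source.
 * The addrconf_sysctl_* handlers above back the knobs under
 * /proc/sys/net/ipv6/conf/<ifname|all|default>/; from userspace a plain
 * write is enough, with "all" and "default" propagating as implemented
 * here. Example helper, e.g. set_ipv6_conf("all", "forwarding", "1")
 * (function name is mine):
 *
 *	#include <stdio.h>
 *
 *	static int set_ipv6_conf(const char *dev, const char *knob,
 *				 const char *val)
 *	{
 *		char path[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/proc/sys/net/ipv6/conf/%s/%s", dev, knob);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 */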
&idev->dev->mtu : NULL; return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos); } static void dev_disable_change(struct inet6_dev *idev) { struct netdev_notifier_info info; if (!idev || !idev->dev) return; netdev_notifier_info_init(&info, idev->dev); if (idev->cnf.disable_ipv6) addrconf_notify(NULL, NETDEV_DOWN, &info); else addrconf_notify(NULL, NETDEV_UP, &info); } static void addrconf_disable_change(struct net *net, __s32 newf) { struct net_device *dev; struct inet6_dev *idev; for_each_netdev(net, dev) { idev = __in6_dev_get_rtnl_net(dev); if (idev) { int changed = (!idev->cnf.disable_ipv6) ^ (!newf); WRITE_ONCE(idev->cnf.disable_ipv6, newf); if (changed) dev_disable_change(idev); } } } static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf) { struct net *net = (struct net *)table->extra2; int old; if (p == &net->ipv6.devconf_dflt->disable_ipv6) { WRITE_ONCE(*p, newf); return 0; } if (!rtnl_net_trylock(net)) return restart_syscall(); old = *p; WRITE_ONCE(*p, newf); if (p == &net->ipv6.devconf_all->disable_ipv6) { WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf); addrconf_disable_change(net, newf); } else if ((!newf) ^ (!old)) { dev_disable_change((struct inet6_dev *)table->extra1); } rtnl_net_unlock(net); return 0; } static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; struct ctl_table lctl; int ret; /* * ctl->data points to idev->cnf.disable_ipv6, we should * not modify it until we get the rtnl lock. */ lctl = *ctl; lctl.data = &val; ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); if (write) ret = addrconf_disable_ipv6(ctl, valp, val); if (ret) *ppos = pos; return ret; } static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int ret; int old, new; old = *valp; ret = proc_dointvec(ctl, write, buffer, lenp, ppos); new = *valp; if (write && old != new) { struct net *net = ctl->extra2; if (!rtnl_net_trylock(net)) return restart_syscall(); if (valp == &net->ipv6.devconf_dflt->proxy_ndp) { inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_PROXY_NEIGH, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); } else if (valp == &net->ipv6.devconf_all->proxy_ndp) { inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_PROXY_NEIGH, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); } else { struct inet6_dev *idev = ctl->extra1; inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_PROXY_NEIGH, idev->dev->ifindex, &idev->cnf); } rtnl_net_unlock(net); } return ret; } static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret = 0; u32 new_val; struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; struct net *net = (struct net *)ctl->extra2; struct ctl_table tmp = { .data = &new_val, .maxlen = sizeof(new_val), .mode = ctl->mode, }; if (!rtnl_net_trylock(net)) return restart_syscall(); new_val = *((u32 *)ctl->data); ret = proc_douintvec(&tmp, write, buffer, lenp, ppos); if (ret != 0) goto out; if (write) { if (check_addr_gen_mode(new_val) < 0) { ret = -EINVAL; goto out; } if (idev) { if (check_stable_privacy(idev, net, new_val) < 0) { ret = -EINVAL; goto out; } if (idev->cnf.addr_gen_mode != new_val) { WRITE_ONCE(idev->cnf.addr_gen_mode, new_val); addrconf_init_auto_addrs(idev->dev); } } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) { struct 
net_device *dev; WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val); for_each_netdev(net, dev) { idev = __in6_dev_get_rtnl_net(dev); if (idev && idev->cnf.addr_gen_mode != new_val) { WRITE_ONCE(idev->cnf.addr_gen_mode, new_val); addrconf_init_auto_addrs(idev->dev); } } } WRITE_ONCE(*((u32 *)ctl->data), new_val); } out: rtnl_net_unlock(net); return ret; } static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int err; struct in6_addr addr; char str[IPV6_MAX_STRLEN]; struct ctl_table lctl = *ctl; struct net *net = ctl->extra2; struct ipv6_stable_secret *secret = ctl->data; if (&net->ipv6.devconf_all->stable_secret == ctl->data) return -EIO; lctl.maxlen = IPV6_MAX_STRLEN; lctl.data = str; if (!rtnl_net_trylock(net)) return restart_syscall(); if (!write && !secret->initialized) { err = -EIO; goto out; } err = snprintf(str, sizeof(str), "%pI6", &secret->secret); if (err >= sizeof(str)) { err = -EIO; goto out; } err = proc_dostring(&lctl, write, buffer, lenp, ppos); if (err || !write) goto out; if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) { err = -EIO; goto out; } secret->initialized = true; secret->secret = addr; if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) { struct net_device *dev; for_each_netdev(net, dev) { struct inet6_dev *idev = __in6_dev_get_rtnl_net(dev); if (idev) { WRITE_ONCE(idev->cnf.addr_gen_mode, IN6_ADDR_GEN_MODE_STABLE_PRIVACY); } } } else { struct inet6_dev *idev = ctl->extra1; WRITE_ONCE(idev->cnf.addr_gen_mode, IN6_ADDR_GEN_MODE_STABLE_PRIVACY); } out: rtnl_net_unlock(net); return err; } static int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; struct ctl_table lctl; int ret; /* ctl->data points to idev->cnf.ignore_routes_when_linkdown * we should not modify it until we get the rtnl lock. */ lctl = *ctl; lctl.data = &val; ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); if (write) ret = addrconf_fixup_linkdown(ctl, valp, val); if (ret) *ppos = pos; return ret; } static void addrconf_set_nopolicy(struct rt6_info *rt, int action) { if (rt) { if (action) rt->dst.flags |= DST_NOPOLICY; else rt->dst.flags &= ~DST_NOPOLICY; } } static void addrconf_disable_policy_idev(struct inet6_dev *idev, int val) { struct inet6_ifaddr *ifa; read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { spin_lock(&ifa->lock); if (ifa->rt) { /* host routes only use builtin fib6_nh */ struct fib6_nh *nh = ifa->rt->fib6_nh; int cpu; rcu_read_lock(); ifa->rt->dst_nopolicy = val ? 
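/*
 * Editor's note -- illustrative addition, not part of the original source.
 * addrconf_sysctl_stable_secret() above parses the written value with
 * in6_pton(), i.e. the 128-bit secret is expressed in IPv6 address
 * notation, and writing it switches devices to stable-privacy address
 * generation. A sketch that generates a random secret and writes it to
 * the "default" configuration (path as used by the handler above,
 * function name is mine):
 *
 *	#include <stdio.h>
 *	#include <arpa/inet.h>
 *
 *	static int write_stable_secret(void)
 *	{
 *		unsigned char secret[16];
 *		char str[INET6_ADDRSTRLEN];
 *		FILE *f = fopen("/dev/urandom", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (fread(secret, 1, sizeof(secret), f) != sizeof(secret)) {
 *			fclose(f);
 *			return -1;
 *		}
 *		fclose(f);
 *		inet_ntop(AF_INET6, secret, str, sizeof(str));
 *		f = fopen("/proc/sys/net/ipv6/conf/default/stable_secret", "w");
 *		if (!f)
 *			return -1;
 *		fputs(str, f);
 *		return fclose(f);
 *	}
 */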
true : false; if (nh->rt6i_pcpu) { for_each_possible_cpu(cpu) { struct rt6_info **rtp; rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu); addrconf_set_nopolicy(*rtp, val); } } rcu_read_unlock(); } spin_unlock(&ifa->lock); } read_unlock_bh(&idev->lock); } static int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val) { struct net *net = (struct net *)ctl->extra2; struct inet6_dev *idev; if (valp == &net->ipv6.devconf_dflt->disable_policy) { WRITE_ONCE(*valp, val); return 0; } if (!rtnl_net_trylock(net)) return restart_syscall(); WRITE_ONCE(*valp, val); if (valp == &net->ipv6.devconf_all->disable_policy) { struct net_device *dev; for_each_netdev(net, dev) { idev = __in6_dev_get_rtnl_net(dev); if (idev) addrconf_disable_policy_idev(idev, val); } } else { idev = (struct inet6_dev *)ctl->extra1; addrconf_disable_policy_idev(idev, val); } rtnl_net_unlock(net); return 0; } static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; struct ctl_table lctl; int ret; lctl = *ctl; lctl.data = &val; ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); if (write && (*valp != val)) ret = addrconf_disable_policy(ctl, valp, val); if (ret) *ppos = pos; return ret; } static int minus_one = -1; static const int two_five_five = 255; static u32 ioam6_if_id_max = U16_MAX; static const struct ctl_table addrconf_sysctl[] = { { .procname = "forwarding", .data = &ipv6_devconf.forwarding, .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_forward, }, { .procname = "hop_limit", .data = &ipv6_devconf.hop_limit, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)SYSCTL_ONE, .extra2 = (void *)&two_five_five, }, { .procname = "mtu", .data = &ipv6_devconf.mtu6, .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_mtu, }, { .procname = "accept_ra", .data = &ipv6_devconf.accept_ra, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "accept_redirects", .data = &ipv6_devconf.accept_redirects, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "autoconf", .data = &ipv6_devconf.autoconf, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "dad_transmits", .data = &ipv6_devconf.dad_transmits, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "router_solicitations", .data = &ipv6_devconf.rtr_solicits, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minus_one, }, { .procname = "router_solicitation_interval", .data = &ipv6_devconf.rtr_solicit_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "router_solicitation_max_interval", .data = &ipv6_devconf.rtr_solicit_max_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "router_solicitation_delay", .data = &ipv6_devconf.rtr_solicit_delay, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "force_mld_version", .data = &ipv6_devconf.force_mld_version, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "mldv1_unsolicited_report_interval", .data = &ipv6_devconf.mldv1_unsolicited_report_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, { .procname = "mldv2_unsolicited_report_interval", .data = 
static const struct ctl_table addrconf_sysctl[] = {
	{ .procname = "forwarding", .data = &ipv6_devconf.forwarding,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_forward, },
	{ .procname = "hop_limit", .data = &ipv6_devconf.hop_limit,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax,
	  .extra1 = (void *)SYSCTL_ONE, .extra2 = (void *)&two_five_five, },
	{ .procname = "mtu", .data = &ipv6_devconf.mtu6,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_mtu, },
	{ .procname = "accept_ra", .data = &ipv6_devconf.accept_ra,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "accept_redirects", .data = &ipv6_devconf.accept_redirects,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "autoconf", .data = &ipv6_devconf.autoconf,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "dad_transmits", .data = &ipv6_devconf.dad_transmits,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "router_solicitations", .data = &ipv6_devconf.rtr_solicits,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax,
	  .extra1 = &minus_one, },
	{ .procname = "router_solicitation_interval", .data = &ipv6_devconf.rtr_solicit_interval,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, },
	{ .procname = "router_solicitation_max_interval", .data = &ipv6_devconf.rtr_solicit_max_interval,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, },
	{ .procname = "router_solicitation_delay", .data = &ipv6_devconf.rtr_solicit_delay,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, },
	{ .procname = "force_mld_version", .data = &ipv6_devconf.force_mld_version,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "mldv1_unsolicited_report_interval",
	  .data = &ipv6_devconf.mldv1_unsolicited_report_interval,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, },
	{ .procname = "mldv2_unsolicited_report_interval",
	  .data = &ipv6_devconf.mldv2_unsolicited_report_interval,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, },
	{ .procname = "use_tempaddr", .data = &ipv6_devconf.use_tempaddr,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "temp_valid_lft", .data = &ipv6_devconf.temp_valid_lft,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "temp_prefered_lft", .data = &ipv6_devconf.temp_prefered_lft,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "regen_min_advance", .data = &ipv6_devconf.regen_min_advance,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "regen_max_retry", .data = &ipv6_devconf.regen_max_retry,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "max_desync_factor", .data = &ipv6_devconf.max_desync_factor,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "max_addresses", .data = &ipv6_devconf.max_addresses,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "accept_ra_defrtr", .data = &ipv6_devconf.accept_ra_defrtr,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "ra_defrtr_metric", .data = &ipv6_devconf.ra_defrtr_metric,
	  .maxlen = sizeof(u32), .mode = 0644, .proc_handler = proc_douintvec_minmax,
	  .extra1 = (void *)SYSCTL_ONE, },
	{ .procname = "accept_ra_min_hop_limit", .data = &ipv6_devconf.accept_ra_min_hop_limit,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "accept_ra_min_lft", .data = &ipv6_devconf.accept_ra_min_lft,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "accept_ra_pinfo", .data = &ipv6_devconf.accept_ra_pinfo,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "ra_honor_pio_life", .data = &ipv6_devconf.ra_honor_pio_life,
	  .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax,
	  .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, },
	{ .procname = "ra_honor_pio_pflag", .data = &ipv6_devconf.ra_honor_pio_pflag,
	  .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax,
	  .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, },
#ifdef CONFIG_IPV6_ROUTER_PREF
	{ .procname = "accept_ra_rtr_pref", .data = &ipv6_devconf.accept_ra_rtr_pref,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "router_probe_interval", .data = &ipv6_devconf.rtr_probe_interval,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, },
#ifdef CONFIG_IPV6_ROUTE_INFO
	{ .procname = "accept_ra_rt_info_min_plen", .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "accept_ra_rt_info_max_plen", .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
#endif
#endif
	{ .procname = "proxy_ndp", .data = &ipv6_devconf.proxy_ndp,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_proxy_ndp, },
	{ .procname = "accept_source_route", .data = &ipv6_devconf.accept_source_route,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	{ .procname = "optimistic_dad", .data = &ipv6_devconf.optimistic_dad,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "use_optimistic", .data = &ipv6_devconf.use_optimistic,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
#endif
#ifdef CONFIG_IPV6_MROUTE
	{ .procname = "mc_forwarding", .data = &ipv6_devconf.mc_forwarding,
	  .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, },
#endif
	{ .procname = "disable_ipv6", .data = &ipv6_devconf.disable_ipv6,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_disable, },
	{ .procname = "accept_dad", .data = &ipv6_devconf.accept_dad,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "force_tllao", .data = &ipv6_devconf.force_tllao,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec },
	{ .procname = "ndisc_notify", .data = &ipv6_devconf.ndisc_notify,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec },
	{ .procname = "suppress_frag_ndisc", .data = &ipv6_devconf.suppress_frag_ndisc,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec },
	{ .procname = "accept_ra_from_local", .data = &ipv6_devconf.accept_ra_from_local,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "accept_ra_mtu", .data = &ipv6_devconf.accept_ra_mtu,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "stable_secret", .data = &ipv6_devconf.stable_secret,
	  .maxlen = IPV6_MAX_STRLEN, .mode = 0600, .proc_handler = addrconf_sysctl_stable_secret, },
	{ .procname = "use_oif_addrs_only", .data = &ipv6_devconf.use_oif_addrs_only,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "ignore_routes_with_linkdown", .data = &ipv6_devconf.ignore_routes_with_linkdown,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown, },
	{ .procname = "drop_unicast_in_l2_multicast", .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "drop_unsolicited_na", .data = &ipv6_devconf.drop_unsolicited_na,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "keep_addr_on_down", .data = &ipv6_devconf.keep_addr_on_down,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "seg6_enabled", .data = &ipv6_devconf.seg6_enabled,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
#ifdef CONFIG_IPV6_SEG6_HMAC
	{ .procname = "seg6_require_hmac", .data = &ipv6_devconf.seg6_require_hmac,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
#endif
	{ .procname = "enhanced_dad", .data = &ipv6_devconf.enhanced_dad,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "addr_gen_mode", .data = &ipv6_devconf.addr_gen_mode,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_addr_gen_mode, },
	{ .procname = "disable_policy", .data = &ipv6_devconf.disable_policy,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = addrconf_sysctl_disable_policy, },
	{ .procname = "ndisc_tclass", .data = &ipv6_devconf.ndisc_tclass,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax,
	  .extra1 = (void *)SYSCTL_ZERO, .extra2 = (void *)&two_five_five, },
	{ .procname = "rpl_seg_enabled", .data = &ipv6_devconf.rpl_seg_enabled,
	  .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, },
	{ .procname = "ioam6_enabled", .data = &ipv6_devconf.ioam6_enabled,
	  .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax,
	  .extra1 = (void *)SYSCTL_ZERO, .extra2 = (void *)SYSCTL_ONE, },
	{ .procname = "ioam6_id", .data = &ipv6_devconf.ioam6_id,
	  .maxlen = sizeof(u32), .mode = 0644,